def content(start_date, finish_date, site_id, typ, user):
    sess = f = writer = None
    try:
        sess = Session()
        running_name, finished_name = chellow.dloads.make_names(
            "site_hh_data_" + to_ct(start_date).strftime("%Y%m%d%H%M") +
            ".csv", user)
        f = open(running_name, mode="w", newline="")
        writer = csv.writer(f, lineterminator="\n")
        writer.writerow(
            ("Site Code", "Type", "HH Start Clock-Time") +
            tuple(map(str, range(1, 51))))
        site = Site.get_by_id(sess, site_id)
        line = None
        for hh in site.hh_data(sess, start_date, finish_date):
            hh_start_ct = to_ct(hh["start_date"])
            if (hh_start_ct.hour, hh_start_ct.minute) == (0, 0):
                if line is not None:
                    writer.writerow(line)
                line = [site.code, typ, hh_start_ct.strftime("%Y-%m-%d")]
            line.append(str(hh[typ]))
        if line is not None:
            writer.writerow(line)
    except BaseException:
        msg = traceback.format_exc()
        sys.stderr.write(msg)
        writer.writerow([msg])
    finally:
        if sess is not None:
            sess.close()
        if f is not None:
            f.close()
            os.rename(running_name, finished_name)
def parse_to_date(date_str):
    if len(date_str) == 0:
        return None
    else:
        dt = to_ct(Datetime.strptime(date_str, "%d/%m/%Y"))
        dt += Timedelta(hours=23, minutes=30)
        return to_utc(dt)
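# A minimal usage sketch (values hypothetical): an empty string means "no
# date", while a populated string is treated as a finish date, i.e. the
# last half-hour (23:30 local clock time) of that day, returned in UTC.
#
#   parse_to_date("")            # -> None
#   parse_to_date("30/06/2020")  # -> UTC instant of 2020-06-30 23:30 CT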
def get_date(row, name, datemode):
    val = get_value(row, name)
    if val == '':
        return None
    else:
        dt_raw = Datetime(*xldate_as_tuple(val, datemode))
        return to_utc(to_ct(dt_raw))
def run(self):
    while not self.stopped.isSet():
        if self.lock.acquire(False):
            sess = self.global_alert = None
            try:
                sess = Session()
                self.log("Starting to check bmarketidx.")
                contract = Contract.get_non_core_by_name(sess, "bmarketidx")
                latest_rs = (
                    sess.query(RateScript)
                    .filter(RateScript.contract_id == contract.id)
                    .order_by(RateScript.start_date.desc())
                    .first()
                )
                start_ct = to_ct(latest_rs.start_date)
                months = list(
                    c_months_u(
                        start_year=start_ct.year,
                        start_month=start_ct.month,
                        months=2,
                    )
                )
                month_start, month_finish = months[1]
                now = utc_datetime_now()
                if now > month_finish:
                    _process_month(
                        self.log,
                        sess,
                        contract,
                        latest_rs,
                        month_start,
                        month_finish,
                    )
            except BaseException:
                self.log(f"Outer problem {traceback.format_exc()}")
                sess.rollback()
                self.global_alert = (
                    "There's a problem with the "
                    "bmarketidx automatic importer."
                )
            finally:
                self.lock.release()
                self.log("Finished checking bmarketidx rates.")
                if sess is not None:
                    sess.close()
        self.going.wait(2 * 60 * 60)
        self.going.clear()
def _process_month(log_f, sess, contract, latest_rs, month_start,
                   month_finish):
    latest_rs_id = latest_rs.id
    log_f(
        f"Checking to see if data is available from {hh_format(month_start)} "
        f"to {hh_format(month_finish)} on BMRS."
    )
    rates = {}
    month_finish_ct = to_ct(month_finish)
    for d in range(month_finish_ct.day):
        day_ct = ct_datetime(
            month_finish_ct.year, month_finish_ct.month, d + 1)
        params = {
            "q": f"ajax/alldata/MID/Date,SP,Provider,Price,Volume/NULL/"
            f'{day_ct.strftime("%Y-%m-%d")}/ALL'
        }
        r = requests.get(
            "https://www.bmreports.com/bmrs/", params=params, timeout=60,
            verify=False
        )
        res = r.json()
        for h in res["arr"]:
            dt = to_utc(day_ct + (int(h["settlementPeriod"]) - 1) * HH)
            try:
                rate = rates[dt]
            except KeyError:
                rate = rates[dt] = {}
            rate[h["DataProviderId"]] = (
                Decimal(h["MarketIndexPrice"]) / Decimal(1000))
    if month_finish in rates:
        log_f("The whole month's data is there.")
        script = {"rates": rates}
        rs = RateScript.get_by_id(sess, latest_rs_id)
        contract.update_rate_script(
            sess, rs, rs.start_date, month_finish, loads(rs.script)
        )
        contract.insert_rate_script(sess, month_start, script)
        sess.commit()
        log_f(f"Added a new rate script starting at {hh_format(month_start)}.")
    else:
        msg = "There isn't a whole month there yet."
        if len(rates) > 0:
            msg += f" The last date is {sorted(rates.keys())[-1]}."
        log_f(msg)
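# The division by Decimal(1000) above appears to convert the BMRS Market
# Index Price from GBP per MWh to GBP per kWh, since 1 MWh = 1000 kWh.
# Illustrative arithmetic only:
#
#   Decimal("45.67") / Decimal(1000)  # -> Decimal('0.04567') GBP/kWh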
def find_cv(sess, caches, g_cv_id, dt, g_ldz_code):
    cvs = chellow.computer.hh_rate(
        sess, caches, g_cv_id, dt)["cvs"][g_ldz_code]
    ct = to_ct(dt)
    try:
        cv_props = cvs[ct.day]
    except KeyError:
        cv_props = sorted(cvs.items())[-1][1]
    cv = float(cv_props["cv"])
    try:
        avg_cv = caches["g_engine"]["avg_cvs"][g_ldz_code][ct.year][ct.month]
    except KeyError:
        try:
            gec = caches["g_engine"]
        except KeyError:
            gec = caches["g_engine"] = {}
        try:
            avg_cache = gec["avg_cvs"]
        except KeyError:
            avg_cache = gec["avg_cvs"] = {}
        try:
            avg_cvs_cache = avg_cache[g_ldz_code]
        except KeyError:
            avg_cvs_cache = avg_cache[g_ldz_code] = {}
        try:
            year_cache = avg_cvs_cache[ct.year]
        except KeyError:
            year_cache = avg_cvs_cache[ct.year] = {}
        try:
            avg_cv = year_cache[ct.month]
        except KeyError:
            cv_list = [float(v["cv"]) for v in cvs.values()]
            avg_cv = year_cache[ct.month] = (
                floor((sum(cv_list) / len(cv_list)) * 10) / 10
            )
    return cv, avg_cv
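# The floor-based step above truncates the mean calorific value to one
# decimal place rather than rounding it half-up. A sketch with made-up
# numbers:
#
#   from math import floor
#   floor(39.48 * 10) / 10  # -> 39.4, whereas round(39.48, 1) gives 39.5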
def hh_format_filter(dt, modifier="full"):
    if dt is None:
        return "Ongoing"
    else:
        return to_ct(dt).strftime(TEMPLATE_FORMATS[modifier])
def parse_date(date_str):
    if len(date_str) == 0:
        return None
    else:
        return to_utc(to_ct(Datetime.strptime(date_str, "%d/%m/%Y")))
def site_content(site_id, start_date, finish_date, user, file_name):
    sess = f = None
    try:
        sess = Session()
        running_name, finished_name = chellow.dloads.make_names(
            file_name, user)
        f = open(running_name, mode="w", newline="")
        writer = csv.writer(f, lineterminator="\n")
        site = Site.get_by_id(sess, site_id)
        sites = sess.query(Site).filter(Site.id == site_id)
        start_date_str = hh_format(start_date)
        finish_date_str = hh_format(finish_date)
        for site in sites:
            writer.writerow([
                "Site Code", "Site Name", "Associated Site Codes",
                "Sources", "Generator Types", "From", "To", "Type",
                "Date",
            ] + list(map(str, range(1, 51))))
            associates = " ".join(
                s.code for s in site.find_linked_sites(
                    sess, start_date, finish_date))
            source_codes = set()
            gen_types = set()
            for supply in sess.query(Supply).join(Era).join(SiteEra).filter(
                    SiteEra.is_physical == true(),
                    SiteEra.site == site,
                    Era.start_date <= finish_date,
                    or_(Era.finish_date == null(),
                        Era.finish_date >= start_date),
                    ).distinct().options(
                        joinedload(Supply.source),
                        joinedload(Supply.generator_type)):
                source_codes.add(supply.source.code)
                gen_type = supply.generator_type
                if gen_type is not None:
                    gen_types.add(gen_type.code)
            source_codes_str = ", ".join(sorted(source_codes))
            gen_types_str = ", ".join(sorted(gen_types))
            vals = None
            for hh in site.hh_data(sess, start_date, finish_date):
                hh_start_ct = to_ct(hh["start_date"])
                if hh_start_ct.hour == 0 and hh_start_ct.minute == 0:
                    if vals is not None:
                        writer.writerow(vals)
                    vals = [
                        site.code, site.name, associates, source_codes_str,
                        gen_types_str, start_date_str, finish_date_str,
                        "used", hh_start_ct.strftime("%Y-%m-%d"),
                    ]
                # Used kWh = net import, plus generation consumed on site,
                # plus third-party import consumed on site
                used_gen_kwh = hh["imp_gen"] - hh["exp_net"] - hh["exp_gen"]
                used_3p_kwh = hh["imp_3p"] - hh["exp_3p"]
                used_kwh = hh["imp_net"] + used_gen_kwh + used_3p_kwh
                vals.append(str(round(used_kwh, 2)))
            if vals is not None:
                writer.writerow(vals)
    except BaseException:
        msg = traceback.format_exc()
        sys.stderr.write(msg)
        f.write(msg)
    finally:
        if sess is not None:
            sess.close()
        if f is not None:
            f.close()
            os.rename(running_name, finished_name)
def _parse_row(row, row_index, datemode, title_row):
    val = get_value(row, 'Meter Point')
    try:
        mpan_core = parse_mpan_core(str(int(val)))
    except ValueError as e:
        raise BadRequest(
            "Can't parse the MPAN core in column 'Meter Point' at row " +
            str(row_index + 1) + " with value '" + str(val) + "' : " + str(e))
    bill_period = get_value(row, 'Bill Period')
    if '-' in bill_period:
        period_start, period_finish = [
            to_utc(to_ct(Datetime.strptime(d, '%Y-%m-%d')))
            for d in bill_period.split(' - ')
        ]
        period_finish += relativedelta(days=1) - HH
    else:
        period_start, period_finish = None, None
    from_date = get_date(row, 'From Date', datemode)
    if from_date is None:
        if period_start is None:
            raise BadRequest(
                "Can't find a bill start date in row " +
                str(row_index) + ".")
        else:
            from_date = period_start
    to_date = get_date(row, 'To Date', datemode)
    if to_date is None:
        if period_finish is None:
            raise BadRequest(
                "Can't find a bill finish date in row " +
                str(row_index) + ".")
        else:
            to_date = period_finish
    else:
        to_date += relativedelta(days=1) - HH
    issue_date = get_date(row, 'Bill Date', datemode)
    bill_number = get_value(row, 'Bill Number')
    bill = {
        'bill_type_code': 'N',
        'kwh': Decimal(0),
        'vat': Decimal('0.00'),
        'net': Decimal('0.00'),
        'reads': [],
        'breakdown': {'raw_lines': [str(title_row)]},
        'account': mpan_core,
        'issue_date': issue_date,
        'start_date': from_date,
        'finish_date': to_date,
        'mpans': [mpan_core],
    }
    bd = bill['breakdown']
    usage = get_dec(row, 'Usage')
    # usage_units = get_value(row, 'Usage Unit')
    price = get_dec(row, 'Price')
    amount = get_dec(row, 'Amount')
    product_item_name = get_value(row, 'Product Item Name')
    rate_name = get_value(row, 'Rate Name')
    if product_item_name == 'Renewables Obligation (RO)':
        bill['kwh'] += round(usage, 2)
    description = get_value(row, 'Description')
    product_class = get_value(row, 'Product Item Class')
    if description in ('Standard VAT@20%', 'Reduced VAT@5%'):
        bill['vat'] += round(amount, 2)
    else:
        bill['net'] += round(amount, 2)
    path = [product_class, description, rate_name]
    names = _find_names(ELEM_MAP, path)
    if names is None:
        duos_avail_prefix = "DUoS Availability ("
        duos_excess_avail_prefix = "DUoS Excess Availability ("
        if description.startswith("DUoS Availability"):
            if description.startswith(duos_avail_prefix):
                bd_add(
                    bd, 'duos-availability-kva',
                    int(description[len(duos_avail_prefix):-5]))
            bd_add(bd, 'duos-availability-days', usage)
            bd_add(bd, 'duos-availability-rate', price)
            bd_add(bd, 'duos-availability-gbp', amount)
        elif description.startswith("DUoS Excess Availability"):
            if description.startswith(duos_excess_avail_prefix):
                kva = int(description[len(duos_excess_avail_prefix):-5])
                bd_add(bd, 'duos-excess-availability-kva', kva)
            bd_add(bd, 'duos-excess-availability-days', usage)
            bd_add(bd, 'duos-excess-availability-rate', price)
            bd_add(bd, 'duos-excess-availability-gbp', amount)
        elif description.startswith('BSUoS Black Start '):
            bd_add(bd, 'black-start-gbp', amount)
        elif description.startswith('BSUoS Reconciliation - '):
            if usage is not None:
                bd_add(bd, 'bsuos-nbp-kwh', usage)
            if price is not None:
                bd_add(bd, 'bsuos-rate', price)
            bd_add(bd, 'bsuos-gbp', amount)
        elif description.startswith("FiT Rec - "):
            bd_add(bd, 'fit-gbp', amount)
        elif description.startswith("FiT Reconciliation "):
            bd_add(bd, 'fit-gbp', amount)
        elif description.startswith("CfD FiT Rec - "):
            bd_add(bd, 'cfd-fit-gbp', amount)
        elif description.startswith("Flex "):
            bd_add(bd, 'reconciliation-gbp', amount)
        elif description.startswith("Legacy TNUoS Reversal "):
            bd_add(bd, 'triad-gbp', amount)
        elif description.startswith("Hand Held Read -"):
            bd_add(bd, 'meter-rental-gbp', amount)
        elif description.startswith("RO Mutualisation "):
            bd_add(bd, 'ro-gbp', amount)
        elif description.startswith("OOC MOP - "):
            bd_add(bd, 'meter-rental-gbp', amount)
        elif description.startswith("KVa Adjustment "):
            bd_add(bd, 'duos-availability-gbp', amount)
        else:
            raise BadRequest(
                "For the path " + str(path) +
                " the parser can't work out the element.")
    else:
        for elem_k, elem_v in zip(names, (usage, price, amount)):
            if elem_k is not None:
                bd_add(bd, elem_k, elem_v)
    reference = str(bill_number) + '_' + str(row_index + 1)
    for k, v in tuple(bd.items()):
        if isinstance(v, set):
            bd[k] = list(v)
        elif k.endswith("-gbp"):
            reference += "_" + k[:-4]
    bill['reference'] = reference
    bill['gross'] = bill['net'] + bill['vat']
    return bill
def content(start_date, finish_date, supply_id, mpan_cores, is_zipped, user):
    if is_zipped:
        file_extension = ".zip"
    else:
        file_extension = ".csv"
    base_name = (
        "hh_data_row_" + to_ct(start_date).strftime("%Y%m%d%H%M") +
        file_extension
    )
    tls = ["Site Code", "Imp MPAN Core", "Exp MPAN Core",
           "HH Start Clock-Time"]
    for polarity in ("Import", "Export"):
        for suffix in (
            "ACTIVE kWh",
            "ACTIVE Status",
            "ACTIVE Modified",
            "REACTIVE_IMP kVArh",
            "REACTIVE_IMP Status",
            "REACTIVE_IMP Modified",
            "REACTIVE_EXP kVArh",
            "REACTIVE_EXP Status",
            "REACTIVE_EXP Modified",
        ):
            tls.append(polarity + " " + suffix)
    titles = csv_str(tls)
    running_name, finished_name = chellow.dloads.make_names(base_name, user)
    if is_zipped:
        zf = zipfile.ZipFile(running_name, "w")
    else:
        tmp_file = open(running_name, "w")
    sess = None
    try:
        sess = Session()
        caches = {}
        supplies = (
            sess.query(Supply)
            .join(Era)
            .filter(
                Era.start_date <= finish_date,
                or_(Era.finish_date == null(), Era.finish_date >= start_date),
            )
            .order_by(Era.supply_id, Era.start_date)
            .distinct()
        )
        if supply_id is not None:
            sup = Supply.get_by_id(sess, supply_id)
            supplies = supplies.filter(Era.supply == sup)
        if mpan_cores is not None:
            supplies = supplies.filter(
                or_(
                    Era.imp_mpan_core.in_(mpan_cores),
                    Era.exp_mpan_core.in_(mpan_cores),
                )
            )
        if not is_zipped:
            tmp_file.write(titles)
        for supply in supplies:
            site, era = (
                sess.query(Site, Era)
                .join(Era.site_eras)
                .filter(
                    Era.supply == supply,
                    Era.start_date <= finish_date,
                    SiteEra.site_id == Site.id,
                    or_(
                        Era.finish_date == null(),
                        Era.finish_date >= start_date),
                    SiteEra.is_physical == true(),
                )
                .order_by(Era.id)
                .first()
            )
            outs = []
            data = iter(
                sess.execute(
                    """
select hh_base.start_date,
    max(imp_active.value), max(imp_active.status),
    max(imp_active.last_modified),
    max(imp_reactive_imp.value), max(imp_reactive_imp.status),
    max(imp_reactive_imp.last_modified),
    max(imp_reactive_exp.value), max(imp_reactive_exp.status),
    max(imp_reactive_exp.last_modified),
    max(exp_active.value), max(exp_active.status),
    max(exp_active.last_modified),
    max(exp_reactive_imp.value), max(exp_reactive_imp.status),
    max(exp_reactive_imp.last_modified),
    max(exp_reactive_exp.value), max(exp_reactive_exp.status),
    max(exp_reactive_exp.last_modified)
from hh_datum hh_base
    join channel on hh_base.channel_id = channel.id
    join era on channel.era_id = era.id
    left join hh_datum imp_active
        on (imp_active.id = hh_base.id
            and channel.imp_related is true
            and channel.channel_type = 'ACTIVE')
    left join hh_datum imp_reactive_imp
        on (imp_reactive_imp.id = hh_base.id
            and channel.imp_related is true
            and channel.channel_type = 'REACTIVE_IMP')
    left join hh_datum imp_reactive_exp
        on (imp_reactive_exp.id = hh_base.id
            and channel.imp_related is true
            and channel.channel_type = 'REACTIVE_EXP')
    left join hh_datum exp_active
        on (exp_active.id = hh_base.id
            and channel.imp_related is false
            and channel.channel_type = 'ACTIVE')
    left join hh_datum exp_reactive_imp
        on (exp_reactive_imp.id = hh_base.id
            and channel.imp_related is false
            and channel.channel_type = 'REACTIVE_IMP')
    left join hh_datum exp_reactive_exp
        on (exp_reactive_exp.id = hh_base.id
            and channel.imp_related is false
            and channel.channel_type = 'REACTIVE_EXP')
where supply_id = :supply_id
    and hh_base.start_date between :start_date and :finish_date
group by hh_base.start_date
order by hh_base.start_date
""",
                    params={
                        "supply_id": supply.id,
                        "start_date": start_date,
                        "finish_date": finish_date,
                    },
                )
            )
            datum = next(data, None)
            for dt in hh_range(caches, start_date, finish_date):
                row = [site.code, era.imp_mpan_core, era.exp_mpan_core, dt]
                if datum is not None:
                    (
                        hh_start_date,
                        imp_active, imp_active_status, imp_active_modified,
                        imp_reactive_imp, imp_reactive_imp_status,
                        imp_reactive_imp_modified,
                        imp_reactive_exp, imp_reactive_exp_status,
                        imp_reactive_exp_modified,
                        exp_active, exp_active_status, exp_active_modified,
                        exp_reactive_imp, exp_reactive_imp_status,
                        exp_reactive_imp_modified,
                        exp_reactive_exp, exp_reactive_exp_status,
                        exp_reactive_exp_modified,
                    ) = datum
                    if hh_start_date == dt:
                        datum = next(data, None)
                        row += [
                            imp_active, imp_active_status,
                            imp_active_modified,
                            imp_reactive_imp, imp_reactive_imp_status,
                            imp_reactive_imp_modified,
                            imp_reactive_exp, imp_reactive_exp_status,
                            imp_reactive_exp_modified,
                            exp_active, exp_active_status,
                            exp_active_modified,
                            exp_reactive_imp, exp_reactive_imp_status,
                            exp_reactive_imp_modified,
                            exp_reactive_exp, exp_reactive_exp_status,
                            exp_reactive_exp_modified,
                        ]
                outs.append(csv_str(row))
            if is_zipped:
                zf.writestr(
                    (
                        "hh_data_row_" + str(era.id) + "_" +
                        str(era.imp_mpan_core) + "_" +
                        str(era.exp_mpan_core)
                    ).replace(" ", "") + ".csv",
                    titles + "".join(outs),
                )
            else:
                tmp_file.write("".join(outs))
            # Avoid a long-running transaction
            sess.rollback()
    except BaseException:
        msg = "Problem " + traceback.format_exc()
        if is_zipped:
            zf.writestr("error.txt", msg)
        else:
            tmp_file.write(msg)
    finally:
        if sess is not None:
            sess.close()
        if is_zipped:
            zf.close()
        else:
            tmp_file.close()
        os.rename(running_name, finished_name)
def _parse_row(row, row_index, datemode, title_row):
    val = get_value(row, "Meter Point")
    try:
        mpan_core = parse_mpan_core(str(int(val)))
    except ValueError as e:
        raise BadRequest(
            "Can't parse the MPAN core in column 'Meter Point' with value '" +
            str(val) + "' : " + str(e))
    bill_period = get_value(row, "Bill Period")
    if "-" in bill_period:
        period_start_naive, period_finish_naive = [
            Datetime.strptime(v, "%Y-%m-%d")
            for v in bill_period.split(" - ")
        ]
        period_start = to_utc(to_ct(period_start_naive))
        period_finish = to_utc(
            to_ct(period_finish_naive + relativedelta(days=1) - HH))
    else:
        period_start, period_finish = None, None
    from_date = get_date(row, "From Date", datemode)
    if from_date is None:
        if period_start is None:
            raise BadRequest("Can't find a bill start date.")
        else:
            from_date = period_start
    to_date_naive = get_date_naive(row, "To Date", datemode)
    if to_date_naive is None:
        if period_finish is None:
            raise BadRequest("Can't find a bill finish date.")
        else:
            to_date = period_finish
    else:
        to_date = to_utc(to_ct(to_date_naive + relativedelta(days=1) - HH))
    issue_date = get_date(row, "Bill Date", datemode)
    bill_number = get_value(row, "Bill Number")
    bill = {
        "bill_type_code": "N",
        "kwh": Decimal(0),
        "vat": Decimal("0.00"),
        "net": Decimal("0.00"),
        "reads": [],
        "breakdown": {"raw_lines": [str(title_row)]},
        "account": mpan_core,
        "issue_date": issue_date,
        "start_date": from_date,
        "finish_date": to_date,
        "mpan_core": mpan_core,
    }
    bd = bill["breakdown"]
    usage = get_dec(row, "Usage")
    # usage_units = get_value(row, 'Usage Unit')
    price = get_dec(row, "Price")
    amount = get_dec(row, "Amount")
    product_item_name = get_value(row, "Product Item Name")
    rate_name = get_value(row, "Rate Name")
    if product_item_name == "Renewables Obligation (RO)" and \
            usage is not None:
        bill["kwh"] += round(usage, 2)
    description = get_value(row, "Description")
    product_class = get_value(row, "Product Item Class")
    if description in ("Standard VAT@20%", "Reduced VAT@5%"):
        bill["vat"] += round(amount, 2)
    else:
        bill["net"] += round(amount, 2)
    path = [product_class, description, rate_name]
    names = _find_names(ELEM_MAP, path)
    duos_avail_prefix = "DUoS Availability ("
    duos_excess_avail_prefix = "DUoS Excess Availability ("
    if description.startswith("DUoS Availability Adjustment "):
        _bd_add(bd, "duos-availability-gbp", amount)
    elif description.startswith("DUoS Availability"):
        if description.startswith(duos_avail_prefix):
            _bd_add(
                bd,
                "duos-availability-kva",
                int(description[len(duos_avail_prefix):-5]),
            )
        _bd_add(bd, "duos-availability-days", usage)
        _bd_add(bd, "duos-availability-rate", price)
        _bd_add(bd, "duos-availability-gbp", amount)
    elif description.startswith("DUoS Excess Availability"):
        if description.startswith(duos_excess_avail_prefix):
            kva = int(description[len(duos_excess_avail_prefix):-5])
            _bd_add(bd, "duos-excess-availability-kva", kva)
        _bd_add(bd, "duos-excess-availability-days", usage)
        _bd_add(bd, "duos-excess-availability-rate", price)
        _bd_add(bd, "duos-excess-availability-gbp", amount)
    elif description.startswith("BSUoS Black Start "):
        _bd_add(bd, "black-start-gbp", amount)
    elif description.startswith("BSUoS Reconciliation - "):
        if usage is not None:
            _bd_add(bd, "bsuos-nbp-kwh", usage)
        if price is not None:
            _bd_add(bd, "bsuos-rate", price)
        _bd_add(bd, "bsuos-gbp", amount)
    elif description.startswith("FiT Rec - "):
        _bd_add(bd, "fit-gbp", amount)
    elif description.startswith("FiT Reconciliation "):
        _bd_add(bd, "fit-gbp", amount)
    elif description.startswith("CfD FiT Rec - ") or \
            description.startswith("CfD FiT Reconciliation"):
        _bd_add(bd, "cfd-fit-gbp", amount)
    elif description.startswith("Flex"):
        _bd_add(bd, "reconciliation-gbp", amount)
    elif description.startswith("Legacy TNUoS Reversal "):
        _bd_add(bd, "triad-gbp", amount)
    elif description.startswith("Hand Held Read -"):
        _bd_add(bd, "meter-rental-gbp", amount)
    elif description.startswith("RO Mutualisation "):
        _bd_add(bd, "ro-gbp", amount)
    elif description.startswith("OOC MOP - "):
        _bd_add(bd, "meter-rental-gbp", amount)
    elif description.startswith("KVa Adjustment "):
        _bd_add(bd, "duos-availability-gbp", amount)
    elif names is not None:
        for elem_k, elem_v in zip(names, (amount, price, usage)):
            if elem_k is not None:
                _bd_add(bd, elem_k, elem_v)
    else:
        raise BadRequest(
            f"For the path {path} the parser can't work out the element.")
    reference = str(bill_number) + "_" + str(row_index + 1)
    for k, v in tuple(bd.items()):
        if isinstance(v, set):
            bd[k] = list(v)
        elif k.endswith("-gbp"):
            reference += "_" + k[:-4]
    bill["reference"] = reference
    bill["gross"] = bill["net"] + bill["vat"]
    return bill
def get_date(row, name, datemode):
    val = get_value(row, name)
    if isinstance(val, float):
        return to_utc(to_ct(Datetime(*xldate_as_tuple(val, datemode))))
    raise BadRequest("Can't find a date.")
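# Sketch of the Excel serial-date handling (hypothetical cell value):
# xlrd's xldate_as_tuple turns a float such as 43831.0 (datemode 0, the
# 1900 date system) into a (year, month, day, hour, minute, second) tuple,
# which is then localised to CT and converted to UTC.
#
#   from xlrd import xldate_as_tuple
#   xldate_as_tuple(43831.0, 0)  # -> (2020, 1, 1, 0, 0, 0)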
def content(
        start_date_ct, finish_date_ct, imp_related, channel_type, is_zipped,
        supply_id, mpan_cores, user):
    start_date, finish_date = to_utc(start_date_ct), to_utc(finish_date_ct)
    zf = sess = tf = None
    base_name = ["supplies_hh_data", finish_date_ct.strftime("%Y%m%d%H%M")]
    cache = {}
    try:
        sess = Session()
        supplies = sess.query(Supply).join(Era).filter(
            or_(Era.finish_date == null(), Era.finish_date >= start_date),
            Era.start_date <= finish_date,
        ).order_by(Supply.id).distinct()
        if supply_id is not None:
            supply = Supply.get_by_id(sess, supply_id)
            supplies = supplies.filter(Supply.id == supply.id)
            first_era = sess.query(Era).filter(
                Era.supply == supply,
                or_(Era.finish_date == null(), Era.finish_date >= start_date),
                Era.start_date <= finish_date,
            ).order_by(Era.start_date).first()
            if first_era.imp_mpan_core is None:
                name_core = first_era.exp_mpan_core
            else:
                name_core = first_era.imp_mpan_core
            base_name.append("supply_" + name_core.replace(" ", "_"))
        if mpan_cores is not None:
            supplies = supplies.filter(
                or_(Era.imp_mpan_core.in_(mpan_cores),
                    Era.exp_mpan_core.in_(mpan_cores)))
            base_name.append("filter")
        cf = StringIO()
        writer = csv.writer(cf, lineterminator="\n")
        titles = [
            "Import MPAN Core", "Export MPAN Core", "Import Related?",
            "Channel Type", "HH Start Clock-Time",
        ] + list(range(1, 51))
        writer.writerow(titles)
        titles_csv = cf.getvalue()
        cf.close()
        running_name, finished_name = chellow.dloads.make_names(
            "_".join(base_name) + (".zip" if is_zipped else ".csv"), user)
        if is_zipped:
            zf = zipfile.ZipFile(running_name, "w", zipfile.ZIP_DEFLATED)
        else:
            tf = open(running_name, mode="w", newline="")
            tf.write(titles_csv)
        for supply in supplies:
            cf = StringIO()
            writer = csv.writer(cf, lineterminator="\n")
            era = supply.find_era_at(sess, finish_date)
            if era is None:
                imp_mpan_core_str = exp_mpan_core_str = "NA"
            else:
                if era.imp_mpan_core is None:
                    imp_mpan_core_str = "NA"
                else:
                    imp_mpan_core_str = era.imp_mpan_core
                if era.exp_mpan_core is None:
                    exp_mpan_core_str = "NA"
                else:
                    exp_mpan_core_str = era.exp_mpan_core
            imp_related_str = "TRUE" if imp_related else "FALSE"
            hh_data = iter(
                sess.query(HhDatum).join(Channel).join(Era).filter(
                    Era.supply == supply,
                    HhDatum.start_date >= start_date,
                    HhDatum.start_date <= finish_date,
                    Channel.imp_related == imp_related,
                    Channel.channel_type == channel_type,
                ).order_by(HhDatum.start_date))
            datum = next(hh_data, None)
            row = []
            for current_date in hh_range(cache, start_date, finish_date):
                dt_ct = to_ct(current_date)
                if dt_ct.hour == 0 and dt_ct.minute == 0:
                    if len(row) > 0:
                        writer.writerow(row)
                    row = [
                        imp_mpan_core_str, exp_mpan_core_str, imp_related_str,
                        channel_type, dt_ct.strftime("%Y-%m-%d"),
                    ]
                if datum is not None and datum.start_date == current_date:
                    row.append(datum.value)
                    datum = next(hh_data, None)
                else:
                    row.append(None)
            if len(row) > 0:
                writer.writerow(row)
            if is_zipped:
                fname = "_".join(
                    (imp_mpan_core_str, exp_mpan_core_str,
                     str(supply.id) + ".csv"))
                zf.writestr(fname, titles_csv + cf.getvalue())
            else:
                tf.write(cf.getvalue())
            cf.close()
            # Avoid long-running transaction
            sess.rollback()
        if is_zipped:
            zf.close()
        else:
            tf.close()
    except BaseException:
        msg = traceback.format_exc()
        if is_zipped:
            zf.writestr("error.txt", msg)
            zf.close()
        else:
            tf.write(msg)
    finally:
        if sess is not None:
            sess.close()
        os.rename(running_name, finished_name)
def content(user, file_name, file_like):
    f = sess = None
    try:
        sess = Session()
        tps = {}
        llfc_tp = {}
        block = {"llfc_tp": llfc_tp, "tps": tps}
        llfc_code = line_dt = start_date_str = None
        tp_cand = {}
        llfc_data = OrderedDict()
        zip_file = ZipFile(file_like)
        name_list = zip_file.namelist()
        if len(name_list) != 1:
            raise Exception("The zip archive must contain exactly one file.")
        csv_file = StringIO(zip_file.read(name_list[0]).decode("utf-8"))
        for vals in csv.reader(csv_file, delimiter="|"):
            code = vals[0]
            if code in ("LLF", "ZPT"):
                if llfc_code is not None:
                    # Compress days
                    days = OrderedDict()
                    for dt, slots in llfc_data.items():
                        day = days[dt] = []
                        prev_laf = None
                        for slot, laf in slots.items():
                            if laf == prev_laf:
                                day[-1]["slot_finish"] = slot
                            else:
                                day.append({
                                    "slot_start": slot,
                                    "slot_finish": slot,
                                    "laf": laf,
                                })
                            prev_laf = laf
                    prev_day = last_block = None
                    for dt, day in days.items():
                        if day == prev_day:
                            last_block["finish_date"] = dt
                        else:
                            last_block = tp_cand[dt] = {
                                "start_date": dt,
                                "finish_date": dt,
                                "slots": day,
                            }
                        prev_day = day
                    for tp_id, tp in tps.items():
                        if tp_cand == tp:
                            llfc_tp[llfc_code] = tp_id
                            tp_cand = {}
                            break
                    if tp_cand != {}:
                        tp_id = len(tps)
                        tps[tp_id] = tp_cand
                        llfc_tp[llfc_code] = tp_id
                        tp_cand = {}
            if code == "LLF":
                llfc_code = vals[1]
            elif code == "SDT":
                line_dt = vals[1]
                if start_date_str is None:
                    start_date_str = line_dt
                llfc_data[line_dt] = OrderedDict()
            elif code == "SPL":
                slot, laf = vals[1:]
                llfc_data[line_dt][slot] = laf
        start_date_raw = Datetime.strptime(start_date_str, "%Y%m%d")
        start_date_ct = to_ct(start_date_raw)
        start_date = to_utc(start_date_ct)
        finish_date_raw = Datetime.strptime(line_dt, "%Y%m%d")
        finish_date_ct = to_ct(finish_date_raw)
        finish_date_ct += Timedelta(minutes=30 * (int(slot) - 1))
        finish_date = to_utc(finish_date_ct)
        running_name, finished_name = chellow.dloads.make_names(
            start_date.strftime("%Y%m%d%H%M") + "_" +
            finish_date.strftime("%Y%m%d%H%M") + ".zish",
            user,
        )
        f = open(running_name, mode="w")
        llfc_tp = dict((k.zfill(3), v) for k, v in block["llfc_tp"].items())
        block["llfc_tp"] = llfc_tp
        for tp in block["tps"].values():
            for date_block in tp.values():
                for slot in date_block["slots"]:
                    slot["laf"] = Decimal(slot["laf"])
                    slot["slot_start"] = int(slot["slot_start"])
                    slot["slot_finish"] = int(slot["slot_finish"])
        f.write(dumps(block))
    except BaseException:
        f.write(traceback.format_exc())
    finally:
        if sess is not None:
            sess.close()
        if f is not None:
            f.close()
            os.rename(running_name, finished_name)
def datum_range(sess, caches, years_back, start_date, finish_date):
    try:
        return caches['g_engine']['datum'][years_back][start_date][
            finish_date]
    except KeyError:
        try:
            g_engine_cache = caches['g_engine']
        except KeyError:
            g_engine_cache = caches['g_engine'] = {}
        try:
            d_cache_datum = g_engine_cache['datum']
        except KeyError:
            d_cache_datum = g_engine_cache['datum'] = {}
        try:
            d_cache_years = d_cache_datum[years_back]
        except KeyError:
            d_cache_years = d_cache_datum[years_back] = {}
        try:
            d_cache = d_cache_years[start_date]
        except KeyError:
            d_cache = d_cache_years[start_date] = {}
        datum_list = []
        for dt in hh_range(caches, start_date, finish_date):
            hist_date = dt - relativedelta(years=years_back)
            ct_dt = to_ct(dt)
            utc_is_month_end = (dt + HH).day == 1 and dt.day != 1
            ct_is_month_end = (ct_dt + HH).day == 1 and ct_dt.day != 1
            utc_decimal_hour = dt.hour + dt.minute / 60
            ct_decimal_hour = ct_dt.hour + ct_dt.minute / 60
            bhs = hh_rate(
                sess, caches, chellow.bank_holidays.get_db_id(),
                dt)['bank_holidays']
            bank_holidays = [b[5:] for b in bhs]
            utc_is_bank_holiday = dt.strftime("%m-%d") in bank_holidays
            ct_is_bank_holiday = ct_dt.strftime("%m-%d") in bank_holidays
            datum_list.append(
                MappingProxyType({
                    'hist_start': hist_date,
                    'start_date': dt,
                    'ct_day': ct_dt.day,
                    'utc_month': dt.month,
                    'utc_day': dt.day,
                    'utc_decimal_hour': utc_decimal_hour,
                    'utc_year': dt.year,
                    'utc_hour': dt.hour,
                    'utc_minute': dt.minute,
                    'ct_year': ct_dt.year,
                    'ct_month': ct_dt.month,
                    'ct_decimal_hour': ct_decimal_hour,
                    'ct_day_of_week': ct_dt.weekday(),
                    'utc_day_of_week': dt.weekday(),
                    'utc_is_bank_holiday': utc_is_bank_holiday,
                    'ct_is_bank_holiday': ct_is_bank_holiday,
                    'utc_is_month_end': utc_is_month_end,
                    'ct_is_month_end': ct_is_month_end,
                    'status': 'X',
                    'kwh': 0,
                    'hist_kwh': 0,
                    'unit_code': 'M3',
                    'unit_factor': 1,
                    'units_consumed': 0,
                    'correction_factor': 1,
                    'calorific_value': 0,
                    'avg_cv': 0,
                }))
        datum_tuple = tuple(datum_list)
        d_cache[finish_date] = datum_tuple
        return datum_tuple
def run(self):
    while not self.stopped.isSet():
        if self.lock.acquire(False):
            sess = book = sheet = None
            try:
                sess = Session()
                self.log("Starting to check BSUoS rates.")
                contract = Contract.get_non_core_by_name(sess, 'bsuos')
                latest_rs = sess.query(RateScript).filter(
                    RateScript.contract == contract).order_by(
                    RateScript.start_date.desc()).first()
                latest_rs_id = latest_rs.id
                this_month_start = latest_rs.start_date + relativedelta(
                    months=1)
                next_month_start = this_month_start + relativedelta(months=1)
                now = utc_datetime_now()
                props = contract.make_properties()
                if props.get('enabled', False):
                    if now > next_month_start:
                        url = props['url']
                        self.log(
                            "Checking to see if data is available from " +
                            str(this_month_start) + " to " +
                            str(next_month_start - HH) + " at " + url)
                        res = requests.get(url)
                        self.log(
                            "Received " + str(res.status_code) + " " +
                            res.reason)
                        book = xlrd.open_workbook(file_contents=res.content)
                        sheet = book.sheet_by_index(0)
                        month_bsuos = {}
                        for row_index in range(1, sheet.nrows):
                            row = sheet.row(row_index)
                            raw_date = Datetime(
                                *xlrd.xldate_as_tuple(
                                    row[0].value, book.datemode))
                            hh_date_ct = to_ct(raw_date)
                            hh_date = to_utc(hh_date_ct)
                            hh_date += relativedelta(
                                minutes=30 * int(row[1].value))
                            if not hh_date < this_month_start and \
                                    hh_date < next_month_start:
                                month_bsuos[key_format(hh_date)] = \
                                    row[2].value
                        if key_format(next_month_start - HH) in month_bsuos:
                            self.log("The whole month's data is there.")
                            script = "def rates_gbp_per_mwh():\n " \
                                "return {\n" + ',\n'.join(
                                    "'" + k + "': " + str(month_bsuos[k])
                                    for k in sorted(
                                        month_bsuos.keys())) + "}"
                            contract = Contract.get_non_core_by_name(
                                sess, 'bsuos')
                            rs = RateScript.get_by_id(sess, latest_rs_id)
                            contract.update_rate_script(
                                sess, rs, rs.start_date,
                                rs.start_date + relativedelta(months=2) - HH,
                                rs.script)
                            sess.flush()
                            contract.insert_rate_script(
                                sess,
                                rs.start_date + relativedelta(months=1),
                                script)
                            sess.commit()
                            self.log("Added new rate script.")
                        else:
                            self.log(
                                "There isn't a whole month there yet. The "
                                "last date is " +
                                sorted(month_bsuos.keys())[-1])
                else:
                    self.log(
                        "The automatic importer is disabled. To "
                        "enable it, edit the contract properties to "
                        "set 'enabled' to True.")
            except BaseException:
                self.log("Outer problem " + traceback.format_exc())
                sess.rollback()
            finally:
                book = sheet = None
                if sess is not None:
                    sess.close()
                self.lock.release()
            self.log("Finished checking BSUoS rates.")
        self.going.wait(30 * 60)
        self.going.clear()
def run(self):
    while not self.stopped.isSet():
        if self.lock.acquire(False):
            sess = book = sbp_sheet = ssp_sheet = None
            try:
                sess = Session()
                self.log("Starting to check System Prices.")
                # ct_tz = pytz.timezone('Europe/London')
                contract = Contract.get_non_core_by_name(
                    sess, 'system_price')
                contract_props = contract.make_properties()
                if contract_props.get('enabled', False):
                    for rscript in sess.query(RateScript).filter(
                            RateScript.contract == contract).order_by(
                            RateScript.start_date.desc()):
                        ns = json.loads(rscript.script)
                        rates = ns['gbp_per_nbp_mwh']
                        if len(rates) == 0:
                            fill_start = rscript.start_date
                            break
                        elif rates[key_format(
                                rscript.finish_date)]['run'] == 'DF':
                            fill_start = rscript.finish_date + HH
                            break
                    config = Contract.get_non_core_by_name(
                        sess, 'configuration')
                    config_props = config.make_properties()
                    scripting_key = config_props.get(
                        ELEXON_PORTAL_SCRIPTING_KEY_KEY)
                    if scripting_key is None:
                        raise BadRequest(
                            "The property " +
                            ELEXON_PORTAL_SCRIPTING_KEY_KEY +
                            " cannot be found in the configuration "
                            "properties.")
                    url_str = contract_props['url'] + \
                        'file/download/BESTVIEWPRICES_FILE?key=' + \
                        scripting_key
                    self.log(
                        "Downloading from " + url_str +
                        " and extracting data from " +
                        hh_format(fill_start))
                    url = urllib.parse.urlparse(url_str)
                    if url.scheme == 'https':
                        conn = http.client.HTTPSConnection(
                            url.hostname, url.port)
                    else:
                        conn = http.client.HTTPConnection(
                            url.hostname, url.port)
                    conn.request("GET", url.path + '?' + url.query)
                    res = conn.getresponse()
                    self.log(
                        "Received " + str(res.status) + " " + res.reason)
                    data = res.read()
                    book = xlrd.open_workbook(file_contents=data)
                    sbp_sheet = book.sheet_by_index(1)
                    ssp_sheet = book.sheet_by_index(2)
                    sp_months = []
                    sp_month = None
                    for row_index in range(1, sbp_sheet.nrows):
                        sbp_row = sbp_sheet.row(row_index)
                        ssp_row = ssp_sheet.row(row_index)
                        raw_date = datetime.datetime(
                            *xlrd.xldate_as_tuple(
                                sbp_row[0].value, book.datemode))
                        hh_date_ct = to_ct(raw_date)
                        hh_date = to_utc(hh_date_ct)
                        run_code = sbp_row[1].value
                        for col_idx in range(2, 52):
                            if hh_date >= fill_start:
                                sbp_val = sbp_row[col_idx].value
                                if sbp_val != '':
                                    if hh_date.day == 1 and \
                                            hh_date.hour == 0 and \
                                            hh_date.minute == 0:
                                        sp_month = {}
                                        sp_months.append(sp_month)
                                    ssp_val = ssp_row[col_idx].value
                                    sp_month[hh_date] = {
                                        'run': run_code,
                                        'sbp': sbp_val,
                                        'ssp': ssp_val}
                            hh_date += HH
                    self.log("Successfully extracted data.")
                    last_date = sorted(sp_months[-1].keys())[-1]
                    if last_date.month == (last_date + HH).month:
                        del sp_months[-1]
                    if 'limit' in contract_props:
                        sp_months = sp_months[0:1]
                    for sp_month in sp_months:
                        sorted_keys = sorted(sp_month.keys())
                        month_start = sorted_keys[0]
                        month_finish = sorted_keys[-1]
                        rs = sess.query(RateScript).filter(
                            RateScript.contract == contract,
                            RateScript.start_date == month_start).first()
                        if rs is None:
                            self.log(
                                "Adding a new rate script starting at " +
                                hh_format(month_start) + ".")
                            latest_rs = sess.query(RateScript).filter(
                                RateScript.contract == contract).order_by(
                                RateScript.start_date.desc()).first()
                            contract.update_rate_script(
                                sess, latest_rs, latest_rs.start_date,
                                month_finish, latest_rs.script)
                            rs = contract.insert_rate_script(
                                sess, month_start, '')
                            sess.flush()
                        script = {
                            'gbp_per_nbp_mwh': dict(
                                (key_format(k), v)
                                for k, v in sp_month.items())}
                        self.log(
                            "Updating rate script starting at " +
                            hh_format(month_start) + ".")
                        contract.update_rate_script(
                            sess, rs, rs.start_date, rs.finish_date,
                            json.dumps(script, indent=' ', sort_keys=True))
                        sess.commit()
                else:
                    self.log(
                        "The automatic importer is disabled. To "
                        "enable it, edit the contract properties to "
                        "set 'enabled' to True.")
            except BaseException:
                self.log("Outer problem " + traceback.format_exc())
                sess.rollback()
            finally:
                book = sbp_sheet = ssp_sheet = None
                self.lock.release()
                self.log("Finished checking System Price rates.")
                if sess is not None:
                    sess.close()
        self.going.wait(24 * 60 * 60)
        self.going.clear()
def laf_days(sess, progress, csv_file):
    llfc_ids = []
    timestamps = []
    values = []
    llfc_code = line_dt_ct = dno = llfc_id = None
    llfc_valid_to = UTC_DATETIME_MIN
    timestamp_cache = {}
    for line_number, vals in enumerate(csv.reader(csv_file, delimiter="|")):
        progress["line_number"] = line_number
        code = vals[0]
        if code == "DIS":
            participant_code = vals[1]
            dno = Party.get_by_participant_code_role_code(
                sess, participant_code, "R")
        elif code == "LLF":
            llfc_code = vals[1]
            llfc_valid_to = UTC_DATETIME_MIN
            if len(llfc_ids) > 0:
                yield llfc_ids, timestamps, values
                llfc_ids = []
                timestamps = []
                values = []
        elif code == "SDT":
            line_dt_str = vals[1]
            line_dt_ct = to_ct(Datetime.strptime(line_dt_str, "%Y%m%d"))
        elif code == "SPL":
            period, value = vals[1:]
            try:
                timestamp = timestamp_cache[line_dt_ct][period]
            except KeyError:
                try:
                    day_cache = timestamp_cache[line_dt_ct]
                except KeyError:
                    day_cache = timestamp_cache[line_dt_ct] = {}
                timestamp = day_cache[period] = to_utc(
                    line_dt_ct + Timedelta(minutes=30 * (int(period) - 1)))
            if hh_after(timestamp, llfc_valid_to):
                llfc = dno.find_llfc_by_code(sess, llfc_code, timestamp)
                if llfc is None:
                    continue
                llfc_id, llfc_valid_to = llfc.id, llfc.valid_to
            llfc_ids.append(llfc_id)
            timestamps.append(timestamp)
            values.append(Decimal(value))
        elif code == "ZPT":
            earliest_list = sorted(timestamp_cache.keys())
            if len(earliest_list) > 0:
                conf = Contract.get_non_core_by_name(sess, "configuration")
                props = conf.make_properties()
                try:
                    laf_importer = props["laf_importer"]
                except KeyError:
                    laf_importer = props["laf_importer"] = {}
                laf_importer[dno.participant.code] = min(earliest_list)
                conf.update_properties(props)
                sess.commit()
    if len(llfc_ids) > 0:
        yield llfc_ids, timestamps, values
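# Settlement periods are 1-based half-hours, so period p of a day starting
# at local midnight begins 30 * (p - 1) minutes in: period 1 is 00:00 and
# period 48 is 23:30 (long clock-change days run to 50). A sketch:
#
#   Timedelta(minutes=30 * (1 - 1))   # -> 0:00:00
#   Timedelta(minutes=30 * (48 - 1))  # -> 23:30:00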
def datum_range(sess, caches, years_back, start_date, finish_date):
    try:
        return caches['g_engine']['datum'][
            years_back, start_date, finish_date]
    except KeyError:
        try:
            g_engine_cache = caches['g_engine']
        except KeyError:
            caches['g_engine'] = g_engine_cache = {}
        try:
            d_cache = g_engine_cache['datum']
        except KeyError:
            g_engine_cache['datum'] = d_cache = {}
        datum_list = []
        g_cv_id = get_non_core_contract_id('g_cv')
        for dt in hh_range(start_date, finish_date):
            hist_date = dt - relativedelta(years=years_back)
            ct_dt = to_ct(dt)
            utc_is_month_end = (dt + HH).day == 1 and dt.day != 1
            ct_is_month_end = (ct_dt + HH).day == 1 and ct_dt.day != 1
            utc_decimal_hour = dt.hour + dt.minute / 60
            ct_decimal_hour = ct_dt.hour + ct_dt.minute / 60
            bhs = chellow.computer.hh_rate(
                sess, caches, chellow.bank_holidays.get_db_id(), dt,
                'bank_holidays')
            if bhs is None:
                raise BadRequest("Can't find bank holidays for " + str(dt))
            bank_holidays = [b[5:] for b in bhs]
            utc_is_bank_holiday = dt.strftime("%m-%d") in bank_holidays
            ct_is_bank_holiday = ct_dt.strftime("%m-%d") in bank_holidays
            cv = float(
                chellow.computer.ion_rs(
                    sess, caches, g_cv_id,
                    hist_date)[hist_date.day - 1]['SW']) / 3.6
            datum_list.append(
                MappingProxyType({
                    'hist_start': hist_date,
                    'start_date': dt,
                    'ct_day': ct_dt.day,
                    'utc_month': dt.month,
                    'utc_day': dt.day,
                    'utc_decimal_hour': utc_decimal_hour,
                    'utc_year': dt.year,
                    'utc_hour': dt.hour,
                    'utc_minute': dt.minute,
                    'ct_year': ct_dt.year,
                    'ct_month': ct_dt.month,
                    'ct_decimal_hour': ct_decimal_hour,
                    'ct_day_of_week': ct_dt.weekday(),
                    'utc_day_of_week': dt.weekday(),
                    'utc_is_bank_holiday': utc_is_bank_holiday,
                    'ct_is_bank_holiday': ct_is_bank_holiday,
                    'utc_is_month_end': utc_is_month_end,
                    'ct_is_month_end': ct_is_month_end,
                    'status': 'X',
                    'kwh': 0,
                    'hist_kwh': 0,
                    'cv': cv,
                    'correction_factor': CORRECTION_FACTOR,
                    'units_code': 'M3',
                    'units_factor': 1,
                    'units_consumed': 0,
                }))
        datum_tuple = tuple(datum_list)
        d_cache[years_back, start_date, finish_date] = datum_tuple
        return datum_tuple
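# This variant memoises under a single tuple key rather than the nested
# dictionaries used by the other datum_range above; the lookups are
# equivalent. A minimal sketch of the tuple-key pattern:
#
#   cache = {}
#   cache['a', 1, 2] = 'v'  # same as cache[('a', 1, 2)] = 'v'
#   cache['a', 1, 2]        # -> 'v'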
def content(user, file_name, file_like):
    f = sess = None
    try:
        sess = Session()
        tps = {}
        llfc_tp = {}
        block = {'llfc_tp': llfc_tp, 'tps': tps}
        llfc_code = line_dt = start_date_str = None
        tp_cand = {}
        llfc_data = OrderedDict()
        for vals in csv.reader(file_like, delimiter='|'):
            code = vals[0]
            if code in ('LLF', 'ZPT'):
                if llfc_code is not None:
                    # Compress days
                    days = OrderedDict()
                    for dt, slots in llfc_data.items():
                        day = days[dt] = []
                        prev_laf = None
                        for slot, laf in slots.items():
                            if laf == prev_laf:
                                day[-1]['slot_finish'] = slot
                            else:
                                day.append(
                                    {
                                        'slot_start': slot,
                                        'slot_finish': slot,
                                        'laf': laf,
                                    }
                                )
                            prev_laf = laf
                    prev_day = last_block = None
                    for dt, day in days.items():
                        if day == prev_day:
                            last_block['finish_date'] = dt
                        else:
                            last_block = tp_cand[dt] = {
                                'start_date': dt,
                                'finish_date': dt,
                                'slots': day,
                            }
                        prev_day = day
                    for tp_id, tp in tps.items():
                        if tp_cand == tp:
                            llfc_tp[llfc_code] = tp_id
                            tp_cand = {}
                            break
                    if tp_cand != {}:
                        tp_id = len(tps)
                        tps[tp_id] = tp_cand
                        llfc_tp[llfc_code] = tp_id
                        tp_cand = {}
            if code == 'LLF':
                llfc_code = vals[1]
            elif code == 'SDT':
                line_dt = vals[1]
                if start_date_str is None:
                    start_date_str = line_dt
                llfc_data[line_dt] = OrderedDict()
            elif code == 'SPL':
                slot, laf = vals[1:]
                llfc_data[line_dt][slot] = laf
        start_date_raw = Datetime.strptime(start_date_str, "%Y%m%d")
        start_date_ct = to_ct(start_date_raw)
        start_date = to_utc(start_date_ct)
        finish_date_raw = Datetime.strptime(line_dt, "%Y%m%d")
        finish_date_ct = to_ct(finish_date_raw)
        finish_date_ct += Timedelta(minutes=30 * (int(slot) - 1))
        finish_date = to_utc(finish_date_ct)
        running_name, finished_name = chellow.dloads.make_names(
            start_date.strftime('%Y%m%d%H%M') + '_' +
            finish_date.strftime('%Y%m%d%H%M') + '.zish', user)
        f = open(running_name, mode='w')
        llfc_tp = dict((k.zfill(3), v) for k, v in block['llfc_tp'].items())
        block['llfc_tp'] = llfc_tp
        for tp in block['tps'].values():
            for date_block in tp.values():
                for slot in date_block['slots']:
                    slot['laf'] = Decimal(slot['laf'])
                    slot['slot_start'] = int(slot['slot_start'])
                    slot['slot_finish'] = int(slot['slot_finish'])
        f.write(dumps(block))
    except BaseException:
        f.write(traceback.format_exc())
    finally:
        if sess is not None:
            sess.close()
        if f is not None:
            f.close()
            os.rename(running_name, finished_name)
def to_ct_filter(dt):
    return to_ct(dt)
def _process_url(logger, sess, url, contract):
    logger(f"Checking to see if there's any new data at {url}")
    res = requests.get(url)
    content_disposition = res.headers.get("Content-Disposition")
    logger(f"Received {res.status_code} {res.reason} {content_disposition}")
    cache = {}
    parsed_rows = []
    filetype = _find_file_type(content_disposition)
    if filetype == "csv":
        reader = csv.reader(res.text.splitlines())
        next(reader)  # Skip titles
        for row in reader:
            date_str = row[0]
            date = Datetime.strptime(date_str, "%d/%m/%Y")
            period_str = row[1]
            period = int(period_str)
            price_str = row[2]
            price = Decimal(price_str)
            run = row[5]
            parsed_rows.append((date, period, price, run))
    elif filetype == "xls":
        book = xlrd.open_workbook(file_contents=res.content)
        sheet = book.sheet_by_index(0)
        for row_index in range(1, sheet.nrows):
            row = sheet.row(row_index)
            date_val = row[0].value
            if isinstance(date_val, float):
                date = Datetime(*xlrd.xldate_as_tuple(date_val, book.datemode))
            elif isinstance(date_val, str):
                separator = date_val[2]
                fmat = separator.join(("%d", "%m", "%Y"))
                date = Datetime.strptime(date_val, fmat)
            else:
                raise BadRequest(
                    f"Type of date field {date_val} not recognized.")
            period = int(row[1].value)
            price = Decimal(str(row[2].value))
            run = row[5].value
            parsed_rows.append((date, period, price, run))
    else:
        raise BadRequest(f"The file extension {filetype} is not recognised.")
    for date, period, price, run in parsed_rows:
        hh_date_ct = to_ct(date)
        hh_date_ct += relativedelta(minutes=30 * (period - 1))
        hh_date = to_utc(hh_date_ct)
        try:
            rs, rates, rts = cache[hh_date.year][hh_date.month]
        except KeyError:
            _save_cache(sess, cache)
            try:
                yr_cache = cache[hh_date.year]
            except KeyError:
                yr_cache = cache[hh_date.year] = {}
            rs = sess.query(RateScript).filter(
                RateScript.contract == contract,
                RateScript.start_date <= hh_date,
                or_(
                    RateScript.finish_date == null(),
                    RateScript.finish_date >= hh_date,
                ),
            ).first()
            while rs is None:
                logger(f"There's no rate script at {hh_format(hh_date)}.")
                latest_rs = sess.query(RateScript).filter(
                    RateScript.contract == contract).order_by(
                    RateScript.start_date.desc()).first()
                contract.update_rate_script(
                    sess, latest_rs, latest_rs.start_date,
                    latest_rs.start_date + relativedelta(months=2) - HH,
                    loads(latest_rs.script),
                )
                new_rs_start = latest_rs.start_date + relativedelta(months=1)
                contract.insert_rate_script(sess, new_rs_start, {})
                sess.commit()
                logger(
                    f"Added a rate script starting at "
                    f"{hh_format(new_rs_start)}.")
                rs = sess.query(RateScript).filter(
                    RateScript.contract == contract,
                    RateScript.start_date <= hh_date,
                    or_(
                        RateScript.finish_date == null(),
                        RateScript.finish_date >= hh_date,
                    ),
                ).first()
            rates = loads(rs.script)
            try:
                rts = rates["rates_gbp_per_mwh"]
            except KeyError:
                rts = rates["rates_gbp_per_mwh"] = {}
            yr_cache[hh_date.month] = rs, rates, rts
        key = key_format(hh_date)
        try:
            existing = rts[key]
        except KeyError:
            existing = rts[key] = {}
        if run not in existing:
            existing[run] = price
            logger(f"Added rate at {hh_format(hh_date)} for run {run}.")
    _save_cache(sess, cache)
def run(self):
    while not self.stopped.isSet():
        if self.lock.acquire(False):
            sess = book = sbp_sheet = ssp_sheet = None
            try:
                sess = Session()
                self.log("Starting to check System Prices.")
                # ct_tz = pytz.timezone('Europe/London')
                contract = Contract.get_non_core_by_name(
                    sess, 'system_price')
                contract_props = contract.make_properties()
                if contract_props.get('enabled', False):
                    for rscript in sess.query(RateScript).filter(
                            RateScript.contract == contract).order_by(
                            RateScript.start_date.desc()):
                        ns = loads(rscript.script)
                        rates = ns['gbp_per_nbp_mwh']
                        if len(rates) == 0:
                            fill_start = rscript.start_date
                            break
                        elif rates[key_format(
                                rscript.finish_date)]['run'] == 'DF':
                            fill_start = rscript.finish_date + HH
                            break
                    config = Contract.get_non_core_by_name(
                        sess, 'configuration')
                    config_props = config.make_properties()
                    scripting_key = config_props.get(
                        ELEXON_PORTAL_SCRIPTING_KEY_KEY)
                    if scripting_key is None:
                        raise BadRequest(
                            "The property " +
                            ELEXON_PORTAL_SCRIPTING_KEY_KEY +
                            " cannot be found in the configuration "
                            "properties.")
                    url_str = contract_props['url'] + \
                        'file/download/BESTVIEWPRICES_FILE?key=' + \
                        scripting_key
                    self.log(
                        "Downloading from " + url_str +
                        " and extracting data from " +
                        hh_format(fill_start))
                    url = urllib.parse.urlparse(url_str)
                    if url.scheme == 'https':
                        conn = http.client.HTTPSConnection(
                            url.hostname, url.port)
                    else:
                        conn = http.client.HTTPConnection(
                            url.hostname, url.port)
                    conn.request("GET", url.path + '?' + url.query)
                    res = conn.getresponse()
                    self.log(
                        "Received " + str(res.status) + " " + res.reason)
                    data = res.read()
                    book = xlrd.open_workbook(file_contents=data)
                    sbp_sheet = book.sheet_by_index(1)
                    ssp_sheet = book.sheet_by_index(2)
                    sp_months = []
                    sp_month = None
                    for row_index in range(1, sbp_sheet.nrows):
                        sbp_row = sbp_sheet.row(row_index)
                        ssp_row = ssp_sheet.row(row_index)
                        raw_date = datetime.datetime(
                            *xlrd.xldate_as_tuple(
                                sbp_row[0].value, book.datemode))
                        hh_date_ct = to_ct(raw_date)
                        hh_date = to_utc(hh_date_ct)
                        run_code = sbp_row[1].value
                        for col_idx in range(2, 52):
                            if hh_date >= fill_start:
                                sbp_val = sbp_row[col_idx].value
                                if sbp_val != '':
                                    if hh_date.day == 1 and \
                                            hh_date.hour == 0 and \
                                            hh_date.minute == 0:
                                        sp_month = {}
                                        sp_months.append(sp_month)
                                    ssp_val = ssp_row[col_idx].value
                                    sp_month[hh_date] = {
                                        'run': run_code,
                                        'sbp': sbp_val,
                                        'ssp': ssp_val,
                                    }
                            hh_date += HH
                    self.log("Successfully extracted data.")
                    last_date = sorted(sp_months[-1].keys())[-1]
                    if last_date.month == (last_date + HH).month:
                        del sp_months[-1]
                    if 'limit' in contract_props:
                        sp_months = sp_months[0:1]
                    for sp_month in sp_months:
                        sorted_keys = sorted(sp_month.keys())
                        month_start = sorted_keys[0]
                        month_finish = sorted_keys[-1]
                        rs = sess.query(RateScript).filter(
                            RateScript.contract == contract,
                            RateScript.start_date == month_start).first()
                        if rs is None:
                            self.log(
                                "Adding a new rate script starting at " +
                                hh_format(month_start) + ".")
                            latest_rs = sess.query(RateScript).filter(
                                RateScript.contract == contract).order_by(
                                RateScript.start_date.desc()).first()
                            contract.update_rate_script(
                                sess, latest_rs, latest_rs.start_date,
                                month_finish, loads(latest_rs.script))
                            rs = contract.insert_rate_script(
                                sess, month_start, {})
                            sess.flush()
                        script = {
                            'gbp_per_nbp_mwh': dict(
                                (key_format(k), v)
                                for k, v in sp_month.items())
                        }
                        self.log(
                            "Updating rate script starting at " +
                            hh_format(month_start) + ".")
                        contract.update_rate_script(
                            sess, rs, rs.start_date, rs.finish_date, script)
                        sess.commit()
                else:
                    self.log(
                        "The automatic importer is disabled. To "
                        "enable it, edit the contract properties to "
                        "set 'enabled' to True.")
            except BaseException:
                self.log("Outer problem " + traceback.format_exc())
                sess.rollback()
            finally:
                book = sbp_sheet = ssp_sheet = None
                self.lock.release()
                self.log("Finished checking System Price rates.")
                if sess is not None:
                    sess.close()
        self.going.wait(24 * 60 * 60)
        self.going.clear()
def get_date(row, name, datemode):
    dt = get_date_naive(row, name, datemode)
    return dt if dt is None else to_utc(to_ct(dt))
def _process_line(cache, sess, contract, log_func, values):
    hh_date_ct = to_ct(Datetime.strptime(values[0], "%d/%m/%Y"))
    hh_date = to_utc(hh_date_ct)
    hh_date += relativedelta(minutes=30 * (int(values[2]) - 1))
    run = values[1]
    gsp_group_code = GSP_GROUP_LOOKUP[values[3]]
    off_taking_str = values[4]
    try:
        off_taking = Decimal(off_taking_str)
    except InvalidOperation as e:
        raise BadRequest(
            "Problem parsing 'off-taking' field '" + off_taking_str +
            "' in the row " + str(values) + ". " + str(e))
    delivering = Decimal(values[5])
    try:
        rs, rates, rts = cache[hh_date.year][hh_date.month]
    except KeyError:
        _save_cache(sess, cache)
        try:
            yr_cache = cache[hh_date.year]
        except KeyError:
            yr_cache = cache[hh_date.year] = {}
        rs = sess.query(RateScript).filter(
            RateScript.contract == contract,
            RateScript.start_date <= hh_date,
            or_(RateScript.finish_date == null(),
                RateScript.finish_date >= hh_date),
        ).first()
        while rs is None:
            log_func("There's no rate script at " + hh_format(hh_date) + ".")
            latest_rs = sess.query(RateScript).filter(
                RateScript.contract == contract).order_by(
                RateScript.start_date.desc()).first()
            contract.update_rate_script(
                sess, latest_rs, latest_rs.start_date,
                latest_rs.start_date + relativedelta(months=2) - HH,
                loads(latest_rs.script),
            )
            new_rs_start = latest_rs.start_date + relativedelta(months=1)
            contract.insert_rate_script(sess, new_rs_start, {})
            sess.commit()
            log_func(
                "Added a rate script starting at " +
                hh_format(new_rs_start) + ".")
            rs = sess.query(RateScript).filter(
                RateScript.contract == contract,
                RateScript.start_date <= hh_date,
                or_(
                    RateScript.finish_date == null(),
                    RateScript.finish_date >= hh_date,
                ),
            ).first()
        rates = loads(rs.script)
        try:
            rts = rates["tlms"]
        except KeyError:
            rts = rates["tlms"] = {}
        yr_cache[hh_date.month] = rs, rates, rts
        sess.rollback()
    key = key_format(hh_date)
    try:
        existing = rts[key]
    except KeyError:
        existing = rts[key] = {}
    try:
        group = existing[gsp_group_code]
    except KeyError:
        group = existing[gsp_group_code] = {}
    if run not in group:
        group[run] = {"off_taking": off_taking, "delivering": delivering}
        log_func(
            "Found rate at " + hh_format(hh_date) + " for GSP Group " +
            gsp_group_code + " and run " + run + ".")
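# The resulting rate-script structure is nested as
# rates["tlms"][hh_key][gsp_group_code][run] with a
# {"off_taking": ..., "delivering": ...} leaf; because existing runs are
# never overwritten, the first import of a given run wins.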
def _to_date(component):
    return to_utc(to_ct(Datetime.strptime(component, "%y%m%d")))
def to_ct_date(component):
    return to_ct(Datetime.strptime(component, "%y%m%d"))
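# The two helpers above differ only in target zone: _to_date returns a UTC
# instant while to_ct_date stays in clock time. A sketch with a made-up
# component string:
#
#   to_ct_date("200630")  # -> 2020-06-30 00:00 Europe/London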
def run(self):
    while not self.stopped.isSet():
        if self.lock.acquire(False):
            sess = None
            try:
                sess = Session()
                self.log("Starting to check TLMs.")
                contract = Contract.get_non_core_by_name(sess, 'tlms')
                latest_rs = sess.query(RateScript).filter(
                    RateScript.contract_id == contract.id).order_by(
                    RateScript.start_date.desc()).first()
                latest_rs_id = latest_rs.id
                next_month_start = latest_rs.start_date + relativedelta(
                    months=1)
                next_month_finish = latest_rs.start_date + relativedelta(
                    months=2) - HH
                now = utc_datetime_now()
                if now > next_month_start:
                    self.log(
                        "Checking to see if data is available from " +
                        str(next_month_start) + " to " +
                        str(next_month_finish) + " on Elexon Portal.")
                    config = Contract.get_non_core_by_name(
                        sess, 'configuration')
                    props = config.make_properties()
                    scripting_key = props.get(
                        ELEXON_PORTAL_SCRIPTING_KEY_KEY)
                    if scripting_key is None:
                        raise BadRequest(
                            "The property " +
                            ELEXON_PORTAL_SCRIPTING_KEY_KEY +
                            " cannot be found in the configuration "
                            "properties.")
                    contract_props = contract.make_properties()
                    url_str = ''.join(
                        (contract_props['url'],
                         'file/download/TLM_FILE?key=', scripting_key))
                    r = requests.get(url_str)
                    parser = csv.reader(
                        (l.decode() for l in r.iter_lines()),
                        delimiter=',', quotechar='"')
                    self.log("Opened " + url_str + ".")
                    next(parser, None)
                    month_tlms = {}
                    for values in parser:
                        hh_date_ct = to_ct(
                            Datetime.strptime(values[0], "%d/%m/%Y"))
                        hh_date = to_utc(hh_date_ct)
                        hh_date += relativedelta(
                            minutes=30 * int(values[2]))
                        if next_month_start <= hh_date <= \
                                next_month_finish:
                            month_tlms[key_format(hh_date)] = {
                                'off-taking': values[3],
                                'delivering': values[4]}
                    if key_format(next_month_finish) in month_tlms:
                        self.log("The whole month's data is there.")
                        script = "def tlms():\n return {\n" + \
                            ',\n'.join(
                                "'" + k + "': " +
                                month_tlms[k]['off-taking']
                                for k in sorted(month_tlms.keys())) + "}"
                        contract = Contract.get_non_core_by_name(
                            sess, 'tlms')
                        rs = RateScript.get_by_id(sess, latest_rs_id)
                        contract.update_rate_script(
                            sess, rs, rs.start_date,
                            rs.start_date + relativedelta(months=2) - HH,
                            rs.script)
                        sess.flush()
                        contract.insert_rate_script(
                            sess,
                            rs.start_date + relativedelta(months=1),
                            script)
                        sess.commit()
                        self.log("Added new rate script.")
                    else:
                        msg = "There isn't a whole month there yet."
                        if len(month_tlms) > 0:
                            msg += " The last date is " + \
                                sorted(month_tlms.keys())[-1]
                        self.log(msg)
            except BaseException:
                self.log("Outer problem " + traceback.format_exc())
                sess.rollback()
            finally:
                if sess is not None:
                    sess.close()
                self.lock.release()
            self.log("Finished checking TLM rates.")
        self.going.wait(30 * 60)
        self.going.clear()
def _process_hh(ds, rate_period, est_kw, hh):
    month_start, month_finish = next(
        c_months_u(start_year=hh["ct-year"], start_month=hh["ct-month"]))
    month_start_ct = to_ct(month_start)
    if month_start_ct.month > 3:
        year = month_start_ct.year
    else:
        year = month_start_ct.year - 1
    financial_year_start = to_utc(ct_datetime(year, 4, 1))
    last_financial_year_start = to_utc(ct_datetime(year - 1, 4, 1))
    financial_year_finish = to_utc(ct_datetime(year + 1, 3, 31, 23, 30))
    est_triad_kws = []
    earliest_triad = None
    for dt in get_file_rates(
            ds.caches, "triad_dates",
            last_financial_year_start)["triad_dates"]:
        triad_hh = None
        earliest_triad = hh_min(earliest_triad, dt)
        try:
            d = next(ds.get_data_sources(dt, dt, financial_year_start))
            chellow.duos.duos_vb(d)
            triad_hh = d.hh_data[0]
            while dt < financial_year_start:
                dt += relativedelta(years=1)
            for d in ds.get_data_sources(dt, dt, financial_year_start):
                chellow.duos.duos_vb(d)
                datum = d.hh_data[0]
                triad_hh["laf"] = datum["laf"]
                triad_hh["gsp-kw"] = datum["laf"] * triad_hh["msp-kw"]
        except StopIteration:
            triad_hh = {
                "hist-start": dt,
                "msp-kw": 0,
                "start-date": dt,
                "status": "before start of MPAN",
                "laf": 1,
                "gsp-kw": 0,
            }
        est_triad_kws.append(triad_hh)
    if ds.site is None:
        era = ds.supply.find_era_at(ds.sess, earliest_triad)
        if era is None or (
                era.get_channel(ds.sess, ds.is_import, "ACTIVE") is None
                and est_kw is None):
            est_kw = 0.85 * max(datum["msp-kwh"] for datum in ds.hh_data) * 2
    if est_kw is not None:
        for est_datum in est_triad_kws:
            est_datum["msp-kw"] = est_kw
            est_datum["gsp-kw"] = est_datum["msp-kw"] * est_datum["laf"]
    gsp_kw = 0
    for i, triad_hh in enumerate(est_triad_kws):
        triad_prefix = "triad-estimate-" + str(i + 1)
        hh[triad_prefix + "-date"] = triad_hh["hist-start"]
        hh[triad_prefix + "-msp-kw"] = triad_hh["msp-kw"]
        hh[triad_prefix + "-status"] = triad_hh["status"]
        hh[triad_prefix + "-laf"] = triad_hh["laf"]
        hh[triad_prefix + "-gsp-kw"] = triad_hh["gsp-kw"]
        gsp_kw += triad_hh["gsp-kw"]
    hh["triad-estimate-gsp-kw"] = gsp_kw / 3
    polarity = "import" if ds.llfc.is_import else "export"
    gsp_group_code = ds.gsp_group_code
    rate = float(
        get_file_rates(
            ds.caches, "triad_rates",
            month_start)["triad_gbp_per_gsp_kw"][polarity][gsp_group_code])
    hh["triad-estimate-rate"] = rate
    est_triad_gbp = hh["triad-estimate-rate"] * hh["triad-estimate-gsp-kw"]
    if rate_period == "monthly":
        total_intervals = 12
        est_intervals = 1
        hh["triad-estimate-months"] = est_intervals
    else:
        dt = financial_year_start
        total_intervals = 0
        while dt <= financial_year_finish:
            total_intervals += 1
            dt += relativedelta(days=1)
        est_intervals = 0
        for d in ds.get_data_sources(month_start, month_finish):
            for h in d.hh_data:
                if h["ct-decimal-hour"] == 0:
                    est_intervals += 1
        hh["triad-estimate-days"] = est_intervals
    hh["triad-estimate-gbp"] = est_triad_gbp / total_intervals * est_intervals
    if hh["ct-month"] == 3:
        triad_kws = []
        for t_date in get_file_rates(
                ds.caches, "triad_dates", month_start)["triad_dates"]:
            try:
                d = next(ds.get_data_sources(t_date, t_date))
                if (ds.supplier_contract is None
                        or d.supplier_contract == ds.supplier_contract):
                    chellow.duos.duos_vb(d)
                    thh = d.hh_data[0]
                else:
                    thh = {
                        "hist-start": t_date,
                        "msp-kw": 0,
                        "start-date": t_date,
                        "status": "before contract",
                        "laf": "before contract",
                        "gsp-kw": 0,
                    }
            except StopIteration:
                thh = {
                    "hist-start": t_date,
                    "msp-kw": 0,
                    "start-date": t_date,
                    "status": "before start of supply",
                    "laf": "before start of supply",
                    "gsp-kw": 0,
                }
            while t_date < financial_year_start:
                t_date += relativedelta(years=1)
            try:
                d = next(ds.get_data_sources(t_date, t_date))
                if (ds.supplier_contract is None
                        or d.supplier_contract == ds.supplier_contract):
                    chellow.duos.duos_vb(d)
                    thh["laf"] = d.hh_data[0]["laf"]
                    thh["gsp-kw"] = thh["laf"] * thh["msp-kw"]
            except StopIteration:
                pass
            triad_kws.append(thh)
        gsp_kw = 0
        for i, triad_hh in enumerate(triad_kws):
            pref = "triad-actual-" + str(i + 1)
            hh[pref + "-date"] = triad_hh["start-date"]
            hh[pref + "-msp-kw"] = triad_hh["msp-kw"]
            hh[pref + "-status"] = triad_hh["status"]
            hh[pref + "-laf"] = triad_hh["laf"]
            hh[pref + "-gsp-kw"] = triad_hh["gsp-kw"]
            gsp_kw += triad_hh["gsp-kw"]
        hh["triad-actual-gsp-kw"] = gsp_kw / 3
        polarity = "import" if ds.llfc.is_import else "export"
        gsp_group_code = ds.gsp_group_code
        tot_rate = 0
        for start_date, finish_date, script in get_file_scripts(
                "triad_rates"):
            if start_date <= financial_year_finish and not hh_before(
                    finish_date, financial_year_start):
                start_month = to_ct(start_date).month
                if start_month < 4:
                    start_month += 12
                if finish_date is None:
                    finish_month = 3
                else:
                    finish_month = to_ct(finish_date).month
                if finish_month < 4:
                    finish_month += 12
                rt = get_file_rates(
                    ds.caches, "triad_rates", start_date
                )["triad_gbp_per_gsp_kw"][polarity][gsp_group_code]
                tot_rate += (finish_month - start_month + 1) * float(rt)
        rate = tot_rate / 12
        hh["triad-actual-rate"] = rate
        hh["triad-actual-gbp"] = hh["triad-actual-rate"] * hh[
            "triad-actual-gsp-kw"]
        era = ds.supply.find_era_at(ds.sess, month_finish)
        est_intervals = 0
        interval = (
            relativedelta(months=1) if rate_period == "monthly"
            else relativedelta(days=1))
        dt = month_finish
        while era is not None and dt > financial_year_start:
            est_intervals += 1
            dt -= interval
            if hh_after(dt, era.finish_date):
                era = ds.supply.find_era_at(ds.sess, dt)
        if rate_period == "monthly":
            hh["triad-all-estimates-months"] = est_intervals
        else:
            hh["triad-all-estimates-days"] = est_intervals
        hh["triad-all-estimates-gbp"] = (
            est_triad_gbp / total_intervals * est_intervals * -1)
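# The triad estimate is pro-rated across the year: with the default daily
# rate_period, a 30-day month carries 30 / 365ths of the annual estimate
# (366 in a leap year). Illustrative arithmetic only:
#
#   est_triad_gbp = 1200.0
#   est_triad_gbp / 365 * 30  # -> about 98.63 for a 30-day month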
def get_date(row, name, datemode):
    val = get_value(row, name)
    if isinstance(val, float):
        return to_utc(to_ct(Datetime(*xldate_as_tuple(val, datemode))))
    else:
        return None
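# Usage sketch for the float branch above: xlrd represents spreadsheet dates
# as serial floats interpreted via the workbook's datemode. For example,
# with datemode 0 the value 43831.0 decodes to midnight on 2020-01-01:
#
#     >>> from xlrd import xldate_as_tuple
#     >>> xldate_as_tuple(43831.0, 0)
#     (2020, 1, 1, 0, 0, 0)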
def _to_finish_date(date_str):
    return to_utc(
        to_ct(
            Datetime.strptime(date_str, "%y%m%d") + relativedelta(days=1) - HH))
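# Worked example (assuming to_ct/to_utc behave as in the rest of this
# module): "200401" parses as 2020-04-01 00:00 local time; adding a day and
# subtracting one half-hour gives 2020-04-01 23:30 BST, the last HH of the
# finish day, which to_utc renders as 2020-04-01 22:30 UTC.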
def none_content(site_codes, typ, start_date, finish_date, user, file_name):
    sess = zf = None
    try:
        sess = Session()
        running_name, finished_name = chellow.dloads.make_names(
            file_name, user)
        sites = (
            sess.query(Site)
            .join(SiteEra)
            .join(Era)
            .filter(
                SiteEra.is_physical == true(),
                or_(Era.finish_date == null(), Era.finish_date >= start_date),
                Era.start_date <= finish_date,
            )
        )
        if site_codes is not None:
            sites = sites.filter(Site.code.in_(site_codes))

        zf = zipfile.ZipFile(running_name, "w")
        start_date_str = hh_format(start_date)
        finish_date_str = hh_format(finish_date)
        for site in sites:
            buf = StringIO()
            writer = csv.writer(buf, lineterminator="\n")
            writer.writerow(
                [
                    "Site Code",
                    "Site Name",
                    "Associated Site Codes",
                    "Sources",
                    "Generator Types",
                    "From",
                    "To",
                    "Type",
                    "Date",
                ]
                + list(map(str, range(1, 51))))
            associates = " ".join(
                s.code
                for s in site.find_linked_sites(sess, start_date, finish_date))
            source_codes = set()
            gen_types = set()
            for supply in (
                    sess.query(Supply)
                    .join(Era)
                    .join(SiteEra)
                    .filter(
                        SiteEra.is_physical == true(),
                        SiteEra.site == site,
                        Era.start_date <= finish_date,
                        or_(
                            Era.finish_date == null(),
                            Era.finish_date >= start_date),
                    )
                    .distinct()
                    .options(
                        joinedload(Supply.source),
                        joinedload(Supply.generator_type))):
                source_codes.add(supply.source.code)
                gen_type = supply.generator_type
                if gen_type is not None:
                    gen_types.add(gen_type.code)
            source_codes_str = ", ".join(sorted(source_codes))
            gen_types_str = ", ".join(sorted(gen_types))

            # Write one CSV row per day, starting a new row at each midnight.
            row = None
            for hh in site.hh_data(sess, start_date, finish_date):
                ct_start_date = to_ct(hh["start_date"])
                if ct_start_date.hour == 0 and ct_start_date.minute == 0:
                    if row is not None:
                        writer.writerow(row)
                    row = [
                        site.code,
                        site.name,
                        associates,
                        source_codes_str,
                        gen_types_str,
                        start_date_str,
                        finish_date_str,
                        typ,
                        ct_start_date.strftime("%Y-%m-%d"),
                    ]
                row.append(str(round(hh[typ], 2)))
            if row is not None:
                writer.writerow(row)

            zf.writestr(
                f"{site.code}_{finish_date.strftime('%Y%m%d%H%M')}.csv",
                buf.getvalue(),
            )
            # Avoid a long-running transaction
            sess.rollback()
    except BaseException:
        msg = traceback.format_exc()
        sys.stderr.write(msg)
        if zf is not None:
            # Record the traceback as a member of the zip archive.
            zf.writestr("error.txt", msg)
    finally:
        if sess is not None:
            sess.close()
        if zf is not None:
            zf.close()
            os.rename(running_name, finished_name)
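# Note on the 50 value columns in the header above: in the Europe/London
# timezone a day can hold 46, 48 or 50 half-hours depending on clock changes,
# so columns 1-50 cover the longest possible day.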
def make_raw_bills(self):
    raw_bills = []
    last_key = None
    title_row = self.sheet.row(0)
    for row_index in range(1, self.sheet.nrows):
        row = self.sheet.row(row_index)
        mpan_core = parse_mpan_core(
            str(int(get_value(row, 'Meter Point'))))
        bill_period = get_value(row, 'Bill Period')
        start_date, finish_date = [
            to_utc(to_ct(Datetime.strptime(d, '%Y-%m-%d')))
            for d in bill_period.split(' - ')]
        finish_date = finish_date + relativedelta(days=1) - HH
        key = (start_date, finish_date, mpan_core)

        from_date = get_date(row, 'From Date', self.book.datemode)
        # to_date = get_date(row, 'To Date', self.book.datemode) + \
        #     relativedelta(days=1) - HH
        issue_date = get_date(row, 'Bill Date', self.book.datemode)

        # Rows for the same period and MPAN core belong to the same bill.
        if last_key != key:
            last_key = key
            bd = {}
            bill = {
                'bill_type_code': 'N',
                'account': mpan_core,
                'mpans': [mpan_core],
                'reference': '_'.join(
                    (
                        start_date.strftime('%Y%m%d'),
                        finish_date.strftime('%Y%m%d'),
                        mpan_core)),
                'issue_date': issue_date,
                'start_date': start_date,
                'finish_date': finish_date,
                'kwh': Decimal(0),
                'net': Decimal('0.00'),
                'vat': Decimal('0.00'),
                'breakdown': bd,
                'reads': []}
            raw_bills.append(bill)

        usage = get_value(row, 'Usage')
        usage_units = get_value(row, 'Usage Unit')
        price = get_value(row, 'Price')
        amount = get_value(row, 'Amount')
        amount_dec = Decimal(amount)
        product_item_name = get_value(row, 'Product Item Name')
        rate_name = get_value(row, 'Rate Name')
        if usage_units == 'kWh':
            if product_item_name == 'Renewables Obligation (RO)':
                bill['kwh'] += round(Decimal(usage), 2)
            elif product_item_name == "Unit Rate":
                bd_add(bd, 'sum-gsp-kwh', usage)

        description = get_value(row, 'Description')
        if description == 'Standard VAT@20%':
            bill['vat'] += round(amount_dec, 2)
        else:
            bill['net'] += round(amount_dec, 2)
            for q, qname in (
                    (usage, 'Usage'), (price, 'Price'), (amount, 'Amount')):
                try:
                    elem_key = ELEM_MAP[(description, rate_name, qname)]
                    bd_add(bd, elem_key, q)
                except KeyError:
                    pass

            duos_avail_prefix = "DUoS Availability ("
            duos_excess_avail_prefix = "DUoS Excess Availability ("
            if description.startswith("DUoS Availability"):
                if description.startswith(duos_avail_prefix):
                    bd_add(
                        bd, 'duos-availability-kva',
                        int(description[len(duos_avail_prefix):-5]))
                bd_add(bd, 'duos-availability-days', usage)
                bd_add(bd, 'duos-availability-rate', price)
                bd_add(bd, 'duos-availability-gbp', amount)
            elif description.startswith("DUoS Excess Availability"):
                if description.startswith(duos_excess_avail_prefix):
                    bd_add(
                        bd, 'duos-excess-availability-kva',
                        int(description[len(duos_excess_avail_prefix):-5]))
                bd_add(bd, 'duos-excess-availability-days', usage)
                bd_add(bd, 'duos-excess-availability-rate', price)
                bd_add(bd, 'duos-excess-availability-gbp', amount)
            elif description == 'Balancing Services Use of System (BSUoS)':
                if from_date == start_date:
                    bd_add(bd, 'bsuos-estimated-nbp-kwh', usage)
                    bd_add(bd, 'bsuos-estimated-rate', price)
                    bd_add(bd, 'bsuos-estimated-gbp', amount)
                elif amount < 0:
                    bd_add(bd, 'bsuos-prev-estimated-nbp-kwh', usage)
                    bd_add(bd, 'bsuos-prev-estimated-rate', price)
                    bd_add(bd, 'bsuos-prev-estimated-gbp', amount)
                else:
                    bd_add(bd, 'bsuos-prev-sf-nbp-kwh', usage)
                    bd_add(bd, 'bsuos-prev-sf-rate', price)
                    bd_add(bd, 'bsuos-prev-sf-gbp', amount)
            elif description.startswith("FiT Rec - "):
                bd_add(bd, 'fit-reconciliation-gbp', amount)
            elif description.startswith("CfD FiT Rec - "):
                bd_add(bd, 'cfd-fit-reconciliation-gbp', amount)

        bd['raw_lines'] = [str(title_row), str(row)]
        bill['gross'] = bill['net'] + bill['vat']

    # Flatten any sets that bd_add accumulated into comma-separated strings.
    for raw_bill in raw_bills:
        bd = raw_bill['breakdown']
        for k, v in tuple(bd.items()):
            if isinstance(v, set):
                val = ', '.join(sorted(map(str, v)))
            else:
                val = v
            bd[k] = val

    return raw_bills
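# bd_add isn't defined in this section. A minimal sketch consistent with its
# call sites above (accumulate values per breakdown key, falling back to a
# set of distinct values when the types can't be summed, which the final loop
# in make_raw_bills flattens to strings) might look like the hypothetical
# helper below; the real helper may well differ.
def _bd_add_sketch(bd, key, value):
    if key in bd:
        try:
            bd[key] += value
        except TypeError:
            # Unlike types can't be summed; collect distinct values instead.
            existing = bd[key]
            if not isinstance(existing, set):
                existing = bd[key] = {existing}
            existing.add(value)
    else:
        bd[key] = value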
def process_url(self, sess, url, contract):
    self.log("Checking to see if there's any new data at " + url)
    res = requests.get(url)
    self.log("Received " + str(res.status_code) + " " + res.reason)
    book = xlrd.open_workbook(file_contents=res.content)
    sheet = book.sheet_by_index(0)

    cache = {}
    for row_index in range(1, sheet.nrows):
        row = sheet.row(row_index)
        raw_date_val = row[0].value
        if isinstance(raw_date_val, float):
            raw_date = Datetime(
                *xlrd.xldate_as_tuple(raw_date_val, book.datemode))
        elif isinstance(raw_date_val, str):
            # Infer the date format from the separator character, e.g.
            # "01/04/2020" or "01-04-2020".
            separator = raw_date_val[2]
            fmat = separator.join(("%d", "%m", "%Y"))
            raw_date = Datetime.strptime(raw_date_val, fmat)
        else:
            raise BadRequest(
                "Type of date field " + str(raw_date_val) +
                " not recognized.")

        hh_date_ct = to_ct(raw_date)
        hh_date_ct += relativedelta(minutes=30 * (int(row[1].value) - 1))
        hh_date = to_utc(hh_date_ct)
        price = Decimal(str(row[2].value))
        run = row[5].value

        # Cache one (rate script, rates, rates dict) triple per month.
        try:
            rs, rates, rts = cache[hh_date.year][hh_date.month]
        except KeyError:
            _save_cache(sess, cache)
            try:
                yr_cache = cache[hh_date.year]
            except KeyError:
                yr_cache = cache[hh_date.year] = {}
            rs = sess.query(RateScript).filter(
                RateScript.contract == contract,
                RateScript.start_date <= hh_date,
                or_(
                    RateScript.finish_date == null(),
                    RateScript.finish_date >= hh_date)).first()
            while rs is None:
                self.log(
                    "There's no rate script at " + hh_format(hh_date) + ".")
                latest_rs = sess.query(RateScript).filter(
                    RateScript.contract == contract).order_by(
                    RateScript.start_date.desc()).first()
                contract.update_rate_script(
                    sess, latest_rs, latest_rs.start_date,
                    latest_rs.start_date + relativedelta(months=2) - HH,
                    loads(latest_rs.script))
                new_rs_start = latest_rs.start_date + relativedelta(months=1)
                contract.insert_rate_script(sess, new_rs_start, {})
                sess.commit()
                self.log(
                    "Added a rate script starting at " +
                    hh_format(new_rs_start) + ".")
                rs = sess.query(RateScript).filter(
                    RateScript.contract == contract,
                    RateScript.start_date <= hh_date,
                    or_(
                        RateScript.finish_date == null(),
                        RateScript.finish_date >= hh_date)).first()
            rates = loads(rs.script)
            try:
                rts = rates['rates_gbp_per_mwh']
            except KeyError:
                rts = rates['rates_gbp_per_mwh'] = {}
            yr_cache[hh_date.month] = rs, rates, rts

        key = key_format(hh_date)
        try:
            existing = rts[key]
        except KeyError:
            existing = rts[key] = {}
        if run not in existing:
            existing[run] = price
            self.log(f"Added rate at {hh_format(hh_date)} for run {run}.")

    _save_cache(sess, cache)
    book = sheet = None
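# _save_cache isn't shown in this section. Judging from its call sites it
# persists the rate scripts accumulated in the cache; a minimal sketch under
# that assumption (hypothetical helper, and it assumes a dumps() serialiser
# mirroring the loads() used above) might be:
def _save_cache_sketch(sess, cache):
    for yr_cache in cache.values():
        for month in tuple(yr_cache.keys()):
            rs, rates, rts = yr_cache[month]
            # Serialise the mutated rates back onto the RateScript row.
            rs.script = dumps(rates)
            sess.commit()
            del yr_cache[month]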
def _make_site_deltas(
        sess, report_context, site, scenario_hh, forecast_from, supply_id):
    site_scenario_hh = scenario_hh.get(site.code, {})

    site_deltas = {"hhs": {}}
    delts = site_deltas["supply_deltas"] = {}
    for is_import in (True, False):
        delts[is_import] = {}
        for src in (
                "gen", "net", "gen-net", "3rd-party", "3rd-party-reverse",
                "sub"):
            delts[is_import][src] = {"site": {}}

    earliest_delta = to_utc(Datetime.max)
    latest_delta = to_utc(Datetime.min)

    # Parse the scenario CSVs of "timestamp, kWh" rows into per-type dicts.
    found_hh = False
    for typ in ("used", "generated", "parasitic", "gen_net"):
        hh_str = site_scenario_hh.get(typ, "")
        hh_data = site_scenario_hh[typ] = {}
        for row in csv.reader(StringIO(hh_str)):
            cells = [cell.strip() for cell in row]
            if len("".join(cells)) == 0:
                continue
            if len(cells) != 2:
                raise BadRequest(
                    f"Can't interpret the row {cells}; it should be of the "
                    f"form 'timestamp, kWh'.")
            date_str, kwh_str = cells
            ts = parse_hh_start(date_str)
            earliest_delta = min(ts, earliest_delta)
            latest_delta = max(ts, latest_delta)
            try:
                hh_data[ts] = float(kwh_str)
            except ValueError as e:
                raise BadRequest(
                    f"When looking at {typ} hh data, can't parse the kWh at "
                    f"{date_str}: {e}")
            found_hh = True

    if not found_hh:
        return site_deltas

    scenario_used = site_scenario_hh["used"]
    scenario_generated = site_scenario_hh["generated"]
    scenario_parasitic = site_scenario_hh["parasitic"]
    scenario_gen_net = site_scenario_hh["gen_net"]

    earliest_delta_ct = to_ct(earliest_delta)
    for month_start, month_finish in c_months_u(
            earliest_delta_ct.year, earliest_delta_ct.month, months=None):
        if month_start > latest_delta:
            break
        chunk_start = hh_max(month_start, earliest_delta)
        chunk_finish = hh_min(month_finish, latest_delta)

        site_ds = chellow.computer.SiteSource(
            sess, site, chunk_start, chunk_finish, forecast_from,
            report_context)
        hh_map = dict((h["start-date"], h) for h in site_ds.hh_data)

        # Add in the consumption of non-half-hourly (non-00 PC) supplies.
        for era in sess.query(Era).join(SiteEra).join(Pc).filter(
                SiteEra.site == site,
                SiteEra.is_physical == true(),
                Era.imp_mpan_core != null(),
                Pc.code != "00",
                Era.start_date <= chunk_finish,
                or_(Era.finish_date == null(),
                    Era.finish_date >= chunk_start)):
            if supply_id is not None and era.supply_id != supply_id:
                continue

            ss_start = hh_max(era.start_date, chunk_start)
            ss_finish = hh_min(era.finish_date, chunk_finish)

            ss = SupplySource(
                sess, ss_start, ss_finish, forecast_from, era, True,
                report_context)

            for hh in ss.hh_data:
                sdatum = hh_map[hh["start-date"]]
                sdatum["import-net-kwh"] += hh["msp-kwh"]
                sdatum["used-kwh"] += hh["msp-kwh"]

        # Add in the export of gen-net supplies.
        for era in sess.query(Era).join(SiteEra).join(Pc).join(Supply).join(
                Source).filter(
                SiteEra.site == site,
                SiteEra.is_physical == true(),
                Era.imp_mpan_core != null(),
                Era.start_date <= chunk_finish,
                or_(Era.finish_date == null(),
                    Era.finish_date >= chunk_start),
                Source.code == "gen-net"):
            if supply_id is not None and era.supply_id != supply_id:
                continue

            ss_start = hh_max(era.start_date, chunk_start)
            ss_finish = hh_min(era.finish_date, chunk_finish)

            ss = SupplySource(
                sess, ss_start, ss_finish, forecast_from, era, False,
                report_context)

            for hh in ss.hh_data:
                sdatum = hh_map[hh["start-date"]]
                try:
                    sdatum["gen-net-kwh"] += hh["msp-kwh"]
                except KeyError:
                    sdatum["gen-net-kwh"] = hh["msp-kwh"]

        # For each half-hour, work out the deltas needed to move the site's
        # figures to the scenario values, balancing import against export.
        for hh_start, hh in hh_map.items():
            if hh_start in scenario_used:
                used_delt = scenario_used[hh_start] - hh["used-kwh"]
                imp_net_delt = 0
                exp_net_delt = 0
                if used_delt < 0:
                    diff = hh["import-net-kwh"] + used_delt
                    if diff < 0:
                        imp_net_delt -= hh["import-net-kwh"]
                        exp_net_delt -= diff
                    else:
                        imp_net_delt += used_delt
                else:
                    diff = hh["export-net-kwh"] - used_delt
                    if diff < 0:
                        exp_net_delt -= hh["export-net-kwh"]
                        imp_net_delt -= diff
                    else:
                        exp_net_delt -= used_delt

                try:
                    delts[False]["net"]["site"][hh_start] += exp_net_delt
                except KeyError:
                    delts[False]["net"]["site"][hh_start] = exp_net_delt

                try:
                    delts[True]["net"]["site"][hh_start] += imp_net_delt
                except KeyError:
                    delts[True]["net"]["site"][hh_start] = imp_net_delt

                hh["import-net-kwh"] += imp_net_delt
                hh["export-net-kwh"] += exp_net_delt
                hh["used-kwh"] += used_delt
                hh["msp-kwh"] -= exp_net_delt

            if hh_start in scenario_generated:
                imp_gen_delt = (
                    scenario_generated[hh_start] - hh["import-gen-kwh"])
                imp_net_delt = 0
                exp_net_delt = 0
                if imp_gen_delt < 0:
                    diff = hh["export-net-kwh"] + imp_gen_delt
                    if diff < 0:
                        exp_net_delt -= hh["export-net-kwh"]
                        imp_net_delt -= diff
                    else:
                        exp_net_delt += imp_gen_delt
                else:
                    diff = hh["import-net-kwh"] - imp_gen_delt
                    if diff < 0:
                        imp_net_delt -= hh["import-net-kwh"]
                        exp_net_delt -= diff
                    else:
                        imp_net_delt -= imp_gen_delt

                try:
                    delts[True]["gen"]["site"][hh_start] += imp_gen_delt
                except KeyError:
                    delts[True]["gen"]["site"][hh_start] = imp_gen_delt

                try:
                    delts[False]["net"]["site"][hh_start] += exp_net_delt
                except KeyError:
                    delts[False]["net"]["site"][hh_start] = exp_net_delt

                try:
                    delts[True]["net"]["site"][hh_start] += imp_net_delt
                except KeyError:
                    delts[True]["net"]["site"][hh_start] = imp_net_delt

                hh["import-net-kwh"] += imp_net_delt
                hh["export-net-kwh"] += exp_net_delt
                hh["import-gen-kwh"] += imp_gen_delt
                hh["msp-kwh"] -= imp_net_delt

            if hh_start in scenario_parasitic:
                exp_gen_delt = (
                    scenario_parasitic[hh_start] - hh["export-gen-kwh"])
                imp_net_delt = 0
                exp_net_delt = 0
                if exp_gen_delt < 0:
                    diff = hh["import-net-kwh"] + exp_gen_delt
                    if diff < 0:
                        imp_net_delt -= hh["import-net-kwh"]
                        exp_net_delt -= diff
                    else:
                        imp_net_delt += exp_gen_delt
                else:
                    diff = hh["export-net-kwh"] - exp_gen_delt
                    if diff < 0:
                        exp_net_delt -= hh["export-net-kwh"]
                        imp_net_delt -= diff
                    else:
                        exp_net_delt -= exp_gen_delt

                try:
                    delts[False]["gen"]["site"][hh_start] += exp_gen_delt
                except KeyError:
                    delts[False]["gen"]["site"][hh_start] = exp_gen_delt

                try:
                    delts[False]["net"]["site"][hh_start] += exp_net_delt
                except KeyError:
                    delts[False]["net"]["site"][hh_start] = exp_net_delt

                try:
                    delts[True]["net"]["site"][hh_start] += imp_net_delt
                except KeyError:
                    delts[True]["net"]["site"][hh_start] = imp_net_delt

                hh["import-net-kwh"] += imp_net_delt
                hh["export-net-kwh"] += exp_net_delt
                hh["export-gen-kwh"] += exp_gen_delt
                hh["msp-kwh"] -= imp_net_delt

            if hh_start in scenario_gen_net:
                gen_net_delt = (
                    scenario_gen_net[hh_start] - hh["gen-net-kwh"])

                try:
                    delts[False]["gen-net"]["site"][hh_start] += gen_net_delt
                except KeyError:
                    delts[False]["gen-net"]["site"][hh_start] = gen_net_delt

                hh["import-gen-kwh"] += gen_net_delt
                hh["export-net-kwh"] += gen_net_delt

            site_deltas["hhs"][hh_start] = hh

    sup_deltas = site_deltas["supply_deltas"][False]["net"]["site"]
    if all(v == 0 for v in sup_deltas.values()):
        sup_deltas.clear()

    return site_deltas
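# Worked example of the "used" balancing above (illustrative numbers): with
# hh["used-kwh"] = 10, hh["import-net-kwh"] = 6 and a scenario value of 3,
# used_delt = -7 and diff = 6 - 7 = -1, so the import is zeroed
# (imp_net_delt = -6) and the remaining 1 kWh spills into export
# (exp_net_delt = 1). The generated and parasitic deltas follow the same
# shape of logic with the roles of import and export swapped as appropriate.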