Example #1
import codecs
import sys
import pyshark
import argparse
import logbook
from collections import defaultdict
import os
from datetime import datetime
from pprint import pprint
import pandas as pd

packet_log = logbook.Logger('Packet Capture App!')

def init_logging(filename = None):
    """
    function to initialize logging, if filename is provided log to the file else log to console(stdout mode)
    @param filename: filename to log into(optional)
    """
    level = logbook.TRACE
    if filename:
        logbook.TimedRotatingFileHandler(filename, level=level).push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=level).push_application()

    msg = 'Logging initialized, level: {}, mode: {}'.format(
        level,
        "stdout mode" if not filename else 'file mode: ' + filename
    )
    logger = logbook.Logger('Startup')
    logger.notice(msg)
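A minimal usage sketch (not part of the original example): call init_logging() once at startup, then log through the module-level packet_log logger.

# Hypothetical caller -- the log file name is an assumption.
if __name__ == '__main__':
    init_logging()                    # stdout mode
    # init_logging('capture.log')     # or: timed rotating file mode
    packet_log.info('packet capture starting')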
Example #2
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)

from collections import Counter
from string import Template
import os

import py
import logbook

import six
from beetsplug.ftintitle import split_on_feat
from headphones2.postprocess.component_base import PostProcessor

logger = logbook.Logger(__name__)
Path = py.path.local


class Renamer(PostProcessor):
    def __init__(self):
        super(Renamer, self).__init__()

    def _components_from_item(self, item):
        """
        :param item:
        :type item: beets.library.Item
        :return: dictionary of track components
        :rtype: dict
        """
        components = {
            "Album": item.album,
Example #3
File: log.py Project: divyabaid16/services
def logbook_factory(*args, **kwargs):
    # Logger given to structlog
    logbook.compat.redirect_logging()
    return logbook.Logger(level=level, *args, **kwargs)
Example #4
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import logbook
import datetime

import pandas as pd

from zipline.data.loader import load_market_data
from zipline.utils import tradingcalendar
from zipline.utils.tradingcalendar import get_early_closes

log = logbook.Logger('Trading')

# The financial simulations in zipline depend on information
# about the benchmark index and the risk free rates of return.
# The benchmark index defines the benchmark returns used in
# the calculation of performance metrics such as alpha/beta. Many
# components, including risk, performance, transforms, and
# batch_transforms, need access to a calendar of trading days and
# market hours. The TradingEnvironment maintains two time keeping
# facilities:
#   - a DatetimeIndex of trading days for calendar calculations
#   - a timezone name, which should be local to the exchange
#   hosting the benchmark index. All dates are normalized to UTC
#   for serialization and storage, and the timezone is used to
#   ensure proper rollover through daylight savings and so on.
#
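A small illustrative sketch (not zipline's actual API) of the two time-keeping facilities described above: a UTC-normalized DatetimeIndex of trading days plus an exchange-local timezone name used for daylight-saving-aware conversions.

# Illustration only -- variable names are assumptions, not zipline internals.
import pandas as pd

exchange_tz = 'US/Eastern'                        # timezone local to the exchange
trading_days = pd.date_range('2014-01-02', '2014-01-10',
                             freq='B', tz='UTC')  # calendar stored in UTC

# converting back to exchange time handles DST rollover correctly
local_sessions = trading_days.tz_convert(exchange_tz)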
Example #5
File: period.py Project: zixan/zipline
import functools
import logbook

import numpy.linalg as la

from zipline.finance import trading

import pandas as pd

import risk
from .risk import (
    alpha,
    check_entry,
    information_ratio,
    sharpe_ratio,
    sortino_ratio,
)

log = logbook.Logger('Risk Period')

choose_treasury = functools.partial(risk.choose_treasury,
                                    risk.select_treasury_duration)


class RiskMetricsPeriod(object):
    def __init__(self, start_date, end_date, returns, benchmark_returns=None):

        treasury_curves = trading.environment.treasury_curves
        if treasury_curves.index[-1] >= start_date:
            mask = ((treasury_curves.index >= start_date) &
                    (treasury_curves.index <= end_date))

            self.treasury_curves = treasury_curves[mask]
        else:
Example #6
import logbook

import numpy

from numbers import Integral

import pandas as pd

from six import (string_types, itervalues, iteritems)

from zipline.utils.data import MutableIndexRollingPanel
from zipline.protocol import Event

from zipline.finance import trading

log = logbook.Logger('BatchTransform')
func_map = {
    'open_price': 'first',
    'close_price': 'last',
    'low': 'min',
    'high': 'max',
    'volume': 'sum'
}


def get_sample_func(item):
    if item in func_map:
        return func_map[item]
    else:
        return 'last'
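A hedged illustration (not from zipline) of how func_map and get_sample_func might drive a pandas resampling aggregation:

# Illustration only -- the DataFrame and frequency are made up.
import pandas as pd

bars = pd.DataFrame(
    {'close_price': [10.0, 10.5, 10.2, 10.8], 'volume': [100, 200, 150, 50]},
    index=pd.date_range('2014-01-02 09:30', periods=4, freq='T'),
)
agg = {col: get_sample_func(col) for col in bars.columns}
daily = bars.resample('D').agg(agg)   # close_price -> 'last', volume -> 'sum'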
Example #7
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <*****@*****.**>
"""

from __future__ import absolute_import
from __future__ import unicode_literals

import logbook


LOG_FORMAT_STRING = "[{record.level_name}] {record.channel}: {record.message}"


logger = logbook.Logger("DataProperty")
logger.disable()


def set_logger(is_enable):
    if is_enable:
        logger.enable()
    else:
        logger.disable()


def set_log_level(log_level):
    """
    Set the logging level of this module. The module uses the
    `logbook <http://logbook.readthedocs.io/en/stable/>`__ package for logging.
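A brief usage sketch, assuming the module above is imported by calling code:

# Hypothetical caller -- re-enable the "DataProperty" channel, then log.
set_logger(True)
set_log_level(logbook.INFO)   # the truncated function presumably adjusts the logger level
logger.info('DataProperty logging enabled')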
Example #8
from __future__ import division

import logbook

import pandas as pd
from pandas.tseries.tools import normalize_date

from catalyst.finance.performance.period import PerformancePeriod
from catalyst.errors import NoFurtherDataError
import catalyst.finance.risk as risk

from . position_tracker import PositionTracker

from catalyst.constants import LOG_LEVEL

log = logbook.Logger('Performance', level=LOG_LEVEL)


class PerformanceTracker(object):
    """
    Tracks the performance of the algorithm.
    """
    def __init__(self, sim_params, trading_calendar, env):
        self.sim_params = sim_params
        self.trading_calendar = trading_calendar
        self.asset_finder = env.asset_finder
        self.treasury_curves = env.treasury_curves

        self.period_start = self.sim_params.start_session
        self.period_end = self.sim_params.end_session
        self.last_close = self.sim_params.last_close
Example #9
import socket
import os

# disable buffering
socket._fileobject.default_bufsize = 0

import httplib
httplib.HTTPConnection.debuglevel = 1

import urllib2
import MySQLdb
import anyjson
import logbook
import tweetsclient
import politwoops
replace_highpoints = politwoops.utils.replace_highpoints

_script_ = (os.path.basename(__file__) if __name__ == "__main__" else __name__)
log = logbook.Logger(_script_)


class Usage(Exception):
    def __init__(self, msg):
        self.msg = msg


class DeletedTweetsWorker(object):
    def __init__(self, heart, images):
        self.heart = heart
        self.images = images
        self.get_config()

    def init_database(self):
        log.debug("Making DB connection")
Example #10
    |                 | for the portfolio returns between self.start_date  |
    |                 | and self.end_date.                                 |
    +-----------------+----------------------------------------------------+
    | max_leverage    | The largest gross leverage between self.start_date |
    |                 | and self.end_date                                  |
    +-----------------+----------------------------------------------------+


"""

import logbook
import numpy as np

from catalyst.constants import LOG_LEVEL

log = logbook.Logger('Risk', level=LOG_LEVEL)

TREASURY_DURATIONS = [
    '1month', '3month', '6month', '1year', '2year', '3year', '5year', '7year',
    '10year', '30year'
]


# Return True when a (non-label) field's value is NaN or infinite,
# so the caller can replace it with None.
def check_entry(key, value):
    if key != 'period_label':
        return np.isnan(value) or np.isinf(value)
    else:
        return False
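A short sketch (an assumption, not catalyst code) of how check_entry is typically used when serializing a metrics dict:

# Hypothetical metrics dict; values flagged by check_entry become None.
metrics = {'alpha': float('nan'), 'beta': 1.2, 'period_label': '2014-01'}
cleaned = {k: (None if check_entry(k, v) else v) for k, v in metrics.items()}
# -> {'alpha': None, 'beta': 1.2, 'period_label': '2014-01'}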
Example #11
"""Subclonal composition inference"""

import pickle

import logbook
import numpy
import pandas
import scipy.special
import scipy.optimize
import scipy.signal
import scipy.stats
from itertools import chain

_LOG = logbook.Logger(__name__)

_MINIMAL_MUTATIONS = 50

_SMALLEST_CLONE = 10

_MINIMAL_SUBCLONE = 1e-7


def infer_single_sample(mutations, purity):
    """Infer subclones for a single sample.

    Parameters
    ----------
    mutations : pandas.DataFrame
        The mutation table.
    purity : float | str | NoneType
        The estimated tumour purity.
Example #12
def test_with_log_error():
    logger = logbook.Logger('some-logger')
    logger.error(message)
    logger.error(non_capture_message, extra={'capture': False})
Example #13
@pytest.fixture
def links_dir(logs_dir):
    return logs_dir.join("links")


@pytest.fixture
def files_dir(logs_dir):
    return logs_dir.join("files")


_TOKEN = "logging-test"
_SESSION_START_MARK = "session-start-mark"
_SESSION_END_MARK = "session-end-mark"

_silenced_logger = logbook.Logger("silenced_logger")


################################################################################
## Legacy Tests


class LogFormattingTest(TestCase):

    def setUp(self):
        super(LogFormattingTest, self).setUp()
        self.log_path = self.get_new_path()
        self.override_config(
            "log.root", self.log_path
        )
        self.override_config(
Example #14
File: app03.py Project: xiyongjian/gateway
import sys
import logbook

# local import
import utils

import data
import watchlists

# logging setup
if True:
    handler = logbook.StreamHandler(sys.stdout, level=logbook.DEBUG)
    handler.formatter.format_string = '{record.time}|{record.level_name}|{record.module}|{record.func_name}|{record.lineno}|{record.message}'
    handler.push_application()
    # or using this : with handler.applicationbound():
    log = logbook.Logger("app01")

if False:  # hacking for url missing
    from pandas_datareader.google.daily import GoogleDailyReader

    @property
    def url(self):
        print("call @property get url, %s, %s" % (self, type(self)))
        return 'http://finance.google.com/finance/historical'

    GoogleDailyReader.url = url

stocks = ['600183', '600184', '601336']


def initialize(context):
Example #15
File: utils.py Project: pprett/zipline
"""
import types
import logbook

from copy import deepcopy
from datetime import datetime
from collections import deque
from abc import ABCMeta, abstractmethod

import pandas as pd

from zipline import ndict
from zipline.utils.tradingcalendar import non_trading_days
from zipline.gens.utils import assert_sort_unframe_protocol, hash_args

log = logbook.Logger('Transform')


class Passthrough(object):
    PASSTHROUGH = True
    """
    Trivial class for forwarding events.
    """
    def __init__(self):
        pass

    def update(self, event):
        pass


class TransformMeta(type):
Example #16
def get_logger(logger_name):
    """ get logger """

    logger = logbook.Logger(logger_name)
    #print dir(logger)
    return logger
Example #17
def main(stype, interval=1.0, loglevel="INFO"):
    """Computes the REF/MOV stacks.
    
    Parameters
    ----------
    stype : {'mov', 'ref'}
        Defines which of the REF or Moving-window stacks must be exported
    interval : float, optional
        Number of days before now to search for modified CC jobs

    """
    logger = logbook.Logger(__name__)
    # Reconfigure logger to show the pid number in log records
    logger = get_logger('msnoise.stack_child', loglevel, with_pid=True)
    logger.debug('Starting the %s stack' % stype)
    db = connect()

    export_format = get_config(db, 'export_format')

    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        mseed = False
        sac = True
    elif export_format == "MSEED":
        mseed = True
        sac = False

    maxlag = float(get_config(db, "maxlag"))
    cc_sampling_rate = float(get_config(db, "cc_sampling_rate"))

    stack_method = get_config(db, 'stack_method')
    pws_timegate = float(get_config(db, 'pws_timegate'))
    pws_power = float(get_config(db, 'pws_power'))
    goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    # Get Configuration
    params = get_params(db)
    plugins = get_config(db, "plugins")
    extra_jobtypes = []
    if plugins:
        plugins = plugins.split(",")
        for ep in pkg_resources.iter_entry_points(
                group='msnoise.plugins.jobtypes'):
            module_name = ep.module_name.split(".")[0]
            if module_name in plugins:
                jobtypes = ep.load()()
                for jobtype in jobtypes:
                    if jobtype["after"] == "refstack":
                        extra_jobtypes.append(jobtype["name"])

    if stype == "mov" or stype == "step":
        start, end, datelist = build_movstack_datelist(db)
        format = "matrix"
        mov_stack = get_config(db, "mov_stack")
        if mov_stack.count(',') == 0:
            mov_stacks = [
                int(mov_stack),
            ]
        else:
            mov_stacks = [int(mi) for mi in mov_stack.split(',')]
        if 1 in mov_stacks:
            mov_stacks.remove(1)  # remove 1 day stack, it should exist already

    elif stype == "ref":
        start, end, datelist = build_ref_datelist(db)
        format = "stack"

    if stype == "step":
        datelists = {}
        for mov_stack in mov_stacks:
            if mov_stack == 7:
                rng = pd.date_range(start, end, freq="W")
            elif mov_stack == 31:
                rng = pd.date_range(start, end, freq="M")
            elif mov_stack == 91:
                rng = pd.date_range(start, end, freq="Q")
            else:
                rng = pd.date_range(start, end, freq="%iD" % mov_stack)
            datelists[mov_stack] = rng.map(lambda t: t.date())
        #~ print datelists
    biglist = []
    filters = get_filters(db, all=False)
    while is_dtt_next_job(db, flag='T', jobtype='STACK'):
        jobs = get_dtt_next_job(db, flag='T', jobtype='STACK')

        if not len(jobs):
            # edge case, should only occur when is_next returns true, but
            # get_next receives no jobs (heavily parallelised calls).
            time.sleep(np.random.random())
            continue
        pair = jobs[0].pair
        refs, days = zip(*[[job.ref, job.day] for job in jobs])

        logger.info("There are STACKS jobs for some days to recompute for %s" %
                    pair)
        sta1, sta2 = pair.split(':')
        for f in filters:
            filterid = int(f.ref)
            for components in params.all_components:
                pair = "%s:%s" % (sta1, sta2)
                sta1 = sta1.replace('.', '_')
                sta2 = sta2.replace('.', '_')
                logger.debug('Processing %s-%s-%i' %
                             (pair, components, filterid))
                # updated_days = updated_days_for_dates(db, start, end, pair.replace('_', '.'), jobtype='CC', interval=datetime.timedelta(days=interval),returndays=True)
                updated_days = [UTCDateTime(d).datetime.date() for d in days]
                if len(updated_days) != 0:
                    logger.debug("New Data for %s-%s-%i" %
                                 (pair, components, filterid))
                    #~ print updated_days
                    # TODO: load only the updated dates +- max(mov_stack)+1
                    # Note: this would no longer be needed if the stack is h5
                    nstack, stack_total = get_results(db,
                                                      sta1,
                                                      sta2,
                                                      filterid,
                                                      components,
                                                      datelist,
                                                      format=format,
                                                      params=params)
                    if not nstack:
                        logger.debug("No new data found, hmmm")
                    logger.debug("Data loaded")
                    if nstack > 0:
                        if stype == "mov":
                            logger.debug("Mov Stack!")
                            for i, date in enumerate(datelist):
                                jobadded = False
                                for mov_stack in mov_stacks:
                                    if i < mov_stack:
                                        low = 0
                                        high = mov_stack
                                    else:
                                        low = i - mov_stack + 1
                                        high = i + 1
                                    newdata = False
                                    for uday in datelist[low:high]:
                                        if uday in updated_days:
                                            newdata = True
                                            break
                                    if newdata:
                                        corr = stack_total[low:high]
                                        if not np.all(np.isnan(corr)):
                                            day_name = "%s_%s" % (sta1, sta2)
                                            logger.debug(
                                                "%s %s %s [%s - %s] (%i day stack)"
                                                % (day_name, components, date,
                                                   datelist[low], datelist[i],
                                                   mov_stack))
                                            corr = stack(
                                                corr, stack_method,
                                                pws_timegate, pws_power,
                                                goal_sampling_rate)
                                            if not len(corr):
                                                continue

                                            corr = scipy.signal.detrend(
                                                corr).astype(np.float32)
                                            stack_path = os.path.join(
                                                "STACKS", "%02i" % filterid,
                                                "%03i_DAYS" % mov_stack,
                                                components, day_name)
                                            filename = os.path.join(
                                                stack_path, str(date))
                                            if mseed:
                                                export_mseed(db,
                                                             filename,
                                                             pair,
                                                             components,
                                                             filterid,
                                                             corr,
                                                             maxlag=maxlag,
                                                             cc_sampling_rate=
                                                             cc_sampling_rate,
                                                             params=params)
                                            if sac:
                                                export_sac(db,
                                                           filename,
                                                           pair,
                                                           components,
                                                           filterid,
                                                           corr,
                                                           maxlag=maxlag,
                                                           cc_sampling_rate=
                                                           cc_sampling_rate,
                                                           params=params)
                                            day_name = "%s:%s" % (sta1, sta2)
                                            if not jobadded and not params.hpc:
                                                update_job(
                                                    db, date,
                                                    day_name.replace('_', '.'),
                                                    'MWCS', 'T')
                                                jobadded = True
                                        del corr
                        elif stype == "step":
                            jobs_done = []
                            for mov_stack in mov_stacks:
                                for i, date in enumerate(datelists[mov_stack]):
                                    if date not in datelist:
                                        continue
                                    if i < mov_stack:
                                        low = 0
                                        high = mov_stack
                                    else:
                                        low = datelist.index(
                                            date) - mov_stack + 1
                                        high = datelist.index(date) + 1
                                    newdata = False
                                    for uday in datelist[low:high]:
                                        if uday in updated_days:
                                            newdata = True
                                            break
                                    if newdata:
                                        corr = stack_total[low:high]
                                        if not np.all(np.isnan(corr)):
                                            day_name = "%s_%s" % (sta1, sta2)
                                            logger.debug(
                                                "%s %s %s [%s - %s] (%i day stack)"
                                                % (day_name, components, date,
                                                   datelist[low],
                                                   datelist[high - 1],
                                                   mov_stack))
                                            corr = stack(
                                                corr, stack_method,
                                                pws_timegate, pws_power,
                                                goal_sampling_rate)
                                            corr = scipy.signal.detrend(corr)
                                            stack_path = os.path.join(
                                                "STACKS", "%02i" % filterid,
                                                "%03i_DAYS" % mov_stack,
                                                components, day_name)
                                            filename = os.path.join(
                                                stack_path, str(date))
                                            if mseed:
                                                export_mseed(db,
                                                             filename,
                                                             pair,
                                                             components,
                                                             filterid,
                                                             corr,
                                                             maxlag=maxlag,
                                                             cc_sampling_rate=
                                                             cc_sampling_rate,
                                                             params=params)
                                            if sac:
                                                export_sac(db,
                                                           filename,
                                                           pair,
                                                           components,
                                                           filterid,
                                                           corr,
                                                           maxlag=maxlag,
                                                           cc_sampling_rate=
                                                           cc_sampling_rate,
                                                           params=params)
                                            day_name = "%s:%s" % (sta1, sta2)
                                            job = "%s %s" % (date, day_name)
                                            if job not in jobs_done and not params.hpc:
                                                update_job(
                                                    db, date,
                                                    day_name.replace('_', '.'),
                                                    'MWCS', 'T')
                                                jobs_done.append(job)
                                        del corr

                        elif stype == "ref":
                            stack_path = os.path.join("STACKS",
                                                      "%02i" % filterid, "REF",
                                                      components)
                            ref_name = "%s_%s" % (sta1, sta2)
                            filename = os.path.join(stack_path, ref_name)
                            stack_total = scipy.signal.detrend(stack_total)

                            if mseed:
                                export_mseed(db,
                                             filename,
                                             pair,
                                             components,
                                             filterid,
                                             stack_total,
                                             params=params)
                            if sac:
                                export_sac(db,
                                           filename,
                                           pair,
                                           components,
                                           filterid,
                                           stack_total,
                                           params=params)
                            ref_name = "%s:%s" % (sta1, sta2)
                            update_job(db, "REF", ref_name.replace('_', '.'),
                                       'MWCS', 'T')
                            for jobtype in extra_jobtypes:
                                update_job(db, "REF",
                                           ref_name.replace('_',
                                                            '.'), jobtype, 'T')
                            del stack_total

        # THIS SHOULD BE IN THE API
        # This doesn't set MWCS jobs for REF stacks
        if stype != "ref":
            massive_update_job(db, jobs, "D")
            if stype != "step" and not params.hpc:
                for job in jobs:
                    update_job(db, job.day, job.pair, 'MWCS', 'T')
        if stype == "ref":
            biglist += jobs

    if stype == "ref":
        logger.info(
            "You just finished REF stacking, remember to reset the "
            "STACK jobs if you need to compute a MOV stacks. "
            "Run 'msnoise reset STACK' when all process have finished.")
        logger.info("The current STACK jobs have been intentionnaly left "
                    "'I'n progress so they can be reset.")
    #     massive_update_job(db, biglist, "T")

    logger.debug("Finished Stacking")
Example #18
def main():
    logger = logbook.Logger("bill_recd")
    database = "well.sqlite"

    # make sure this gets backed up prior to any
    # writing of the db
    utils.backup_file(database)
    db = sqlite3.connect(database)
    db.row_factory = sqlite3.Row
    cur = db.cursor()

    # prompt for amount of the bill .. and date
    bill_date = utils.prompt_for_current_date("Date of bill")
    pge_bill = float(utils.prompt_for_amount("PGE bill amount"))
    logger.trace(f"pge_bill: {int(pge_bill)}")

    const = constants.Constants()
    exec_str = f"""
        INSERT INTO activity (date, type, amount, note) 
        VALUES (?, ?, ?, ?)
    """
    params = (bill_date, const.pge_bill_received, pge_bill,
              "PGE bill received")
    cur.execute(exec_str, params)

    # instantiate an obj for each of the accounts
    cur.execute("SELECT * FROM account")
    rows = cur.fetchall()

    acct_list = []
    total_usage = 0.0

    # each row('r') ... should represent an individual account
    for r in rows:
        if r["active"] == "no":
            logger.trace(f"Account {r['acct_id']} currently INACTIVE")
            continue
        acct_obj = account.Account(
            r["acct_id"],
            r["first_name"],
            r["last_name"],
            r["file_alias"],
            r["address"],
            r["reads_in"],
            r["master"],
        )
        acct_list.append(acct_obj)

        # fetch the last two reading rows from the db
        query: str = f"""
            SELECT reading 
            FROM reading
            WHERE account_id = (?)
            ORDER BY reading_id 
            DESC LIMIT 2
        """
        params = (r["acct_id"], )
        reading_rows = cur.execute(query, params)

        # near as I can tell this returns a row for each line of data found
        # the row is a list of selected items .... so 'reading' is the
        # zeroeth item ...
        #
        # need to collect them both in a list for further processing
        readings_list = []
        for row in reading_rows:
            readings_list.append(
                row["reading"])  # this retrieval by name seems to be fine

        logger.trace(f"readings_list: {readings_list}")
        acct_obj.latest_reading = readings_list[0]
        acct_obj.previous_reading = readings_list[1]

        acct_obj.calculate_current_usage()
        logger.trace(f"current usage: {acct_obj.current_usage}")

        logger.trace(f"{acct_obj.reads_in} .. {acct_obj.previous_reading}")
        total_usage += acct_obj.current_usage
    total_usage = round(total_usage, 2)
    logger.trace(f"total usage: {total_usage}")

    # a balance less than $10k should trigger an assessment
    # in the upcoming for loop
    savings_balance = utils.get_savings_balance(cur)
    logger.trace(f"savings_balance: {savings_balance}")

    assessment_total = 0
    for acct in acct_list:
        logger.trace(f"\n\n{acct.addr}")

        logger.trace(
            f"current_usage_percent (b4 calculation): {acct.current_usage_percent}"
        )
        logger.trace(
            f"current_usage_percent: {(acct.current_usage / total_usage) * 100}"
        )
        logger.trace(f"total_usage: {total_usage}")

        acct.current_usage_percent = round(
            (acct.current_usage / total_usage) * 100, 2)
        logger.trace(
            f"current_usage_percent (rounded): {acct.current_usage_percent:.2f}"
        )
        logger.trace(f"pge_bill: {int(pge_bill)}")
        logger.trace(f"a.current_usage_percent: {acct.current_usage_percent}")

        acct.pge_bill_share = round(
            (pge_bill * acct.current_usage_percent / 100), 0)
        logger.trace(f"pge_bill_share: {int(acct.pge_bill_share)}")

        exec_str = f"""
            INSERT INTO activity (date, acct, type, amount, note) 
            VALUES (?, ?, ?, ?, ?)
        """
        params = (
            bill_date,
            acct.acct_id,
            const.pge_bill_share,
            acct.pge_bill_share,
            "PGE bill share",
        )
        cur.execute(exec_str, params)

        # this should be moved outside ... no sense going through all
        # this if no assessment needed ...
        # move it outside and process as separate
        if savings_balance < 1000000:
            logger.trace(f"Assessment is due.")
            acct.savings_assessment = int(
                round(acct.current_usage * const.assessment_per_gallon * 100,
                      0))
            logger.trace(f"Assessed: {acct.savings_assessment}")

            # write this to the db
            exec_str = f"""
                INSERT INTO activity (date, acct, type, amount, note) 
                VALUES (?, ?, ?, ?, ?)
            """
            params = (
                bill_date,
                acct.acct_id,
                const.savings_assessment,
                acct.savings_assessment,
                "Savings assessment",
            )
            cur.execute(exec_str, params)

            assessment_total += acct.savings_assessment
            logger.trace(
                f"Bill total: {int(round(acct.savings_assessment + acct.pge_bill_share, 2))}"
            )
        else:
            logger.trace(f"No assessment needed.")

    # added this to make the savings deposit easier
    # 2019.07.21
    #
    # 2019.08.24 ... this needed reformatting
    print(f"==============================================")
    print(f"assessment_total: ${assessment_total / 100:10.2f}")
    print(f"==============================================")
    # save, then close the cursor and db
    db.commit()
    cur.close()
    db.close()
Example #19
#!/usr/bin/env python

import couchdb
import json
import argparse
import logbook
import sys
import os
import ConfigParser

from couchdb import PreconditionFailed

#Set up logging
l = logbook.Logger("CouchDB replicator", level=logbook.INFO)
h = logbook.StreamHandler(sys.stdout, level=logbook.INFO)


def _get_config():
    """Looks for a configuration file and load credentials.
    """
    config = ConfigParser.SafeConfigParser()
    try:
        with open(os.path.join(os.environ['HOME'], '.couchrc'), 'r') as f:
            config.readfp(f)

        SOURCE = config.get('replication', 'SOURCE').rstrip()
        DESTINATION = config.get('replication', 'DESTINATION').rstrip()
    except:
        l.error("Please make sure you've created your own configuration file \
            (i.e: ~/.couchrc)")
        sys.exit(-1)
Example #20
import csv
import sys

import logbook
import research

app_log = logbook.Logger('App')

def main():
    print('Drug Use By Age')
    research.init()

    print()

    msg = "Highest Percentage of those in a age group who used alcohol in the past 12 months"
    print(msg)
    print()
    data = research.highest_alcohol_use()
    for idx, r in enumerate(data[:5],1):
        print(f'{idx}. {r.alcohol_use} % of age group: {r.age} years used alcohol {int(r.alcohol_frequency)} times (median)')
    app_log.trace(f'QUERY: {msg}')
    print()

    msg = "Highest Percentage of those in a age group who used marijuana in the past 12 months"
    print(msg)
    print()
    data = research.highest_marijuana_use()
    for idx, r in enumerate(data[:5],1):
        print(f'{idx}. {r.marijuana_use} % of age group: {r.age} years used marijuana {int(r.marijuana_frequency)} times (median)')
    app_log.trace(f'QUERY:{msg}')
    print()
Example #21
File: configfiles.py Project: mbr/remand
import os
import re

from appdirs import AppDirs
import logbook

from .util import TypeConversionChainMap, ConfigParser

log = logbook.Logger('config')
app_dirs = AppDirs('remand', False)


def load_configuration(app_name, configfiles=[]):
    """Loads configuration information.

    Will load ``defaults.cfg`` (shipped with remand), ``config.ini`` from the
    application directory (similar to ``~/.config/remand/``) and any extra
    configuration files passed.

    :param configfiles: Additional configuration files to read.
    """
    fns = [
        os.path.join(os.path.dirname(__file__), 'defaults.cfg'),
        os.path.join(app_dirs.user_config_dir, 'config.ini'),
    ]

    if 'REMAND_CONFIG' in os.environ:
        fns.append(os.environ['REMAND_CONFIG'])

    fns.extend(configfiles)
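A hedged usage sketch; the return value is an assumption, since the excerpt is truncated before the function returns:

# Hypothetical call -- later files in the list override earlier ones.
cfg = load_configuration('remand', configfiles=['/etc/remand/override.ini'])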
Example #22
    |                 | and self.end_date.                                 |
    +-----------------+----------------------------------------------------+
    | max_leverage    | The largest gross leverage between self.start_date |
    |                 | and self.end_date                                  |
    +-----------------+----------------------------------------------------+


"""

import logbook
import math
import numpy as np

import zipline.utils.math_utils as zp_math

log = logbook.Logger('Risk')

TREASURY_DURATIONS = [
    '1month', '3month', '6month', '1year', '2year', '3year', '5year', '7year',
    '10year', '30year'
]


# Return True when a (non-label) field's value is NaN or infinite,
# so the caller can replace it with None.
def check_entry(key, value):
    if key != 'period_label':
        return np.isnan(value) or np.isinf(value)
    else:
        return False
Example #23
import logbook
import pandas as pd
from pandas_datareader.data import DataReader
import pytz
from six import iteritems
from six.moves.urllib_error import HTTPError

from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can, cn_treasury_curve
from ..utils.paths import (
    cache_root,
    data_root,
)
from zipline.utils.calendars import get_calendar

logger = logbook.Logger('Loader')

# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
    'SPY': (treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
    '^GSPTSE': (treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
    '^FTSE':  # use US treasuries until UK bonds implemented
    (treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}

ONE_HOUR = pd.Timedelta(hours=1)

from cn_stock_holidays.zipline.default_calendar import shsz_calendar
nyse_cal = get_calendar('SHSZ')

#nyse_cal = get_calendar('NYSE')
Example #24
import multiprocessing

import logbook, logbook.queues

from bcbio import utils
from bcbio.log import logbook_zmqpush

LOG_NAME = "bcbio-nextgen"


def get_log_dir(config):
    d = config.get(
        "log_dir",
        config.get("resources", {}).get("log", {}).get("dir", "log"))
    return d


logger = logbook.Logger(LOG_NAME)
logger_cl = logbook.Logger(LOG_NAME + "-commands")
mpq = multiprocessing.Queue(-1)


def _is_cl(record, _):
    return record.channel == LOG_NAME + "-commands"


def _not_cl(record, handler):
    return not _is_cl(record, handler)


class CloseableNestedSetup(logbook.NestedSetup):
    def close(self):
        for obj in self.objects:
Example #25
from typing import List

import logbook
import requests
import collections
import random
import time

Movie = collections.namedtuple(
    'Movie', 'imdb_code, title, director, keywords, '
    'duration, genres, rating, year, imdb_score')

api_log = logbook.Logger('API')


def find_movie_by_title(keyword: str) -> List[Movie]:

    t0 = time.time()

    if not keyword or not keyword.strip():
        api_log.warn('Empty keyword given.')
        raise ValueError('Must specify a search term.')

    url = f'http://movie_service.talkpython.fm/api/search/{keyword}'

    api_log.trace('Starting search for {}'.format(keyword))

    resp = requests.get(url)
    api_log.trace('Request finished, status code: {}'.format(resp.status_code))
    resp.raise_for_status()
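A short usage sketch for the search helper above (error handling is an assumption, since the excerpt is truncated before the return statement):

# Hypothetical caller.
try:
    movies = find_movie_by_title('runner')
    for m in movies[:3]:
        print(m.title, m.year)
except requests.exceptions.HTTPError as err:
    api_log.error('Search failed: {}'.format(err))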
Example #26
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logbook

import pandas as pd

from six import with_metaclass

from catalyst.errors import (
    AccountControlViolation,
    TradingControlViolation,
)

log = logbook.Logger('TradingControl')


class TradingControl(with_metaclass(abc.ABCMeta)):
    """
    Abstract base class representing a fail-safe control on the behavior of any
    algorithm.
    """
    def __init__(self, on_error, **kwargs):
        """
        Track any arguments that should be printed in the error message
        generated by self.fail.
        """
        self.on_error = on_error
        self.__fail_args = kwargs
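A hypothetical concrete control for illustration; the validate() hook and its arguments are assumptions, not catalyst's documented interface:

# Illustration only -- method name and failure handling are assumptions.
class MaxOrderCount(TradingControl):
    def __init__(self, on_error, max_count):
        super(MaxOrderCount, self).__init__(on_error, max_count=max_count)
        self.max_count = max_count
        self.orders_placed = 0

    def validate(self, amount):
        self.orders_placed += 1
        if self.orders_placed > self.max_count:
            # A real control would route this through the library's own
            # failure handling (honoring self.on_error); raising here is
            # just a placeholder.
            raise RuntimeError('too many orders placed')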
Example #27
import logbook

from six import iteritems
from datetime import datetime

import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date

import zipline.finance.risk as risk
from zipline.finance import trading
from .period import PerformancePeriod

from zipline.finance.trading import with_environment
from zipline.utils.serialization_utils import (VERSION_LABEL)
from .position_tracker import PositionTracker

log = logbook.Logger('Performance')


class PerformanceTracker(object):
    """
    Tracks the performance of the algorithm.
    """
    @with_environment()
    def __init__(self, sim_params, env=None):

        self.sim_params = sim_params

        self.period_start = self.sim_params.period_start
        self.period_end = self.sim_params.period_end
        self.last_close = self.sim_params.last_close
        first_open = self.sim_params.first_open.tz_convert(
Example #28
def __init__(self, genetics, selection='roulette'):
    self.log = logbook.Logger(self.__class__.__name__)
    self.log.info('Initiating genetic algorithm.')
    self.genetics = genetics
    self.selection = selection
Example #29
# -*- coding: utf-8 -*-
import csv
import logbook

from datetime import date, timedelta
from ftplib import FTP
from tempfile import TemporaryFile

from django.core.management.base import BaseCommand
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from django.db.models import Q

from eloue.decorators import activate_language

log = logbook.Logger('eloue.rent.sinister')


def comma_separated(number):
    return str(number).replace('.', ',')


class Command(BaseCommand):
    help = "Send daily insurance sinisters"

    @activate_language
    def handle(self, *args, **options):
        from django.conf import settings
        from rent.models import Sinister
        log.info('Starting daily insurance sinisters batch')
        csv_file = TemporaryFile()
Example #30
#!/usr/bin/env python
import sys
import os
import json
import codecs
import time
import dateutil.parser
import math
import cpapi
import cputils
import logbook

logger = logbook.Logger('archive')
log = logbook.FileHandler('monitoring.log')
log.push_application()
start_time = time.time()


class CmdLine:
    def __init__(self):
        self.authFilename = "archive.auth"
        self.starting = None
        self.ending = None
        self.reportModule = None
        self.url_base = "https://api.cloudpassage.com"
        self.output_path = os.getcwd()

    def processArgs(self, argv):
        allOK = True
        self.progdir = os.path.dirname(sys.argv[0])
        for arg in argv[1:]: