def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.

    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())
    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)
    tstart = datetime.datetime.now()  # total run time

    # preprocessing and alignment
    aln_cmd = build_sample_aln_command(sample_params)
    run_command_on_sample(aln_cmd, runner_log, sample,
                          desc="preprocessing and alignment")
    if args.dry_run:
        return
    sort_cmd = safe_templater(SORT_STEPS['samtools-sort'], sample_params)
    run_command_on_sample(sort_cmd, runner_log, sample,
                          desc="sorting BAM file")
    tend = datetime.datetime.now()
    elapsed = tend - tstart
    runner_log.info("%s all processing completed in: %s." % (sample, str(elapsed)))
class serialDataSource(dataSource):
    """Send and receive data over a serial port."""

    def __init__(self):
        self.log = Logger(u"serialDataSource")
        self._ser = None

    def open(self, **config):
        """
        Open the serial port.

        config: serial port settings, passed as keyword arguments.
            config["port"]: port name
            config["baudrate"]
            config["bytesize"]: data bits, default 8
            config["parity"]: parity mode: odd, even, or none (default: none)
            config["stopbits"]
            config["xonxoff"]
            config["rtscts"]
            config["dsrdtr"]

        Returns True on success, False on failure.
        """
        try:
            self._ser = serial.Serial(**config)
        except (serial.serialutil.SerialException, ValueError) as e:
            self.log.error("open: configuring port failed: %s" % e)
            return False
        if not self._ser.isOpen():
            self._ser.open()
        return True
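# A minimal usage sketch for serialDataSource.open() above; the port name and
# baudrate are illustrative only and depend on the host system.
src = serialDataSource()
if src.open(port="/dev/ttyUSB0", baudrate=9600):
    src.log.info(u"port opened")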
class LoggableError(Exception):
    """
    Parent class for customized exception classes which provides an interface
    to the logging functions.

    Attributes:
        log_file: The file in which to keep logs
        logger: Instance of logbook.Handler which handles logging
    """

    def __init__(self):
        self.orig_message = self.message
        self.UPATH = os.getenv("HOME")
        self.log_file = '%s/Desktop/tweetlog.rtf' % self.UPATH
        self.initialize_logger()

    def initialize_logger(self):
        # only create the logger the first time it is needed
        if not hasattr(self, 'logger'):
            self.logger = Logger()
            # self.logger = FileHandler(self.log_file)
            # self.logger.push_application()  # pushes handler onto stack of log handlers

    def log_error(self, error_message):
        self.logger.error(error_message)

    def log_warning(self, warning_message):
        self.logger.warn(warning_message)
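# Hypothetical subclass illustrating how LoggableError is meant to be used:
# the subclass sets self.message before delegating to LoggableError.__init__,
# which snapshots it as orig_message and wires up the logger (a sketch).
class TweetFetchError(LoggableError):
    def __init__(self, message):
        self.message = message
        super(TweetFetchError, self).__init__()

try:
    raise TweetFetchError("rate limit exceeded")
except TweetFetchError as e:
    e.log_error(e.orig_message)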
class dataThread(stopableThread):
    """
    Features:
    1. Periodically fetch data from the data source.
    2. Pass the data on to TrigThread.
    3. Subscribe to the PORT_SETTING_CHANGED message and handle it accordingly.
    """

    def __init__(self, filename="iSerial.ini", section="serial", objsrc=None,
                 qlist=None, *args, **kwargs):
        super(dataThread, self).__init__(*args, **kwargs)
        self.log = Logger(u"dataThread")
        self.config = {}
        if objsrc:
            self._datasrc = objsrc
        else:
            self._datasrc = dataSource.serialDataSource()
        if os.path.isfile(filename) and section is not None:
            self.filename = filename
            self.section = section
            self.loadConfig()
        self._datasrc.open(**self.config)
        pub.subscribe(self.notifySettingChanged, topics.PORT_SETTING_CHANGED)
        pub.subscribe(self.onSend, topics.USER_INPUT_DATA)
        self.queueList = qlist

    def setQueueList(self, qlist):
        if qlist is not None and isinstance(qlist, list):
            self.queueList = qlist

    def loadConfig(self):
        """Read the data source settings from the config file."""
        self.config.clear()
        cf = ConfigParser.ConfigParser()
        cf.read(self.filename)
        for opt in cf.options(self.section):
            try:
                self.config[opt] = ast.literal_eval(cf.get(self.section, opt))
            except (ValueError, SyntaxError):
                self.log.error("loadConfig: invalid config in %s" % self.section)
                pub.sendMessage(topics.NOTIFY_TO_USER, type="error",
                                msg="invalid settings in iSerial.ini %s section" % self.section)

    def notifySettingChanged(self, newconfig, **kwargs):
        self.config = newconfig
        self.pause()
        self._datasrc.close()
        self._datasrc.open(**self.config)
        self.start()

    def process(self):
        size = self._datasrc.dataAvail()
        if size:
            msg = self._datasrc.recvData(size)
            for q in self.queueList:
                q.put(msg)

    def onSend(self, strData):
        self._datasrc.sendData(strData)
class iSerial(QtGui.QMainWindow):
    def __init__(self, parent=None, confname="iSerial.ini"):
        super(iSerial, self).__init__(parent)
        self._threadDict = {}
        self.log = Logger(u"controller")
        self.confname = confname
        pub.subscribe(self.notifytouser, topics.NOTIFY_TO_USER)
        self.ui = ui_mainwindow.Ui_MainWindow()
        self.ui.setupUi(self)
        self.queuelist = self.createQueues()
        self.defconf = self.loadDefaultMap()
        self._threadDict[DEFAULT_TRIG_THREAD] = self.createDefaultTrigThread(self.defconf)
        self.confManager = configManager()
        self.trigManager = triggerManager("plugins/Triggers")
        self.actManager = actionManager("plugins/Actions")
        self.modMonitor = fileMonitor(os.path.abspath("./plugins"))
        self.modMonitor.start()

    def createQueues(self):
        retQueue = []
        for i in range(2):
            retQueue.append(Queue.Queue())
        return retQueue

    def string2class(self, mod, clsname):
        try:
            m = importlib.import_module(mod)
            c = getattr(m, clsname)
        except (ImportError, AttributeError, KeyError):
            self.log.error(u"iSerial::string2class : create class:%s.%s failed" % (mod, clsname))
            QtGui.QMessageBox.critical(None, "Error",
                                       "iSerial::string2class : create class:%s.%s failed" % (mod, clsname))
            return None
        return c
def logging_wrapper(job, f, ip, port):
    """Wrapper to execute user passed functions remotely after setting up logging

    ip and port should specify somewhere we can push logging messages
    over zmq and have something useful happen to them
    """
    handler = NestedSetup([
        ZeroMQPushHandler("tcp://" + ip + ":" + port, level="DEBUG"),
        FileHandler(os.path.join(job["workdir"], job["description"] + ".log"),
                    level="DEBUG", bubble=True)
    ])
    logger = Logger(job["description"])
    with handler.applicationbound():
        try:
            if job.get("tmpdir"):
                os.chdir(job["tmpdir"])
            else:
                os.chdir(job["workdir"])
            f(job, logger=logger)
        except:
            if job.get("tmpdir"):
                open(os.path.join(job["tmpdir"], ".error"), 'a').close()
            logger.exception("Task failed with traceback:")
            raise
    return job
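# Sketch of a receiving end for the push handler above, using logbook's stock
# ZeroMQSubscriber (the ZeroMQPushHandler/ZeroMQPullSubscriber pair used in
# this project appears to be a PUSH/PULL variant of the same idea); the
# address and file name are illustrative.
from logbook import FileHandler
from logbook.queues import ZeroMQSubscriber

subscriber = ZeroMQSubscriber("tcp://127.0.0.1:5557")
controller = subscriber.dispatch_in_background(FileHandler("aggregate.log"))
# ... run remote jobs that push log records here ...
controller.stop()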
class actionThread(stopableThread):
    def __init__(self, actList, inputParam=None, *args, **kargs):
        super(actionThread, self).__init__(*args, **kargs)
        self.actList = actList
        self.param = inputParam
        self.log = Logger(u"%s actionThread" % self.name)

    def setParam(self, data):
        self.param = data

    def changeActList(self, newList):
        # accept either a tuple or a list of actions
        if not isinstance(newList, (tuple, list)):
            self.log.error(u"changeActList: invalid parameter")
            return
        self.actList = newList

    def process(self):
        """Invoke every action plugin in actList. Only run once."""
        actManager = absManager.actionManager()
        for i in self.actList:
            self.log.info(u"do action %s:%s" % (i, actManager.id2filename(i)))
            actManager.call(i, self.param)
        self.pause()
class DigitempCli(object):
    """docstring for DigitempCli"""

    last_cmd = ''

    def __init__(self):
        self.Log = Logger('DigitempCli')
        self.idn = 'DigitempCli %d' % id(self)
        self.digitemp = dt.Digitemp()

    def __unicode__(self):
        return str(self)

    def send(self, cmd, **kwargs):
        self.Log.debug('send(cmd=%s, kwargs=%s)' % (cmd, str(kwargs)))
        self.last_cmd = cmd
        dt_method = getattr(self.digitemp, cmd)
        dt_method()
        return True

    def read(self, **kwargs):
        self.Log.debug('read(kwargs=%s)' % str(kwargs))
        return (0, 'DigitempCli response to %s' % self.last_cmd)

    def query(self, cmd, **kwargs):
        try:
            dt_method = getattr(self.digitemp, cmd)
            result = [0, dt_method()]
        except Exception as e:
            result = [1, str(e)]
        return result
def test_win32_logger(self):
    from logbook import NTEventLogHandler, Logger

    logger = Logger('MyLogger')
    handler = NTEventLogHandler('My Application')

    with handler.applicationbound():
        logger.error('Testing')
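# The same applicationbound() pattern with a portable handler, for platforms
# without the NT event log (a sketch, not part of the original test suite):
def test_stream_logger(self):
    import sys
    from logbook import StreamHandler, Logger

    logger = Logger('MyLogger')
    handler = StreamHandler(sys.stdout)

    with handler.applicationbound():
        logger.error('Testing')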
def __init__(self, logfile, run_handler):
    """pass logfile to be opened and handler to flush writing the file"""
    super(MyEventHandler, self).__init__()
    self.run_log = Logger('Runs')
    self.fs_log = Logger('Files')
    socket_path = environ.get("NVIM_LISTEN_ADDRESS")
    self.nvim = attach('socket', path=socket_path)
    self.log_file = logfile
    self.run_handler = run_handler
def percent_reporter(complete):
    log = Logger('Pool Dispatcher')
    while True:
        try:
            if not complete.empty():
                log.notice('{:.2f}% complete'.format(complete.get()))
        except Exception:
            pass
        finally:
            time.sleep(0.01)
def shell(obj, username):
    gh = obj['githome']

    # create the logger before the user lookup, so failures inside the try
    # block below can still be logged
    log = Logger('githome-shell [{}]'.format(username))

    try:
        # get user
        user = gh.get_user_by_name(username)

        # we've got our user, now authorize him or not
        shell_cmd = shlex.split(os.environ.get('SSH_ORIGINAL_COMMAND', ''))
        log.debug('SSH_ORIGINAL_COMMAND {!r}'.format(shell_cmd))

        if not shell_cmd:
            log.critical('No shell command given')
            abort(1)

        cmd = gh.authorize_command(user, shell_cmd)
        log.debug('Executing {!r}', cmd)
        binary = cmd[0]  # we use path through execlp
    except Exception as e:
        log.error(str(e))
        abort(1)
    else:
        os.execlp(binary, *cmd)
def error(self, id_=None, error_code=None, error_msg=None):
    if isinstance(id_, Exception):
        # XXX: for an unknown reason 'log' is None in this branch,
        # therefore it needs to be instantiated before use
        global log
        if not log:
            log = Logger('IB Broker')
        log.exception(id_)

    if isinstance(error_code, EClientErrors.CodeMsgPair):
        error_msg = error_code.msg()
        error_code = error_code.code()

    if isinstance(error_code, int):
        if error_code in (502, 503, 326):
            # 502: Couldn't connect to TWS.
            # 503: The TWS is out of date and must be upgraded.
            # 326: Unable connect as the client id is already in use.
            self.unrecoverable_error = True

        if error_code < 1000:
            log.error("[{}] {} ({})".format(error_code, error_msg, id_))
        else:
            log.info("[{}] {} ({})".format(error_code, error_msg, id_))
    else:
        log.error("[{}] {} ({})".format(error_code, error_msg, id_))
def rpc_server(socket, protocol, dispatcher):
    log = Logger('rpc_server')
    log.debug('starting up...')
    while True:
        try:
            message = socket.recv_multipart()
        except Exception as e:
            log.warning('Failed to receive message from client, ignoring...')
            log.exception(e)
            continue
        log.debug('Received message %s from %r' % (message[-1], message[0]))

        # assuming protocol is threadsafe and dispatcher is threadsafe, as long
        # as it is immutable
        def handle_client(message):
            try:
                request = protocol.parse_request(message[-1])
            except RPCError as e:
                log.exception(e)
                response = e.error_respond()
            else:
                response = dispatcher.dispatch(request)
                log.debug('Response okay: %r' % response)

            # send reply
            message[-1] = response.serialize()
            log.debug('Replying %s to %r' % (message[-1], message[0]))
            socket.send_multipart(message)

        gevent.spawn(handle_client, message)
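# Sketch of wiring for rpc_server(), assuming the tinyrpc library (which this
# snippet appears modeled on) supplies the protocol and dispatcher objects;
# gevent-friendly zmq bindings are used since handlers are spawned as greenlets.
import zmq.green as zmq
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.dispatch import RPCDispatcher

dispatcher = RPCDispatcher()

@dispatcher.public
def reverse_string(s):
    return s[::-1]

context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://127.0.0.1:5001")
rpc_server(socket, JSONRPCProtocol(), dispatcher)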
def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.

    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())
    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)
    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." % sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return
    tstart = time.time()
    p = Popen(cmd, shell=True, executable=find_bash())
    p.wait()
    if p.returncode != 0:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." % (sample, p.returncode))
        sys.exit(p.returncode)
    tend = time.time()
    elapsed = tend - tstart
    runner_log.info("%s completed preprocessing and alignment in %s seconds." % (sample, str(round(elapsed, 5))))
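# safe_templater() is project code not shown here; a minimal stand-in built on
# string.Template, which raises KeyError when a field is missing rather than
# silently emitting a partial command, might look like this (a sketch):
import string

def safe_templater(template, params):
    """Fill $-style placeholders in `template`, failing loudly on missing keys."""
    return string.Template(template).substitute(params)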
def top_words(sorted_set):
    log = Logger("Top Words")
    top = []
    while True:
        if len(sorted_set) > 0:
            test = list(reversed(sorted_set[-10:]))
            # only log when the top-10 membership has actually changed
            if not all(item in top for item in test):
                top = test
                log.info('#1-10 of {}: {}'.format(len(sorted_set), ', '.join(top)))
        time.sleep(0.01)
class StrategyTemplate:
    def __init__(self, user):
        self.user = user
        self.log = Logger(os.path.basename(__file__))
        StreamHandler(sys.stdout).push_application()

    def strategy(self, event):
        """:param event: event.data holds quote data for all stocks, structured as:
        {'162411': {'ask1': '0.493',
                    'ask1_volume': '75500',
                    'ask2': '0.494',
                    'ask2_volume': '7699281',
                    'ask3': '0.495',
                    'ask3_volume': '2262666',
                    'ask4': '0.496',
                    'ask4_volume': '1579300',
                    'ask5': '0.497',
                    'ask5_volume': '901600',
                    'bid1': '0.492',
                    'bid1_volume': '10765200',
                    'bid2': '0.491',
                    'bid2_volume': '9031600',
                    'bid3': '0.490',
                    'bid3_volume': '16784100',
                    'bid4': '0.489',
                    'bid4_volume': '10049000',
                    'bid5': '0.488',
                    'bid5_volume': '3572800',
                    'buy': '0.492',
                    'close': '0.499',
                    'high': '0.494',
                    'low': '0.489',
                    'name': '华宝油气',
                    'now': '0.493',
                    'open': '0.490',
                    'sell': '0.493',
                    'turnover': '420004912',
                    'volume': '206390073.351'}}
        """
        pass

    def run(self, event):
        try:
            self.strategy(event)
        except Exception as e:
            self.log.error(e)

    def clock(self, event):
        pass
class timer(object):
    """Decorator that measures the time it takes to run a function."""

    __instances = {}

    def __init__(self, f):
        self.__f = f
        self.log = Logger(f.__name__)  # f.func_name is Python 2 only

    def __call__(self, *args, **kwargs):
        self.__start = time.time()
        result = self.__f(*args, **kwargs)
        value = time.time() - self.__start
        self.log.info('elapsed time: {0:.2f}ms'.format(value * 1000))
        return result
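# Example use of the timer decorator; `slow_add` is made up for illustration.
@timer
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)  # logs roughly "elapsed time: 100.00ms" under the 'slow_add' logger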
def __init__(self, filament):
    self.logger = Logger(Fibratus.__name__)
    self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__),
                                                 '..', '..', '..', 'fibratus.log'),
                                    mode='w+')
    self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
    self.kcontroller = KTraceController()
    self.ktrace_props = KTraceProps()
    self.ktrace_props.enable_kflags()
    self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

    self.handle_repository = HandleRepository()
    self._handles = []
    # query for handles on the start of kernel trace
    with self.file_handler.applicationbound():
        self.logger.info('Starting fibratus...')
        self.logger.info('Enumerating system handles...')
        self._handles = self.handle_repository.query_handles()
        self.logger.info('%s handles found' % len(self._handles))
        self.handle_repository.free_buffers()

    self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)
    self.kevent = KEvent(self.thread_registry)

    self._filament = filament
    self.fsio = FsIO(self.kevent, self._handles)
    self.hive_parser = HiveParser(self.kevent, self.thread_registry)
    self.tcpip_parser = TcpIpParser(self.kevent)
    self.dll_repository = DllRepository(self.kevent)

    self.requires_render = {}
    self.filters_count = 0
def __init__(self, path):
    super(fileMonitor, self).__init__()
    self.log = Logger(u"fileMonitor")
    self.event_handler = EventHandler()
    self.observer = Observer()
    self.observer.schedule(self.event_handler, path, recursive=True)
    self.log.info("now starting monitor path:%s" % path)
def __init__(self, channel="test", host='127.0.0.1'):
    self.redis = redis.Redis(host=host)
    self.channel = channel
    self.timeout = 10
    self.query_delay = 0.1
    self.idn = 'Client %d' % id(self)
    self.Log = Logger('Client')
def __init__(self, parameters):
    '''
    Parameters
        parameters : dict(...)
            Named parameters used both for general portfolio settings
            (server and constraints) and for the user optimizer function
    '''
    super(PortfolioManager, self).__init__()
    self.log = Logger('Manager')
    self.portfolio = None
    self.date = None
    self._optimizer_parameters = parameters
    self.connected = False
    self.server = parameters.get('server', None)

    #TODO Message emission only if a client exists ? Could try to bind and give up if no connections
    #NOTE Non blocking recv(): https://github.com/zeromq/pyzmq/issues/132 / zmq.NOBLOCK ?
    #NOTE good example: http://zguide2.zeromq.org/py:peering3
    #if self.server.ports is not None:
        #startup_msg = self.server.receive()
        #self.connected = True
        #log.info(json.dumps(startup_msg, indent=4, separators=(',', ': ')))
    #TODO Should send stuff anyway, and accept new connections while running
    #else:
    self.connected = parameters.get('connected', False)

    # Run the server if the engine didn't while it is asked
    if self.server.port is None and self.connected:
        self.log.info('Binding manager on default port...')
        self.server.run(host='127.0.0.1', port=5570)
def __init__(self, queue_name='default', interval=60, connection=None):
    if connection is None:
        connection = get_current_connection()
    self.connection = connection
    self.queue_name = queue_name
    self._interval = interval
    self.log = Logger('scheduler')
def __init__(self,
             transport,
             on_learn,
             on_prepare=None,
             on_stale=None,
             quorum_timeout=3,
             logger_group=None,
             ):
    self._logger = Logger('paxos')
    if logger_group is not None:
        logger_group.add_logger(self._logger)

    self.transport = transport
    self.on_learn = on_learn
    self.on_prepare = on_prepare
    self.on_stale = on_stale
    self.quorum_timeout = quorum_timeout

    self.id = 0
    self.max_seen_id = 0
    self.last_accepted_id = 0
    self._logger.debug('last_accepted_id=%(last_accepted_id)s' % self.__dict__)

    self.proposed_value = None
    self.deferred = None
    self.queue = deque()  # queue of (value, deferred) to propose
    self._learn_queue = []  # sorted list with learn requests which come out of order

    # delayed calls for timeouts
    self._accepted_timeout = None
    self._acks_timeout = None

    self._waiting_to_learn_id = deque()
def run(self, path_local_log=None, branch='next', sched='false', launch_pause='false'):
    """
    :param str path_local_log: Path to the local log file copied from the remote
     server. If ``None``, do not copy remote log file.
    :param str branch: Target git branch to test.
    :param str sched: If ``'true'``, run tests on a daily schedule (06:00).
     Otherwise, run tests only once.
    :param str launch_pause: If ``'true'``, pause at a breakpoint after launching
     the instance and mounting the data volume. Continuing from the breakpoint
     will terminate the instance and destroy the volume.
    """
    import schedule
    from logbook import Logger

    self.log = Logger('nesii-testing')

    self.path_local_log = path_local_log
    self.branch = branch
    self.launch_pause = launch_pause

    if self.launch_pause == 'true':
        self.log.info('launching instance then pausing')
        self._run_tests_(should_email=False)
    else:
        if sched == 'true':
            self.log.info('begin continuous loop')
            schedule.every().day.at("6:00").do(self._run_tests_, should_email=True)
            while True:
                schedule.run_pending()
                time.sleep(1)
        else:
            self.log.info('running tests once')
            self._run_tests_(should_email=True)
def start(self):
    """Initialize workdir, logging, etc. in preparation for running jobs."""
    # make a working directory for each job
    for job in self.jobs:
        job["workdir"] = os.path.join(self.workdir, job["description"])
        fs.maybe_mkdir(job["workdir"])
    # temporary ipython profile directory
    self.ipythondir = os.path.join(self.workdir, ".ipython")
    fs.maybe_mkdir(self.ipythondir)
    # log dir
    self.logdir = os.path.join(self.workdir, "log")
    fs.maybe_mkdir(self.logdir)

    # determine which IP we are going to listen on for logging
    try:
        self.listen_ip = localinterfaces.public_ips()[0]
    except:
        raise ValueError("This machine appears not to have"
                         " any publicly visible IP addresses")

    # setup ZMQ logging
    self.handler = FileHandler(os.path.join(self.logdir, "dish.log"))
    self.listen_port = str(randint(5000, 10000))
    self.subscriber = ZeroMQPullSubscriber("tcp://" + self.listen_ip +
                                           ":" + self.listen_port)
    self.controller = self.subscriber.dispatch_in_background(self.handler)
    self.logger = Logger("dish_master")
def __init__(self, parameters):
    '''
    Parameters
        parameters : dict(...)
            Named parameters used both for general portfolio settings
            (server and constraints) and for the user optimizer function
    '''
    super(PortfolioManager, self).__init__()
    self.log = Logger('Manager')
    self.datafeed = DataFeed()
    self.portfolio = None
    self.date = None
    self.name = parameters.get('name', 'Chuck Norris')
    self._optimizer_parameters = parameters
    self.connected = False

    #TODO Should send stuff anyway, and accept new connections while running
    self.connected = parameters.get('connected', False)

    # Run the server if the engine didn't while it is asked
    if 'server' in parameters:
        self.server = parameters.pop('server')
        if self.server.port is None and self.connected:
            self.log.info('Binding manager on default port...')
            self.server.run(host='127.0.0.1', port=5570)
def _setup(self, app_obj):
    super(PmLogHandler, self)._setup(app_obj)
    if self._meta.namespace is None:
        self._meta.namespace = self.app._meta.label

    self.backend = Logger(self._meta.namespace)

    # hack for application debugging
    if is_true(self.app._meta.debug):
        self.app.config.set('log', 'level', 'DEBUG')

    # Mainly for backwards compatibility since Logger level should
    # be NOTSET (level 0). Output level is controlled by handlers
    self.set_level(self.app.config.get('log', 'level'))

    # clear loggers?
    if is_true(self._meta.clear_loggers):
        self.clear_loggers()

    # console
    if is_true(self.app.config.get('log', 'to_console')):
        self._setup_console_log()

    # file
    if self.app.config.get('log', 'file'):
        self._setup_file_log()

    # nested setup
    self.backend.handlers.append(logbook.NullHandler(bubble=False))
    self.log_setup = logbook.NestedSetup(self.backend.handlers)
    with self._console_handler.applicationbound():
        self.debug("logging initialized for '%s' using PmLogHandler" %
                   self._meta.namespace)
def __init__(self, port=8, packet_timeout=2, baudrate=115200, bytesize=EIGHTBITS,
             parity=PARITY_NONE, stopbits=STOPBITS_ONE, xonxoff=0, rtscts=0,
             writeTimeout=None, dsrdtr=None):
    '''Initialise the asynchronous serial object'''
    Thread.__init__(self)
    self.serial = serial.Serial(port, baudrate, bytesize, parity, stopbits,
                                packet_timeout, xonxoff, rtscts, writeTimeout,
                                dsrdtr)
    self.running = Event()
    self.buffer = ''
    self.log = Logger('Daq328p')
    self.log.info('Daq328p(is_alive=%d, serial_port_open=%d)' %
                  (self.is_alive(), not self.serial.closed))
    out = self.query('I')
    if not out[0]:
        self.log.info(out[1])
def __init__(self, queues, name=None, default_result_ttl=DEFAULT_RESULT_TTL,
             connection=None, exc_handler=None):  # noqa
    if connection is None:
        connection = get_current_connection()
    self.connection = connection
    if isinstance(queues, Queue):
        queues = [queues]
    self._name = name
    self.queues = queues
    self.validate_queues()
    self._exc_handlers = []
    self.default_result_ttl = default_result_ttl
    self._state = "starting"
    self._is_horse = False
    self._horse_pid = 0
    self._stopped = False
    self.log = Logger("worker")
    self.failed_queue = get_failed_queue(connection=self.connection)

    # By default, push the "move-to-failed-queue" exception handler onto
    # the stack
    self.push_exc_handler(self.move_to_failed_queue)
    if exc_handler is not None:
        self.push_exc_handler(exc_handler)
        func_name=record.func_name,  # function name
        lineno=record.lineno,        # line number
        msg=record.message,          # log message body
    )
    return log


# handler that prints to the screen
user_std_handler = ColorizedStderrHandler(bubble=True, level='ERROR')
user_std_handler.formatter = user_handler_log_formatter

# log path: create a log/ directory under the project root
LOG_DIR = os.path.join('log')
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

# handler that writes to a file
user_file_handler = TimedRotatingFileHandler(
    os.path.join(LOG_DIR, '%s.log' % 'test_log'),
    date_format='%Y%m%d', bubble=True)
user_file_handler.formatter = user_handler_log_formatter

# logger for user code
user_log = Logger("user_log")


def init_logger():
    logbook.set_datetime_format("local")
    user_log.handlers = []
    user_log.handlers.append(user_std_handler)
    user_log.handlers.append(user_file_handler)
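# Sketch of wiring: call init_logger() once at startup, then log through
# user_log. With the handlers above, records below ERROR go only to the daily
# file; ERROR and up also hit the colorized stderr handler.
init_logger()
user_log.info('strategy started')   # file only
user_log.error('order rejected')    # file + stderr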
from qdb.comm import fmt_msg, fmt_err_msg
from qdb.compat import str_to_bytes

# errno's that are safe to ignore when killing a session.
safe_errnos = (
    errno.EBADF,
    errno.ECONNRESET,
    errno.EPIPE,
)

# Symbolic constant for the attach_timeout case.
ALLOW_ORPHANS = 0

log = Logger('QdbSessionStore')


class DebuggingSession(namedtuple('DebuggingSessionBase',
                                  ['tracer',
                                   'local_pid',
                                   'pause_signal',
                                   'clients',
                                   'both_sides_event',
                                   'timestamp'])):
    """
    A DebuggingSession stores all the information about a task that is
    being debugged, including the socket to the client, the websockets
    to the client, and the timers that manage new connections.
    """
    def __new__(cls, tracer=None,
from subprocess import PIPE
from typing import (Tuple, AsyncGenerator, Optional, List, Any, ClassVar,
                    Sequence, Dict, cast, Iterator, Union)
import json
import os
from pathlib import Path
from logbook import Logger
import sys
from encodings import utf_8
from shutil import which
from .Utils import strToBool
import attr
from abc import ABC
import asyncio
from contextlib import suppress
from .Utils import powerset
from collections import deque

logger = Logger(os.path.basename(__file__))


@attr.s(auto_attribs=True, frozen=True)
class RequestObject(ABC):
    type: str = attr.ib(init=False)


class SolidityM(PatchSynthesizer):
    name: ClassVar[str] = 'SolidityM'
    tempPatchDirs: Tuple[str, ...]
    path_sm: Path
    seed: Optional[str]
    mutation_types: List[str] = ['insert', 'replace', 'move']
    spaceInfo: Dict[Tuple[str, ...], Dict[str, Any]]
import datetime
import os
import pandas as pd
import glob

from logbook import Logger
# zipline_root() below is assumed to come from zipline's path utilities
from zipline.utils.paths import zipline_root

BASE = os.path.dirname(os.path.realpath(__file__))
RAW_FLDR = "raw"  # folder to store the raw text file
START_DATE = '2009-01-01'  # this is only used for getting data from the API
END_DATE = datetime.datetime.today().strftime('%Y-%m-%d')

ZIPLINE_DATA_DIR = zipline_root() + '/data/'
FN = "4th.npy"  # the file name to be used when storing this in ~/.zipline/data

# TODO: add this to a config file
DUMP_FILE = '/Users/peter/Documents/Bitbucket/qlite-backend/fundamental/data/master.csv'

log = Logger('load_4thquartile_fund.py')


def clear_raw_folder(raw_folder_path):
    # removes all the files in the raw folder
    print(' ** clearing the raw/ folder **')
    files = glob.glob(raw_folder_path + '/*')
    for f in files:
        os.remove(f)


def populate_raw_data_from_dump(tickers2sid, fields, raw_path):
    """
    Populates the raw/ folder based on a single dump download.

    :param tickers2sid: a dict with the ticker string as the key and the SID
def error_callback(update: Update, context: CallbackContext):
    # Unauthorized is an exception class, so an identity comparison against it
    # is always true for an exception instance; check the type instead
    if not isinstance(context.error, Unauthorized):
        log = Logger()
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib2 import ExitStack
from copy import copy
from logbook import Logger, Processor
from pandas.tslib import normalize_date

from zipline.finance.order import ORDER_STATUS
from zipline.protocol import BarData
from zipline.utils.api_support import ZiplineAPI
from six import viewkeys

from zipline.gens.sim_engine import (BAR, SESSION_START, SESSION_END,
                                     MINUTE_END, BEFORE_TRADING_START_BAR)

log = Logger('Trade Simulation')


class AlgorithmSimulator(object):

    EMISSION_TO_PERF_KEY_MAP = {
        'minute': 'minute_perf',
        'daily': 'daily_perf'
    }

    def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,
                 restrictions, universe_func, instant_fill=False):
import sys
from datetime import datetime
from functools import wraps
from time import sleep, time

import fire
import imageio
import numpy as np
import structlog
from logbook import Logger, StreamHandler
from PIL import Image

from globals import history
from imgHash import avhash, hamming

StreamHandler(sys.stdout).push_application()
logging = Logger('wing', level='INFO')
# FORMAT = "%(asctime)s %(levelname)s %(message)s"
# logging.basicConfig(format=FORMAT, level=logging.INFO)
logging.info('start')

cnt = 0
waiting_count = 0
# history = []


def timed(func):
    """This decorator prints the execution time for the decorated function."""

    @wraps(func)
    def wrapper(*args, **kwargs):
from matrix.uploads import UploadsBuffer, upload_cb

try:
    from urllib.parse import urlunparse
except ImportError:
    from urlparse import urlunparse

# yapf: disable
WEECHAT_SCRIPT_NAME = SCRIPT_NAME
WEECHAT_SCRIPT_DESCRIPTION = "matrix chat plugin"       # type: str
WEECHAT_SCRIPT_AUTHOR = "Damir Jelić <*****@*****.**>"  # type: str
WEECHAT_SCRIPT_VERSION = "0.3.0"                        # type: str
WEECHAT_SCRIPT_LICENSE = "ISC"                          # type: str
# yapf: enable

logger = Logger("matrix-cli")


def print_certificate_info(buff, sock, cert):
    cert_pem = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))

    x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)

    public_key = x509.get_pubkey()

    key_type = ("RSA" if public_key.type() == crypto.TYPE_RSA else "DSA")
    key_size = str(public_key.bits())

    sha256_fingerprint = x509.digest(n(b"SHA256"))
    sha1_fingerprint = x509.digest(n(b"SHA1"))
    signature_algorithm = x509.get_signature_algorithm()
import matplotlib.pyplot as plt
from logbook import Logger

from catalyst.exchange.utils.stats_utils import get_pretty_stats

log = Logger('analyze.demo')


def analyze(context, perf):
    log.info('the daily stats:\n{}'.format(get_pretty_stats(perf)))
    ax1 = plt.subplot(211)
    perf.portfolio_value.plot(ax=ax1)
    ax1.set_ylabel('portfolio value')
    ax2 = plt.subplot(212, sharex=ax1)
    perf.ending_cash.plot(ax=ax2)
    ax2.set_ylabel('ending_cash')
    plt.show()
    # plt.savefig("analysis.png", bbox_inches='tight')
# coding=utf-8
import json
from functools import reduce

from flask import Flask, Response, request
from logbook import Logger

from py_dice import actions, common, core, dcs, dice10k

log = Logger(__name__)


def start_api():
    game_state = {}
    flask_app = Flask(__name__)
    flask_app.config["DEBUG"] = True
    slack_client = core.create_client()

    @flask_app.route("/create", methods=["POST"])
    def create_game() -> Response:
        username = request.form["user_name"]
        game_id = dice10k.create_game()["game-id"]
        game_state[game_id] = {"game_id": game_id, "users": {}}
        game_state[game_id]["channel"] = request.form["channel_id"]
        response = slack_client.chat_postMessage(**dcs.message.create(
            game_id=game_id,
            channel_id=game_state[game_id]["channel"],
            message=f"@{username} started a game, click to join:",
        ).add_button(game_id=game_id, text="Join Game", action_id="join_game"
                     ).add_start_game(game_id=game_id).add_start_game(
                         game_id=game_id, auto_break=True).build())
import subprocess
from threading import Thread

from logbook import Logger, FileHandler
import carla.driving_benchmark.experiment_suites as bms

NAMESPACE = 'run_simulator'
log = Logger(NAMESPACE)

SIMULATOR_EXE = "./CarlaUE4.sh"
SIMULATOR_SERVER = "-carla-server"
SIMULATOR_WINDOWED = "-windowed"
RESX = "-ResX"
RESY = "-ResY"
BENCHMARK = "-benchmark"
SIMULATOR_SETTINGS = "-carla-settings"
SIMULATOR_LOG_FILE = "simulator_log_file"
FPS = "-fps"

BENCHMARKS = {
    "CoRL2017": bms.CoRL2017,
    "BasicExperimentSuite": bms.BasicExperimentSuite
}


def get_benchmark(cfg):
    assert hasattr(
        cfg, "benchmark"
    ) and cfg.benchmark in BENCHMARKS, "Please provide a valid benchmark name."
    return BENCHMARKS[cfg.benchmark](cfg.town)
from __future__ import division
import click
import mxnet as mx
from logbook import Logger
from pprint import pformat
import os
from .utils import packargs, Bunch
from .module_semimyo import Module
from .data import Preprocess, Dataset
from . import Context, constant

logger = Logger('semimyo')


@click.group()
def cli():
    pass


@cli.command()
@click.option('--batch-norm-momentum', type=float, default=constant.BATCH_NORM_MOMENTUM)
@click.option('--batch-norm-use-global-stats/--no-batch-norm-use-global-stats',
              default=constant.BATCH_NORM_USE_GLOBAL_STATS)
@click.option('--cudnn-tune', type=click.Choice(['off', 'limited_workspace', 'fastest']),
              default='fastest')
@click.option('--symbol', default='semimyo')
@click.option('--shared-net')
@click.option('--gesture-net')
import types
try:
    from queue import Queue
except ImportError:
    from Queue import Queue
from threading import Thread

import pendulum
from logbook import Logger

from ziyan.lib.Sender import Sender
from ziyan.utils.util import get_conf

log = Logger('main')


class Command(object):
    def __init__(self, configuration):
        self.conf = configuration

        # class-related attributes
        self.query_rate = self.conf['ziyan']['query_rate']
        pass

    def work(self, queues, **kwargs):
        # get command queue
        self.command_queue = command_queue = queues['command_queue']
        while True:
from collections import defaultdict

from logbook import Logger

import zipline.errors
import zipline.protocol as zp
from zipline.finance.slippage import (
    VolumeShareSlippage,
    transact_partial,
)
from zipline.finance.commission import PerShare, OrderCost
from zipline.finance.order import Order
from zipline.utils.serialization_utils import (
    VERSION_LABEL
)

log = Logger('Blotter')


class Blotter(object):
    def __init__(self):
        self.transact = transact_partial(VolumeShareSlippage(), OrderCost())
        # these orders are aggregated by sid
        self.open_orders = defaultdict(list)
        # keep a dict of orders by their own id
        self.orders = {}
        # holding orders that have come in since the last event.
        self.new_orders = []
        self.current_dt = None
        self.max_shares = int(1e+11)
import numpy as np
import pandas as pd

from catalyst.assets._assets import TradingPair
from catalyst.constants import LOG_LEVEL, AUTO_INGEST
from catalyst.data.data_portal import DataPortal
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.exchange_errors import (ExchangeRequestError,
                                               PricingDataNotLoadedError)
from catalyst.exchange.utils.exchange_utils import resample_history_df, \
    group_assets_by_exchange
from catalyst.exchange.utils.datetime_utils import get_frequency, get_start_dt
from logbook import Logger
from redo import retry

log = Logger('DataPortalExchange', level=LOG_LEVEL)


class DataPortalExchangeBase(DataPortal):
    def __init__(self, *args, **kwargs):

        self.attempts = dict(
            get_spot_value_attempts=5,
            get_history_window_attempts=5,
            retry_sleeptime=5,
        )

        super(DataPortalExchangeBase, self).__init__(*args, **kwargs)

    def _get_history_window(self, assets, end_dt,
def main():
    # Setup logging
    logging.getLogger("pdfminer").setLevel(logging.WARNING)
    logging.getLogger("ocrmypdf").setLevel(logging.WARNING)
    redirect_logging()

    format_string = "{record.level_name}: {record.message}"
    StreamHandler(sys.stdout, format_string=format_string, level="INFO").push_application()
    log = Logger()

    q = mq.MessageQueue(all_burst_limit=3, all_time_limit_ms=3000)
    request = Request(con_pool_size=8)
    pdf_bot = MQBot(TELE_TOKEN, request=request, mqueue=q)

    # Create the EventHandler and pass it your bot's token.
    updater = Updater(
        bot=pdf_bot,
        use_context=True,
        request_kwargs={"connect_timeout": TIMEOUT, "read_timeout": TIMEOUT},
    )

    def stop_and_restart():
        updater.stop()
        os.execl(sys.executable, sys.executable, *sys.argv)

    def restart(_):
        Thread(target=stop_and_restart).start()

    job_queue = updater.job_queue
    job_queue.run_repeating(restart, interval=dt.timedelta(minutes=30))

    # Get the dispatcher to register handlers
    dispatcher = updater.dispatcher

    # General commands handlers
    dispatcher.add_handler(
        CommandHandler("start", send_support_options, Filters.regex("support"), run_async=True)
    )
    dispatcher.add_handler(CommandHandler("start", start_msg, run_async=True))
    dispatcher.add_handler(CommandHandler("help", help_msg, run_async=True))
    dispatcher.add_handler(CommandHandler("setlang", send_lang, run_async=True))
    dispatcher.add_handler(CommandHandler("support", send_support_options, run_async=True))

    # Callback query handler
    dispatcher.add_handler(CallbackQueryHandler(process_callback_query, run_async=True))

    # Payment handlers
    dispatcher.add_handler(PreCheckoutQueryHandler(precheckout_check, run_async=True))
    dispatcher.add_handler(
        MessageHandler(Filters.successful_payment, successful_payment, run_async=True)
    )

    # URL handler
    dispatcher.add_handler(
        MessageHandler(Filters.entity(MessageEntity.URL), url_to_pdf, run_async=True)
    )

    # PDF commands handlers
    dispatcher.add_handler(compare_cov_handler())
    dispatcher.add_handler(merge_cov_handler())
    dispatcher.add_handler(photo_cov_handler())
    dispatcher.add_handler(text_cov_handler())
    dispatcher.add_handler(watermark_cov_handler())

    # PDF file handler
    dispatcher.add_handler(file_cov_handler())

    # Feedback handler
    dispatcher.add_handler(feedback_cov_handler())

    # Dev commands handlers
    dispatcher.add_handler(CommandHandler("send", send_msg, Filters.user(DEV_TELE_ID)))
    dispatcher.add_handler(CommandHandler("stats", get_stats, Filters.user(DEV_TELE_ID)))

    # Log all errors
    dispatcher.add_error_handler(error_callback)

    # Start the Bot
    if APP_URL is not None:
        updater.start_webhook(
            listen="0.0.0.0",
            port=PORT,
            url_path=TELE_TOKEN,
            webhook_url=APP_URL + TELE_TOKEN,
        )
        log.notice("Bot started webhook")
    else:
        updater.start_polling()
        log.notice("Bot started polling")

    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
import re
import requests
import os
import uuid
import socket
import base64
import urllib
import sys
import threading

from collections import OrderedDict
from logbook import Logger, StreamHandler

from . import helpers
from .webtrader import WebTrader

StreamHandler(sys.stdout).push_application()
log = Logger(os.path.basename(__file__))

# suppress the log lines produced by the heartbeat thread
debug_log = log.debug


def remove_heart_log(*args, **kwargs):
    if threading.current_thread() == threading.main_thread():
        debug_log(*args, **kwargs)


log.debug = remove_heart_log


class HTTrader(WebTrader):
    config_path = os.path.dirname(__file__) + '/config/ht.json'
                  level='WARNING'),
    #Processor(inject_information)
])

color_setup = NestedSetup([
    StreamHandler(sys.stdout, format_string=log_format),
    ColorizedStderrHandler(format_string=log_format, level='NOTICE'),
    #Processor(inject_information)
])

#remote_setup = NestedSetup([
    #ZeroMQHandler('tcp://127.0.0.1:56540'),
    ##Processor(inject_information)
#])

log = Logger('Trade Labo')

#TODO: reimplement fatal function with (colors ?) exit

'''---------------------------------------------------------------------------
   Logger class
---------------------------------------------------------------------------'''


class LogSubsystem(object):
    ''' Trade logging version '''

    def __init__(self, name='default', lvl='debug', file_channel=False):
        if lvl == "debug":
            lvl = logging.DEBUG
        elif lvl == "info":
            lvl = logging.INFO
        elif lvl == 'error':
from collections import defaultdict

from logbook import Logger

from zipline.finance.order import Order
from zipline.finance.slippage import (
    DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
    VolatilityVolumeShare,
    FixedBasisPointsSlippage,
)
from zipline.finance.commission import (
    DEFAULT_PER_CONTRACT_COST,
    FUTURE_EXCHANGE_FEES_BY_SYMBOL,
    PerContract,
    PerShare,
)
from zipline.finance.cancel_policy import NeverCancel
from zipline.utils.input_validation import expect_types

log = Logger('Blotter')
warning_logger = Logger('AlgoWarning')


class Blotter(object):
    def __init__(self, data_frequency, equity_slippage=None, future_slippage=None,
                 equity_commission=None, future_commission=None, cancel_policy=None):
        # these orders are aggregated by asset
        self.open_orders = defaultdict(list)
        # keep a dict of orders by their own id
from math import *
from itertools import product

from logbook import Logger
import cv2
import numpy as np
import networkx as nx
import math
from tqdm import tqdm
# from palettable.cartocolors.qualitative import Pastel_10 as COLORS

from suppose.common import timing
from suppose.camera import load_calibration
import pandas as pd

log = Logger("pose3d")


def undistort_points(pts, calibration):
    if pts.size == 0:
        return pts
    camera_matrix = calibration["cameraMatrix"]
    distortion_coefficients = calibration["distortionCoefficients"]
    original_shape = pts.shape
    pts2 = np.ascontiguousarray(pts).reshape(-1, 1, 2)
    undistorted_points = cv2.undistortPoints(pts2, camera_matrix,
                                             distortion_coefficients,
                                             P=camera_matrix)
    undistorted_points = undistorted_points.reshape(original_shape)
    return undistorted_points
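# Quick sanity check for undistort_points(): with an identity camera matrix and
# zero distortion coefficients the call is a no-op (values here are synthetic,
# not from a real calibration file).
calibration = {
    "cameraMatrix": np.eye(3),
    "distortionCoefficients": np.zeros(5),
}
pts = np.array([[320.0, 240.0], [100.0, 50.0]])
assert np.allclose(undistort_points(pts, calibration), pts)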
##
# @file test_blotter.py
# @brief Tests the simulator's order matching, current positions, equity,
#        and available cash.
# @author wondereamer
# @version 0.3
# @date 2015-01-06

import six
import datetime
import unittest
import pandas as pd
import os
from quantdigger.datastruct import Contract
from quantdigger import *
from logbook import Logger

logger = Logger('test')

capital = 200000000
OFFSET = 0.6
buy1 = datetime.datetime.strptime("09:01:00", "%H:%M:%S").time()
buy2 = datetime.datetime.strptime("09:02:00", "%H:%M:%S").time()
buy3 = datetime.datetime.strptime("09:03:00", "%H:%M:%S").time()
sell1 = datetime.datetime.strptime("14:57:00", "%H:%M:%S").time()
sell2 = datetime.datetime.strptime("14:58:00", "%H:%M:%S").time()
sell3 = datetime.datetime.strptime("15:00:00", "%H:%M:%S").time()

fname = os.path.join(os.getcwd(), 'data', '1MINUTE', 'TEST', 'STOCK.csv')
source = pd.read_csv(fname, parse_dates=True, index_col=0)


class TestOneDataOneCombinationStock(unittest.TestCase):
    """Tests the trading interfaces for single-data, single-combination stock
    setups: order matching, position queries (including default positions),
    and available funds."""

    def test_case(self):
def init_class_fixtures(cls):
    super(WithLogger, cls).init_class_fixtures()
    cls.log = Logger()
    cls.log_handler = cls.enter_class_context(
        cls.make_log_handler().applicationbound(),
    )
from zipline.finance.transaction import Transaction as ZPTransaction
from zipline.api import symbol
from zipline.gens.type import *
import datetime
from logbook import Logger
import pandas as pd
from tdx.engine import Engine
import numpy as np
import zerorpc
import platform
from zipline.errors import SymbolNotFound

if platform.architecture()[0] == '32bit':
    from zipline.gens.tdx_client import TdxClient

log = Logger("TDX Broker")


class TdxBroker(Broker):

    def __init__(self, tdx_uri, account_id=None):
        self._orders = {}
        if tdx_uri.startswith('tcp'):
            self._client = zerorpc.Client()
            self._client.connect(tdx_uri)
        elif platform.architecture()[0] == '32bit':
            self._client = TdxClient(tdx_uri)
            self._client.login()
        else:
            raise Exception("please use 32bit python to use local client directly,"
                            " or use tcp client")
        self.currency = 'RMB'
class BTgymServer(multiprocessing.Process):
    """Backtrader server class.

    Expects to receive dictionary, containing at least 'action' field.

    Control mode IN::

        dict(action=<control action, type=str>,),
        where control action is:
        '_reset' - rewinds backtrader engine and runs new episode;
        '_getstat' - retrieve episode results and statistics;
        '_stop' - server shut-down.

    Control mode OUT::

        <string message> - reports current server status;
        <statistic dict> - last run episode statistics. NotImplemented.

    Within-episode signals:

    Episode mode IN:

        dict(action=<agent_action, type=str>,),
        where agent_action is:
        {'buy', 'sell', 'hold', 'close', '_done'} - agent or service actions;
        '_done' - stops current episode;

    Episode mode OUT::

        response <tuple>:
        observation, <array> - observation of the current environment state,
            could be any tensor; default is: [4,m] array of <fl32>, where:
            m - num. of last datafeed values,
            4 - num. of data features (Lines);
        reward, <any> - current portfolio statistics for environment reward
            estimation;
        done, <bool> - episode termination flag;
        info, <list> - auxiliary information.
    """
    data_server_response = None

    def __init__(self,
                 cerebro=None,
                 render=None,
                 network_address=None,
                 data_network_address=None,
                 connect_timeout=90,
                 log_level=None,
                 task=0):
        """
        Args:
            cerebro:              backtrader.cerebro engine class.
            render:               render class
            network_address:      environment communication, str
            data_network_address: data communication, str
            connect_timeout:      seconds, int
            log_level:            int, logbook.level
        """
        super(BTgymServer, self).__init__()
        self.task = task
        self.log_level = log_level
        self.log = None
        self.process = None
        self.cerebro = cerebro
        self.network_address = network_address
        self.render = render
        self.data_network_address = data_network_address
        self.connect_timeout = connect_timeout  # server connection timeout in seconds.
        self.connect_timeout_step = 0.01

    @staticmethod
    def _comm_with_timeout(socket, message):
        """
        Exchanges messages via socket with timeout.

        Note:
            socket zmq.RCVTIMEO and zmq.SNDTIMEO should be set to some finite
            number of milliseconds

        Returns:
            dictionary:
                status: communication result;
                message: received message, if any.
        """
        response = dict(status='ok', message=None)
        try:
            socket.send_pyobj(message)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'send_failed_due_to_connect_timeout'
            else:
                response['status'] = 'send_failed_for_unknown_reason'
            return response
        start = time.time()
        try:
            response['message'] = socket.recv_pyobj()
            response['time'] = time.time() - start
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'receive_failed_due_to_connect_timeout'
            else:
                response['status'] = 'receive_failed_for_unknown_reason'
            return response
        return response

    def get_data(self, reset_kwargs):
        """
        Args:
            reset_kwargs: dictionary of args to pass to parent data iterator

        Returns:
            trial_sample, trial_stat, dataset_stat
        """
        wait = 0
        while True:
            # Get new data subset:
            data_server_response = self._comm_with_timeout(
                socket=self.data_socket,
                message={'ctrl': '_get_data', 'kwargs': reset_kwargs})
            if data_server_response['status'] == 'ok':
                self.log.debug('Data_server responded with data in about {} seconds.'.
                               format(data_server_response['time']))
            else:
                msg = 'BtgymServer_sampling_attempt: data_server unreachable with status: <{}>.'. \
                    format(data_server_response['status'])
                self.log.error(msg)
                raise ConnectionError(msg)

            # Ready or not?
            try:
                assert 'Dataset not ready' in data_server_response['message']['ctrl']
                if wait <= self.wait_for_data_reset:
                    pause = random.random() * 2
                    time.sleep(pause)
                    wait += pause
                    self.log.info('Domain dataset not ready, wait time left: {:4.2f}s.'.
                                  format(self.wait_for_data_reset - wait))
                else:
                    data_server_response = self._comm_with_timeout(
                        socket=self.data_socket,
                        message={'ctrl': '_stop'})
                    self.socket.close()
                    self.context.destroy()
                    raise RuntimeError('Failed to assert Domain dataset is ready. Exiting.')
            except (AssertionError, KeyError):
                break

        # Get trial instance:
        trial_sample = data_server_response['message']['sample']
        trial_stat = trial_sample.describe()
        trial_sample.reset()
        dataset_stat = data_server_response['message']['dataset_stat']

        return trial_sample, trial_stat, dataset_stat

    def run(self):
        """
        Server process runtime body. This method is invoked by env._start_server().
        """
        # Logging:
        from logbook import Logger, StreamHandler, WARNING
        import sys
        StreamHandler(sys.stdout).push_application()
        if self.log_level is None:
            self.log_level = WARNING
        self.log = Logger('BTgym_Server_{}'.format(self.task), level=self.log_level)

        self.process = multiprocessing.current_process()
        self.log.info('PID: {}'.format(self.process.pid))

        # Runtime Housekeeping:
        cerebro = None
        episode_result = dict()
        trial_sample = None
        trial_stat = None
        dataset_stat = None

        # How long to wait for data_master to reset data:
        self.wait_for_data_reset = 300  # seconds

        connect_timeout = 60  # in seconds

        # Set up a comm. channel for server as ZMQ socket
        # to carry both service and data signal
        # !! Reminder: Since we use REQ/REP - messages do go in pairs !!
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.setsockopt(zmq.RCVTIMEO, -1)
        self.socket.setsockopt(zmq.SNDTIMEO, connect_timeout * 1000)
        self.socket.bind(self.network_address)

        self.data_context = zmq.Context()
        self.data_socket = self.data_context.socket(zmq.REQ)
        self.data_socket.setsockopt(zmq.RCVTIMEO, connect_timeout * 1000)
        self.data_socket.setsockopt(zmq.SNDTIMEO, connect_timeout * 1000)
        self.data_socket.connect(self.data_network_address)

        # Check connection:
        self.log.debug('Pinging data_server at: {} ...'.format(self.data_network_address))
        data_server_response = self._comm_with_timeout(
            socket=self.data_socket,
            message={'ctrl': 'ping!'})
        if data_server_response['status'] == 'ok':
            self.log.debug('Data_server seems ready with response: <{}>'.
                           format(data_server_response['message']))
        else:
            msg = 'Data_server unreachable with status: <{}>.'.\
                format(data_server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        # Init renderer:
        self.render.initialize_pyplot()

        # Server 'Control Mode' loop:
        for episode_number in itertools.count(0):
            while True:
                # Stuck here until '_reset' or '_stop':
                service_input = self.socket.recv_pyobj()
                msg = 'Control mode: received <{}>'.format(service_input)
                self.log.debug(msg)

                if 'ctrl' in service_input:
                    # It's time to exit:
                    if service_input['ctrl'] == '_stop':
                        # Server shutdown logic:
                        # send last run statistic, release comm channel and exit:
                        message = 'Exiting.'
                        self.log.info(message)
                        self.socket.send_pyobj(message)
                        self.socket.close()
                        self.context.destroy()
                        return None

                    # Start episode:
                    elif service_input['ctrl'] == '_reset':
                        message = 'Starting episode with kwargs: {}'.format(service_input['kwargs'])
                        self.log.debug(message)
                        self.socket.send_pyobj(message)  # pairs '_reset'
                        break

                    # Retrieve statistic:
                    elif service_input['ctrl'] == '_getstat':
                        self.socket.send_pyobj(episode_result)
                        self.log.debug('Episode statistic sent.')

                    # Send episode rendering:
                    elif service_input['ctrl'] == '_render' and 'mode' in service_input.keys():
                        # Just send what we got:
                        self.socket.send_pyobj(self.render.render(service_input['mode'],))
                        self.log.debug('Episode rendering for [{}] sent.'.format(service_input['mode']))

                    else:  # ignore any other input
                        # NOTE: response string must include 'ctrl' key
                        # for env.reset(), env.get_stat(), env.close() correct operation.
                        message = {'ctrl': 'send control keys: <_reset>, <_getstat>, <_render>, <_stop>.'}
                        self.log.debug('Control mode: sent: ' + str(message))
                        self.socket.send_pyobj(message)  # pairs any other input

                else:
                    message = 'No <ctrl> key received:{}\nHint: forgot to call reset()?'.format(msg)
                    self.log.debug(message)
                    self.socket.send_pyobj(message)

            # Got '_reset' signal -> prepare Cerebro subclass and run episode:
            start_time = time.time()
            cerebro = copy.deepcopy(self.cerebro)
            cerebro._socket = self.socket
            cerebro._log = self.log
            cerebro._render = self.render

            # Add DrawDown observer if not already:
            dd_added = False
            for observer in cerebro.observers:
                if bt.observers.DrawDown in observer:
                    dd_added = True
            if not dd_added:
                cerebro.addobserver(bt.observers.DrawDown)

            # Add communication utility:
            cerebro.addanalyzer(_BTgymAnalyzer, _name='_env_analyzer',)

            # Parse resetting kwargs: if need to request new data range from
            # dataserver or sample from existing one:
            reset_kwargs = dict(new_trial=True, episode_type=None,)
            if service_input['kwargs'] is not None:
                reset_kwargs.update(service_input['kwargs'])
            assert reset_kwargs['episode_type'] in [0, 1, None], \
                'Expected `episode_type` be 0 (train), 1 (test) or None, got: {}'.format(reset_kwargs['episode_type'])

            if reset_kwargs['new_trial'] or trial_sample is None:
                self.log.debug('Requesting new data from data server...')
                trial_sample, trial_stat, dataset_stat = self.get_data(reset_kwargs)
                self.log.debug('Got new Trial <{}>'.format(trial_sample.filename))
            else:
                self.log.debug('Sampling from existing Trial <{}>'.format(trial_sample.filename))

            # Sample requested type of episode:
            # TODO: if using sample-bounded data iterator: request new one if exhausted
            episode_sample = trial_sample.sample(type=reset_kwargs['episode_type'])
            self.log.debug('Got new episode <{}> '.format(episode_sample.filename))

            # Get episode data statistic and pass it to strategy params:
            cerebro.strats[0][0][2]['trial_stat'] = trial_stat
            cerebro.strats[0][0][2]['trial_metadata'] = trial_sample.metadata
            cerebro.strats[0][0][2]['dataset_stat'] = dataset_stat
            cerebro.strats[0][0][2]['episode_stat'] = episode_sample.describe()
            cerebro.strats[0][0][2]['metadata'] = episode_sample.metadata

            # Set nice broker cash plotting:
            cerebro.broker.set_shortcash(False)

            # Convert and add data to engine:
            cerebro.adddata(episode_sample.to_btfeed())

            # Finally:
            episode = cerebro.run(stdstats=True, preload=False, oldbuysell=True)[0]

            # Update episode rendering:
            _ = self.render.render('just_render', cerebro=cerebro)
            _ = None

            # Recover that bloody analytics:
            analyzers_list = episode.analyzers.getnames()
            analyzers_list.remove('_env_analyzer')

            elapsed_time = timedelta(seconds=time.time() - start_time)
            self.log.info('Episode elapsed time: {}.'.format(elapsed_time))

            episode_result['episode'] = episode_number
            episode_result['runtime'] = elapsed_time
            episode_result['length'] = len(episode.data.close)

            for name in analyzers_list:
                episode_result[name] = episode.analyzers.getbyname(name).get_analysis()

            gc.collect()

        # Just in case -- we actually shouldn't get there except by some error:
        return None
from decimal import Decimal

from logbook import Logger
from lxml import etree
from lxml.builder import E

from ups.rating_package import RatingService
from ups.base import PyUPSException

from trytond.model import ModelView, fields
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from trytond.pyson import Eval, Bool

__all__ = ['Configuration', 'Sale']
__metaclass__ = PoolMeta

logger = Logger('trytond_ups')

UPS_PACKAGE_TYPES = [
    ('01', 'UPS Letter'),
    ('02', 'Customer Supplied Package'),
    ('03', 'Tube'),
    ('04', 'PAK'),
    ('21', 'UPS Express Box'),
    ('24', 'UPS 25KG Box'),
    ('25', 'UPS 10KG Box'),
    ('30', 'Pallet'),
    ('2a', 'Small Express Box'),
    ('2b', 'Medium Express Box'),
    ('2c', 'Large Express Box'),
]
from catalyst.utils.calendars import get_calendar
from catalyst.utils.factory import create_simulation_parameters
from catalyst.data.loader import load_crypto_market_data
import catalyst.utils.paths as pth

from catalyst.exchange.exchange_algorithm import (
    ExchangeTradingAlgorithmLive,
    ExchangeTradingAlgorithmBacktest,
)
from catalyst.exchange.exchange_data_portal import DataPortalExchangeLive, \
    DataPortalExchangeBacktest
from catalyst.exchange.exchange_asset_finder import ExchangeAssetFinder

from catalyst.constants import LOG_LEVEL

log = Logger('run_algo', level=LOG_LEVEL)


class _RunAlgoError(click.ClickException, ValueError):
    """Signal an error that should have a different message if invoked from
    the cli.

    Parameters
    ----------
    pyfunc_msg : str
        The message that will be shown when called as a python function.
    cmdline_msg : str
        The message that will be shown on the command line.
    """
    exit_code = 1
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from datetime import time

from logbook import Logger
import pandas as pd

from zipline.gens.sim_engine import (BAR, SESSION_START, SESSION_END,
                                     MINUTE_END, BEFORE_TRADING_START_BAR)
from zipline.utils.calendars.trading_calendar import days_at_time

log = Logger('Realtime Clock')


class RealtimeClock(object):
    """Realtime clock for live trading.

    This class is a drop-in replacement for
    :class:`zipline.gens.sim_engine.MinuteSimulationClock`.

    The key difference between the two is that the RealtimeClock's event
    emission is synchronized to the (broker's) wall time clock, while
    MinuteSimulationClock yields a new event on every iteration (regardless
    of wall clock).

    The :param:`time_skew` parameter represents the time difference between
    the Broker and the live trading machine's clock.
    """
from logbook import Logger

from catalyst.api import (
    record,
    order,
    symbol
)
from catalyst.exchange.utils.stats_utils import get_pretty_stats
from catalyst.utils.run_algo import run_algorithm

algo_namespace = 'arbitrage_eth_btc'
log = Logger(algo_namespace)


def initialize(context):
    log.info('initializing arbitrage algorithm')

    # The context contains a new "exchanges" attribute which is a dictionary
    # of exchange objects by exchange name. This allows easy access to the
    # exchanges.
    context.buying_exchange = context.exchanges['poloniex']
    context.selling_exchange = context.exchanges['binance']

    context.trading_pair_symbol = 'eth_btc'
    context.trading_pairs = dict()

    # Note the second parameter of the symbol() method.
    # Passing the exchange name here returns a TradingPair object including
    # the exchange information. This allows all other operations using
    # the TradingPair to target the correct exchange.
    context.trading_pairs[context.buying_exchange] = \
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================

import os

import config
from eos import db
from eos.db import migration
from eos.db.saveddata.loadDefaultDatabaseValues import DefaultDatabaseValues
from eos.db.saveddata.databaseRepair import DatabaseCleanup
from logbook import Logger

pyfalog = Logger(__name__)

# Make sure the saveddata db exists
if config.savePath and not os.path.exists(config.savePath):
    os.mkdir(config.savePath)

if config.saveDB and os.path.isfile(config.saveDB):
    # If database exists, run migration after init'd database
    pyfalog.debug("Run database migration.")
    db.saveddata_meta.create_all()
    migration.update(db.saveddata_engine)
    # Import default database values
    # Import values that must exist otherwise Pyfa breaks
    pyfalog.debug("Import Required Database Values.")
    DefaultDatabaseValues.importRequiredDefaults()