Example #1
class ImagesManager(object):
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/images_manager.log')
        handler.push_application()
        self.logger = Logger(name='Docker Images Manage Api', level=11)

        os.environ['DOCKER_API_VERSION'] = '1.39'  # environment values must be strings
        self.logger.debug(os.environ.get('DOCKER_API_VERSION'))
        self.docker_client = docker.from_env()

    def run_container(self,
                      image_name,
                      cont_name,
                      command,
                      detach=True,
                      cont_ports=None):  # avoid a mutable default argument
        return self.docker_client.containers.run(image=image_name, command=command, name=cont_name,
                                                 detach=detach, ports=cont_ports or {})

    def commit_container(self, container, commits):
        container.wait()
        return container.commit(commits)

    def get_container(self, cont_id):
        return self.docker_client.containers.get(cont_id)

    def list_container(self, **kwargs):
        return self.docker_client.containers.list(**kwargs)

    def remove_container(self, cont_id):
        return self.docker_client.api.remove_container(cont_id)
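The handler-plus-Logger pattern used above recurs throughout these examples. A minimal, self-contained sketch of that logbook setup (the log file path and logger name are illustrative):

from logbook import Logger, TimedRotatingFileHandler

# push a handler onto logbook's application-wide handler stack,
# then create a named logger that writes through it
handler = TimedRotatingFileHandler('images_manager.log')  # illustrative path
handler.push_application()

log = Logger('ImagesManager')
log.debug('logbook handler and logger are ready')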
Example #2
class Router(Thread):
    """Thread waiting for a request by another Driver and responding to
    it with the requested chunk.
    """
    def __init__(self, name, redis, get_chunk):
        super(Router, self).__init__()

        self.name = name
        self.redis = redis
        self.get_chunk = get_chunk
        self.router = None

        self.logger = Logger("{} - Router".format(self.name))
        self.logger.info("Started")

        self.context = zmq.Context.instance()

    def run(self):
        self.router = self.context.socket(zmq.ROUTER)
        port = self.router.bind_to_random_port('tcp://*')
        self.redis.set('drivers:{}:router'.format(self.name), port)

        while True:
            msg = self.router.recv_multipart()
            self._respond_to(*msg)

    def _respond_to(self, identity, filename, offset, size):
        """Calls the `get_chunk` handler defined by the Driver to get
        the chunk and send it to the addressee.
        """
        self.logger.debug("Getting chunk of size {} from offset {} in '{}'",
                          size, offset, filename)
        chunk = self.get_chunk(filename, int(offset), int(size))
        self.router.send_multipart((identity, chunk))
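A hedged sketch of the client side implied by _respond_to() above: a hypothetical DEALER socket sends the filename, offset and size as separate frames and receives the chunk back. The endpoint and filename are illustrative; in practice the port would be read from the 'drivers:<name>:router' Redis key.

import zmq

context = zmq.Context.instance()
dealer = context.socket(zmq.DEALER)
dealer.connect('tcp://localhost:12345')  # illustrative; the real port comes from Redis

# the ROUTER receives [identity, filename, offset, size] and replies with [identity, chunk]
dealer.send_multipart([b'some_file.bin', b'0', b'4096'])
chunk = dealer.recv_multipart()[0]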
Example #3
def rpc_server(socket, protocol, dispatcher):
    log = Logger('rpc_server')
    log.debug('starting up...')
    while True:
        try:
            message = socket.recv_multipart()
        except Exception as e:
            log.warning('Failed to receive message from client, ignoring...')
            log.exception(e)
            continue

        log.debug('Received message %s from %r' % (message[-1], message[0]))

        # assuming protocol is threadsafe and dispatcher is threadsafe, as long
        # as it's immutable

        def handle_client(message):
            try:
                request = protocol.parse_request(message[-1])
            except RPCError as e:
                log.exception(e)
                response = e.error_respond()
            else:
                response = dispatcher.dispatch(request)
                log.debug('Response okay: %r' % response)

            # send reply
            message[-1] = response.serialize()
            log.debug('Replying %s to %r' % (message[-1], message[0]))
            socket.send_multipart(message)

        gevent.spawn(handle_client, message)
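A hedged sketch of how rpc_server() could be wired up, assuming the tinyrpc-style protocol and dispatcher objects that the parse_request/dispatch/error_respond calls suggest; the endpoint is illustrative:

import zmq.green as zmq
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol

dispatcher = RPCDispatcher()

@dispatcher.public
def ping():
    return 'pong'

context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.bind('tcp://127.0.0.1:5001')  # illustrative endpoint

rpc_server(socket, JSONRPCProtocol(), dispatcher)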
Example #4
class DigitempCli(object):
    """docstring for DigitempCli"""
    last_cmd = ''
    def __init__(self):
        self.Log = Logger('DigitempCli')
        self.idn = 'DigitempCli %d' % id(self)
        self.digitemp = dt.Digitemp()

    def __unicode__(self):
        return str(self)

    def send(self, cmd, **kwargs):
        self.Log.debug('send(cmd=%s, kwargs=%s)' % (cmd, str(kwargs)))
        self.last_cmd = cmd

        dt_method = getattr(self.digitemp, cmd)
        dt_method()

        return True

    def read(self, **kwargs):
        self.Log.debug('read(kwargs=%s)' % str(kwargs))
        return (0, 'DigitempCli response to %s' % self.last_cmd)

    def query(self, cmd, **kwargs):
        try:
            dt_method = getattr(self.digitemp, cmd)
            result = [0, dt_method()]
        except Exception as e:
            result = [1, str(e)]

        return result
Example #5
def start_parse(debug, file, save):
    """ main function for starting everything"""
    if debug:
        log_level = 'DEBUG'
    else:
        log_level = 'WARNING'

    StreamHandler(stdout, level=log_level).push_application()
    log = Logger('main')
    log.debug('Starting up...')

    games = parse(file)

    log.info("Games: {}", games.count)
    if save:
        log.debug('Saving data to json files')
        l_game = []
        for game in games.items():
            name = game.sid.strftime("%Y-%m-%d_%H%M%S")
            l_game.append(name)
            filename = save + name + ".json"
            with open(filename, 'w') as file:
                file.write(jsonpickle.encode(game))
                log.debug("wrote game info to {file}".format(file=filename))
        filename = save + 'hs2017.json'
        highscore = score_count(games)
        data = {'games': l_game, 'highscore': highscore}
        with open(filename, 'w') as file:
            log.debug("wrote highscore info to {file}".format(file=filename))
            file.write(json.dumps(data))

    log.debug('End of program...')
Example #6
def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.
    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)

    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." % sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return
    tstart = time.time()
    p = Popen(cmd, shell=True, executable=find_bash())
    p.wait()
    if p.returncode != 0:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." % (sample, p.returncode))
        sys.exit(p.returncode)
    tend = time.time()
    elapsed = tend - tstart
    runner_log.info("%s completed preprocessing and alignment in %s seconds." % (sample, str(round(elapsed, 5))))
Example #7
File: cmd.py Project: mbr/githome
def shell(obj, username):
    gh = obj['githome']

    # create the logger before the try block so the except clause can always use it
    log = Logger('githome-shell')

    try:
        # get user
        user = gh.get_user_by_name(username)

        log = Logger('githome-shell [{}]'.format(user.name))

        # we've got our user, now authorize him or not
        shell_cmd = shlex.split(os.environ.get('SSH_ORIGINAL_COMMAND', ''))
        log.debug('SSH_ORIGINAL_COMMAND {!r}'.format(shell_cmd))

        if not shell_cmd:
            log.critical('No shell command given')
            abort(1)

        cmd = gh.authorize_command(user, shell_cmd)

        log.debug('Executing {!r}', cmd)

        binary = cmd[0]  # we use path through execlp
    except Exception as e:
        log.error(str(e))
        abort(1)
    else:
        os.execlp(binary, *cmd)
Example #10
def rpc_server(socket, protocol, dispatcher):
    log = Logger('rpc_server')
    log.debug('starting up...')
    while True:
        try:
            message = socket.recv_multipart()
        except Exception as e:
            log.warning('Failed to receive message from client, ignoring...')
            log.exception(e)
            continue

        log.debug('Received message %s from %r', message[-1], message[0])

        # assuming protocol is threadsafe and dispatcher is threadsafe, as long
        # as it's immutable

        def handle_client(message):
            try:
                request = protocol.parse_request(message[-1])
            except RPCError as e:
                log.exception(e)
                response = e.error_respond()
            else:
                response = dispatcher.dispatch(request)
                log.debug('Response okay: %r', response)

            # send reply
            message[-1] = response.serialize()
            log.debug('Replying %s to %r', message[-1], message[0])
            socket.send_multipart(message)

        gevent.spawn(handle_client, message)
Example #11
def get_loop_length(log: Logger, sse1: Frame3D, sse2: Frame3D, loop_step: int,
                    loop_range: int) -> Tuple[int, int]:
    """Calculate the expected number of residues to join two SSE.

    :param log: Job Logger.
    :param sse1: N-SSE.
    :param sse2: C-SSE.
    :param loop_step: Assumption on how much distance a residue can cover.
    :param loop_range: Plus-minus range of residue length.
    """
    from SBI.structure import ChainFrame
    from SBI.structure.geometry.basics import distance

    res1 = ChainFrame(PDB(sse1)).last_compound
    res2 = ChainFrame(PDB(sse2)).first_compound
    distance = distance(res1[res1['label_atom_id'] == 'C'].coordinates,
                        res2[res2['label_atom_id'] == 'N'].coordinates)
    log.debug(f'Distance between SSEs is {distance} Angstroms.')
    distance = math.ceil(distance / loop_step)
    log.debug(
        f'Assuming the need of {distance} residues with a {loop_range} residue range.'
    )
    distance = [
        x for x in range(distance - loop_range - 1, distance + loop_range + 1)
        if x > 0
    ]
    return max(distance), min(distance)
Example #12
def test2():
    log = Logger('Logbook-test-2')
    log.critical("critical")
    log.error("error")
    log.warn("warn")
    log.notice("notice")
    log.info("test")
    log.debug("debug")
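Which of the calls above actually produce output depends on the handler pushed beforehand; a minimal sketch (the handler level is illustrative):

import sys
from logbook import StreamHandler

# records below WARNING (notice, info, debug) are filtered out by this handler
StreamHandler(sys.stdout, level='WARNING').push_application()
test2()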
Example #13
def single_segment_eigens(log: Logger, motif: Frame3D,
                          hotspot: Frame3D) -> List[Dict]:
    """Generate eigenvectors for a single segment motif.

    :param motif: motif to geometrically analyze.
    :param hotspot: CA atom to guide front-view.
    """
    log.debug('Single segment geometric analysis.')
    # Get the orientation
    eigens = dict(
        map(
            reversed,
            zip(motif['AtomType:CA'].eigenvectors(10),
                ('perpendicular', 'side', 'major'))))
    edist = [
        np.linalg.norm(hotspot.coordinates - eigens['perpendicular'][0]),
        np.linalg.norm(hotspot.coordinates - eigens['perpendicular'][-1]),
        np.linalg.norm(hotspot.coordinates - eigens['side'][0]),
        np.linalg.norm(hotspot.coordinates - eigens['side'][-1])
    ]
    mdist = edist.index(min(edist))
    if mdist > 1:  # swap side and perpendicular
        log.debug('Swap side and perpendicular axes.')
        tmp = eigens['side']
        eigens['side'] = eigens['perpendicular']
        eigens['perpendicular'] = tmp
        if mdist == 2:  # Change perpendicular orientation
            log.debug('Flip perpendicular axis.')
            eigens['perpendicular'] = np.flip(eigens['perpendicular'], axis=0)
    if mdist == 0:  # Change perpendicular orientation
        log.debug('Flip perpendicular axis.')
        eigens['perpendicular'] = np.flip(eigens['perpendicular'], axis=0)
    return [
        eigens,
    ]
Example #14
def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    setup_logger(app.config)
    log = Logger("APP")

    log.debug(mysql_client.init_app(app))

    from app.api import api as api_blueprint
    
    app.register_blueprint(api_blueprint, url_prefix='/api')

    return app
Example #15
class Plugin(object):
    def __init__(self, site, name=None):
        self.name = name or remove_suffix('plugin', self.__class__.__name__)
        self.log = Logger(self.__class__.__name__.lower())

        self.log.debug('{} initialized'.format(self.name))
        self.base_dir = Path(inspect.getfile(self.__class__)).parent

        # initialize templates
        template_path = self.base_dir / 'templates'
        if template_path.exists():
            self.jinja_env = jinja2.Environment(
                loader=jinja2.FileSystemLoader(str(template_path)),
                extensions=['jinja2.ext.with_']
            )

        # load possible default configuration
        self.register(site)

    @property
    def DEFAULTS_FILE(self):
        return self.base_dir / 'defaults.cfg'

    def register(self, site):
        pass

    def enable_app(self, app):
        pass

    def render_template(self, template_name, **kwargs):
        if not hasattr(self, 'jinja_env'):
            raise RuntimeError('Plugin {} has no template path'.format(
                self.__class__.__name__
            ))
        tpl = self.jinja_env.get_template(template_name)
        return tpl.render(**kwargs)

    def output_template(self, template_name, dest, _mode=0o644, **kwargs):
        if not dest.parent.exists():
            self.log.warning('Path {} did not exist and was created'.format(
                             dest.parent,
            ))
            dest.parent.mkdir(parents=True)

        with new_file(dest, _mode) as out:
            self.log.info('Writing {}'.format(dest.resolve()))
            out.write(self.render_template(template_name, **kwargs))
Example #16
def main():
    global log
    StderrHandler().push_application()
    log = Logger("xbbs.coordinator")

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "coordinator.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    inst = Xbbs.create(cfg)

    for name, elem in cfg["projects"].items():
        project = Project(name,
                          **elem,
                          base=path.join(inst.project_base, name))
        inst.projects[name] = project
        os.makedirs(project.base, exist_ok=True)
        log.debug("got project {}", inst.projects[name])

    with inst.zmq.socket(zmq.REP) as sock_cmd, \
         inst.zmq.socket(zmq.PULL) as inst.intake, \
         inst.zmq.socket(zmq.ROUTER) as inst.worker_endpoint:
        # XXX: potentially make perms overridable? is that useful in any
        #      capacity?
        inst.intake.bind(cfg["intake"]["bind"])
        _ipc_chmod(cfg["intake"]["bind"], 0o664)

        inst.worker_endpoint.bind(cfg["worker_endpoint"])
        inst.worker_endpoint.set(zmq.ROUTER_MANDATORY, 1)
        _ipc_chmod(cfg["worker_endpoint"], 0o664)

        sock_cmd.bind(cfg["command_endpoint"]["bind"])
        _ipc_chmod(cfg["command_endpoint"]["bind"], 0o664)

        dumper = gevent.signal_handler(signal.SIGUSR1, dump_projects, inst)
        log.info("startup")
        intake = gevent.spawn(intake_loop, inst)
        job_pull = gevent.spawn(job_pull_loop, inst)
        try:
            command_loop(inst, sock_cmd)
        finally:
            # XXX: This may not be the greatest way to handle this
            gevent.killall(inst.project_greenlets[:])
            gevent.kill(intake)
            gevent.kill(job_pull)
            dumper.cancel()
Example #17
def main():
    logbook.concurrency.enable_gevent()

    global log
    StderrHandler().push_application()
    log = Logger('xbbs.worker')
    inst = XbbsWorker()

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "worker.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    job_request = msgs.JobRequest(capabilities=cfg["capabilities"]).pack()

    gevent.signal_handler(signal.SIGUSR1, gevent.util.print_run_info)

    log.info(cfg)
    while True:
        with inst.zmq.socket(zmq.REQ) as jobs:
            jobs.connect(cfg["job_endpoint"])

            while True:
                jobs.send(job_request)
                log.debug("waiting for job...")
                # the coordinator sends a heartbeat each minute, so 1.5 minutes
                # should be a sane duration to assume coordinator death on
                if jobs.poll(90000) == 0:
                    # breaking the inner loop will cause a reconnect
                    # since the coordinator is presumed dead, drop requests yet
                    # unsent to it
                    jobs.set(zmq.LINGER, 0)
                    log.debug("dropping socket after a heartbeat timeout")
                    break
                try:
                    msg = jobs.recv()
                    if len(msg) == 0:
                        # drop null msgs
                        continue
                    process_job_msg(inst, msg)
                except KeyboardInterrupt:
                    log.exception("interrupted")
                    return
                except Exception as e:
                    log.exception("job error", e)
Example #18
class Main(object):
    """Main loop."""

    def __init__(self, conf):
        self.conf = conf
        handler = TimedRotatingFileHandler(conf.log_file, date_format="%Y-%m-%d")
        handler.push_application()
        self.logger = Logger("Firetower-server")
        self.queue = redis_util.get_redis_conn(host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db)
        self.classifier = classifier.Levenshtein()
        self.last_archive = None

    def get_error(self):
        """Get the next error to be categorised"""
        return self.queue.rpop(self.conf.queue_key)

    def run_archiving(self):
        """Run the timeseries archiving for all categories
        """
        now = datetime.datetime.utcnow()
        if self.last_archive is None:
            self.last_archive = datetime.datetime.utcnow()
            return

        delta = datetime.timedelta(seconds=self.conf.archive_time)
        if self.last_archive < (now - delta):
            self.logger.debug("Archiving counts older than %s seconds" % (self.conf.archive_time,))
            for c in category.Category.get_all_categories(self.queue):
                self.logger.debug("Archiving for %s category" % (c.cat_id))
                c.timeseries.archive_cat_counts(self.last_archive)
            self.last_archive = now

    def run(self):
        """Drop into a loop pulling errors and categorizing them"""
        while 1:
            err = self.get_error()
            self.run_archiving()
            if err:
                parsed = json.loads(err)
                category.Category.classify(self.queue, self.classifier, parsed, self.conf.class_thresh)
            else:
                time.sleep(1)
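The loop above pops JSON-encoded errors with rpop(); a hedged sketch of the producer side (the queue key, connection parameters and payload fields are assumptions, not taken from the example):

import json
import redis

conn = redis.Redis(host='localhost', port=6379, db=0)  # illustrative connection
# lpush pairs with the rpop in get_error() to form a FIFO queue
conn.lpush('incoming_errors', json.dumps({'sig': 'Traceback (most recent call last): ...'}))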
Example #19
class UrlToMarkdown(object):
    default_mobilizer = "original"

    def __init__(self, mobilizer="original"):
        if mobilizer:
            if not getattr(mobilizers, mobilizer.capitalize() + "Mobilizer", None):
                raise Exception("Invalid mobilizer: {}".format(mobilizer))
            self.default_mobilizer = mobilizer
        self.log = Logger(self.__class__.__name__)

    def convert(self, url, mobilizer=None, simple_result=True):
        """Fetch a page from URL, mobilize it, then convert it to Markdown

        url: ...
        mobilizer: 'original', 'instapaper',
        simple_result: True returns markdown text, else returns a dict
        """
        if not mobilizer:
            mobilizer = self.default_mobilizer
        try:
            mob_object = getattr(mobilizers, mobilizer.capitalize() + "Mobilizer")
        except AttributeError:
            raise Exception("Invalid mobilizer: {}".format(mobilizer))
        mob = mob_object()

        self.log.debug(
            "Obtaining {url} via {mobilizer}".format(url=url, mobilizer=mobilizer)
        )
        mobilized = mob.fetch(url)
        self.log.info("Title is {0[title]!r}".format(mobilized))

        self.log.debug("Converting {url} to Markdown".format(url=url))
        h2t = HTML2Text()
        # html2text also wraps image/link URLs, breaking them
        h2t.body_width = 0

        mobilized["markdown"] = h2t.handle(mobilized["body"].html())
        self.log.info("Converted to Markdown")
        if simple_result:
            return mobilized["markdown"]
        return mobilized
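A hypothetical usage of the converter above (the URL and mobilizer name are illustrative):

converter = UrlToMarkdown(mobilizer='original')
markdown = converter.convert('https://example.com/article')
print(markdown)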
Example #20
    def wrapper(self, request, *args, **kwargs):
        was_interrupted = [False]
        log = Logger('web')

        def on_cancel(failure):
            err(failure, 'Call to "%s" was interrupted' % request.path)
            was_interrupted[0] = True

        request.notifyFinish().addErrback(on_cancel)

        def finish_request(result):
            log.debug('%s(%r, args=%r, kwargs=%r)=%s' % (func.__name__, request, args, kwargs, result))

            if isinstance(result, Failure):
                request.setResponseCode(INTERNAL_SERVER_ERROR)
                log.exception('Call to %s(%r, args=%r, kwargs=%r) failed' % (func.__name__, request, args, kwargs), exc_info = (result.type, result.value, result.getTracebackObject()))

            if not was_interrupted[0] and result != NOT_DONE_YET:
                request.finish()

        log.debug('Calling %s(%r, args=%r, kwargs=%r)' % (func.__name__, request, args, kwargs))
        d = func(self, request, *args, **kwargs)
        log.debug('Result: %s' % d)
        log.debug('Has the returned deferred already been called? %s' % d.called)
        d.addBoth(finish_request)
        return NOT_DONE_YET
Example #21
def multi_segment_eigens(log: Logger, motifs: List[Frame3D],
                         hotspot: str) -> List[Dict]:
    """Generate eigenvectors for a multisegment motif.

    :param motifs: list of segments of the motif to geometrically analyze.
    :param hotspot: CA atom to guide front-view.
    """
    log.debug('Multiple segment geometric analysis.')
    eigens = []
    hotspot_segment = 0
    for i, motif in enumerate(motifs):
        eigens.append(
            dict(
                map(
                    reversed,
                    zip(motif['AtomType:CA'].eigenvectors(10),
                        ('perpendicular', 'side', 'major')))))
        if not motif[f'Chain:{hotspot.split(":")[0]}'][
                f'Residue:{hotspot.split(":")[1]}'].is_empty:
            log.debug(f'Guiding residue located in segment {i + 1}.')
            hotspot_segment = i
    return eigens
Example #23
def run():
    raven_client = Client()
    logger = Logger("ardegra")
    spider_name = " ".join(sys.argv[1:])
    client = pymongo.MongoClient("mongodb://{}/ardegra".format(
        Config.DATABASE_ADDRESS))
    logger.debug("Running: {}".format(spider_name))

    try:
        db = client["ardegra"]
        document = db.spiders.find_one({"name": spider_name})

        if document["type"]["name"] == "Forum Spider 1":
            spider = ForumSpider1(name=spider_name)
        elif document["type"]["name"] == "News Spider 1":
            spider = NewsSpider1(name=spider_name)
        elif document["type"]["name"] == "News Spider 2":
            spider = NewsSpider2(name=spider_name)
        else:
            raise ValueError("Unknown spider type: {}".format(document["type"]["name"]))
        spider.run()
    except Exception as err:
        raven_client.captureException()
        logger.error(str(err))
    finally:
        client.close()
Example #24
class InterfaceTemplate(object):
    """docstring for InterfaceTemplate"""
    last_cmd = ''
    def __init__(self):
        self.Log = Logger('InterfaceTemplate')
        self.idn = 'InterfaceTemplate %d' % id(self)

    def __unicode__(self):
        return str(self)

    def send(self, cmd, **kwargs):
        self.Log.debug('send(cmd=%s, kwargs=%s)' % (cmd, str(kwargs)))
        self.last_cmd = cmd

        return True

    def read(self, **kwargs):
        self.Log.debug('read(kwargs=%s)' % str(kwargs))
        return (0, 'InterfaceTemplate response to %s' % self.last_cmd)

    def query(self, cmd, **kwargs):
        self.send(cmd, **kwargs)
        return self.read()
Example #25
def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    setup_logger(app.config)
    log = Logger("APP")
    log.debug(app.config)

    mail.init_app(app)
    log.debug(mysql_client.init_app(app))
    login_manager.init_app(app)
    log.debug(redis_client.init_app(app))
    ueditor.init_app(app)

    from .main import main as main_blueprint
    from .auth import auth as auth_blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    return app
Example #26
class Logging_MixIn(Abstract_Interface):
    """
    Logging Version
    """
    def __init__(self, *args, **kwargs):
        """
        Instantiate logging interface
        :param args:
        :param kwargs:
        :return:
        """
        Abstract_Interface.__init__(self, *args, **kwargs)
        from logbook import Logger
        self.log = Logger("{}-{}".format(type(self).__name__, self.door_name))
        self.log.debug("Initialised using : {}".format(kwargs))

    def activate(self):
        self.log.debug("Activated")

    def is_active(self):
        self.log.debug("Checking activation")

    def open(self, duration=10):
        self.log.info("Opening for {}".format(duration))
Example #28
    params = dict(line.split() for line in sys.stdin.readlines())
    api = twitter.Api(**params)

    min_page = int(math.floor(keep_number/50))
    existing_tweets_id = [filename[:-4] for filename in os.listdir(TWEETS_PATH)]
    for i in range(min_page, min_page+50):
        log.info(u"Fetching page: %s" % i)
        statuses = api.GetUserTimeline(page=i+1, count=49)
        if not statuses:
            break
        for status in statuses:
            tweet = status.AsDict()
            tweet_id = str(tweet['id'])
            if tweet_id not in existing_tweets_id:
                log.debug(u"Processing tweet: %s" % tweet_id)
                if tweet['user']['screen_name'] == SCREEN_NAME and not tweet['retweeted']:
                    created_at = datetime.strptime(tweet['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
                    retweet_count = 'retweet_count' in tweet and tweet['retweet_count'] or u'0'
                    log.debug(u"Urlizing content: %s" % tweet['text'])
                    try:
                        content = __urlize(tweet['text'])
                    except socket.gaierror:
                        content = tweet['text']
                        log.error(u"Error urlizing: %s" % content)
                        log.error(u"Verify the tweet online: https://twitter.com/%s/status/%s" % (SCREEN_NAME, tweet_id))
                    except socket.error:  # couldn't figure out the difference easily
                        content = tweet['text']
                        log.error(u"Alternative error urlizing: %s" % content)
                        log.error(u"Verify the tweet online: https://twitter.com/%s/status/%s" % (SCREEN_NAME, tweet_id))
                    text =  u'%s$%s$%s\n%s' % (tweet_id, created_at, retweet_count, content)
Example #29
class BTgymMultiData:
    """
    Multiply data streams wrapper.
    """
    def __init__(self,
                 data_class_ref=None,
                 data_config=None,
                 name='multi_data',
                 data_names=None,
                 task=0,
                 log_level=WARNING,
                 **kwargs):
        """
        Args:
            data_class_ref:         one of BTgym single-stream datafeed classes
            data_config:            nested dictionary of individual data streams sources, see notes below.

            kwargs:                 shared parameters for all data streams, see base dataclass

        Notes:
            `Data_config` specifies all data sources consumed by strategy::

                data_config = {
                    data_line_name_0: {
                        filename: [source csv filename string or list of strings],
                        [config: {optional dict of individual stream config. params},]
                    },
                    ...,
                    data_line_name_n : {...}
                }

        Example::

            data_config = {
                'usd': {'filename': '.../DAT_ASCII_EURUSD_M1_2017.csv'},
                'gbp': {'filename': '.../DAT_ASCII_EURGBP_M1_2017.csv'},
                'jpy': {'filename': '.../DAT_ASCII_EURJPY_M1_2017.csv'},
                'chf': {'filename': '.../DAT_ASCII_EURCHF_M1_2017.csv'},
            }
            It is the user's responsibility to correctly choose historic data conversion rates w.r.t. the cash currency (here EUR).
        """
        self.data_class_ref = data_class_ref
        if data_config is None:
            self.data_config = {}

        else:
            self.data_config = data_config

        self.master_data = None
        self.name = name
        self.task = task
        self.metadata = {'sample_num': 0, 'type': None}
        self.filename = None
        self.is_ready = False
        self.global_timestamp = 0
        self.log_level = log_level
        self.params = {}
        self.names = []
        self.sample_num = 0

        # Logging:
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('{}_{}'.format(self.name, self.task),
                          level=self.log_level)

        if data_names is None:
            # Infer from data configuration (at top-level):
            self.data_names = list(self.data_config.keys())

        else:
            self.data_names = data_names
        if len(self.data_names) == 0:
            self.log.error('At least one data_line should be provided')
            raise ValueError('At least one data_line should be provided')

        # Make dictionary of single-stream datasets:
        self.data = OrderedDict()
        for key, stream in self.data_config.items():
            try:
                stream['config'].update(kwargs)

            except KeyError:
                stream['config'] = kwargs

            try:
                if stream['dataframe'] is None:
                    pass

            except KeyError:
                stream['dataframe'] = None

            self.data[key] = self.data_class_ref(filename=stream['filename'],
                                                 dataframe=stream['dataframe'],
                                                 data_names=(key, ),
                                                 task=task,
                                                 name='{}_{}'.format(
                                                     name, key),
                                                 log_level=log_level,
                                                 **stream['config'])
            try:
                # If master-data has been pointed explicitly by 'base' kwarg:
                if stream['base']:
                    self.master_data = self.data[key]

            except KeyError:
                pass

    def set_logger(self, level=None, task=None):
        """
        Sets logbook logger.

        Args:
            level:  logbook.level, int
            task:   task id, int

        """
        if task is not None:
            self.task = task

        if level is not None:
            for stream in self.data.values():
                stream.log = Logger('{}_{}'.format(stream.name, stream.task),
                                    level=level)

            self.log = Logger('{}_{}'.format(self.name, self.task),
                              level=level)

    def set_params(self, params_dict):
        """
        Batch attribute setter.

        Args:
            params_dict: dictionary of parameters to be set as instance attributes.
        """
        for key, value in params_dict.items():
            for stream in self.data.values():
                setattr(stream, key, value)

    def read_csv(self, data_filename=None, force_reload=False):
        # Load:
        indexes = []
        for stream in self.data.values():
            stream.read_csv(force_reload=force_reload)
            indexes.append(stream.data.index)

        # Get indexes intersection:
        if len(indexes) > 1:
            idx_intersected = indexes[0]
            for i in range(1, len(indexes)):
                idx_intersected = idx_intersected.intersection(indexes[i])

            # Truncate data to common index:
            for stream in self.data.values():
                stream.data = stream.data.loc[idx_intersected]

    def reset(self, **kwargs):
        indexes = []
        for stream in self.data.values():
            stream.reset(**kwargs)
            indexes.append(stream.data.index)

        # Get indexes intersection:
        if len(indexes) > 1:
            idx_intersected = indexes[0]
            for i in range(1, len(indexes)):
                idx_intersected = idx_intersected.intersection(indexes[i])

            idx_intersected = idx_intersected.drop_duplicates()  # returns a new Index
            self.log.info('shared num. records: {}'.format(
                len(idx_intersected)))

            # Truncate data to common index:
            for stream in self.data.values():
                stream.data = stream.data.loc[idx_intersected]

            # Choose master_data
            if self.master_data is None:
                # Just choose first key:
                all_keys = list(self.data.keys())
                if len(all_keys) > 0:
                    self.master_data = self.data[all_keys[0]]

            self.global_timestamp = self.master_data.global_timestamp
            self.names = self.master_data.names
        self.sample_num = 0
        self.is_ready = True

    def set_global_timestamp(self, timestamp):
        for stream in self.data.values():
            stream.set_global_timestamp(timestamp)

        self.global_timestamp = self.master_data.global_timestamp

    def describe(self):
        return {key: stream.describe() for key, stream in self.data.items()}

    def sample(self, **kwargs):

        # Get sample to infer exact interval:
        self.log.debug('Making master sample...')
        master_sample = self.master_data.sample(**kwargs)
        self.log.debug('Master sample ok.')

        # Prepare empty instance of multistream data:
        sample = BTgymMultiData(
            data_names=self.data_names,
            task=self.task,
            log_level=self.log_level,
            name='sub_' + self.name,
        )
        sample.metadata = copy.deepcopy(master_sample.metadata)

        interval = [
            master_sample.metadata['first_row'],
            master_sample.metadata['last_row']
        ]

        # Populate sample with data:
        for key, stream in self.data.items():
            self.log.debug(
                'Sampling <{}> with interval: {}, kwargs: {}'.format(
                    key, interval, kwargs))
            sample.data[key] = stream.sample(interval=interval,
                                             force_interval=True,
                                             **kwargs)

        sample.filename = {
            key: stream.filename
            for key, stream in self.data.items()
        }
        self.sample_num += 1
        return sample

    def to_btfeed(self):
        feed = OrderedDict()
        for key, stream in self.data.items():
            # Get single-dataline btfeed dict:
            feed_dict = stream.to_btfeed()
            assert len(list(feed_dict.keys())) == 1, \
                'Expected base datafeed dictionary to contain a single data_line, got: {}'.format(feed_dict)
            # Rename every base btfeed according to data_config keys:
            feed[key] = feed_dict[list(feed_dict.keys())[0]]
        return feed
Example #30
0
class Worker(multiprocessing.Process):
    """
    Distributed tf worker class.

    Sets up environment, trainer and starts training process in supervised session.
    """
    env_list = None

    def __init__(self,
                 env_config,
                 policy_config,
                 trainer_config,
                 cluster_spec,
                 job_name,
                 task,
                 log_dir,
                 log_level,
                 max_env_steps,
                 random_seed=None,
                 render_last_env=False,
                 test_mode=False):
        """

        Args:
            env_config:         environment class_config_dict.
            policy_config:      model policy estimator class_config_dict.
            trainer_config:     algorithm class_config_dict.
            cluster_spec:       tf.cluster specification.
            job_name:           worker or parameter server.
            task:               integer number, 0 is chief worker.
            log_dir:            for tb summaries and checkpoints.
            log_level:          int, logbook.level
            max_env_steps:      number of environment steps to run training on
            random_seed:        int or None
            render_last_env:    bool, if True - render enabled for last environment in a list; first otherwise
            test_mode:          if True - use Atari mode, BTGym otherwise.

            Note:
                - Conventional `self.global_step` refers to number of environment steps,
                    summarized over all environment instances, not to number of policy optimizer train steps.

                - Every worker can run several environments in parallel, as specified by `cluster_config'['num_envs'].
                    If using 4 workers and num_envs=4 => total number of environments is 16. Every env instance has
                    its own ThreadRunner process.

                - When using replay memory, keep in mind that every ThreadRunner keeps its own replay memory.
                    If memory_size = 2000, num_workers=4, num_envs=4 => total replay memory size equals 32 000 frames.
        """
        super(Worker, self).__init__()
        self.env_class = env_config['class_ref']
        self.env_kwargs = env_config['kwargs']
        self.policy_config = policy_config
        self.trainer_class = trainer_config['class_ref']
        self.trainer_kwargs = trainer_config['kwargs']
        self.cluster_spec = cluster_spec
        self.job_name = job_name
        self.task = task
        self.log_dir = log_dir
        self.max_env_steps = max_env_steps
        self.log_level = log_level
        self.log = None
        self.test_mode = test_mode
        self.random_seed = random_seed
        self.render_last_env = render_last_env

    def run(self):
        """Worker runtime body.
        """
        # Logging:
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('Worker_{}'.format(self.task), level=self.log_level)

        tf.reset_default_graph()

        if self.test_mode:
            import gym

        # Define cluster:
        cluster = tf.train.ClusterSpec(self.cluster_spec).as_cluster_def()

        # Start tf.server:
        if self.job_name == 'ps':
            server = tf.train.Server(
                cluster,
                job_name=self.job_name,
                task_index=self.task,
                config=tf.ConfigProto(device_filters=["/job:ps"])
            )
            self.log.debug('parameters_server started.')
            # Just block here:
            server.join()

        else:
            server = tf.train.Server(
                cluster,
                job_name='worker',
                task_index=self.task,
                config=tf.ConfigProto(
                    intra_op_parallelism_threads=1,  # original was: 1
                    inter_op_parallelism_threads=2  # original was: 2
                )
            )
            self.log.debug('tf.server started.')

            self.log.debug('making environments:')
            # Making as many environments as many entries in env_config `port` list:
            # TODO: Hacky-II: only one example over all parallel environments can be data-master [and renderer]
            # TODO: measure data_server lags, maybe launch several instances
            self.env_list = []
            env_kwargs = self.env_kwargs.copy()
            env_kwargs['log_level'] = self.log_level
            port_list = env_kwargs.pop('port')
            data_port_list = env_kwargs.pop('data_port')
            data_master = env_kwargs.pop('data_master')
            render_enabled = env_kwargs.pop('render_enabled')

            render_list = [False for entry in port_list]
            if render_enabled:
                if self.render_last_env:
                    render_list[-1] = True
                else:
                    render_list[0] = True

            data_master_list = [False for entry in port_list]
            if data_master:
                data_master_list[0] = True

            # Parallel envs. numbering:
            if len(port_list) > 1:
                task_id = 0.0
            else:
                task_id = 0

            for port, data_port, is_render, is_master in zip(port_list, data_port_list, render_list, data_master_list):
                # Get random seed for environments:
                env_kwargs['random_seed'] = random.randint(0, 2 ** 30)

                if not self.test_mode:
                    # Assume BTgym env. class:
                    self.log.debug('setting env at port_{} is data_master: {}'.format(port, data_master))
                    self.log.debug('env_kwargs:')
                    for k, v in env_kwargs.items():
                        self.log.debug('{}: {}'.format(k, v))
                    try:
                        self.env_list.append(
                            self.env_class(
                                port=port,
                                data_port=data_port,
                                data_master=is_master,
                                render_enabled=is_render,
                                task=self.task + task_id,
                                **env_kwargs
                            )
                        )
                        data_master = False
                        self.log.info('set BTGym environment {} @ port:{}, data_port:{}'.
                                      format(self.task + task_id, port, data_port))
                        task_id += 0.01

                    except Exception:
                        self.log.exception(
                            'failed to make BTGym environment at port_{}.'.format(port)
                        )
                        raise RuntimeError

                else:
                    # Assume atari testing:
                    try:
                        self.env_list.append(self.env_class(env_kwargs['gym_id']))
                        self.log.debug('set Gym/Atari environment.')

                    except Exception:
                        self.log.exception('failed to make Gym/Atari environment')
                        raise RuntimeError

            self.log.debug('Defining trainer...')

            # Define trainer:
            trainer = self.trainer_class(
                env=self.env_list,
                task=self.task,
                policy_config=self.policy_config,
                log_level=self.log_level,
                cluster_spec=self.cluster_spec,
                random_seed=self.random_seed,
                **self.trainer_kwargs,
            )

            self.log.debug('trainer ok.')

            # Saver-related:
            variables_to_save = [v for v in tf.global_variables() if 'local' not in v.name]
            local_variables = [v for v in tf.global_variables() if 'local' in v.name] + tf.local_variables()
            init_op = tf.variables_initializer(variables_to_save)
            local_init_op = tf.variables_initializer(local_variables)
            init_all_op = tf.global_variables_initializer()

            saver = _FastSaver(variables_to_save)

            self.log.debug('VARIABLES TO SAVE:')
            for v in variables_to_save:
                self.log.debug('{}: {}'.format(v.name, v.get_shape()))

            def init_fn(ses):
                self.log.info("initializing all parameters.")
                ses.run(init_all_op)

            config = tf.ConfigProto(device_filters=["/job:ps", "/job:worker/task:{}/cpu:0".format(self.task)])
            logdir = os.path.join(self.log_dir, 'train')
            summary_dir = logdir + "_{}".format(self.task)

            summary_writer = tf.summary.FileWriter(summary_dir)

            self.log.debug('before tf.train.Supervisor... ')

            # TODO: switch to tf.train.MonitoredTrainingSession
            sv = tf.train.Supervisor(
                is_chief=(self.task == 0),
                logdir=logdir,
                saver=saver,
                summary_op=None,
                init_op=init_op,
                local_init_op=local_init_op,
                init_fn=init_fn,
                #ready_op=tf.report_uninitialized_variables(variables_to_save),
                ready_op=tf.report_uninitialized_variables(),
                global_step=trainer.global_step,
                save_model_secs=300,
            )
            self.log.info("connecting to the parameter server... ")

            with sv.managed_session(server.target, config=config) as sess, sess.as_default():
                #sess.run(trainer.sync)
                trainer.start(sess, summary_writer)

                # Note: `self.global_step` refers to number of environment steps
                # summarized over all environment instances, not to number of policy optimizer train steps.
                global_step = sess.run(trainer.global_step)
                self.log.notice("started training at step: {}".format(global_step))

                while not sv.should_stop() and global_step < self.max_env_steps:
                    trainer.process(sess)
                    global_step = sess.run(trainer.global_step)

                # Ask for all the services to stop:
                for env in self.env_list:
                    env.close()

                sv.stop()
            self.log.notice('reached {} steps, exiting.'.format(global_step))
Example #31
from service.update import Update
import gui.fitCommands as cmd


pyfalog = Logger(__name__)

disableOverrideEditor = False

try:
    from gui.propertyEditor import AttributeEditor
except ImportError as e:
    AttributeEditor = None
    pyfalog.warning("Error loading Attribute Editor: %s.\nAccess to Attribute Editor is disabled." % str(e))
    disableOverrideEditor = True

pyfalog.debug("Done loading mainframe imports")


# dummy panel(no paint no erasebk)
class PFPanel(wx.Panel):
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnBkErase)

    def OnPaint(self, event):
        event.Skip()

    def OnBkErase(self, event):
        pass
Example #32
File: pyfa.py Project: Sectoid/Pyfa
        # noinspection PyUnresolvedReferences
        import service.prefetch  # noqa: F401

        # Make sure the saveddata db exists
        if not os.path.exists(config.savePath):
            os.mkdir(config.savePath)

        eos.db.saveddata_meta.create_all()

        from gui.mainFrame import MainFrame

        # set title if it wasn't supplied by argument
        if options.title is None:
            options.title = "pyfa %s - Python Fitting Assistant" % (config.getVersion())

        pyfa = wx.App(False)
        mf = MainFrame(options.title)
        ErrorHandler.SetParent(mf)

        if options.profile_path:
            profile_path = os.path.join(options.profile_path, 'pyfa-{}.profile'.format(datetime.datetime.now().strftime('%Y%m%d_%H%M%S')))
            pyfalog.debug("Starting pyfa with a profiler, saving to {}".format(profile_path))
            import cProfile
            cProfile.run('pyfa.MainLoop()', profile_path)
        else:
            pyfa.MainLoop()

        # TODO: Add some thread cleanup code here. Right now we bail, and that can lead to orphaned threads or threads not properly exiting.
        sys.exit()
Example #33
class BaseSynchroRunner():
    """
    Data provider class. Interacts with the environment and outputs data in the form of rollouts augmented with
    relevant summaries and metadata. This runner is `synchronous` in the sense that data collection is in-process
    and every rollout is collected by an explicit call to the respective `get_data()` method [unlike the `async`
    thread-runner version found earlier in this package which, once started,
    runs on its own and cannot be moderated].
    Makes precise control over the policy being executed possible.
    Does not support 'atari' mode.
    """
    def __init__(self,
                 env,
                 task,
                 rollout_length,
                 episode_summary_freq,
                 env_render_freq,
                 ep_summary,
                 test=False,
                 policy=None,
                 data_sample_config=None,
                 memory_config=None,
                 test_conditions=None,
                 slowdown_steps=0,
                 global_step_op=None,
                 aux_render_modes=None,
                 _implemented_aux_render_modes=None,
                 name='synchro',
                 log_level=WARNING,
                 **kwargs):
        """

        Args:
            env:                            BTgym environment instance
            task:                           int, runner task id
            rollout_length:                 int
            episode_summary_freq:           int
            env_render_freq:                int
            test:                           legacy, not used
            ep_summary:                     legacy, not used
            policy:                         policy instance to execute
            data_sample_config:             dict, data sampling configuration dictionary
            memory_config:                  dict, replay memory configuration
            test_conditions:                dict or None,
                                            dictionary of single experience conditions to check to mark it as test one.
            slowdown_steps:                 number of global steps to wait between own steps (chief only)
            aux_render_modes:               iterable of str, additional summaries to compute
            _implemented_aux_render_modes:  iterable of str, implemented additional summaries
            name:                           str, name scope
            log_level:                      int, logbook.level
        """
        self.env = env
        self.task = task
        self.name = name
        self.rollout_length = rollout_length
        self.episode_summary_freq = episode_summary_freq
        self.env_render_freq = env_render_freq

        self.memory_config = memory_config
        self.policy = policy
        self.data_sample_config = data_sample_config

        self.log_level = log_level
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('{}_Runner_{}'.format(self.name, self.task),
                          level=self.log_level)

        # Aux rendering setup:
        if _implemented_aux_render_modes is None:
            self.implemented_aux_render_modes = []

        else:
            self.implemented_aux_render_modes = _implemented_aux_render_modes

        self.aux_render_modes = []
        if aux_render_modes is not None:
            for mode in aux_render_modes:
                if mode in self.implemented_aux_render_modes:
                    self.aux_render_modes.append(mode)

                else:
                    msg = 'Render mode `{}` is not implemented.'.format(mode)
                    self.log.error(msg)
                    raise NotImplementedError(msg)

        self.log.debug('self.render modes: {}'.format(self.aux_render_modes))

        self.sess = None
        self.summary_writer = None

        self.global_step_op = global_step_op

        if self.task == 0 and slowdown_steps > 0 and self.global_step_op is not None:
            self.log.notice(
                'is slowed down by {} global_iterations/step'.format(
                    slowdown_steps))
            self.slowdown_steps = slowdown_steps

        else:
            self.slowdown_steps = 0

        if test_conditions is None:
            # Default test conditions are: experience comes from test episode, from target domain:
            self.test_conditions = {
                'state': {
                    'metadata': {
                        'type': 1,
                        'trial_type': 1
                    }
                }
            }
        else:
            self.test_conditions = test_conditions

        # Make replay memory:
        if self.memory_config is not None:
            self.memory = self.memory_config['class_ref'](
                **self.memory_config['kwargs'])

        else:
            self.memory = _DummyMemory()

        self.length = 0
        self.local_episode = 0
        self.reward_sum = 0

        self.terminal_end = True

        # Summary averages accumulators:
        self.total_r = []
        self.cpu_time = []
        self.final_value = []
        self.total_steps = []
        self.total_steps_atari = []
        self.info = [None]
        self.pre_experience = None
        self.state = None
        self.context = None

        self.last_action = None
        self.last_reward = None

        # Episode accumulators:
        self.ep_accum = None

        self.log.debug('__init__() done.')

    def sleep(self):
        if self.slowdown_steps > 0:
            start_global_step = self.sess.run(self.global_step_op)
            while start_global_step + self.slowdown_steps > self.sess.run(
                    self.global_step_op):
                time.sleep(0.05)
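
    # Worked example of the slowdown logic above (numbers are illustrative assumptions):
    # with slowdown_steps=10 and the global step reading 100 when sleep() is entered,
    # the chief runner polls global_step_op every 0.05 s and resumes only once the
    # global step reaches 110, giving the other workers time to push their updates.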

    def start_runner(self, sess, summary_writer, **kwargs):
        """
        Legacy wrapper.
        """
        self.start(sess, summary_writer, **kwargs)

    def start(self,
              sess,
              summary_writer,
              init_context=None,
              data_sample_config=None):
        """
        Executes initial sequence; fills initial replay memory if any.
        """
        assert self.policy is not None, 'Initial policy not specified'
        self.sess = sess
        self.summary_writer = summary_writer

        # # Hacky but we need env.renderer methods ready: NOT HERE, went to VerboseRunner
        # self.env.renderer.initialize_pyplot()

        self.pre_experience, self.state, self.context, self.last_action, self.last_reward = self.get_init_experience(
            policy=self.policy,
            init_context=init_context,
            data_sample_config=data_sample_config)

        if self.memory_config is not None:
            while not self.memory.is_full():
                # collect some rollouts to fill memory:
                _ = self.get_data()
            self.log.notice('Memory filled')
        self.log.notice('started collecting data.')

    def get_init_experience(self,
                            policy,
                            policy_sync_op=None,
                            init_context=None,
                            data_sample_config=None):
        """
        Starts new environment episode.

        Args:
            policy:                 policy to execute.
            policy_sync_op:         operation copying local behavioural policy params from global one
            init_context:           initial policy context for new episode.
            data_sample_config:     configuration dictionary of type `btgym.datafeed.base.EnvResetConfig`

        Returns:
            incomplete initial experience of episode as dictionary (misses bootstrapped R value),
            next_state,
            next policy RNN context,
            action_reward
        """
        self.length = 0
        self.reward_sum = 0
        # Increment global and local episode counters:
        self.sess.run(self.policy.inc_episode)
        self.local_episode += 1

        # self.log.warning('get_init_exp() data_sample_config: {}'.format(data_sample_config))

        if data_sample_config is None:
            data_sample_config = policy.get_sample_config()

        # Pass sample config to environment (.get_sample_config() is actually aac framework method):
        init_state = self.env.reset(**data_sample_config)

        # Master worker always resets context at the episode beginning:
        # TODO: !
        # if not self.data_sample_config['mode']:
        init_context = None

        # self.log.warning('init_context_passed: {}'.format(init_context))
        # self.log.warning('state_metadata: {}'.format(state['metadata']))

        init_action = self.env.action_space.encode(
            self.env.get_initial_action())
        init_reward = np.asarray(0.0)

        # Update policy:
        if policy_sync_op is not None:
            self.sess.run(policy_sync_op)

        init_context = policy.get_initial_features(state=init_state,
                                                   context=init_context)
        action, logits, value, next_context = policy.act(
            init_state,
            init_context,
            init_action[None, ...],
            init_reward[None, ...],
        )
        next_state, reward, terminal, self.info = self.env.step(
            action['environment'])

        experience = {
            'position': {
                'episode': self.local_episode,
                'step': self.length
            },
            'state': init_state,
            'action': action['one_hot'],
            'reward': reward,
            'value': value,
            'terminal': terminal,
            'context': init_context,
            'last_action': init_action,
            'last_reward': init_reward,
            'r': None,  # to be updated
            'info': self.info[-1],
        }
        # Execute user-defined callbacks to policy, if any:
        for key, callback in policy.callback.items():
            experience[key] = callback(**locals())

        # reset per-episode  counters and accumulators:
        self.ep_accum = {
            'logits': [logits],
            'value': [value],
            'context': [init_context]
        }
        self.terminal_end = terminal
        #self.log.warning('init_experience_context: {}'.format(context))

        # Take a nap:
        self.sleep()

        return experience, next_state, next_context, action['encoded'], reward

    def get_experience(self,
                       policy,
                       state,
                       context,
                       action,
                       reward,
                       policy_sync_op=None):
        """
        Get single experience (possibly terminal).

        Returns:
            incomplete experience as dictionary (misses bootstrapped R value),
            next_state,
            next policy RNN context,
            action_reward
        """
        # Update policy:
        # TODO: for contd. enable for meta-learning
        # if policy_sync_op is not None:
        #     self.sess.run(policy_sync_op)

        # TODO: meta-learning related; temporary, refactor:
        # if hasattr(policy, 'meta') and self.task == 0:
        #     wait = 1
        #     i = 0
        #     while wait:
        #         wait = policy.meta.act()
        #         # self.log.warning('policy_meta_action: {}'.format(wait))
        #         time.sleep(4)
        #         # i += 1
        #         self.sess.run([policy_sync_op, policy.meta.sync_slot_op])
        #
        #     policy.meta.reset()
        #     policy.meta.global_reset()
        #
        #     # self.log.notice('waited: {}'.format(i))

        # Continue adding experiences to rollout:
        next_action, logits, value, next_context = policy.act(
            state,
            context,
            action[None, ...],
            reward[None, ...],
        )
        self.ep_accum['logits'].append(logits)
        self.ep_accum['value'].append(value)
        self.ep_accum['context'].append(next_context)

        # self.log.notice('context: {}'.format(context))
        next_state, next_reward, terminal, self.info = self.env.step(
            next_action['environment'])

        # Partially compose experience:
        experience = {
            'position': {
                'episode': self.local_episode,
                'step': self.length
            },
            'state': state,
            'action': next_action['one_hot'],
            'reward': next_reward,
            'value': value,
            'terminal': terminal,
            'context': context,
            'last_action': action,
            'last_reward': reward,
            'r': None,
            'info': self.info[-1],
        }
        for key, callback in policy.callback.items():
            experience[key] = callback(**locals())

        # Housekeeping:
        self.length += 1

        # Take a nap:
        # self.sleep()

        return experience, next_state, next_context, next_action[
            'encoded'], next_reward

    def get_train_stat(self, is_test=False):
        """
        Updates and computes average statistics for train episodes.
        Args:
            is_test: bool, current episode type

        Returns:
            dict of stats
        """
        ep_stat = {}
        if not is_test:
            self.total_r += [self.reward_sum]
            episode_stat = self.env.get_stat()  # get episode statistic
            last_i = self.info[-1]  # pull most recent info
            self.cpu_time += [episode_stat['runtime'].total_seconds()]
            self.final_value += [last_i['broker_value']]
            self.total_steps += [episode_stat['length']]

        if self.local_episode % self.episode_summary_freq == 0:
            ep_stat = dict(total_r=np.average(self.total_r),
                           cpu_time=np.average(self.cpu_time),
                           final_value=np.average(self.final_value),
                           steps=np.average(self.total_steps))
            self.total_r = []
            self.cpu_time = []
            self.final_value = []
            self.total_steps = []
            self.total_steps_atari = []

        return ep_stat

    def get_test_stat(self, is_test=False):
        """
        Updates and computes  statistics for single test episode.

        Args:
            is_test: bool, current episode type

        Returns:
            dict of stats

        """
        ep_stat = {}
        if is_test:
            episode_stat = self.env.get_stat()  # get episode statistic
            last_i = self.info[-1]  # pull most recent info
            ep_stat = dict(total_r=self.reward_sum,
                           final_value=last_i['broker_value'],
                           steps=episode_stat['length'])
        return ep_stat

    def get_ep_render(self, is_test=False):
        """
        Collects environment renderings. Relies on environment renderer class methods,
        so it is only valid when environment rendering is enabled (typically it is true for master runner).

        Returns:
            dictionary of images as rgb arrays

        """
        # Only render chief worker and test (slave) environment:
        # if self.task < 1 and (
        #     is_test or(
        #         self.local_episode % self.env_render_freq == 0 and not self.data_sample_config['mode']
        #     )
        # ):
        if self.task < 1 and self.local_episode % self.env_render_freq == 0:

            # Render environment (chief worker only):
            render_stat = {
                mode: self.env.render(mode)[None, :]
                for mode in self.env.render_modes
            }

        else:
            render_stat = None

        return render_stat

    def get_data(self,
                 policy=None,
                 policy_sync_op=None,
                 init_context=None,
                 data_sample_config=None,
                 rollout_length=None,
                 force_new_episode=False):
        """
        Collects single trajectory rollout and bunch of summaries using specified policy.
        Updates episode statistics and replay memory.

        Args:
            policy:                 policy to execute
            policy_sync_op:         operation copying local behavioural policy params from global one
            init_context:           if specified, overrides initial episode context provided by self.context
                                    (valid only if new episode is started within this rollout).
            data_sample_config:     environment configuration parameters for next episode to sample:
                                    configuration dictionary of type `btgym.datafeed.base.EnvResetConfig`
            rollout_length:         length of rollout to collect; if specified - overrides self.rollout_length attr
            force_new_episode:      bool, if True - resets the environment


        Returns:
                data dictionary
        """
        if policy is None:
            policy = self.policy

        if init_context is None:
            init_context = self.context

        if rollout_length is None:
            rollout_length = self.rollout_length

        rollout = Rollout()
        is_test = False
        train_ep_summary = None
        test_ep_summary = None
        render_ep_summary = None

        if self.terminal_end or force_new_episode:
            # Start new episode:
            self.pre_experience, self.state, self.context, self.last_action, self.last_reward = self.get_init_experience(
                policy=policy,
                policy_sync_op=policy_sync_op,
                init_context=init_context,
                data_sample_config=data_sample_config)
            # self.log.warning(
            #     'started new episode with:\ndata_sample_config: {}\nforce_new_episode: {}'.
            #         format(data_sample_config, force_new_episode)
            # )
            # self.log.warning('pre_experience_metadata: {}'.format(self.pre_experience['state']['metadata']))

        # NOTE: self.terminal_end is updated by the get_init_experience() method

        # Collect single rollout:
        while rollout.size < rollout_length - 1 and not self.terminal_end:
            if self.pre_experience['terminal']:
                # Episode has just finished:
                # complete and push the last experience and update all episode summaries
                self.pre_experience['r'] = np.asarray([0.0])
                experience = None
                self.state = None
                self.context = None
                self.last_action = None
                self.last_reward = None

                self.terminal_end = True

                train_ep_summary = self.get_train_stat(is_test)
                test_ep_summary = self.get_test_stat(is_test)
                render_ep_summary = self.get_ep_render(is_test)

            else:
                experience, self.state, self.context, self.last_action, self.last_reward = self.get_experience(
                    policy=policy,
                    policy_sync_op=policy_sync_op,
                    state=self.state,
                    context=self.context,
                    action=self.last_action,
                    reward=self.last_reward)
                # Complete previous experience by bootstrapping V from next one:
                self.pre_experience['r'] = experience['value']

            # Push:
            rollout.add(self.pre_experience)

            # Check whether this experience satisfies the test conditions:
            is_test = is_subdict(self.test_conditions, self.pre_experience)

            # try:
            #     # Was it test (i.e. test episode from traget domain)?
            #     if self.pre_experience['state']['metadata']['type']\
            #             and self.pre_experience['state']['metadata']['trial_type']:
            #         is_test = True
            #
            # except KeyError:
            #     pass

            # Only training rollouts are added to replay memory:
            if not is_test:
                self.memory.add(self.pre_experience)

            self.reward_sum += self.pre_experience['reward']

            # Move one step forward:
            self.pre_experience = experience

        # Done collecting rollout; the episode may or may not have terminated:
        if not self.terminal_end:
            # Bootstrap:
            self.pre_experience['r'] = np.asarray([
                policy.get_value(
                    self.pre_experience['state'],
                    self.pre_experience['context'],
                    self.pre_experience['last_action'][None, ...],
                    self.pre_experience['last_reward'][None, ...],
                )
            ])
            rollout.add(self.pre_experience)
            if not is_test:
                self.memory.add(self.pre_experience)

        # self.log.warning('rollout.terminal: {}'.format(self.terminal_end))
        # self.log.warning('rollout.size: {}'.format(rollout.size))

        data = dict(
            on_policy=rollout,
            terminal=self.terminal_end,
            off_policy=self.memory.sample_uniform(
                sequence_size=rollout_length),
            off_policy_rp=self.memory.sample_priority(exact_size=True),
            ep_summary=train_ep_summary,
            test_ep_summary=test_ep_summary,
            render_summary=render_ep_summary,
            is_test=is_test,
        )
        return data

    def get_episode(self,
                    policy=None,
                    policy_sync_op=None,
                    init_context=None,
                    data_sample_config=None):
        """
        == WRONG DO NOT USE ===
        Collects entire episode trajectory as single rollout and bunch of summaries using specified policy.
        Updates episode statistics and replay memory.

        Args:
            policy:                 policy to execute
            policy_sync_op:         operation copying local behavioural policy params from global one
            init_context:           if specified, overrides initial episode context provided by self.context
            data_sample_config:     environment configuration parameters for next episode to sample:
                                    configuration dictionary of type `btgym.datafeed.base.EnvResetConfig`

        Returns:
                data dictionary
        """
        if policy is None:
            policy = self.policy

        if init_context is None:
            init_context = self.context

        elif init_context == 0:  # mmm... TODO: fix this shame
            init_context = None

        rollout = Rollout()
        train_ep_summary = None
        test_ep_summary = None
        render_ep_summary = None

        # Start new episode:
        self.pre_experience, self.state, self.context, self.last_action, self.last_reward = self.get_init_experience(
            policy=policy,
            policy_sync_op=policy_sync_op,
            init_context=
            init_context,  # None (initially) or final context of previous episode
            data_sample_config=data_sample_config)

        is_test = is_subdict(self.test_conditions, self.pre_experience)
        # try:
        #     # Was it test (`type` in metadata is not zero)?
        #     # TODO: change to source/target?
        #     if self.pre_experience['state']['metadata']['type']:
        #         is_test = True
        #
        # except KeyError:
        #     pass

        # Collect data until episode is over:

        while not self.terminal_end:
            experience, self.state, self.context, self.last_action, self.last_reward = self.get_experience(
                policy=policy,
                policy_sync_op=policy_sync_op,
                state=self.state,
                context=self.context,
                action=self.last_action,
                reward=self.last_reward,
            )
            # Complete previous experience by bootstrapping V from next one:
            self.pre_experience['r'] = experience['value']
            # Push:
            rollout.add(self.pre_experience)

            if not is_test:
                self.memory.add(self.pre_experience)

            # Move one step forward:
            self.pre_experience = experience

            self.reward_sum += experience['reward']

            if self.pre_experience['terminal']:
                # Episode has just finished:
                # complete and push the last experience and update all episode summaries:
                self.terminal_end = True

        # Bootstrap:
        # TODO: should be zero here
        self.pre_experience['r'] = np.asarray([
            policy.get_value(
                self.pre_experience['state'],
                self.pre_experience['context'],
                self.pre_experience['last_action'][None, ...],
                self.pre_experience['last_reward'][None, ...],
            )
        ])
        rollout.add(self.pre_experience)
        if not is_test:
            self.memory.add(self.pre_experience)

        train_ep_summary = self.get_train_stat(is_test)
        test_ep_summary = self.get_test_stat(is_test)
        render_ep_summary = self.get_ep_render(is_test)

        # self.log.warning('episodic_rollout.size: {}'.format(rollout.size))

        data = dict(
            on_policy=rollout,
            terminal=self.terminal_end,
            off_policy=self.memory.sample_uniform(
                sequence_size=self.rollout_length),
            off_policy_rp=self.memory.sample_priority(exact_size=True),
            ep_summary=train_ep_summary,
            test_ep_summary=test_ep_summary,
            render_summary=render_ep_summary,
            is_test=is_test,
        )
        return data

    def get_batch(self,
                  size,
                  policy=None,
                  policy_sync_op=None,
                  require_terminal=True,
                  same_trial=True,
                  init_context=None,
                  data_sample_config=None):
        """
        Returns batch as list of 'size' or more rollouts collected under specified policy.
        Rollouts can be collected from several consecutive episodes; there may be more rollouts than the requested
        'size' if it is necessary to collect at least one terminal rollout.

        Args:
            size:                   int, number of rollouts to collect
            policy:                 policy to use
            policy_sync_op:         operation copying local behavioural policy params from global one
            require_terminal:       bool, if True - require at least one terminal rollout to be present.
            same_trial:             bool, if True - all episodes are sampled from same trial
            init_context:           if specified, overrides initial episode context provided by self.context
            data_sample_config:     environment configuration parameters for all episodes in batch:
                                    configuration dictionary of type `btgym.datafeed.base.EnvResetConfig`

        Returns:
            dict containing:
            'data' key holding list of data dictionaries;
            'terminal_context' key holding list of terminal output contexts.
            If `require_terminal` is True, this list is guaranteed to hold at least one element.
        """

        batch = []
        terminal_context = []

        if require_terminal:
            got_terminal = False
        else:
            got_terminal = True

        if same_trial:
            assert isinstance(data_sample_config, dict),\
                'get_batch(same_trial=True) expected `data_sample_config` dict., got: {}'.format(data_sample_config)

        # Collect first rollout:
        batch = [
            self.get_data(policy=policy,
                          policy_sync_op=policy_sync_op,
                          init_context=init_context,
                          data_sample_config=data_sample_config,
                          force_new_episode=True)
        ]
        if same_trial:
            # sample new episodes from same trial only:
            data_sample_config['trial_config']['get_new'] = False

        collected_size = 1

        if batch[0]['terminal']:
            terminal_context.append(self.context)
            got_terminal = True

        # Collect others:
        while not (collected_size >= size and got_terminal):
            rollout_data = self.get_data(policy=policy,
                                         policy_sync_op=policy_sync_op,
                                         init_context=init_context,
                                         data_sample_config=data_sample_config)
            batch.append(rollout_data)

            if rollout_data['terminal']:
                terminal_context.append(self.context)
                got_terminal = True

            collected_size += 1

        data = dict(
            data=batch,
            terminal_context=terminal_context,
        )
        return data
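
A minimal driver sketch for the runner above, assuming TensorFlow 1.x (as used by BTgym) and
pre-built `env` and `policy` objects compatible with the interface this class expects;
`SynchroRunner`, the log directory and all numeric settings are illustrative placeholders,
not names taken from the source:

import tensorflow as tf

# Hypothetical wiring; `SynchroRunner` stands in for the runner class defined above.
runner = SynchroRunner(
    env=env,                      # BTgym environment instance (assumed to exist)
    task=0,
    rollout_length=20,
    episode_summary_freq=10,
    env_render_freq=100,
    test=False,
    ep_summary=None,
    policy=policy,                # policy instance to execute (assumed to exist)
    data_sample_config=None,
    memory_config=None,
)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./logs/runner_demo')   # path is illustrative
    runner.start(sess, writer)                             # fills replay memory if one is configured
    for _ in range(100):
        data = runner.get_data()                           # one on-policy rollout + off-policy samples
        # data['on_policy'], data['off_policy'], data['ep_summary'] etc. would be
        # consumed by the trainer here.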
Example #34
0
class BTgymEnv(gym.Env):
    """
    OpenAI Gym API shell for Backtrader backtesting/trading library.
    """
    # Datafeed Server management:
    data_master = True
    data_network_address = 'tcp://127.0.0.1:'  # using localhost.
    data_port = 4999
    data_server = None
    data_server_pid = None
    data_context = None
    data_socket = None
    data_server_response = None

    # Dataset:
    dataset = None  # BTgymDataset instance.
    dataset_stat = None

    # Backtrader engine:
    engine = None  # bt.Cerebro subclass for server to execute.

    # Strategy:
    strategy = None  # strategy to use if no <engine> class been passed.

    # Server and network:
    server = None  # Server process.
    context = None  # ZMQ context.
    socket = None  # ZMQ socket, client side.
    port = 5500  # network port to use.
    network_address = 'tcp://127.0.0.1:'  # using localhost.
    ctrl_actions = ('_done', '_reset', '_stop', '_getstat', '_render'
                    )  # server control messages.
    server_response = None

    # Connection timeout:
    connect_timeout = 60  # server connection timeout in seconds.
    #connect_timeout_step = 0.01  # time between retries in seconds.

    # Rendering:
    render_enabled = True
    render_modes = [
        'human',
        'episode',
    ]
    # `episode` - plotted episode results.
    # `human` - raw_state observation in conventional human-readable format.
    #  <obs_space_key> - rendering of arbitrary state presented in observation_space with same key.

    renderer = None  # Rendering support.
    rendered_rgb = dict()  # Keep last rendered images for each mode.

    # Logging and id:
    log = None
    log_level = None  # logbook level: NOTICE, WARNING, INFO, DEBUG etc. or its integer equivalent;
    verbose = 0  # verbosity mode, valid only if no `log_level` arg has been provided:
    # 0 - WARNING, 1 - INFO, 2 - DEBUG.
    task = 0

    closed = True

    def __init__(self, **kwargs):
        """
        Keyword Args:

            filename=None (str, list):                      csv data file.
            **datafeed_args (any):                          any datafeed-related args, passed through to
                                                            default btgym.datafeed class.
            dataset=None (btgym.datafeed):                  BTgymDataDomain instance,
                                                            overrides `filename` or any other datafeed-related args.
            strategy=None (btgym.strategy):                 strategy to be used by `engine`, any subclass of
                                                            btgym.strategy.base.BTgymBaseStrategy
            engine=None (bt.Cerebro):                       environment simulation engine, any bt.Cerebro subclass,
                                                            overrides `strategy` arg.
            network_address=`tcp://127.0.0.1:` (str):       BTGym_server address.
            port=5500 (int):                                network port to use for server - API_shell communication.
            data_master=True (bool):                        let this environment control the data_server;
            data_network_address=`tcp://127.0.0.1:` (str):  data_server address.
            data_port=4999 (int):                           network port to use for server -- data_server communication.
            connect_timeout=60 (int):                       server connection timeout in seconds.
            render_enabled=True (bool):                     enable rendering for this environment;
            render_modes=['human', 'episode'] (list):       `episode` - plotted episode results;
                                                            `human` - raw_state observation.
            **render_args (any):                            any render-related args, passed through to renderer class.
            verbose=0 (int):                                verbosity mode, {0 - WARNING, 1 - INFO, 2 - DEBUG}
            log_level=None (int):                           logbook level {DEBUG=10, INFO=11, NOTICE=12, WARNING=13},
                                                            overrides `verbose` arg;
            log=None (logbook.Logger):                      external logbook logger,
                                                            overrides `log_level` and `verbose` args.
            task=0 (int):                                   environment id

        Environment kwargs applying logic::

            if <engine> kwarg is given:
                do not use default engine and strategy parameters;
                ignore <strategy> kwarg and all strategy and engine-related kwargs.

            else (no <engine>):
                use default engine parameters;
                if any engine-related kwarg is given:
                    override corresponding default parameter;

                if <strategy> is given:
                    do not use default strategy parameters;
                    if any strategy related kwarg is given:
                        override corresponding strategy parameter;

                else (no <strategy>):
                    use default strategy parameters;
                    if any strategy related kwarg is given:
                        override corresponding strategy parameter;

            if <dataset> kwarg is given:
                do not use default dataset parameters;
                ignore dataset related kwargs;

            else (no <dataset>):
                use default dataset parameters;
                    if  any dataset related kwarg is given:
                        override corresponding dataset parameter;

            If any <other> kwarg is given:
                override corresponding default parameter.
        """
        # Parameters and default values:
        self.params = dict(

            # Backtrader engine mandatory parameters:
            engine=dict(
                start_cash=10.0,  # initial trading capital.
                broker_commission=
                0.001,  # trade execution commission, default is 0.1% of operation value.
                fixed_stake=10,  # single trade stake is fixed type by def.
            ),
            # Dataset mandatory parameters:
            dataset=dict(filename=None, ),
            strategy=dict(state_shape=dict(), ),
            render=dict(),
        )
        p2 = dict(  # IS HERE FOR REFERENCE ONLY
            # Strategy related parameters:
            # Observation state shape is dictionary of Gym spaces,
            # at least should contain `raw_state` field.
            # By convention first dimension of every Gym Box space is time embedding one;
            # one can define any shape; should match env.observation_space.shape.
            # observation space state min/max values,
            # For `raw_state` - absolute min/max values from BTgymDataset will be used.
            state_shape=dict(raw_state=spaces.Box(
                shape=(10, 4), low=-100, high=100, dtype=np.float32)),
            drawdown_call=
            None,  # episode maximum drawdown threshold, default is 90% of initial value.
            portfolio_actions=None,
            # agent actions,
            # should consist with BTgymStrategy order execution logic;
            # defaults are: 0 - 'do nothing', 1 - 'buy', 2 - 'sell', 3 - 'close position'.
            skip_frame=None,
            # Number of environment steps to skip before returning next response,
            # e.g. if set to 10 -- agent will interact with environment every 10th episode step;
            # Every other step agent's action is assumed to be 'hold'.
            # Note: INFO part of environment response is a list of all skipped frame's info's,
            #       i.e. [info[-9], info[-8], ..., info[0]].
        )
        # Update self attributes, remove used kwargs:
        for key in dir(self):
            if key in kwargs.keys():
                setattr(self, key, kwargs.pop(key))

        self.metadata = {'render.modes': self.render_modes}

        # Logging and verbosity control:
        if self.log is None:
            StreamHandler(sys.stdout).push_application()
            if self.log_level is None:
                log_levels = [(0, NOTICE), (1, INFO), (2, DEBUG)]
                self.log_level = WARNING
                for key, value in log_levels:
                    if key == self.verbose:
                        self.log_level = value
            self.log = Logger('BTgymAPIshell_{}'.format(self.task),
                              level=self.log_level)

        # Network parameters:
        self.network_address += str(self.port)
        self.data_network_address += str(self.data_port)

        # Set server rendering:
        if self.render_enabled:
            self.renderer = BTgymRendering(self.metadata['render.modes'],
                                           log_level=self.log_level,
                                           **kwargs)

        else:
            self.renderer = BTgymNullRendering()
            self.log.info(
                'Rendering disabled. Call to render() will return null-plug image.'
            )

        # Append logging:
        self.renderer.log = self.log

        # Update params -1: pull from renderer, remove used kwargs:
        self.params['render'].update(self.renderer.params)
        for key in self.params['render'].keys():
            if key in kwargs.keys():
                _ = kwargs.pop(key)

        if self.data_master:
            # DATASET preparation, only data_master executes this:
            #
            if self.dataset is not None:
                # If BTgymDataset instance has been passed:
                # do nothing.
                msg = 'Custom Dataset class used.'

            else:
                # If no BTgymDataset has been passed,
                # Make default dataset with given CSV file:
                try:
                    os.path.isfile(str(self.params['dataset']['filename']))

                except:
                    raise FileNotFoundError(
                        'Dataset source data file not specified/not found')

                # Use kwargs to instantiate dataset:
                self.dataset = BTgymDataset(**kwargs)
                msg = 'Base Dataset class used.'

            # Append logging:
            self.dataset.set_logger(self.log_level, self.task)

            # Update params -2: pull from dataset, remove used kwargs:
            self.params['dataset'].update(self.dataset.params)
            for key in self.params['dataset'].keys():
                if key in kwargs.keys():
                    _ = kwargs.pop(key)

            self.log.info(msg)

        # Connect/Start data server (and get dataset statistic):
        self.log.info('Connecting data_server...')
        self._start_data_server()
        self.log.info('...done.')
        # ENGINE preparation:

        # Update params -3: pull engine-related kwargs, remove used:
        for key in self.params['engine'].keys():
            if key in kwargs.keys():
                self.params['engine'][key] = kwargs.pop(key)

        if self.engine is not None:
            # If full-blown bt.Cerebro() subclass has been passed:
            # Update info:
            msg = 'Custom Cerebro class used.'
            self.strategy = msg
            for key in self.params['engine'].keys():
                self.params['engine'][key] = msg

        # Note: either way, bt.observers.DrawDown observer [and logger] will be added to any BTgymStrategy instance
        # by BTgymServer process at runtime.

        else:
            # Default configuration for Backtrader computational engine (Cerebro),
            # if no bt.Cerebro() custom subclass has been passed,
            # get base class Cerebro(), using kwargs on top of defaults:
            self.engine = bt.Cerebro()
            msg = 'Base Cerebro class used.'

            # First, set STRATEGY configuration:
            if self.strategy is not None:
                # If custom strategy has been passed:
                msg2 = 'Custom Strategy class used.'

            else:
                # Base class strategy :
                self.strategy = BTgymBaseStrategy
                msg2 = 'Base Strategy class used.'

            # Add, using kwargs on top of defaults:
            #self.log.debug('kwargs for strategy: {}'.format(kwargs))
            strat_idx = self.engine.addstrategy(self.strategy, **kwargs)

            msg += ' ' + msg2

            # Second, set Cerebro-level configuration:
            self.engine.broker.setcash(self.params['engine']['start_cash'])
            self.engine.broker.setcommission(
                self.params['engine']['broker_commission'])
            self.engine.addsizer(bt.sizers.SizerFix,
                                 stake=self.params['engine']['fixed_stake'])

        self.log.info(msg)

        # Define observation space shape, minimum / maximum values and agent action space.
        # Retrieve values from configured engine or...

        # ...Update params -4:
        # Pull strategy defaults to environment params dict :
        for t_key, t_value in self.engine.strats[0][0][0].params._gettuple():
            self.params['strategy'][t_key] = t_value

        # Update it with values from strategy 'passed-to params':
        for key, value in self.engine.strats[0][0][2].items():
            self.params['strategy'][key] = value

        # ... Push it all back (don't ask):
        for key, value in self.params['strategy'].items():
            self.engine.strats[0][0][2][key] = value

        # For 'raw_state' min/max values,
        # the only way is to infer from raw Dataset price values (we already got those from data_server):
        if 'raw_state' in self.params['strategy']['state_shape'].keys():
            # Exclude 'volume' from columns we count:
            self.dataset_columns.remove('volume')

            #print(self.params['strategy'])
            #print('self.engine.strats[0][0][2]:', self.engine.strats[0][0][2])
            #print('self.engine.strats[0][0][0].params:', self.engine.strats[0][0][0].params._gettuple())

            # Override with absolute price min and max values:
            self.params['strategy']['state_shape']['raw_state'].low =\
                self.engine.strats[0][0][2]['state_shape']['raw_state'].low =\
                np.zeros(self.params['strategy']['state_shape']['raw_state'].shape) +\
                self.dataset_stat.loc['min', self.dataset_columns].min()

            self.params['strategy']['state_shape']['raw_state'].high = \
                self.engine.strats[0][0][2]['state_shape']['raw_state'].high = \
                np.zeros(self.params['strategy']['state_shape']['raw_state'].shape) + \
                self.dataset_stat.loc['max', self.dataset_columns].max()

            self.log.info(
                'Inferring `raw_state` high/low values from dataset: {:.6f} / {:.6f}.'
                .format(
                    self.dataset_stat.loc['min', self.dataset_columns].min(),
                    self.dataset_stat.loc['max', self.dataset_columns].max()))

        # Set observation space shape from engine/strategy parameters:
        self.observation_space = DictSpace(
            self.params['strategy']['state_shape'])

        self.log.debug('Obs. shape: {}'.format(self.observation_space.spaces))
        #self.log.debug('Obs. min:\n{}\nmax:\n{}'.format(self.observation_space.low, self.observation_space.high))

        # Set action space and corresponding server messages:
        self.action_space = spaces.Discrete(
            len(self.params['strategy']['portfolio_actions']))
        self.server_actions = self.params['strategy']['portfolio_actions']

        # Finally:
        self.server_response = None
        self.env_response = None

        #if not self.data_master:
        self._start_server()
        self.closed = False

        self.log.info('Environment is ready.')

    def _seed(self, seed=None):
        """
        Sets env. random seed.

        Args:
            seed:   int or None
        """
        np.random.seed(seed)

    @staticmethod
    def _comm_with_timeout(
        socket,
        message,
    ):
        """
        Exchanges messages via socket, timeout sensitive.

        Args:
            socket: zmq connected socket to communicate via;
            message: message to send;

        Note:
            socket zmq.RCVTIMEO and zmq.SNDTIMEO should be set to some finite number of milliseconds.

        Returns:
            dictionary:
                `status`: communication result;
                `message`: received message if status == `ok` or None;
                `time`: remote side response time.
        """
        response = dict(
            status='ok',
            message=None,
        )
        try:
            socket.send_pyobj(message)

        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'send_failed_due_to_connect_timeout'

            else:
                response['status'] = 'send_failed_for_unknown_reason'
            return response

        start = time.time()
        try:
            response['message'] = socket.recv_pyobj()
            response['time'] = time.time() - start

        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'receive_failed_due_to_connect_timeout'

            else:
                response['status'] = 'receive_failed_for_unknown_reason'
            return response

        return response

    def _start_server(self):
        """
        Configures backtrader REQ/REP server instance and starts server process.
        """

        # Ensure network resources:
        # 1. Release client-side, if any:
        if self.context:
            self.context.destroy()
            self.socket = None

        # 2. Kill any process using server port:
        cmd = "kill $( lsof -i:{} -t ) > /dev/null 2>&1".format(self.port)
        os.system(cmd)

        # Set up client channel:
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.setsockopt(zmq.RCVTIMEO, self.connect_timeout * 1000)
        self.socket.setsockopt(zmq.SNDTIMEO, self.connect_timeout * 1000)
        self.socket.connect(self.network_address)

        # Configure and start server:
        self.server = BTgymServer(
            cerebro=self.engine,
            render=self.renderer,
            network_address=self.network_address,
            data_network_address=self.data_network_address,
            connect_timeout=self.connect_timeout,
            log_level=self.log_level,
            task=self.task,
        )
        self.server.daemon = False
        self.server.start()
        # Wait for server to startup:
        time.sleep(1)

        # Check connection:
        self.log.info('Server started, pinging {} ...'.format(
            self.network_address))

        self.server_response = self._comm_with_timeout(
            socket=self.socket, message={'ctrl': 'ping!'})
        if self.server_response['status'] in 'ok':
            self.log.info('Server seems ready with response: <{}>'.format(
                self.server_response['message']))

        else:
            msg = 'Server unreachable with status: <{}>.'.format(
                self.server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        self._closed = False

    def _stop_server(self):
        """
        Stops BT server process, releases network resources.
        """
        if self.server:

            if self._force_control_mode():
                # In case server is running and client side is ok:
                self.socket.send_pyobj({'ctrl': '_stop'})
                self.server_response = self.socket.recv_pyobj()

            else:
                self.server.terminate()
                self.server.join()
                self.server_response = 'Server process terminated.'

            self.log.info('{} Exit code: {}'.format(self.server_response,
                                                    self.server.exitcode))

        # Release client-side, if any:
        if self.context:
            self.context.destroy()
            self.socket = None

    def _force_control_mode(self):
        """Puts BT server to control mode.
        """
        # Check whether there are any faults with server process or connection:
        network_error = [
            (not self.server or not self.server.is_alive(),
             'No running server found. Hint: forgot to call reset()?'),
            (not self.context
             or self.context.closed, 'No network connection found.'),
        ]
        for (err, msg) in network_error:
            if err:
                self.log.info(msg)
                self.server_response = msg
                return False

        # If everything works, insist to go 'control':
        self.server_response = {}
        attempt = 0

        while 'ctrl' not in self.server_response:
            self.socket.send_pyobj({'ctrl': '_done'})
            self.server_response = self.socket.recv_pyobj()
            attempt += 1
            self.log.debug(
                'FORCE CONTROL MODE attempt: {}.\nResponse: {}'.format(
                    attempt, self.server_response))

        return True

    def _assert_response(self, response):
        """
        Simple watcher:
        roughly checks if we are really talking to the environment (i.e. the episode is running).
        Raises an exception if the response given is not as expected.
        """
        try:
            assert type(response) == tuple and len(response) == 4

        except AssertionError:
            msg = 'Unexpected environment response: {}\nHint: Forgot to call reset() or reset_data()?'.format(
                response)
            self.log.exception(msg)
            raise AssertionError(msg)

        self.log.debug('Response checker received:\n{}\nas type: {}'.format(
            response, type(response)))

    def _print_space(self, space, _tab=''):
        """
        Parses observation space shape or response.

        Args:
            space: gym observation space or state.

        Returns:
            description as string.
        """
        response = ''
        if type(space) in [dict, OrderedDict]:
            for key, value in space.items():
                response += '\n{}{}:{}\n'.format(
                    _tab, key, self._print_space(value, '   '))

        elif type(space) in [spaces.Dict, DictSpace]:
            for s in space.spaces:
                response += self._print_space(s, '   ')

        elif type(space) in [tuple, list]:
            for i in space:
                response += self._print_space(i, '   ')

        elif type(space) == np.ndarray:
            response += '\n{}array of shape: {}, low: {}, high: {}'.format(
                _tab, space.shape, space.min(), space.max())

        else:
            response += '\n{}{}, '.format(_tab, space)
            try:
                response += 'low: {}, high: {}'.format(space.low.min(),
                                                       space.high.max())

            except (KeyError, AttributeError, ArithmeticError,
                    ValueError) as e:
                pass
                #response += '\n{}'.format(e)

        return response

    def reset(self, **kwargs):
        """
        Implementation of OpenAI Gym env.reset method. Starts new episode. Episode data are sampled
        according to data provider class logic, controlled via kwargs. Refer to `BTgym_Server` and data provider
        classes for details.

        Args:
            kwargs:         any kwargs; this dictionary is passed through to BTgym_server side without any checks and
                            modifications; currently used for data sampling control;

        Returns:
            observation space state

        Notes:
            Currently accepted kwargs are::


                episode_config=dict(
                    get_new=True,
                    sample_type=0,
                    b_alpha=1,
                    b_beta=1
                ),
                trial_config=dict(
                    get_new=True,
                    sample_type=0,
                    b_alpha=1,
                    b_beta=1
                )

        """
        # Data Server check:
        if self.data_master:
            if not self.data_server or not self.data_server.is_alive():
                self.log.info('No running data_server found, starting...')
                self._start_data_server()

            # Domain dataset status check:
            self.data_server_response = self._comm_with_timeout(
                socket=self.data_socket, message={'ctrl': '_get_info'})
            if not self.data_server_response['message']['dataset_is_ready']:
                self.log.info(
                    'Data domain `reset()` called prior to `reset_data()` with [possibly inconsistent] defaults.'
                )
                self.reset_data()

        # Server process check:
        if not self.server or not self.server.is_alive():
            self.log.info('No running server found, starting...')
            self._start_server()

        if self._force_control_mode():
            self.server_response = self._comm_with_timeout(socket=self.socket,
                                                           message={
                                                               'ctrl':
                                                               '_reset',
                                                               'kwargs': kwargs
                                                           })
            # Get initial environment response:
            self.env_response = self.step(0)

            # Check (once) if it is really (o,r,d,i) tuple:
            self._assert_response(self.env_response)

            # Check (once) if state_space is as expected:
            try:
                #assert self.observation_space.contains(self.env_response[0])
                pass

            except (AssertionError, AttributeError) as e:
                msg1 = self._print_space(self.observation_space.spaces)
                msg2 = self._print_space(self.env_response[0])
                msg3 = ''
                for step_info in self.env_response[-1]:
                    msg3 += '{}\n'.format(step_info)
                msg = ('\nState observation shape/range mismatch!\n' +
                       'Space set by env: \n{}\n' +
                       'Space returned by server: \n{}\n' +
                       'Full response:\n{}\n' + 'Reward: {}\n' + 'Done: {}\n' +
                       'Info:\n{}\n' +
                       'Hint: Wrong Strategy.get_state() parameters?').format(
                           msg1,
                           msg2,
                           self.env_response[0],
                           self.env_response[1],
                           self.env_response[2],
                           msg3,
                       )
                self.log.exception(msg)
                self._stop_server()
                raise AssertionError(msg)

            return self.env_response[0]  #["raw_state"][np.newaxis]

        else:
            msg = 'Something went wrong. env.reset() can not get response from server.'
            self.log.exception(msg)
            raise ChildProcessError(msg)

    def step(self, action):
        """
        Implementation of OpenAI Gym env.step() method.
        Makes a step in the environment.

        Args:
            action:     int, number representing action from env.action_space

        Returns:
            tuple (Observation, Reward, Done, Info)

        """
        # Sanity check: action is valid, environment is open and the socket is alive:
        if self.action_space.contains(action)\
            and not self._closed\
            and (self.socket is not None)\
            and not self.socket.closed:
            pass

        else:
            msg = ('\nAt least one of these is true:\n' +
                   'Action error: (space is {}, action sent is {}): {}\n' +
                   'Environment closed: {}\n' +
                   'Network error [socket does not exist or is closed]: {}\n' +
                   'Hint: forgot to call reset()?').format(
                       self.action_space,
                       action,
                       not self.action_space.contains(action),
                       self._closed,
                       not self.socket or self.socket.closed,
                   )
            self.log.exception(msg)
            raise AssertionError(msg)

        # Send action to backtrader engine, receive environment response
        env_response = self._comm_with_timeout(
            socket=self.socket,
            message={'action': self.server_actions[action]})
        if not env_response['status'] in 'ok':
            msg = '.step(): server unreachable with status: <{}>.'.format(
                env_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        # self.env_response = env_response ['message']
        tempNew_state, tempReward, tempDone, tempInfo = env_response['message']
        tempNew_state = tempNew_state["raw_state"][np.newaxis]
        self.env_response = tempNew_state, tempReward, tempDone, tempInfo

        return self.env_response

    def close(self):
        """
        Implementation of OpenAI Gym env.close method.
        Puts BTgym server in Control Mode.
        """
        self.log.debug('close.call()')
        self._stop_server()
        self._stop_data_server()
        self.log.info('Environment closed.')

    def get_stat(self):
        """
        Returns last run episode statistics.

        Note:
            when invoked, forces running episode to terminate.
        """
        if self._force_control_mode():
            self.socket.send_pyobj({'ctrl': '_getstat'})
            return self.socket.recv_pyobj()

        else:
            return self.server_response

    def render(self, mode='other_mode', close=False):
        """
        Implementation of OpenAI Gym env.render method.
        Visualises current environment state.

        Args:
            `mode`:     str, any of these::

                            `human` - current state observation as price lines;
                            `episode` - plotted results of last completed episode.
                            [other_key] - corresponding to any custom observation space key
        """
        if close:
            return None

        if not self._closed\
            and self.socket\
            and not self.socket.closed:
            pass

        else:
            msg = ('\nCan not get renderings.'
                   '\nAt least one of these is true:\n' +
                   'Environment closed: {}\n' +
                   'Network error [socket does not exist or is closed]: {}\n' +
                   'Hint: forgot to call reset()?').format(
                       self._closed,
                       not self.socket or self.socket.closed,
                   )
            self.log.warning(msg)
            return None
        if mode not in self.render_modes:
            raise ValueError('Unexpected render mode {}'.format(mode))
        self.socket.send_pyobj({'ctrl': '_render', 'mode': mode})

        rgb_array_dict = self.socket.recv_pyobj()

        self.rendered_rgb.update(rgb_array_dict)

        return self.rendered_rgb[mode]

    def _stop(self):
        """
        Finishes current episode if any, does nothing otherwise. Leaves server running.
        """
        if self._force_control_mode():
            self.log.info('Episode stop forced.')

    def _restart_server(self):
        """Restarts server.
        """
        self._stop_server()
        self._start_server()
        self.log.info('Server restarted.')

    def _start_data_server(self):
        """
        For data_master environment:
            - configures backtrader REQ/REP server instance and starts server process.

        For others:
            - establishes network connection to existing data_server.
        """
        self.data_server = None

        # Ensure network resources:
        # 1. Release client-side, if any:
        if self.data_context:
            self.data_context.destroy()
            self.data_socket = None

        # Only data_master launches/stops data_server process:
        if self.data_master:
            # 2. Kill any process using server port:
            cmd = "kill $( lsof -i:{} -t ) > /dev/null 2>&1".format(
                self.data_port)
            os.system(cmd)

            # Configure and start server:
            self.data_server = BTgymDataFeedServer(
                dataset=self.dataset,
                network_address=self.data_network_address,
                log_level=self.log_level,
                task=self.task)
            self.data_server.daemon = False
            self.data_server.start()
            # Wait for server to startup
            time.sleep(1)

        # Set up client channel:
        self.data_context = zmq.Context()
        self.data_socket = self.data_context.socket(zmq.REQ)
        self.data_socket.setsockopt(zmq.RCVTIMEO, self.connect_timeout * 1000)
        self.data_socket.setsockopt(zmq.SNDTIMEO, self.connect_timeout * 1000)
        self.data_socket.connect(self.data_network_address)

        # Check connection:
        self.log.debug('Pinging data_server at: {} ...'.format(
            self.data_network_address))

        self.data_server_response = self._comm_with_timeout(
            socket=self.data_socket, message={'ctrl': 'ping!'})
        if self.data_server_response['status'] == 'ok':
            self.log.debug(
                'Data_server seems ready with response: <{}>'.format(
                    self.data_server_response['message']))

        else:
            msg = 'Data_server unreachable with status: <{}>.'.\
                format(self.data_server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        # Get info and statistic:
        self.dataset_stat, self.dataset_columns, self.data_server_pid = \
            self._get_dataset_info()

    def _stop_data_server(self):
        """
        For data_master:
            - stops BT server process, releases network resources.
        """
        if self.data_master and self.data_server is not None:
            if self.data_server.is_alive():
                # In case server is running and is ok:
                self.data_socket.send_pyobj({'ctrl': '_stop'})
                self.data_server_response = self.data_socket.recv_pyobj()

            else:
                self.data_server.terminate()
                self.data_server.join()
                self.data_server_response = 'Data_server process terminated.'

            self.log.info('{} Exit code: {}'.format(self.data_server_response,
                                                    self.data_server.exitcode))

        if self.data_context:
            self.data_context.destroy()
            self.data_socket = None

    def _restart_data_server(self):
        """
        Restarts data_server.
        """
        if self.data_master:
            self._stop_data_server()
            self._start_data_server()

    def _get_dataset_info(self):
        """
        Retrieves dataset descriptive statistic.
        """
        self.data_socket.send_pyobj({'ctrl': '_get_info'})
        self.data_server_response = self.data_socket.recv_pyobj()

        return self.data_server_response['dataset_stat'],\
               self.data_server_response['dataset_columns'],\
               self.data_server_response['pid']

    def reset_data(self, **kwargs):
        """
        Resets data provider class used, whatever it means for that class. Gets data_server ready to provide data.
        Supposed to be called before first env.reset().

        Note:
            when invoked, forces running episode to terminate.

        Args:
            **kwargs:   data provider class .reset() method specific.
        """
        if self.closed:
            self._start_server()
            if self.data_master:
                self._start_data_server()
            self.closed = False

        else:
            _ = self._force_control_mode()

        if self.data_master:
            if self.data_server is None or not self.data_server.is_alive():
                self._restart_data_server()

            self.data_server_response = self._comm_with_timeout(
                socket=self.data_socket,
                message={
                    'ctrl': '_reset_data',
                    'kwargs': kwargs
                })
            if self.data_server_response['status'] == 'ok':
                self.log.debug(
                    'Dataset seems ready with response: <{}>'.format(
                        self.data_server_response['message']))

            else:
                msg = 'Data_server unreachable with status: <{}>.'. \
                    format(self.data_server_response['status'])
                self.log.error(msg)
                raise SystemExit(msg)

        else:
            pass
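
Taken together, the methods above implement the standard OpenAI Gym contract on top of the backtrader/data server pair. A minimal usage sketch under assumptions: the class is instantiated as `BTgymEnv` with a dataset argument, exposes a Gym-style `action_space`, and `reset()` starts the servers on first use (none of these names are confirmed by the snippet itself):

# Hedged usage sketch; BTgymEnv, its constructor arguments and action_space
# are assumptions made for illustration only.
env = BTgymEnv(filename='./data/prices.csv')

obs = env.reset()                      # spins up data/backtrader servers on first call
done = False
while not done:
    action = env.action_space.sample()             # pick a random server action
    obs, reward, done, info = env.step(action)

print(env.get_stat())                  # episode statistics (terminates a running episode)
rgb = env.render(mode='human')         # current state as an rgb array
env.close()                            # puts the BTgym server back into control mode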
Example #35
0
File: twitter.py Project: m00n/mutc
class TwitterThread(QThread):
    newTweets = pyqtSignal(object, object)

    RATE_CHECK_INTERVAL = 60 * 10

    def __init__(self, parent, subscriptions, limit_config):
        QThread.__init__(self, parent)

        self.subscriptions = subscriptions
        self.limit_config = limit_config

        self.ticks = 1
        self.tick_count = 60

        self.running = True
        self.force_check = threading.Event()

        self.logger = Logger("twitter-thread")
        self.rate_logger = Logger("twitter-limits")

        #
        self.last_rate_check = time()
        self.ticks_for_account = {}
        self.tick_counter = {}

    def run(self):
        while self.running:
            self.check_intervals()

            if time() - self.last_rate_check > self.RATE_CHECK_INTERVAL:
                self.rate_logger.info("Recalculating ticks")
                self.calc_rates()

            sleep(self.ticks)

    def check_intervals(self):
        subscriptions = self.get_subscriptions()
        accounts = set(subscription.account for subscription in subscriptions
                        if subscription.account.me)

        for account in accounts:
            #__rticks = self.tick_counter.get(account)
            #if __rticks and __rticks % 5 == 0:
                #print >>sys.stderr, account, self.tick_counter.get(account)

            if account not in self.tick_counter:
                self.calc_rates()
                self.tick_counter[account] = 1 # force checking

            #print account, self.tick_counter[account]
            self.tick_counter[account] -= 1
            if self.tick_counter[account] == 0:
                self.tick_counter[account] = self.ticks_for_account[account]
                self.check_subscriptions(account)

    def get_subscriptions(self):
        with self.subscriptions:
            return self.subscriptions.values()

    def calc_rates(self):
        subscriptions = self.get_subscriptions()
        calls_per_account = defaultdict(int)

        for subscription in subscriptions:
            calls_per_account[subscription.account] += subscription.calls

        for account, calls in calls_per_account.iteritems():
            if account.me:
                rate_info = safe_api_request(account.api.rate_limit_status)
                ticks = calc_ticks(rate_info, calls, **self.limit_config)
                self.ticks_for_account[account] = ticks
                self.tick_counter[account] = ticks

                self.rate_logger.debug(
                    "{0}; calls: {1}({2}); ticks: {3}",
                    repr(account.me.screen_name),
                    calls,
                    rate_info["remaining_hits"],
                    ticks
                )

        self.last_rate_check = time()

    def check_subscriptions(self, account=None):
        subscriptions = self.get_subscriptions()

        self.logger.debug("Checking {0} subscriptions", len(subscriptions))

        for subscription in subscriptions:
            if account and subscription.account != account:
                continue

            if subscription.account.api:
                try:
                    tweets = subscription.update()
                except tweepy.TweepError as error:
                    self.logger.exception("Error while fetching tweets")
                except Exception as exc:
                    self.logger.exception("Unexpected exception")
                else:
                    if tweets:
                        self.logger.debug("{0} new tweets for {1}/{2}",
                            len(tweets),
                            subscription.account,
                            subscription.subscription_type
                        )
                        self.newTweets.emit(subscription, tweets)

    def stepped_sleep(self):
        for x in xrange(self.tick_count):
            sleep(self.ticks)
            if self.force_check.is_set():
                self.force_check.clear()
                break
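
The thread spreads API usage by counting ticks per account: check_intervals() decrements a counter every loop iteration and only calls check_subscriptions() when it reaches zero, while calc_rates() refreshes the counters from Twitter's rate-limit status. The real calc_ticks helper is not part of this example; the sketch below is only an illustration of the kind of arithmetic it could perform, assuming an hourly rate window and the "remaining_hits" field used above:

# Purely illustrative; the project's calc_ticks may differ in signature and logic.
def calc_ticks_sketch(rate_info, calls_per_check, min_ticks=60, headroom=0.75):
    window_seconds = 60 * 60                          # assume an hourly limit window
    budget = rate_info["remaining_hits"] * headroom   # keep some calls in reserve
    checks_allowed = max(budget / float(calls_per_check or 1), 1.0)
    # Number of one-second ticks to wait between checks for this account:
    return max(int(window_seconds / checks_allowed), min_ticks)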
Example #36
0
File: twitter.py Project: m00n/mutc
class Twitter(QObject):
    newTweets = pyqtSignal(object, list)
    newSubscription = pyqtSignal("QVariant")

    accountCreated = pyqtSignal(QObject)

    tweetRemoved = pyqtSignal("QVariant")
    tweetChanged = pyqtSignal(bool, unicode, object)

    newTweetsForModel = pyqtSignal(TweetModel, list, int)

    requestSent = pyqtSignal("QVariant", "QVariant")

    def __init__(self, config):
        QObject.__init__(self)

        self.account_model = AccountModel(self)

        self.subscriptions = LockableDict()

        self.panel_model = PanelModel(
            self,
            self.subscriptions,
        )

        self.thread = TwitterThread(self, self.subscriptions, config["limits"])
        self.thread.newTweets.connect(self.newTweets.emit)

        self.logger = Logger("twitter")

    def locking(func):
        @wraps(func)
        def wrapper(self, *args, **kwds):
            try:
                func(self, *args, **kwds)
            except Exception as error:
                self.requestSent.emit(False, unicode(error))
            else:
                self.requestSent.emit(True, None)

        return wrapper

    def check_selected_accounts(self, accounts):
        if not accounts:
            raise NoAccountSelectedException(
                "You have to select at least one account"
            )

    def on_account_connected(self, account):
        self.panel_model.setScreenName(account.uuid, account.me.screen_name)

    @pyqtSlot("QVariant")
    def subscribe(self, request):
        subscription = create_subscription(
            request["type"],
            request["account"],
            request.get("args", "")
        )

        tweet_model = self.panel_model.addPanel(subscription)
        self.tweetRemoved.connect(tweet_model.removeTweet)
        self.tweetChanged.connect(tweet_model.replaceTweet)

        self.thread.force_check.set()

    @pyqtSlot("QVariant", "QVariant", "QVariant")
    @async
    @locking
    def tweet(self, accounts, tweet, in_reply=None):
        self.check_selected_accounts(accounts)
        in_reply = in_reply if in_reply else None

        for account in accounts:
            safe_api_request(
                lambda api=account.api: api.update_status(tweet, in_reply)
            )

    @pyqtSlot("QVariant", "QVariant")
    @async
    @locking
    def retweet(self, accounts, tweet_id):
        self.check_selected_accounts(accounts)

        for account in accounts:
            # why are these two calls handled so differently?
            status = safe_api_request(
                lambda api=account.api: api.retweet(tweet_id),
            )
            old_status = safe_api_request(
                lambda: account.api.get_status(tweet_id)
            )

            status.retweeted = True
            status.created_at = old_status.created_at

            if hasattr(old_status, "retweeted_status"):
                # RTed a retweet
                status.other_retweet = old_status

            self.tweetChanged.emit(False, tweet_id, status)

    @pyqtSlot("QVariant", "QVariant")
    @async
    @locking
    def undo_retweet(self, accounts, tweet_id):
        self.check_selected_accounts(accounts)

        for account in accounts:
            status = safe_api_request(
                lambda: account.api.destroy_status(tweet_id)
            )
            self.tweetChanged.emit(True, tweet_id, status.retweeted_status)


    @pyqtSlot("QVariant", "QVariant")
    @async
    @locking
    def favorite(self, accounts, tweet_id):
        self.check_selected_accounts(accounts)

        for account in accounts:
            self.logger.debug("api.create_favorite({0})", tweet_id)

            status = safe_api_request(
                lambda api=account.api: api.create_favorite(tweet_id),
            )

            status.favorited = True

            self.tweetChanged.emit(False, tweet_id, status)

    @pyqtSlot("QVariant", "QVariant")
    @async
    @locking
    def undo_favorite(self, accounts, tweet_id):
        self.check_selected_accounts(accounts)

        for account in accounts:
            self.logger.debug("api.destroy_favorite({0})", tweet_id)

            status = safe_api_request(
                lambda: account.api.destroy_favorite(tweet_id)
            )

            status.favorited = False

            self.tweetChanged.emit(True, tweet_id, status)

            # Remove tweet from Favorites-Panels
            for subscription, model in self.panel_model.panels:
                self.logger.debug("undo_favorite: {0} in {1}/{2} ({3})",
                                  repr(tweet_id),
                                  subscription.subscription_type,
                                  subscription.account,
                                  account)
                if subscription.subscription_type == "favorites" \
                   and subscription.account == account:
                    call_in_mainloop(model.removeTweet, tweet_id)

    @pyqtSlot("QVariant", "QVariant", "QVariant")
    @async
    @locking
    def send_direct_message(self, account, to_twitter_id, text):
        safe_api_request(
            lambda: account.api.send_direct_message(
                user_id=to_twitter_id,
                text=text
            )
        )

    @pyqtSlot("QVariant")
    @async
    @locking
    def destroy_tweet(self, tweet_id):
        # FIXME
        status = self.account_model.accounts[0].api.get_status(tweet_id)
        author_id = status.author.id

        for account in self.account_model.accounts:
            if author_id == account.me.id:
                account.api.destroy_status(tweet_id)
                self.requestSent.emit(True, None)

                self.tweetRemoved.emit(tweet_id)

                break
        else:
            raise TweepError(
                "This tweet doesn't belong to any of your accounts"
            )

    @pyqtSlot("QVariant", "QVariant")
    @async
    @locking
    def destroy_direct_message(self, account, tweet_id):
        safe_api_request(
            lambda: account.api.destroy_direct_message(tweet_id)
        )
        self.tweetRemoved.emit(tweet_id)

    @pyqtSlot(result=QObject)
    def new_account(self):
        account = Account()
        account.setParent(self)
        #account.ready.connect(partial(self.announce_account, account))
        self.accountCreated.emit(account)
        return account

    def add_account(self, account):
        self.account_model.addAccount(account)
        account.connected.connect(self.on_account_connected)

    def connect(self):
        """
        Connects all account and starts the TwitterThread which fetches
        new tweets
        """
        for account in self.account_model.accounts:
            account.connect()

        self.thread.start()

    @pyqtSlot("QVariant")
    @async
    @locking
    def debug_tweet(self, tweet):
            self.logger.debug("debug_tweet: {0}", tweet)
Example #37
0
class Worker(object):
    redis_worker_namespace_prefix = 'rq:worker:'
    redis_workers_keys = 'rq:workers'

    @classmethod
    def all(cls, connection=None):
        """Returns an iterable of all Workers.
        """
        if connection is None:
            connection = get_current_connection()
        reported_working = connection.smembers(cls.redis_workers_keys)
        workers = [cls.find_by_key(key, connection) for key in reported_working]
        return compact(workers)

    @classmethod
    def find_by_key(cls, worker_key, connection=None):
        """Returns a Worker instance, based on the naming conventions for
        naming the internal Redis keys.  Can be used to reverse-lookup Workers
        by their Redis keys.
        """
        prefix = cls.redis_worker_namespace_prefix
        if not worker_key.startswith(prefix):
            raise ValueError('Not a valid RQ worker key: %s' % (worker_key,))

        if connection is None:
            connection = get_current_connection()
        if not connection.exists(worker_key):
            return None

        name = worker_key[len(prefix):]
        worker = cls([], name)
        queues = connection.hget(worker.key, 'queues')
        worker._state = connection.hget(worker.key, 'state') or '?'
        if queues:
            worker.queues = map(Queue, queues.split(','))
        return worker


    def __init__(self, queues, name=None, rv_ttl=500, connection=None):  # noqa
        if connection is None:
            connection = get_current_connection()
        self.connection = connection
        if isinstance(queues, Queue):
            queues = [queues]
        self._name = name
        self.queues = queues
        self.validate_queues()
        self.rv_ttl = rv_ttl
        self._state = 'starting'
        self._is_horse = False
        self._horse_pid = 0
        self._stopped = False
        self.log = Logger('worker')
        self.failed_queue = get_failed_queue(connection=self.connection)


    def validate_queues(self):  # noqa
        """Sanity check for the given queues."""
        if not iterable(self.queues):
            raise ValueError('Argument queues not iterable.')
        for queue in self.queues:
            if not isinstance(queue, Queue):
                raise NoQueueError('Give each worker at least one Queue.')

    def queue_names(self):
        """Returns the queue names of this worker's queues."""
        return map(lambda q: q.name, self.queues)

    def queue_keys(self):
        """Returns the Redis keys representing this worker's queues."""
        return map(lambda q: q.key, self.queues)


    @property  # noqa
    def name(self):
        """Returns the name of the worker, under which it is registered to the
        monitoring system.

        By default, the name of the worker is constructed from the current
        (short) host name and the current PID.
        """
        if self._name is None:
            hostname = socket.gethostname()
            shortname, _, _ = hostname.partition('.')
            self._name = '%s.%s' % (shortname, self.pid)
        return self._name

    @property
    def key(self):
        """Returns the worker's Redis hash key."""
        return self.redis_worker_namespace_prefix + self.name

    @property
    def pid(self):
        """The current process ID."""
        return os.getpid()

    @property
    def horse_pid(self):
        """The horse's process ID.  Only available in the worker.  Will return
        0 in the horse part of the fork.
        """
        return self._horse_pid

    @property
    def is_horse(self):
        """Returns whether or not this is the worker or the work horse."""
        return self._is_horse

    def procline(self, message):
        """Changes the current procname for the process.

        This can be used to make `ps -ef` output more readable.
        """
        setprocname('rq: %s' % (message,))


    def register_birth(self):  # noqa
        """Registers its own birth."""
        self.log.debug('Registering birth of worker %s' % (self.name,))
        if self.connection.exists(self.key) and \
                not self.connection.hexists(self.key, 'death'):
            raise ValueError(
                    'There exists an active worker named \'%s\' '
                    'already.' % (self.name,))
        key = self.key
        now = time.time()
        queues = ','.join(self.queue_names())
        with self.connection.pipeline() as p:
            p.delete(key)
            p.hset(key, 'birth', now)
            p.hset(key, 'queues', queues)
            p.sadd(self.redis_workers_keys, key)
            p.execute()

    def register_death(self):
        """Registers its own death."""
        self.log.debug('Registering death')
        with self.connection.pipeline() as p:
            # We cannot use self.state = 'dead' here, because that would
            # rollback the pipeline
            p.srem(self.redis_workers_keys, self.key)
            p.hset(self.key, 'death', time.time())
            p.expire(self.key, 60)
            p.execute()

    def set_state(self, new_state):
        self._state = new_state
        self.connection.hset(self.key, 'state', new_state)

    def get_state(self):
        return self._state

    state = property(get_state, set_state)

    @property
    def stopped(self):
        return self._stopped

    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def request_force_stop(signum, frame):
            """Terminates the application (cold shutdown).
            """
            self.log.warning('Cold shut down.')

            # Take down the horse with the worker
            if self.horse_pid:
                msg = 'Taking down horse %d with me.' % self.horse_pid
                self.log.debug(msg)
                try:
                    os.kill(self.horse_pid, signal.SIGKILL)
                except OSError as e:
                    # ESRCH ("No such process") is fine with us
                    if e.errno != errno.ESRCH:
                        self.log.debug('Horse already down.')
                        raise
            raise SystemExit()

        def request_stop(signum, frame):
            """Stops the current worker loop but waits for child processes to
            end gracefully (warm shutdown).
            """
            self.log.debug('Got %s signal.' % signal_name(signum))

            signal.signal(signal.SIGINT, request_force_stop)
            signal.signal(signal.SIGTERM, request_force_stop)

            if self.is_horse:
                self.log.debug('Ignoring signal %s.' % signal_name(signum))
                return

            msg = 'Warm shut down. Press Ctrl+C again for a cold shutdown.'
            self.log.warning(msg)
            self._stopped = True
            self.log.debug('Stopping after current horse is finished.')

        signal.signal(signal.SIGINT, request_stop)
        signal.signal(signal.SIGTERM, request_stop)


    def work(self, burst=False):  # noqa
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.state = 'starting'
        try:
            while True:
                if self.stopped:
                    self.log.info('Stopping on request.')
                    break
                self.state = 'idle'
                qnames = self.queue_names()
                self.procline('Listening on %s' % ','.join(qnames))
                self.log.info('')
                self.log.info('*** Listening on %s...' % \
                        green(', '.join(qnames)))
                wait_for_job = not burst
                try:
                    result = Queue.dequeue_any(self.queues, wait_for_job, \
                            connection=self.connection)
                    if result is None:
                        break
                except UnpickleError as e:
                    msg = '*** Ignoring unpickleable data on %s.' % \
                            green(e.queue.name)
                    self.log.warning(msg)
                    self.log.debug('Data follows:')
                    self.log.debug(e.raw_data)
                    self.log.debug('End of unreadable data.')
                    self.failed_queue.push_job_id(e.job_id)
                    continue

                job, queue = result
                self.log.info('%s: %s (%s)' % (green(queue.name),
                    blue(job.description), job.id))

                self.state = 'busy'
                self.fork_and_perform_job(job)

                did_perform_work = True
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work

    def fork_and_perform_job(self, job):
        """Spawns a work horse to perform the actual work and passes it a job.
        The worker will wait for the work horse and make sure it executes
        within the given timeout bounds, or will end the work horse with
        SIGALRM.
        """
        child_pid = os.fork()
        if child_pid == 0:
            self.main_work_horse(job)
        else:
            self._horse_pid = child_pid
            self.procline('Forked %d at %d' % (child_pid, time.time()))
            while True:
                try:
                    os.waitpid(child_pid, 0)
                    break
                except OSError as e:
                    # In case we encountered an OSError due to EINTR (which is
                    # caused by a SIGINT or SIGTERM signal during
                    # os.waitpid()), we simply ignore it and enter the next
                    # iteration of the loop, waiting for the child to end.  In
                    # any other case, this is some other unexpected OS error,
                    # which we don't want to catch, so we re-raise those ones.
                    if e.errno != errno.EINTR:
                        raise

    def main_work_horse(self, job):
        """This is the entry point of the newly spawned work horse."""
        # After fork()'ing, always assure we are generating random sequences
        # that are different from the worker.
        random.seed()
        self._is_horse = True
        self.log = Logger('horse')

        success = self.perform_job(job)

        # os._exit() is the way to exit from child processes after a fork(),
        # in contrast to the regular sys.exit()
        os._exit(int(not success))

    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or 180):
                rv = job.perform()
        except Exception as e:
            fq = self.failed_queue
            self.log.exception(red(str(e)))
            self.log.warning('Moving job to %s queue.' % fq.name)

            fq.quarantine(job, exc_info=traceback.format_exc())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(unicode(rv)),))

        if rv is not None:
            p = self.connection.pipeline()
            p.hset(job.key, 'result', dumps(rv))
            p.expire(job.key, self.rv_ttl)
            p.execute()
        else:
            # Cleanup immediately
            job.delete()

        return True
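
The work loop above is the classic RQ pattern: register the worker, pop jobs from its queues, fork a work horse per job and quarantine failures. A minimal start-up sketch, assuming a Queue class from the same package that accepts a queue name (its connection handling is not shown in this example):

# Hedged usage sketch for the Worker above.
from redis import Redis

redis_conn = Redis()
worker = Worker([Queue('default'), Queue('low')],
                name='worker-1',
                connection=redis_conn)
worker.work(burst=False)   # blocks, performing jobs until a warm shutdown is requested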
Example #38
0
class InsteonPLM(object):
    """
    Class used to connect to an Insteon serial power line modem (PLM).
    """

    debug = 1
    # Controls verbosity output
    Interface = None

    def __init__(self, port="/dev/insteon_plm", redislog=False):
        """        
        """
        if redislog:
            self.Log = logger.RedisLogger("insteon.py:InsteonPLM")
            self.Log.addHandler(handlers.RedisHandler.to("log", host="localhost", port=6379))
        else:
            self.Log = Logger("InsteonPLM")

        self.channel = "cmd:insteon"
        self.redis = redis.Redis()
        self.pubsub = self.redis.pubsub()

        if os.name == "nt":
            port = port - 1
        try:
            self.uart = serial.Serial(port=port, baudrate=19200, timeout=3)
            self.uart.timeout = 1
            self.Log.debug("__init__(port={0}, channel={1})".format(port, self.channel))
        except Exception:
            self.uart = None
            self.Log.error("Error occurred while opening serial interface")
        self._start_listner()

    def __del__(self):
        if self.is_open():
            self._stop_thread()
            self.uart.close()

    def __unicode__(self):
        if self.uart is not None:
            name = "PLM(%s)" % self.uart.port
        else:
            name = "PLM(None)"

        return name

    def is_open(self):
        """Return True if the serial port is open."""
        return self.uart.isOpen()

    def __open__(self):
        if not (self.uart.isOpen()):
            try:
                self.uart.open()
                self.Log.debug("Port {} is now open".format(str(self.uart.port)))
                return PORT_OPEN
            except serial.SerialException:
                self.Log.error("Serial exception, cannot open port: {}".format(str(self.uart.port)))
                return PORT_ERROR
            except Exception:
                self.Log.error("Unexpected exception, cannot open port: {}".format(str(self.uart.port)))
                return PORT_ERROR
        else:
            self.Log.debug("Port {} is already open".format(str(self.uart.port)))
            return PORT_OPEN

    def read(self):
        if self.uart is not None:
            data = self.uart.read(self.uart.inWaiting())
            self.Log.debug("read(): = {}".format(str(data)))
        else:
            self.Log.debug("No uartial connection")
            data = ""
        return data

    def send(self, cmd):
        self.Log.debug("send(cmd={}):".format(str(cmd)))
        if not isinstance(cmd, list):
            raise TypeError("cmd must be a list")

        if len(cmd) < 2:
            raise ValueError("every insteon command must be at least 2 bytes long")

        if cmd[0] != 2:
            raise ValueError("All insteon commands must start with 0x02 (2 dec)")

        cmd_meta = cmd_dict.get(cmd[1], 0)
        if isinstance(cmd_meta, int) and cmd_meta == 0:
            raise ValueError("Command 0x{0:x} ({0}) was not found in command dictionary".format(cmd[1]))

        cmd_length = cmd_meta[1]

        if len(cmd) != cmd_length:
            raise ValueError('Command 0x{0:x} ({0}) - "{1}", must be {2} bytes'.format(cmd[1], cmd_meta[0], cmd_length))

        if self.is_open():
            out = self.uart.write(cmd)
        else:
            self.Log.debug("Serial port is not open")
            out = 0

        if out != cmd_length:
            self.Log.debug("Warning serial port sent {0} bytes while {1} were expected".format(out, cmd_length))

        return out

    def query(self, cmd, expected_response_length=None):
        # read data which could have arrived
        leftover_buffer = self.read()

        if cmd[1] not in cmd_dict:
            self.Log.debug("Command not found")
            return [None, leftover_buffer]

        cmd_details = cmd_dict.get(cmd[1])
        if expected_response_length is None:
            expected_response_length = cmd_details[2]

        # TODO this is bit of a hack and will not work for extended commands
        if cmd[1] == 0x62:
            expected_response_length += 11

        # print "Sending {0}, expecting response length of {1}".format(cmd_details[0], expected_response_length)
        bytes_sent = self.send(cmd)

        timeout = 1
        to = time.time()
        tn = to
        response = ""
        n = 0

        while tn - to < timeout and n < expected_response_length:
            n = self.uart.inWaiting()
            tn = time.time()

        if n >= expected_response_length:
            # response += self.Ser.read(cmd_details[2])
            response += self.read()
            data = [ord(x) for x in response]
            dbg_msg = "cmd succesful: = {0}".format(data)
            return_data = data
        else:
            dbg_msg = "did not recive expected number of bytes within the time out period"

            response += self.read()
            data = [ord(x) for x in response]
            return_data = data
        self.Log.debug(dbg_msg)
        return [return_data, leftover_buffer]

    def send_sd_cmd(self, *args):
        cmd = build_sd_message(args[0], 15, args[1], args[2])
        res = self.query(cmd)
        parsed = parse(res[0])[0]
        if len(parsed) > 1:
            ack_received = parsed[1][2]["flag"][1][0] == 1
            if ack_received:
                return [parsed[1][2]["cmd2"], parsed]
        return None

    ###########################################################################################################
    # REDIS Thread
    #
    def _start_listner(self):
        self.Log.debug("Start redis sub channel and listen for commands send via redis")
        self._redis_subscriber_alive = True
        self.redis_subscriber_thread = threading.Thread(target=self.cmd_via_redis_subscriber)
        self.redis_subscriber_thread.setDaemon(True)
        self.redis_subscriber_thread.start()

    def _stop_thread(self):
        self.Log.debug("_stop_thread()")
        self._redis_subscriber_alive = False
        self.redis.publish(self.channel, "unsubscribe")
        self.redis_subscriber_thread.join(3)
        if self.redis_subscriber_thread.is_alive():
            self.Log.debug("thread still alive after 3 second join timeout")
        else:
            self.Log.debug("thread stopped")

    # publish cmd:insteon "{\"cmd\": \"SetSwOff\", \"addr\" : \"20.1f.11\"}"
    def cmd_via_redis_subscriber(self):
        self.Log.debug("cmd_via_redis_subscriber()")
        self.pubsub.subscribe(self.channel)

        while self._redis_subscriber_alive:
            try:
                for item in self.pubsub.listen():
                    self.Log.debug(item)
                    if item["data"] == "unsubscribe":
                        self.pubsub.unsubscribe()
                        self.Log.info("unsubscribed and finished")
                        break
                    else:
                        cmd = item["data"]
                        if isinstance(cmd, str):
                            self.Log.debug("cmd_via_redis_subscriber() cmd = {}".format(cmd))

                            try:
                                cmd_obj = deserialize(cmd)
                                if isinstance(cmd_obj, dict):
                                    self.Log.debug(cmd_obj)
                                    cmd_str = cmd_obj["cmd"]
                                    if cmd_str == "GetStatusOfAllDevices":
                                        res = self.GetStatusOfAllDevices()
                                        final_data = dict()
                                        timestamp = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
                                        final_data["timestamp"] = timestamp
                                        final_data["raw"] = res
                                        final_data["data"] = res
                                        M = Message("InsteonPLM")
                                        M.msg = final_data
                                        self.redis.publish("data", M.as_json())
                                    if cmd_str == "GetLightLevel":
                                        addr_str = cmd_obj["addr"]
                                        res = self.GetLightLevel(addr_str)
                                    if cmd_str == "SetLightLevel":
                                        addr_str = cmd_obj["addr"]
                                        val = cmd_obj["val"]
                                        res = self.SetLightLevel(addr_str, val)
                                    if cmd_str == "SetSwOn":
                                        addr_str = cmd_obj["addr"]
                                        res = self.SetSwOn(addr_str)
                                    if cmd_str == "SetSwOff":
                                        addr_str = cmd_obj["addr"]
                                        res = self.SetSwOff(addr_str)

                                else:
                                    addr = str2hex(cmd_obj[0])
                                    insteon_cmd = cmd_obj[1]
                                    val = cmd_obj[2]
                                    res = self.send_sd_cmd(addr, insteon_cmd, val)
                                    self.Log.debug(res)
                                    self.redis.publish(self.channel + "_res", serialize(res))

                            except Exception as E:
                                error_msg = {
                                    "source": "serinsteon:cmd_via_redis_subscriber",
                                    "function": "def run(self):",
                                    "error": E.message,
                                }
                                self.Log.error(error_msg)

                        else:
                            self.Log.debug(cmd)
            except Exception as E:
                error_msg = {"source": "InsteonSub", "function": "def run(self):", "error": E.message}
                self.Log.error(error_msg)
        self.Log.debug("end of cmd_via_redis_subscriber()")

    ###########################################################################################################
    # HIGH LEVEL FUNCTIONS
    #
    def Ping(self, addr):
        out = self.send_sd_cmd(addr, 0x0F, 0)
        if out is None:
            return False
        if out[0] == 0:
            return True
        else:
            return False

    def GetLightLevel(self, addr):
        self.Log.debug("GetLightLevel")
        out = self.send_sd_cmd(addr, 25, 0)
        if out is None:
            return -1
        return round(float(out[0]) / 2.55)

    def SetLightLevel(self, addr, level):
        self.Log.debug("SetLightLevel")
        if level < 0 or level > 100:
            raise ValueError("Light level must be between 0-100%")
        level = int(round(255 * float(level) / 100.0))
        out = self.send_sd_cmd(addr, 17, level)
        return round(float(out[0]) / 2.55)

    def SetSwOff(self, addr):
        self.Log.debug("SetSwOff({})".format(addr))
        out = self.send_sd_cmd(addr, 19, 0)
        return out

    def SetSwOn(self, addr):
        self.Log.debug("SetSwOn({})".format(addr))
        out = self.send_sd_cmd(addr, 17, 255)
        return out

    def GetStatusOfAllDevices(self, devices=devices):
        self.Log.debug("GetStatusOfAllDevices")
        data = []
        for k, v in devices.items():
            for i in range(2):
                stat = self.GetLightLevel(k)
                if stat == -1:
                    sleep(0.1)
                else:
                    break
            data.append([k, stat, v])
        return data
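
Because the PLM listens on the "cmd:insteon" Redis channel, devices can be driven from any process that can publish JSON commands, as the commented publish example above shows. A small sketch, assuming deserialize() on the receiving side accepts plain JSON as that comment suggests:

# Hedged sketch: driving the PLM listener over Redis pub/sub.
import json
import redis

r = redis.Redis()
# Shapes match the command dictionaries handled in cmd_via_redis_subscriber():
r.publish("cmd:insteon", json.dumps({"cmd": "SetSwOff", "addr": "20.1f.11"}))
r.publish("cmd:insteon", json.dumps({"cmd": "SetLightLevel", "addr": "20.1f.11", "val": 50}))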
Example #39
0
class PmLogHandler(log.CementLogHandler):  
    """
    PmLogHandler - override CementLogHandler to use logbook.

    This class uses the same configuration options as 
    :ref:`LoggingLogHandler <cement.ext.ext_logging>`
    """
    
    class Meta:
        interface = log.ILog
        """The interface that this class implements."""

        label = 'pmlog'
        """The string identifier of this handler."""

        namespace = "pm"
        """
        The logging namespace.  
        
        Note: Although Meta.namespace defaults to None, Cement will set this 
        to the application label (CementApp.Meta.label) if not set during
        setup.
        """

        file_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for the file logger."""

        console_format = "{record.time:%Y-%m-%d %H:%M} ({record.level_name}): {record.message}"
        """The logging format for the consoler logger."""

        debug_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for both file and console if ``debug==True``."""

        log_setup = None
        """Nested log setup placeholder"""

        level = 0
        """Global level for handlers"""

        clear_loggers = True
        """Whether of not to clear previous loggers first."""
        # These are the default config values, overridden by any '[log]' 
        # section in parsed config files.
        config_section = 'log'
        """
        The section of the application configuration that holds this handlers
        configuration.
        """
        
        config_defaults = dict(
            file=None,
            level='INFO',
            to_console=True,
            rotate=False,
            max_bytes=512000,
            max_files=4,
            )
        """
        The default configuration dictionary to populate the ``log`` section.
        """
            
    levels = ['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']


    def __init__(self, *args, **kw):
        super(PmLogHandler, self).__init__(*args, **kw)
        self.app = None
        
    def _setup(self, app_obj):
        super(PmLogHandler, self)._setup(app_obj)
        if self._meta.namespace is None:
            self._meta.namespace = self.app._meta.label

        self.backend = Logger(self._meta.namespace)

        # hack for application debugging
        if is_true(self.app._meta.debug):
            self.app.config.set('log', 'level', 'DEBUG')
            
        # Mainly for backwards compatibility since Logger level should
        # be NOTSET (level 0). Output level is controlled by handlers
        self.set_level(self.app.config.get('log', 'level'))
        
        # clear loggers?
        if is_true(self._meta.clear_loggers):
            self.clear_loggers()
            
        # console
        if is_true(self.app.config.get('log', 'to_console')):
            self._setup_console_log()
        
        # file
        if self.app.config.get('log', 'file'):
            self._setup_file_log()
        # nested setup
        self.backend.handlers.append(logbook.NullHandler(bubble=False))
        self.log_setup = logbook.NestedSetup(self.backend.handlers)
        with self._console_handler.applicationbound():
            self.debug("logging initialized for '%s' using PmLogHandler" % \
                           self._meta.namespace)


    def set_level(self, level):
        """
        Set the log level.  Must be one of the log levels configured in 
        self.levels which are ``['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']``.
        
        :param level: The log level to set.
        
        """
        level = level.upper()
        if level not in self.levels:
            level = 'INFO'
        level = logbook.lookup_level(level.upper())
        self.level = level
        
    def get_level(self):
        """Returns a string representation of the current log level."""
        return logbook.get_level_name(self.level)

    def _setup_console_log(self):
        """Add a console log handler."""
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.console_format
        console_handler = logbook.StderrHandler(
            format_string=fmt_string,
            level = logbook.lookup_level(self.get_level()),
            bubble = True)
        self._console_handler = console_handler
        self.backend.handlers.append(console_handler)

    def _setup_file_log(self):
        """Add a file log handler."""
        file_path = os.path.expandvars(fs.abspath(self.app.config.get('log', 'file')))
        log_dir = os.path.dirname(file_path)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.file_format

        if self.app.config.get('log', 'rotate'):
            from logbook import RotatingFileHandler
            file_handler = RotatingFileHandler(
                file_path, 
                max_size=int(self.app.config.get('log', 'max_bytes')), 
                backup_count=int(self.app.config.get('log', 'max_files')),
                format_string=fmt_string,
                level = logbook.lookup_level(self.get_level()),
                bubble = True,
                )
        else:
            from logbook import FileHandler
            file_handler = FileHandler(file_path,
                                       format_string=fmt_string,
                                       level = logbook.lookup_level(self.get_level()),
                                       bubble = True,
                                       )
        
        self._file_handler = file_handler
        self.backend.handlers.append(file_handler)

    def _get_logging_kwargs(self, namespace, **kw):
        if namespace is None:
            namespace = self._meta.namespace
        if 'extra' in kw.keys() and 'namespace' in kw['extra'].keys():
            pass
        elif 'extra' in kw.keys() and 'namespace' not in kw['extra'].keys():
            kw['extra']['namespace'] = namespace
        else:
            kw['extra'] = dict(namespace=namespace)
        
        return kw

    def info(self, msg, namespace=None, **kw):
        """
        Log to the INFO facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
            
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.info(msg, **kwargs)

    def debug(self, msg, namespace=None, **kw):
        """
        Log to the DEBUG facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.debug(msg, **kwargs)

    def warn(self, msg, namespace=None, **kw):
        """
        Log to the WARN facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.warn(msg, **kwargs)

    def critical(self, msg, namespace=None, **kw):
        """
        Log to the CRITICAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.critical(msg, **kwargs)

    def fatal(self, msg, namespace=None, **kw):
        """
        Log to the FATAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.fatal(msg, **kwargs)

    def error(self, msg, namespace=None, **kw):
        """
        Log to the ERROR facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
            """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.error(msg, **kwargs)
    
    ## NOTE: do we even need this for logbook?
    def clear_loggers(self):
        """Clear any previously configured logging namespaces.
        """
        if not self._meta.namespace:
            # _setup() probably wasn't run
            return
        self.handlers = []
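
To use the handler, it has to be registered with the Cement application and selected as the log handler before setup; the exact registration call differs between Cement versions, so the following is only a rough sketch in Cement 2 style:

# Hedged sketch; handler registration details vary across Cement releases.
from cement.core import foundation, handler

app = foundation.CementApp('pm', log_handler='pmlog')
handler.register(PmLogHandler)
app.setup()
app.log.info("logging now goes through logbook via PmLogHandler")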
Example #40
0
class Schema(object):
    url = None
    version = None
    homepage = None
    deps = []
    dirs_to_symlink = ['bin', 'sbin', 'etc', 'lib', 'include', 'var']

    def __init__(self, env):
        self.env = env.copy()
        self.log = Logger()

        if self.version is None:
            self.version = _figureout_version(self.url)

        if self.url.startswith('git://'):
            self.deps.append('git')
            self.retriver = self.git
        else:
            self.retriver = self.wget

        self.env['prefix'] = os.path.join(
            env['osbench_root'],
            'workbench',
            env['schema'],
            self.version
        )
        self.env['version'] = self.version

    # Methods to override
    def install(self):
        pass

    def is_installed(self):
        """Right now we'll simple check if program
        was built and installed into workbench."""
        return os.path.exists(self.env['prefix'])

    # Internals
    def _get_source(self):
        if not self.url:
            return

        self.log.info('Getting source code')

        with higher_log_indent():
            self.retriver(self.url)

            patch_names = sorted(name for name in dir(self) if name.startswith('patch_'))

            for name in patch_names:
                filename = name.replace('_', '-') + '.diff'
                with open(filename, 'w') as f:
                    f.write(self._get_patch(name))
                self.call('patch -p1 < ' + filename)

    def _get_patch(self, name):
        data = getattr(self, name)

        if len(data[:200].split('\n')) == 1:
            # then probably this is a file or URL
            patch_filename = os.path.join(
                os.path.dirname(self.env['schema_filename']),
                data
            )
            if os.path.exists(patch_filename):
                data = open(patch_filename).read()

        data = self._substitute_vars(data)
        return data

    def _get_environment_vars(self):
        """ Returns dict with variables to set in shell and to replace in the templates.
        """
        return dict(
            OSBENCH_ROOT=self.env['osbench_root'],
            OSBENCH_PREFIX=self.env['prefix'],
        )

    def _substitute_vars(self, text):
        for name, value in self._get_environment_vars().items():
            text = text.replace(name, value)
        return text

    def wget(self, url):
        self.call('wget "{0}"'.format(url))
        files = os.listdir('.')
        assert len(files) == 1

        filename = files[0]
        # remove url params if they were added to the filename
        stripped_filename = filename.split('?', 1)[0]

        if stripped_filename.endswith('.gz') or \
           stripped_filename.endswith('.tgz'):
            tar_options = '-zxvf'
        elif stripped_filename.endswith('.bz2'):
            tar_options = '-jxvf'
        else:
            raise RuntimeError('Unknown archive format.')

        self.call('tar {0} "{1}"'.format(tar_options, filename))
        os.unlink(filename)

        dirs = os.listdir('.')
        assert len(dirs) == 1

        os.chdir(dirs[0])

    def git(self, url):
        self.call("git clone '{0}'".format(url))
        dirs = os.listdir('.')
        assert len(dirs) == 1
        os.chdir(dirs[0])

    def _install_deps(self):
        if self.deps:
            self.log.info('Installing dependencies')

            with higher_log_indent():
                for dep in self.deps:
                    if dep.startswith('system.'):
                        self.call('sudo apt-get --yes install %s' % dep[7:])

    def _install(self, interactive=False):
        self.log.info('Installing "{schema}"'.format(**self.env))

        with higher_log_indent():
            self._install_deps()

            root = tempfile.mkdtemp(prefix='diy-')

            if not os.path.exists(self.env['prefix']):
                os.makedirs(self.env['prefix'])

            try:
                os.chdir(root)

                self._get_source()

                if interactive:
                    self.log.info('Entering into the interactive mode')
                    with higher_log_indent():
                        shell = os.environ['SHELL']
                        self.call('git init')
                        self.call('git add -A')
                        for name, value in self._get_environment_vars().items():
                            os.environ[name] = value
                        self.call(shell, pass_output=True)
                else:
                    self.log.info('Running schema\'s install method')
                    with higher_log_indent():
                        self.install()

                self._link()

            finally:
                self.call('rm -fr "{0}"'.format(root))

    def _uninstall(self):
        """Uninstalls the schema from the prefix.

        It removes symlinks and deletes installed files from workbenches.
        """
        self.log.info('Uninstalling "{schema}"'.format(**self.env))
        with higher_log_indent():
            self._unlink()
            self._delete()

    def _delete(self):
        shutil.rmtree(self.env['prefix'])

    def _join_path(self, *args):
        return os.path.normpath(os.path.join(*args))

    def _link(self):
        self.log.info('Making symlinks')

        with higher_log_indent():
            for dir_name in self.dirs_to_symlink:
                s_dir = self._join_path(self.env['prefix'], dir_name)
                t_dir = self._join_path(self.env['osbench_root'], dir_name)

                if not os.path.exists(t_dir):
                    self.log.debug('Creating directory "{0}"', t_dir)
                    os.makedirs(t_dir)

                if os.path.exists(s_dir):
                    for root, dirs, files in os.walk(s_dir):
                        # making root, relative
                        root = os.path.relpath(root, s_dir)

                        for dir_name in dirs:
                            full_dir_name = self._join_path(
                                t_dir, root, dir_name
                            )
                            if not os.path.exists(full_dir_name):
                                self.log.debug('Creating directory "{0}"', full_dir_name)
                                os.makedirs(full_dir_name)


                        for filename in files:
                            source = self._join_path(s_dir, root, filename)
                            target = self._join_path(t_dir, root, filename)

                            if os.path.exists(target):
                                if os.path.islink(target):
                                    if os.readlink(target) == source:
                                        self.log.debug('Symlink {target} already exists', target=target)
                                        continue
                                    else:
                                        self.log.warning('Unlinking file {target}, pointing to {source}',
                                                         target=target, source=source)
                                        os.unlink(target)
                                else:
                                    self.log.warning('File {target} already exists and it is not a link',
                                                     target=target)

                            if not os.path.exists(target):
                                self.log.debug('Creating symlink from "{source}" to {target}',
                                               source=source, target=target)
                                os.symlink(source, target)


    def _unlink(self):
        self.log.info('Removing symlinks')
        for ftype, name in self.get_files_to_unlink():
            if ftype == 'file':
                os.unlink(name)
            else:
                try:
                    os.rmdir(name)
                except OSError:
                    pass

    def get_files_to_unlink(self):
        with higher_log_indent():
            for dir_name in self.dirs_to_symlink:
                s_dir = self._join_path(self.env['prefix'], dir_name)
                t_dir = self._join_path(self.env['osbench_root'], dir_name)

                if os.path.exists(s_dir):
                    for root, dirs, files in os.walk(s_dir, topdown=False):
                        # making root, relative
                        root = os.path.relpath(root, s_dir)

                        for filename in files:
                            source = self._join_path(s_dir, root, filename)
                            target = self._join_path(t_dir, root, filename)

                            if os.path.islink(target) and \
                                os.path.realpath(target) == source:
                                yield ('file', target)

                        for dir_name in dirs:
                            full_dir_name = self._join_path(
                                t_dir, root, dir_name
                            )
                            yield ('dir', full_dir_name)

    # Utilities
    def call(self, command, pass_output=False):
        command = command.format(**self.env)
        self.log.info('Running "{0}"'.format(command))

        with higher_log_indent():
            options = dict(
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                shell=True,
            )
            if pass_output:
                del options['stdout']
                del options['stderr']

            proc = subprocess.Popen(
                command,
                **options
            )

            full_output = []
            if not pass_output:
                for line in proc.stdout:
                    line = line.decode('utf-8').strip(u'\n')
                    full_output.append(line)
                    self.log.debug(line)

            return_code = proc.wait()
            if return_code != 0:
                for line in full_output:
                    self.log.error(line)
                raise BadExitCode('subprogram exited with a non-zero exit code')

    def makedirs(self, *dirs):
        """Makes dirs inside the prefix.

        Use this command inside your `install` method.
        """
        for d in dirs:
            fullname = os.path.join(self.env['prefix'], d)
            if not os.path.exists(fullname):
                self.log.info('Creating directory "{0}".'.format(fullname))
                os.makedirs(fullname)

    def create_file_with_content(self, filename, content, mode=None):
        """Creates a file inside 'prefix'.

        Use this command inside your `install` method.
        Note: The target directory should already exist.
        Warning: if the file already exists, it will be overwritten.
        """
        filename = os.path.join(self.env['prefix'], filename)

        self.log.info('Creating file "{0}"'.format(filename))

        with open(filename, 'w') as f:
            f.write(self._substitute_vars(content))

        if mode is not None:
            self.call('chmod "{0}" "{1}"'.format(mode, filename))

    def copy_file(self, from_filename, to_filename, mode=None):
        """Copies file, to a directory inside 'prefix'.

        from_filename could be relative to the current directory, or 
        use variables to be expanded to self.env.

        Use this command inside your `install` method.
        Note: Source and target directory should exists.
        Warning: if there is some file already, it will be overwritten.
        """
        with open(from_filename.format(**self.env), 'r') as f:
            self.create_file_with_content(to_filename, f.read(), mode=mode)
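
A minimal sketch of how these helpers might be combined in a concrete schema. The `Schema` base-class name and the package being built are assumptions for illustration; only `deps`, `dirs_to_symlink`, `install()`, `call()`, `makedirs()` and `create_file_with_content()` come from the code above.

# Hypothetical schema; base-class name and package details are assumed.
class HelloSchema(Schema):
    deps = ['system.build-essential']     # installed via apt-get by _install_deps()
    dirs_to_symlink = ['bin', 'share']    # linked into the osbench root by _link()

    def install(self):
        # {prefix} is substituted from self.env by call()
        self.call('./configure --prefix="{prefix}"')
        self.call('make')
        self.call('make install')

        self.makedirs('etc')
        self.create_file_with_content('etc/hello.conf', 'greeting = hi\n', mode='644')
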
Example #41
0
from service.port import EfsPort, IPortUser, Port
from service.settings import HTMLExportSettings, SettingsProvider
from service.update import Update

disableOverrideEditor = False

try:
    from gui.propertyEditor import AttributeEditor
except ImportError as e:
    AttributeEditor = None
    print(("Error loading Attribute Editor: %s.\nAccess to Attribute Editor is disabled." % e.message))
    disableOverrideEditor = True

pyfalog = Logger(__name__)

pyfalog.debug("Done loading mainframe imports")


# dummy panel (no paint, no erase-background)
class PFPanel(wx.Panel):
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnBkErase)

    def OnPaint(self, event):
        event.Skip()

    def OnBkErase(self, event):
        pass
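
A short sketch of dropping the dummy panel into a frame; the frame and app wiring are illustrative assumptions, and only PFPanel comes from the code above.

# Hypothetical usage: the panel swallows EVT_ERASE_BACKGROUND to avoid flicker.
import wx

class DemoFrame(wx.Frame):
    def __init__(self):
        super().__init__(None, title="PFPanel demo")
        panel = PFPanel(self)               # class defined above
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)

if __name__ == '__main__':
    app = wx.App(False)
    DemoFrame().Show()
    app.MainLoop()
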
Example #42
0
            # Logging.
            logger.debug('--------SHIPMENT CONFIRM RESPONSE--------\n%s'
                         '\n--------END RESPONSE--------' %
                         etree.tostring(response, pretty_print=True))
        except PyUPSException, e:
            self.raise_user_error(unicode(e[0]))

        digest = ShipmentConfirm.extract_digest(response)

        shipment_accept = ShipmentAccept.shipment_accept_request_type(digest)

        shipment_accept_instance = carrier.ups_api_instance(call="accept")

        # Logging.
        logger.debug('Making Shipment Accept Request for'
                     'Shipment ID: {0} and Carrier ID: {1}'.format(
                         self.id, self.carrier.id))
        logger.debug('--------SHIPMENT ACCEPT REQUEST--------\n%s'
                     '\n--------END REQUEST--------' %
                     etree.tostring(shipment_accept, pretty_print=True))

        try:
            response = shipment_accept_instance.request(shipment_accept)

            # Logging.
            logger.debug('--------SHIPMENT ACCEPT RESPONSE--------\n%s'
                         '\n--------END RESPONSE--------' %
                         etree.tostring(response, pretty_print=True))
        except PyUPSException, e:
            self.raise_user_error(unicode(e[0]))
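
The request/response logging above leans on lxml's pretty printing. A standalone sketch of the same logging idea, with a made-up element instead of a real UPS request:

from logbook import Logger
from lxml import etree

logger = Logger('ups-demo')

# Build a tiny element purely for illustration.
request = etree.Element('ShipmentConfirmRequest')
etree.SubElement(request, 'RequestAction').text = 'ShipConfirm'

logger.debug('--------REQUEST--------\n%s\n--------END REQUEST--------' %
             etree.tostring(request, pretty_print=True).decode())
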
Example #43
0
            except urllib2.URLError, e:
                logger.error('lock URLError: %s, key %r, retries left %s' % (e, key, num_retries))
                num_retries -= 1
                time.sleep(random.random())
            else:
                # check if we lock right
                with self._lock:
                    if key in self._keys:
                        if self._keys[key] == self.id:
                            logger.warning('################## key %s already locked by me' % (key, ))
                        else:
                            logger.warning('------------------ key %s already locked by worker %s' % (key, self._keys[key]))
                    else:
                        self._keys[key] = self.id

                logger.debug('locked %r' % (key,))

                return True
        return False

    def unlock(self, key):
        num_retries = 10

        while num_retries > 0:
            try:
                self.iter += 1
                logger.debug('unlocking %r (iter=%s)' % (key, self.iter))
                request = DeleteRequest(
                    'http://127.0.0.1:900%d/%s?data=%s-%s' % (choose_server(self.id), key, self.id, self.iter)
                )
                result = urllib2.urlopen(request)
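
The lock/unlock methods above follow the same bounded retry loop with random jitter. A stripped-down sketch of that pattern; `try_acquire` is a stand-in callable, not part of the original code:

import random
import time

def with_retries(try_acquire, num_retries=10):
    """Retry a flaky operation a bounded number of times, sleeping with jitter."""
    while num_retries > 0:
        try:
            return try_acquire()
        except IOError:
            num_retries -= 1
            time.sleep(random.random())   # random jitter between attempts
    return False
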
Example #44
0
class BTgymRendering():
    """
    Handles BTgym Environment rendering.

    Note:
        Call `initialize_pyplot()` method before first render() call!
    """
    # Here we'll keep last rendered image for each rendering mode:
    rgb_dict = dict()
    render_modes = ['episode', 'human']
    params = dict(
        # Plotting controls, can be passed as kwargs:
        render_state_as_image=True,
        render_state_channel=0,
        render_size_human=(6, 3.5),
        render_size_state=(7, 3.5),
        render_size_episode=(12, 8),
        render_rowsmajor_episode=1,
        render_dpi=75,
        render_plotstyle='seaborn',
        render_cmap='PRGn',
        render_xlabel='Relative timesteps',
        render_ylabel='Value',
        render_title=
        'local step: {}, state observation min: {:.4f}, max: {:.4f}',
        render_boxtext=dict(
            fontsize=12,
            fontweight='bold',
            color='w',
            bbox={
                'facecolor': 'k',
                'alpha': 0.3,
                'pad': 3
            },
        ),
        plt_backend='Agg',  # Not used.
    )
    enabled = True
    ready = False

    def __init__(self, render_modes, **kwargs):
        """
        Plotting controls, can be passed as kwargs.

        Args:
            render_state_as_image=True,
            render_state_channel=0,
            render_size_human=(6, 3.5),
            render_size_state=(7, 3.5),
            render_size_episode=(12,8),
            render_dpi=75,
            render_plotstyle='seaborn',
            render_cmap='PRGn',
            render_xlabel='Relative timesteps',
            render_ylabel='Value',
            render_title='local step: {}, state observation min: {:.4f}, max: {:.4f}',
            render_boxtext=dict(fontsize=12,
                                fontweight='bold',
                                color='w',
                                bbox={'facecolor': 'k', 'alpha': 0.3, 'pad': 3},
                                )
        """
        # Update parameters with relevant kwargs:
        for key, value in kwargs.items():
            if key in self.params.keys():
                self.params[key] = value

        # Unpack it as attributes:
        for key, value in self.params.items():
            setattr(self, key, value)

        # Logging:
        if 'log_level' not in dir(self):
            self.log_level = WARNING

        StreamHandler(sys.stdout).push_application()
        self.log = Logger('BTgymRenderer', level=self.log_level)

        #from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        #self.FigureCanvas = FigureCanvas

        self.plt = None  # Will set it inside server process when calling initialize_pyplot().

        #self.plotter = BTgymPlotter() # Modified bt.Cerebro() plotter, to get episode renderings.

        # Set empty plugs for each render mode:
        self.render_modes = render_modes
        for mode in self.render_modes:
            self.rgb_dict[mode] = self.rgb_empty()

    def initialize_pyplot(self):
        """
        Call me before use!
        [Supposed to be done inside already running server process]
        """
        if not self.ready:
            from multiprocessing import Pipe
            self.out_pipe, self.in_pipe = Pipe()

            if self.plt is None:
                import matplotlib
                matplotlib.use(self.plt_backend, force=True)
                import matplotlib.pyplot as plt

                self.plt = plt

            self.ready = True

    def to_string(self, dictionary, excluded=[]):
        """
        Converts given dictionary to more-or-less good looking `text block` string.
        """
        text = ''
        for k, v in dictionary.items():
            if k not in excluded:
                if type(v) in [float]:
                    v = '{:.4f}'.format(v)
                text += '{}: {}\n'.format(k, v)
        return text[:-1]

    def rgb_empty(self):
        """
        Returns empty 'plug' image.
        """
        return (np.random.rand(100, 200, 3) * 255).astype(dtype=np.uint8)

    def parse_response(
        self,
        state,
        mode,
        reward,
        info,
        done,
    ):
        """
        Converts environment response to plotting attributes:
        state, title, text.
        """
        if len(state[mode].shape) <= 2:
            state = np.asarray(state[mode])

        elif len(state[mode].shape) == 3:
            if state[mode].shape[1] == 1:
                # Assume the 2nd dim (H) is a fake expansion of 1D input, so all channels can be rendered:
                state = np.asarray(state[mode][:, 0, :])

            else:
                # Assume it is HWC 2D input; only a single channel can be rendered:
                state = np.asarray(state[mode][:, :,
                                               self.render_state_channel])

        else:
            raise NotImplementedError(
                '2D rendering can be done for obs. state tensor with rank <= 3; ' +\
                'got state shape: {}'.format(np.asarray(state[mode]).shape))

        # Figure out how to deal with info output:
        try:
            assert type(info[-1]) == dict
            info_dict = info[-1]

        except AssertionError:
            try:
                assert type(info) == dict
                info_dict = info

            except AssertionError:
                try:
                    info_dict = {'info': str(info)}

                except:
                    info_dict = {}

        # Add records:
        info_dict.update(
            reward=reward,
            is_done=done,
        )

        # Try to get step information:
        try:
            current_step = info_dict['step']

        except:
            current_step = '--'

        # Set box text, excluding redundant fields:
        box_text = self.to_string(info_dict, excluded=['step'])

        # Set title output:
        title = self.render_title.format(current_step, state.min(),
                                         state.max())

        return state, title, box_text

    def render(self,
               mode_list,
               cerebro=None,
               step_to_render=None,
               send_img=True):
        """
        Renders given mode if possible, else
        just passes last already rendered image.
        Returns rgb image as numpy array.

        Logic:
            - If `cerebro` arg is received:
                render entire episode, using built-in backtrader plotting feature,
                update stored `episode` image.

            - If `step_to_render' arg is received:
                - if mode = 'raw_state':
                    render current state observation in conventional 'price' format,
                    update stored `raw_state` image;
                - if mode = something_else':
                    visualise observation as 'seen' by agent,
                    update stored 'agent' image.

        Returns:
             `mode` image.

        Note:
            It can actually return several modes in a single dict.
            This is prevented by the Gym render-mode convention, but it is done internally at the end of an episode.
        """
        if type(mode_list) == str:
            mode_list = [mode_list]

        if cerebro is not None:
            self.rgb_dict['episode'] = self.draw_episode(cerebro)
            self.log.debug('Episode rendering done.')
            # Try to render given episode:
            #try:
            # Get picture of entire episode:
            #fig = cerebro.plot(plotter=self.plotter,  # Modified plotter class, doesnt actually save anything.
            #                   savefig=True,
            #                   width=self.render_size_episode[0],
            #                   height=self.render_size_episode[1],
            #                   dpi=self.render_dpi,
            #                   use=None, #self.plt_backend,
            #                   iplot=False,
            #                   figfilename='_tmp_btgym_render.png',
            #                   )[0][0]

            #fig.canvas.draw()
            #rgb_array = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
            #self.rgb_dict['episode'] = rgb_array.reshape(fig.canvas.get_width_height()[::-1] + (3,))
            # Clean up:
            #self.plt.gcf().clear()
            #self.plt.close(fig)

            #except:
            # Just keep previous rendering
            #   pass

        if step_to_render is not None:
            # Perform step rendering:

            # Unpack:
            raw_state, state, reward, done, info = step_to_render

            for mode in mode_list:
                if mode in self.render_modes and mode not in [
                        'episode', 'human'
                ]:
                    # Render user-defined (former agent) mode state:
                    agent_state, title, box_text = self.parse_response(
                        state, mode, reward, info, done)
                    if self.render_state_as_image:
                        self.rgb_dict[mode] = self.draw_image(
                            agent_state,
                            figsize=self.render_size_state,
                            title='{} / {}'.format(mode, title),
                            box_text=box_text,
                            ylabel=self.render_ylabel,
                            xlabel=self.render_xlabel,
                        )
                    else:
                        self.rgb_dict[mode] = self.draw_plot(
                            agent_state,
                            figsize=self.render_size_state,
                            title='{} / {}'.format(mode, title),
                            box_text=box_text,
                            ylabel=self.render_ylabel,
                            xlabel=self.render_xlabel,
                        )

                if 'human' in mode:
                    # Render `human` state:
                    human_state, title, box_text = self.parse_response(
                        raw_state, mode, reward, info, done)
                    self.rgb_dict['human'] = self.draw_plot(
                        human_state,
                        figsize=self.render_size_human,
                        title=title,
                        box_text=box_text,
                        ylabel='Price',
                        xlabel=self.render_xlabel,
                        line_labels=['Open', 'High', 'Low', 'Close'],
                    )
            if send_img:
                return self.rgb_dict

        else:
            # this case is for internal use only;
            # here `mode_list` is supposed to contain several modes, so return a dictionary of arrays:
            return_dict = dict()
            for entry in mode_list:
                if entry in self.rgb_dict.keys():
                    # ...and it is legal:
                    return_dict[entry] = self.rgb_dict[entry]

                else:
                    return_dict[entry] = self.rgb_empty()

            return return_dict

    def draw_plot(self,
                  data,
                  figsize=(10, 6),
                  title='',
                  box_text='',
                  xlabel='X',
                  ylabel='Y',
                  line_labels=None):
        """
        Visualises environment state as 2d line plot.
        Returns image as rgb_array.

        Args:
            data:           np.array of shape [num_values, num_lines]
            figsize:        figure size (in.)
            title:
            box_text:
            xlabel:
            ylabel:
            line_labels:    iterable holding line legends as str

        Returns:
                rgb image as np.array of size [width, height, 3]
        """
        if line_labels is None:
            # If got no labels - make it numbers:
            if len(data.shape) > 1:
                line_labels = [
                    'line_{}'.format(i) for i in range(data.shape[-1])
                ]
            else:
                line_labels = ['line_0']
                data = data[:, None]
        else:
            assert len(line_labels) == data.shape[-1], \
                'Expected `line_labels` kwarg to consist of {} names, got: {}'.format(data.shape[-1], line_labels)

        fig = self.plt.figure(
            figsize=figsize,
            dpi=self.render_dpi,
        )
        #ax = fig.add_subplot(111)

        self.plt.style.use(self.render_plotstyle)
        self.plt.title(title)

        # Plot x axis as reversed time-step embedding:
        xticks = np.linspace(data.shape[0] - 1,
                             0,
                             int(data.shape[0]),
                             dtype=int)
        self.plt.xticks(xticks.tolist(), (-xticks[::-1]).tolist(),
                        visible=False)

        # Set every 5th tick label visible:
        for tick in self.plt.xticks()[1][::5]:
            tick.set_visible(True)

        self.plt.xlabel(xlabel)
        self.plt.ylabel(ylabel)
        self.plt.grid(True)

        # Switch off antialiasing:
        #self.plt.setp([ax.get_xticklines() + ax.get_yticklines() + ax.get_xgridlines() + ax.get_ygridlines()],antialiased=False)
        #self.plt.rcParams['text.antialiased']=False

        # Add Info box:
        self.plt.text(0, data.min(), box_text, **self.render_boxtext)

        for line, label in enumerate(line_labels):
            self.plt.plot(data[:, line], label=label)
        self.plt.legend()
        self.plt.tight_layout()

        fig.canvas.draw()

        # Save it to a numpy array:
        rgb_array = np.fromstring(fig.canvas.tostring_rgb(),
                                  dtype=np.uint8,
                                  sep='')

        # Clean up:
        self.plt.close(fig)
        #self.plt.gcf().clear()

        return rgb_array.reshape(fig.canvas.get_width_height()[::-1] + (3, ))

    def draw_image(self,
                   data,
                   figsize=(12, 6),
                   title='',
                   box_text='',
                   xlabel='X',
                   ylabel='Y',
                   line_labels=None):
        """
        Visualises environment state as image.
        Returns rgb_array.
        """
        fig = self.plt.figure(
            figsize=figsize,
            dpi=self.render_dpi,
        )
        #ax = fig.add_subplot(111)

        self.plt.style.use(self.render_plotstyle)
        self.plt.title(title)

        # Plot x axis as reversed time-step embedding:
        xticks = np.linspace(data.shape[0] - 1,
                             0,
                             int(data.shape[0]),
                             dtype=int)
        self.plt.xticks(xticks.tolist(), (-xticks[::-1]).tolist(),
                        visible=False)

        # Set every 5th tick label visible:
        for tick in self.plt.xticks()[1][::5]:
            tick.set_visible(True)

        #self.plt.yticks(visible=False)

        self.plt.xlabel(xlabel)
        self.plt.ylabel(ylabel)
        self.plt.grid(False)

        # Switch off antialiasing:
        # self.plt.setp([ax.get_xticklines() + ax.get_yticklines() + ax.get_xgridlines() + ax.get_ygridlines()],antialiased=False)
        # self.plt.rcParams['text.antialiased']=False
        #self.log.warning('render_data_shape:{}'.format(data.shape))

        # Add Info box:
        self.plt.text(0, data.shape[1] - 1, box_text, **self.render_boxtext)

        im = self.plt.imshow(data.T, aspect='auto', cmap=self.render_cmap)
        self.plt.colorbar(im, use_gridspec=True)

        self.plt.tight_layout()

        fig.canvas.draw()

        # Save it to a numpy array:
        rgb_array = np.fromstring(fig.canvas.tostring_rgb(),
                                  dtype=np.uint8,
                                  sep='')

        # Clean up:
        self.plt.close(fig)
        #self.plt.gcf().clear()

        #ax.cla()
        return rgb_array.reshape(fig.canvas.get_width_height()[::-1] + (3, ))

    def draw_episode(self, cerebro):
        """
        Hacky way to render episode.
        Due to backtrader/matplotlib memory leaks it has to be encapsulated in a separate process.
        Strange, but reliable. The PIDs are driving me crazy.

        Args:
            cerebro instance

        Returns:
            rgb array.
        """
        draw_process = DrawCerebro(
            cerebro=cerebro,
            width=self.render_size_episode[0],
            height=self.render_size_episode[1],
            dpi=self.render_dpi,
            result_pipe=self.in_pipe,
            rowsmajor=self.render_rowsmajor_episode,
        )

        draw_process.start()
        #print('Plotter PID: {}'.format(draw_process.pid))
        try:
            rgb_array = self.out_pipe.recv()

            draw_process.terminate()
            draw_process.join()

            return rgb_array

        except:
            return self.rgb_empty()
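
A rough usage sketch for the renderer above; the step tuple and its contents are fabricated only to show the call shape, and matplotlib with the Agg backend is assumed to be available.

import numpy as np

renderer = BTgymRendering(render_modes=['human', 'episode'])
renderer.initialize_pyplot()          # must be called before render()

# Fabricated step response: (raw_state, state, reward, done, info)
raw_state = {'human': np.random.rand(30, 4)}
state = {'human': np.random.rand(30, 4)}
step = (raw_state, state, 0.0, False, [{'step': 1}])

images = renderer.render(['human'], step_to_render=step)
print(images['human'].shape)          # rgb array, e.g. (height, width, 3)
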
Example #45
0
class HTTrader(WebTrader):
    config_path = os.path.dirname(__file__) + '/config/ht.json'

    def __init__(self, account='', password=''):
        super().__init__()
        self.__set_ip_and_mac()
        StreamHandler(sys.stdout).push_application()
        self.log = Logger('HTTrader')

    def read_config(self, path):
        self.account_config = helpers.file2dict(path)

    def autologin(self):
        """实现自动登录"""
        isLoginOk = self.login()
        if not isLoginOk:
            time.sleep(1)
            self.autologin()

    # TODO: needs refactoring
    def login(self):
        """Implements Huatai's automatic login."""
        self.s = requests.session()
        # Open the Huatai login page
        login_page_response = self.s.get(self.config['login_page'])
        # Fetch the captcha image
        verify_code_response = self.s.get(self.config['verify_code_api'], data=dict(ran=random.random()))
        # Save the captcha
        with open('vcode', 'wb') as f:
            f.write(verify_code_response.content)
        # Run tesseract to recognize it
        # Ubuntu 15.10 apparently cannot export TESSDATA_PREFIX
        # os.system('export TESSDATA_PREFIX="/usr/share/tesseract-ocr/tessdata/"; tesseract vcode result -psm 7')
        res = os.system('tesseract vcode result -psm 7')
        if res != 0:
            os.system('export TESSDATA_PREFIX="/usr/share/tesseract-ocr/tessdata/"; tesseract vcode result -psm 7')

        # Read the recognized captcha
        with open('result.txt') as f:
            vcode = f.readline()
            # Strip spaces and the trailing newline
            vcode = vcode.replace(' ', '')[:-1]
            if len(vcode) != 4:
                return False

        os.remove('result.txt')
        os.remove('vcode')

        # Set the parameters required for login
        params = dict(
            userName=self.account_config['userName'],
            trdpwd=self.account_config['trdpwd'],
            trdpwdEns=self.account_config['trdpwd'],
            servicePwd=self.account_config['servicePwd'],
            macaddr=self.__mac,
            lipInfo=self.__ip,
            vcode=vcode
        )
        params.update(self.config['login'])

        login_api_response = self.s.post(self.config['login_api'], params)

        if login_api_response.text.find('欢迎您登录') == -1:
            return False

        # Request the following page to obtain the uid and password needed for trading
        trade_info_response = self.s.get(self.config['trade_info_page'])

        # Look for the login information
        search_result = re.search('var data = "([=\w\+]+)"', trade_info_response.text)
        if search_result == None:
            return False

        need_data_index = 0
        need_data = search_result.groups()[need_data_index]
        bytes_data = base64.b64decode(need_data)
        str_data = bytes_data.decode('gbk')
        json_data = json.loads(str_data)

        # TODO: a better implementation? Build a dict directly?
        self.__fund_account = json_data['fund_account']
        self.__client_risklevel = json_data['branch_no']
        self.__sh_stock_account = json_data['item'][0]['stock_account']
        self.__sh_exchange_type = json_data['item'][0]['exchange_type']
        self.__sz_stock_account = json_data['item'][1]['stock_account']
        self.__sz_exchange_type = json_data['item'][1]['exchange_type']
        self.__op_station = json_data['op_station']
        self.__trdpwd = json_data['trdpwd']
        self.__uid = json_data['uid']
        self.__branch_no = json_data['branch_no']

        return True

    def __set_ip_and_mac(self):
        """获取本机IP和MAC地址"""
        # 获取ip
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("baidu.com",80))
        self.__ip = s.getsockname()[0]
        s.close()

        # Get the MAC address
        self.__mac = ("".join(c + "-" if i % 2 else c for i, c in enumerate(hex(uuid.getnode())[2:].zfill(12)))[:-1]).upper()

    # TODO: not implemented for Huatai
    def __keepalive(self):
        """Start the process that keeps the session alive."""
        self.heart_process = Process(target=self.__send_heartbeat)
        self.heart_process.start()

    def __send_heartbeat(self):
        """每隔30秒查询指定接口保持 token 的有效性"""
        while True:
            data = self.get_balance()
            if type(data) == dict and data.get('error_no'):
                break
            time.sleep(30)

    def exit(self):
        """结束保持 token 在线的进程"""
        if self.heart_process.is_alive():
            self.heart_process.terminate()

    @property
    def balance(self):
        return self.get_balance()

    def get_balance(self):
        """获取账户资金状况"""
        return self.__do(self.config['balance'])

    @property
    def position(self):
        return self.get_position()

    def get_position(self):
        """获取持仓"""
        return self.__do(self.config['position'])

    @property
    def entrust(self):
        return self.get_entrust()

    def get_entrust(self):
        """获取当日委托列表"""
        return self.__do(self.config['entrust'])

    def cancel_entrust(self, entrust_no):
        """撤单
        :param entrust_no: 委托单号"""
        cancel_params = dict(
            self.config['cancel_entrust'],
            password=self.__trdpwd,
            entrust_no=entrust_no
        )
        return self.__do(cancel_params)


    # TODO: implement the various entrust types for buying and selling
    def buy(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
        """买入卖出股票
        :param stock_code: 股票代码
        :param price: 卖出价格
        :param amount: 卖出总金额 由 volume / price 取 100 整数, 若指定 price 则此参数无效
        :param entrust_prop: 委托类型,暂未实现,默认为限价委托
        """
        params = dict(
            self.config['buy'],
            entrust_amount=amount if amount else volume // price // 100 * 100
        )
        return self.__buy_or_sell(stock_code, price, entrust_prop=entrust_prop, other=params)

    def sell(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
        """卖出股票
        :param stock_code: 股票代码
        :param price: 卖出价格
        :param amount: 卖出总金额 由 volume / price 取整, 若指定 price 则此参数无效
        :param entrust_prop: 委托类型,暂未实现,默认为限价委托
        """
        params = dict(
            self.config['sell'],
            entrust_amount=amount if amount else volume // price
        )
        return self.__buy_or_sell(stock_code, price, entrust_prop=entrust_prop, other=params)

    def __buy_or_sell(self, stock_code, price, entrust_prop, other):
        need_info = self.__get_trade_need_info(stock_code)
        return self.__do(dict(
                other,
                stock_account=need_info['stock_account'],  # Shanghai/Shenzhen account
                exchange_type=need_info['exchange_type'],  # Shanghai market = 1, Shenzhen market = 2
                entrust_prop=0,  # entrust type
                stock_code='{:0>6}'.format(stock_code),  # stock code, right-aligned to width 6, zero-padded on the left
                entrust_price=price
            ))

    def __get_trade_need_info(self, stock_code):
        """获取股票对应的证券市场和帐号"""
        # 获取股票对应的证券市场
        exchange_type = self.__sh_exchange_type if helpers.get_stock_type(stock_code) == 'sh' else self.__sz_exchange_type
        # 获取股票对应的证券帐号
        stock_account = self.__sh_stock_account if exchange_type == self.__sh_exchange_type else self.__sz_stock_account
        return dict(
            exchange_type=exchange_type,
            stock_account=stock_account
        )

    def __do(self, params):
        """发起对 api 的请求并过滤返回结果"""
        basic_params = self.__create_basic_params()
        basic_params.update(params)
        response_data = self.__request(basic_params)
        format_json_data = self.__format_reponse_data(response_data)
        return self.__fix_error_data(format_json_data)

    def __create_basic_params(self):
        """生成基本的参数"""
        basic_params = dict(
            uid=self.__uid,
            version=1,
            custid=self.account_config['userName'],
            op_branch_no=self.__branch_no,
            branch_no=self.__branch_no,
            op_entrust_way=7,
            op_station=self.__op_station,
            fund_account=self.account_config['userName'],
            password=self.__trdpwd,
            identity_type='',
            ram=random.random()
        )
        return basic_params

    def __request(self, params):
        """请求并获取 JSON 数据"""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
        }
        params_str = urllib.parse.urlencode(params)
        unquote_str = urllib.parse.unquote(params_str)

        self.log.debug(unquote_str)

        b64params = base64.b64encode(unquote_str.encode()).decode()
        r = self.s.get('{prefix}/?{b64params}'.format(prefix=self.trade_prefix, b64params=b64params), headers=headers)
        self.log.debug(r.url)
        return r.content

    def __format_reponse_data(self, data):
        """格式化返回的 json 数据"""
        bytes_str = base64.b64decode(data)
        self.log.debug(bytes_str)
        gbk_str = bytes_str.decode('gbk')
        return json.loads(gbk_str)

    def __fix_error_data(self, data):
        """若是返回错误则不进行数据提取"""
        if data['cssweb_code'] == 'error':
            return data
        t1 = data['item']
        return t1[:-1]
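
A hedged usage sketch. The account-config file name is an assumption, as are any details beyond the keys login() actually reads (userName, trdpwd, servicePwd), and tesseract must be installed for the captcha OCR to work.

trader = HTTrader()
trader.read_config('ht_account.json')   # hypothetical JSON file with userName/trdpwd/servicePwd
trader.autologin()                       # retries until the captcha is recognised and login succeeds

print(trader.balance)                    # account funds
print(trader.position)                   # current positions
trader.buy('162411', price=0.55, amount=100)   # buy 100 shares at a limit price
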
Example #46
0
    _app.register_blueprint(helloworld.blueprint)
    db.init_app(_app)

    _app.config.from_object(config_object)
    override_env_name = 'FLASK_SEED_CONFIG'
    if _app.config.from_envvar(override_env_name, silent=True):
        path = os.environ[override_env_name]
        print 'Overriding config by environment variable: %s = %s' % (override_env_name, path)

    return _app


### MAIN

config_name = os.getenv('CONFIG', 'Default')
app = create_app(config_object=config.defined[config_name])

if app.config['LOG_LEVEL'] == 'INFO':
    log_setup = utils.LoggingSetup(app.config['LOG_LEVEL'])
else:
    log_setup = utils.ProdLoggingSetup(app.config['LOG_LEVEL'],
                                       app.config['LOG_DIR'] + '%s.log' % app.config['APP_NAME'])

nested_log_setup = log_setup.get_default_setup()

with nested_log_setup.applicationbound():
    log.debug('Starting application...')

    if __name__ == '__main__':
        app.debug = True
        app.run()
Example #47
0
class Admin(object):
    """Administrative task object."""

    def __init__(self, conf):
        """conf: dict, yaml parameters."""
        self.conf = conf
        handler = TimedRotatingFileHandler(conf.log_file, date_format="%Y-%m-%d")
        handler.push_application()
        self.logger = Logger("Firetower-admin")
        self.queue = redis_util.get_redis_conn(host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db)
        self.classifier = classifier.Levenshtein()
        self.last_archive_run = None

    # Wrote this to avoid numpy dependencies until we absolutely require them.
    def mean_stdev(self, items):
        """Return mean and stdev of numerical list of items.

        Args:
            items: list, list of numbers (int or float) to perform calculations upon.
        Returns:
            tuple of (mean of items, standard deviation of items about the mean).
        """
        n, mean, std = 0, 0, 0
        if items:
            n = len(items)

        else:
            return (0.0, 0.0)

        for item in items:
            mean = mean + item

        mean = mean / float(n)
        for item in items:
            std = std + (item - mean) ** 2

        std = sqrt(std / float(n))  # Avoid DivideByZero by not subtracting one.

        return mean, std

    def calc_stats(self):
        """Calculate and save mean and standard deviation."""

        categories = category.Category.get_all_categories(self.queue)
        for cat in categories:
            all_events = cat.events.range(0, -1)
            ratios = []
            for event in all_events:
                event = json.loads(event)
                ratios.append(self.classifier.str_ratio(cat.signature, event["sig"]))
            cat.mean, cat.stdev = self.mean_stdev(ratios)

    def archive_events(self):
        """Run the timeseries archiving for all categories.

        This code moves counts from the atomically incrementable HASHes
        to Sorted Sets (which can be sliced by date)."""

        now = datetime.datetime.utcnow()
        if self.last_archive_run is None:
            self.last_archive_run = datetime.datetime.utcnow()
            return

        delta = datetime.timedelta(seconds=self.conf.archive_time)
        if self.last_archive_run < (now - delta):
            self.logger.debug("Archiving counts older than %s seconds" % (self.conf.archive_time,))
            for c in category.Category.get_all_categories(self.queue):
                self.logger.debug("Archiving for %s category" % (c.cat_id))
                c.timeseries.archive_cat_counts(self.last_archive_run)

    def run(self, args):
        """Run set of jobs specified on commandline or config."""

        self.logger.info("Running with tasks: %s" % (",".join(args)))
        for arg in args:
            if arg not in TASKS:
                self.logger.error("Specified unknown task: %s" % (arg,))
                sys.exit(1)
            if arg == "calc_stats":
                self.logger.info("Calculating stats for each category")
                self.calc_stats()
            if arg == "archive_events":
                self.archive_events()
                self.logger.info("Archiving old data from each category")
Example #48
0
class Scheduler(object):
    scheduler_key = 'rq:scheduler'
    scheduled_jobs_key = 'rq:scheduler:scheduled_jobs'

    def __init__(self, queue_name='default', interval=60, connection=None):
        if connection is None:
            connection = get_current_connection()
        self.connection = connection
        self.queue_name = queue_name
        self._interval = interval
        self.log = Logger('scheduler')

    def register_birth(self):
        if self.connection.exists(self.scheduler_key) and \
                not self.connection.hexists(self.scheduler_key, 'death'):
            raise ValueError("There's already an active RQ scheduler")
        key = self.scheduler_key
        now = time.time()
        with self.connection.pipeline() as p:
            p.delete(key)
            p.hset(key, 'birth', now)
            p.execute()

    def register_death(self):
        """Registers its own death."""
        with self.connection.pipeline() as p:
            p.hset(self.scheduler_key, 'death', time.time())
            p.expire(self.scheduler_key, 60)
            p.execute()

    def _install_signal_handlers(self):
        """
        Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def stop(signum, frame):
            """
            Register scheduler's death and exit.
            """
            self.log.debug('Shutting down RQ scheduler...')
            self.register_death()
            raise SystemExit()

        signal.signal(signal.SIGINT, stop)
        signal.signal(signal.SIGTERM, stop)

    def _create_job(self, func, args=None, kwargs=None, commit=True,
                    result_ttl=None):
        """
        Creates an RQ job and saves it to Redis.
        """
        if func.__module__ == '__main__':
            raise ValueError(
                    'Functions from the __main__ module cannot be processed '
                    'by workers.')
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        job = Job.create(func, args=args, connection=self.connection,
                         kwargs=kwargs, result_ttl=result_ttl)
        job.origin = self.queue_name
        if commit:
            job.save()
        return job

    def enqueue_at(self, scheduled_time, func, *args, **kwargs):
        """
        Pushes a job to the scheduler queue. The scheduled queue is a Redis sorted
        set ordered by timestamp - which in this case is job's scheduled execution time.

        Usage:

        from datetime import datetime
        from redis import Redis
        from rq.scheduler import Scheduler

        from foo import func

        redis = Redis()
        scheduler = Scheduler(queue_name='default', connection=redis)
        scheduler.enqueue_at(datetime(2020, 1, 1), func, 'argument', keyword='argument')
        """
        job = self._create_job(func, args=args, kwargs=kwargs)
        self.connection.zadd(self.scheduled_jobs_key, job.id,
                             int(scheduled_time.strftime('%s')))
        return job

    def enqueue_in(self, time_delta, func, *args, **kwargs):
        """
        Similar to ``enqueue_at``, but accepts a timedelta instead of datetime object.
        The job's scheduled execution time will be calculated by adding the timedelta
        to datetime.now().
        """
        job = self._create_job(func, args=args, kwargs=kwargs)
        self.connection.zadd(self.scheduled_jobs_key, job.id,
                             int((datetime.now() + time_delta).strftime('%s')))
        return job

    def enqueue_periodic(self, scheduled_time, interval, repeat, func,
                         *args, **kwargs):
        """
        Schedule a job to be periodically executed, at a certain interval.
        """
        warnings.warn("'enqueue_periodic()' has been deprecated in favor of '.schedule()'"
                      "and will be removed in a future release.", DeprecationWarning)
        return self.schedule(scheduled_time, func, args=args, kwargs=kwargs,
                            interval=interval, repeat=repeat)

    def schedule(self, scheduled_time, func, args=None, kwargs=None,
                interval=None, repeat=None, result_ttl=None):
        """
        Schedule a job to be periodically executed, at a certain interval.
        """
        # Set result_ttl to -1 for periodic jobs, if result_ttl not specified
        if interval is not None and result_ttl is None:
            result_ttl = -1
        job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                               result_ttl=result_ttl)
        if interval is not None:
            job.interval = int(interval)
        if repeat is not None:
            job.repeat = int(repeat)
        if repeat and interval is None:
            raise ValueError("Can't repeat a job without interval argument")
        job.save()
        self.connection.zadd(self.scheduled_jobs_key, job.id,
                             int(scheduled_time.strftime('%s')))
        return job

    def enqueue(self, scheduled_time, func, args=None, kwargs=None,
                interval=None, repeat=None, result_ttl=None):
        """
        This method is deprecated and only left in as a backwards compatibility
        alias for schedule().
        """
        warnings.warn("'enqueue()' has been deprecated in favor of '.schedule()'"
                      "and will be removed in a future release.", DeprecationWarning)
        return self.schedule(scheduled_time, func, args, kwargs, interval,
                             repeat, result_ttl)

    def cancel(self, job):
        """
        Pulls a job from the scheduler queue. This function accepts either a
        job_id or a job instance.
        """
        if isinstance(job, basestring):
            self.connection.zrem(self.scheduled_jobs_key, job)
        else:
            self.connection.zrem(self.scheduled_jobs_key, job.id)

    def __contains__(self, item):
        """
        Returns a boolean indicating whether the given job instance or job id is
        scheduled for execution.
        """
        job_id = item
        if isinstance(item, Job):
            job_id = item.id
        return self.connection.zscore(self.scheduled_jobs_key, job_id) is not None

    def change_execution_time(self, job, date_time):
        """
        Change a job's execution time. Wrap this in a transaction to prevent race condition.
        """
        with self.connection.pipeline() as pipe:
            while 1:
                try:
                    pipe.watch(self.scheduled_jobs_key)
                    if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                        raise ValueError('Job not in scheduled jobs queue')
                    pipe.zadd(self.scheduled_jobs_key, job.id, int(date_time.strftime('%s')))
                    break
                except WatchError:
                    # If job is still in the queue, retry otherwise job is already executed
                    # so we raise an error
                    if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                        raise ValueError('Job not in scheduled jobs queue')
                    continue

    def get_jobs(self, until=None, with_times=False):
        """
        Returns a list of job instances that will be queued until the given time.
        If no 'until' argument is given all jobs are returned. This function
        accepts datetime and timedelta instances as well as integers representing
        epoch values.
        If with_times is True a list of tuples consisting of the job instance and
        its scheduled execution time is returned.
        """
        def epoch_to_datetime(epoch):
            return datetime.fromtimestamp(float(epoch))

        if until is None:
            until = "+inf"
        elif isinstance(until, datetime):
            until = until.strftime('%s')
        elif isinstance(until, timedelta):
            until = (datetime.now() + until).strftime('%s')
        job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0,
                                                until, withscores=with_times,
                                                score_cast_func=epoch_to_datetime)
        if not with_times:
            job_ids = zip(job_ids, repeat(None))
        jobs = []
        for job_id, sched_time in job_ids:
            try:
                job = Job.fetch(job_id, connection=self.connection)
                if with_times:
                    jobs.append((job, sched_time))
                else:
                    jobs.append(job)
            except NoSuchJobError:
                # Delete jobs that aren't there from scheduler
                self.cancel(job_id)
        return jobs

    def get_jobs_to_queue(self, with_times=False):
        """
        Returns a list of job instances that should be queued
        (score lower than current timestamp).
        If with_times is True a list of tuples consisting of the job instance and
        its scheduled execution time is returned.
        """
        return self.get_jobs(int(time.strftime('%s')), with_times=with_times)

    def get_queue_for_job(self, job):
        """
        Returns a queue to put job into.
        """
        key = '{0}{1}'.format(Queue.redis_queue_namespace_prefix, job.origin)
        return Queue.from_queue_key(key, connection=self.connection)

    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue. In addition, it puts the job
        back into the scheduler if needed.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))

        interval = getattr(job, 'interval', None)
        repeat = getattr(job, 'repeat', None)

        # If job is a repeated job, decrement counter
        if repeat:
            job.repeat = int(repeat) - 1
        job.enqueued_at = datetime.now()
        job.save()

        queue = self.get_queue_for_job(job)
        queue.push_job_id(job.id)
        self.connection.zrem(self.scheduled_jobs_key, job.id)

        if interval:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.repeat == 0:
                    return
            self.connection.zadd(self.scheduled_jobs_key, job.id,
                int(datetime.now().strftime('%s')) + int(interval))

    def enqueue_jobs(self):
        """
        Move scheduled jobs into queues.
        """
        jobs = self.get_jobs_to_queue()
        for job in jobs:
            self.enqueue_job(job)
        return jobs

    def run(self):
        """
        Periodically check whether there's any job that should be put in the queue (score
        lower than current time).
        """
        self.log.debug('Running RQ scheduler...')
        self.register_birth()
        self._install_signal_handlers()
        try:
            while True:
                self.enqueue_jobs()
                time.sleep(self._interval)
        finally:
            self.register_death()
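
A short usage sketch building on the docstring above. `myapp.tasks.send_report` is a placeholder for any importable job function (the scheduler refuses functions defined in __main__), and a local Redis is assumed:

from datetime import datetime, timedelta
from redis import Redis

from myapp.tasks import send_report   # placeholder job function

redis_conn = Redis()
scheduler = Scheduler(queue_name='default', interval=60, connection=redis_conn)

scheduler.enqueue_at(datetime(2020, 1, 1), send_report, 'weekly')   # run once at a fixed time
scheduler.enqueue_in(timedelta(minutes=10), send_report, 'adhoc')   # run once, ten minutes from now
scheduler.schedule(datetime.utcnow(), send_report, interval=3600)   # repeat every hour

scheduler.run()   # blocks; moves due jobs onto the RQ queue every `interval` seconds
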
Example #49
0
server_sock, port = create_server()

while True:
	print("Waiting for connection on RFCOMM channel %d" % port)
	
	try:
		client_sock, client_info = server_sock.accept()
		print("Accepted connection from ", client_info)
	
	except KeyboardInterrupt:
		break

	try:
		while True:
			data = client_sock.recv(2048).decode(encoding='UTF-8')
			log.debug(data)
			if data == "quit":
				break
			if data == "is_server_present":
				client_sock.send("1".encode(encoding='UTF-8'))

			time.sleep(0.0006)
	except IOError:
		pass
	
	print("disconnected")
	
	client_sock.close()
	print("all done")

server_sock.close()
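
A matching client sketch for the RFCOMM server above, assuming PyBluez; the server MAC address is a placeholder and the channel must match the one the server prints:

import bluetooth

server_addr = "00:11:22:33:44:55"   # placeholder: address of the machine running the server
port = 1                            # must match the RFCOMM channel printed by the server

sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((server_addr, port))

sock.send("is_server_present".encode(encoding='UTF-8'))
reply = sock.recv(2048).decode(encoding='UTF-8')
print("server replied:", reply)

sock.send("quit".encode(encoding='UTF-8'))
sock.close()
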
Example #50
0
from . import migration
from eos import config
from logbook import Logger
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import sessionmaker

pyfalog = Logger(__name__)
pyfalog.info("Initializing database")
pyfalog.info("Gamedata connection: {0}", config.gamedata_connectionstring)
pyfalog.info("Saveddata connection: {0}", config.saveddata_connectionstring)


class ReadOnlyException(Exception):
    pass


pyfalog.debug('Initializing gamedata')
gamedata_connectionstring = config.gamedata_connectionstring
if callable(gamedata_connectionstring):
    gamedata_engine = create_engine("sqlite://",
                                    creator=gamedata_connectionstring,
                                    echo=config.debug)
else:
    gamedata_engine = create_engine(gamedata_connectionstring,
                                    echo=config.debug)

gamedata_meta = MetaData()
gamedata_meta.bind = gamedata_engine
gamedata_session = sessionmaker(bind=gamedata_engine,
                                autoflush=False,
                                expire_on_commit=False)()
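
The callable-connectionstring branch above relies on SQLAlchemy's `creator` hook. A minimal standalone sketch of that pattern; the database file name is arbitrary:

import sqlite3

from sqlalchemy import create_engine, text

def connect():
    # Any callable returning a DBAPI connection can be passed as `creator`.
    return sqlite3.connect('gamedata.db')

engine = create_engine("sqlite://", creator=connect, echo=False)
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).fetchone())
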
Example #51
0
class Worker(object):
    redis_worker_namespace_prefix = 'rq:worker:'
    redis_workers_keys = 'rq:workers'

    @classmethod
    def all(cls, connection=None):
        """Returns an iterable of all Workers.
        """
        if connection is None:
            connection = get_current_connection()
        reported_working = connection.smembers(cls.redis_workers_keys)
        workers = [cls.find_by_key(key, connection) for key in
                reported_working]
        return compact(workers)

    @classmethod
    def find_by_key(cls, worker_key, connection=None):
        """Returns a Worker instance, based on the naming conventions for
        naming the internal Redis keys.  Can be used to reverse-lookup Workers
        by their Redis keys.
        """
        prefix = cls.redis_worker_namespace_prefix
        name = worker_key[len(prefix):]
        if not worker_key.startswith(prefix):
            raise ValueError('Not a valid RQ worker key: %s' % (worker_key,))

        if connection is None:
            connection = get_current_connection()
        if not connection.exists(worker_key):
            return None

        name = worker_key[len(prefix):]
        worker = cls([], name, connection=connection)
        queues = connection.hget(worker.key, 'queues')
        worker._state = connection.hget(worker.key, 'state') or '?'
        if queues:
            worker.queues = [Queue(queue, connection=connection)
                                for queue in queues.split(',')]
        return worker


    def __init__(self, queues, name=None, default_result_ttl=DEFAULT_RESULT_TTL,
            connection=None, exc_handler=None):  # noqa
        if connection is None:
            connection = get_current_connection()
        self.connection = connection
        if isinstance(queues, Queue):
            queues = [queues]
        self._name = name
        self.queues = queues
        self.validate_queues()
        self._exc_handlers = []
        self.default_result_ttl = default_result_ttl
        self._state = 'starting'
        self._is_horse = False
        self._horse_pid = 0
        self._stopped = False
        self.log = Logger('worker')
        self.failed_queue = get_failed_queue(connection=self.connection)

        # By default, push the "move-to-failed-queue" exception handler onto
        # the stack
        self.push_exc_handler(self.move_to_failed_queue)
        if exc_handler is not None:
            self.push_exc_handler(exc_handler)


    def validate_queues(self):  # noqa
        """Sanity check for the given queues."""
        if not iterable(self.queues):
            raise ValueError('Argument queues not iterable.')
        for queue in self.queues:
            if not isinstance(queue, Queue):
                raise NoQueueError('Give each worker at least one Queue.')

    def queue_names(self):
        """Returns the queue names of this worker's queues."""
        return map(lambda q: q.name, self.queues)

    def queue_keys(self):
        """Returns the Redis keys representing this worker's queues."""
        return map(lambda q: q.key, self.queues)


    @property  # noqa
    def name(self):
        """Returns the name of the worker, under which it is registered to the
        monitoring system.

        By default, the name of the worker is constructed from the current
        (short) host name and the current PID.
        """
        if self._name is None:
            hostname = socket.gethostname()
            shortname, _, _ = hostname.partition('.')
            self._name = '%s.%s' % (shortname, self.pid)
        return self._name

    @property
    def key(self):
        """Returns the worker's Redis hash key."""
        return self.redis_worker_namespace_prefix + self.name

    @property
    def pid(self):
        """The current process ID."""
        return os.getpid()

    @property
    def horse_pid(self):
        """The horse's process ID.  Only available in the worker.  Will return
        0 in the horse part of the fork.
        """
        return self._horse_pid

    @property
    def is_horse(self):
        """Returns whether or not this is the worker or the work horse."""
        return self._is_horse

    def procline(self, message):
        """Changes the current procname for the process.

        This can be used to make `ps -ef` output more readable.
        """
        setprocname('rq: %s' % (message,))


    def register_birth(self):  # noqa
        """Registers its own birth."""
        self.log.debug('Registering birth of worker %s' % (self.name,))
        if self.connection.exists(self.key) and \
                not self.connection.hexists(self.key, 'death'):
            raise ValueError(
                    'There exists an active worker named \'%s\' '
                    'already.' % (self.name,))
        key = self.key
        now = time.time()
        queues = ','.join(self.queue_names())
        with self.connection.pipeline() as p:
            p.delete(key)
            p.hset(key, 'birth', now)
            p.hset(key, 'queues', queues)
            p.sadd(self.redis_workers_keys, key)
            p.execute()

    def register_death(self):
        """Registers its own death."""
        self.log.debug('Registering death')
        with self.connection.pipeline() as p:
            # We cannot use self.state = 'dead' here, because that would
            # rollback the pipeline
            p.srem(self.redis_workers_keys, self.key)
            p.hset(self.key, 'death', time.time())
            p.expire(self.key, 60)
            p.execute()

    def set_state(self, new_state):
        self._state = new_state
        self.connection.hset(self.key, 'state', new_state)

    def get_state(self):
        return self._state

    state = property(get_state, set_state)

    @property
    def stopped(self):
        return self._stopped

    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def request_force_stop(signum, frame):
            """Terminates the application (cold shutdown).
            """
            self.log.warning('Cold shut down.')

            # Take down the horse with the worker
            if self.horse_pid:
                msg = 'Taking down horse %d with me.' % self.horse_pid
                self.log.debug(msg)
                try:
                    os.kill(self.horse_pid, signal.SIGKILL)
                except OSError as e:
                    # ESRCH ("No such process") is fine with us
                    if e.errno != errno.ESRCH:
                        raise
                    self.log.debug('Horse already down.')
            raise SystemExit()

        def request_stop(signum, frame):
            """Stops the current worker loop but waits for child processes to
            end gracefully (warm shutdown).
            """
            self.log.debug('Got signal %s.' % signal_name(signum))

            signal.signal(signal.SIGINT, request_force_stop)
            signal.signal(signal.SIGTERM, request_force_stop)

            msg = 'Warm shut down requested.'
            self.log.warning(msg)

            # If shutdown is requested in the middle of a job, wait until the
            # job finishes before shutting down
            if self.state == 'busy':
                self._stopped = True
                self.log.debug('Stopping after current horse is finished. '
                               'Press Ctrl+C again for a cold shutdown.')
            else:
                raise StopRequested()

        signal.signal(signal.SIGINT, request_stop)
        signal.signal(signal.SIGTERM, request_stop)


    def work(self, burst=False):  # noqa
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.log.info('RQ worker started, version %s' % VERSION)
        self.state = 'starting'
        try:
            while True:
                if self.stopped:
                    self.log.info('Stopping on request.')
                    break
                self.state = 'idle'
                qnames = self.queue_names()
                self.procline('Listening on %s' % ','.join(qnames))
                self.log.info('')
                self.log.info('*** Listening on %s...' % \
                        green(', '.join(qnames)))
                wait_for_job = not burst
                try:
                    result = Queue.dequeue_any(self.queues, wait_for_job, \
                            connection=self.connection)
                    if result is None:
                        break
                except StopRequested:
                    break
                except UnpickleError as e:
                    msg = '*** Ignoring unpickleable data on %s.' % \
                            green(e.queue.name)
                    self.log.warning(msg)
                    self.log.debug('Data follows:')
                    self.log.debug(e.raw_data)
                    self.log.debug('End of unreadable data.')
                    self.failed_queue.push_job_id(e.job_id)
                    continue

                self.state = 'busy'

                job, queue = result
                # Use the public setter here, to immediately update Redis
                job.status = Status.STARTED
                self.log.info('%s: %s (%s)' % (green(queue.name),
                    blue(job.description), job.id))

                self.fork_and_perform_job(job)

                did_perform_work = True
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work

    def fork_and_perform_job(self, job):
        """Spawns a work horse to perform the actual work and passes it a job.
        The worker will wait for the work horse and make sure it executes
        within the given timeout bounds, or will end the work horse with
        SIGALRM.
        """
        child_pid = os.fork()
        if child_pid == 0:
            self.main_work_horse(job)
        else:
            self._horse_pid = child_pid
            self.procline('Forked %d at %d' % (child_pid, time.time()))
            while True:
                try:
                    os.waitpid(child_pid, 0)
                    break
                except OSError as e:
                    # In case we encountered an OSError due to EINTR (which is
                    # caused by a SIGINT or SIGTERM signal during
                    # os.waitpid()), we simply ignore it and enter the next
                    # iteration of the loop, waiting for the child to end.  In
                    # any other case, this is some other unexpected OS error,
                    # which we don't want to catch, so we re-raise those ones.
                    if e.errno != errno.EINTR:
                        raise

    def main_work_horse(self, job):
        """This is the entry point of the newly spawned work horse."""
        # After fork()'ing, always assure we are generating random sequences
        # that are different from the worker.
        random.seed()

        # Always ignore Ctrl+C in the work horse, as it might abort the
        # currently running job.
        # The main worker catches the Ctrl+C and requests graceful shutdown
        # after the current work is done.  When cold shutdown is requested, it
        # kills the current job anyway.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        self._is_horse = True
        self.log = Logger('horse')

        success = self.perform_job(job)

        # os._exit() is the way to exit from child processes after a fork(), in
        # contrast to the regular sys.exit()
        os._exit(int(not success))

    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or 180):
                rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            pickled_rv = dumps(rv)
            job._status = Status.FINISHED
        except:
            # Use the public setter here, to immediately update Redis
            job.status = Status.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(unicode(rv)),))

        # How long we persist the job result depends on the value of
        # result_ttl:
        # - If result_ttl is 0, cleanup the job immediately.
        # - If it's a positive number, set the job to expire in X seconds.
        # - If result_ttl is negative, don't set an expiry to it (persist
        #   forever)
        result_ttl = self.default_result_ttl if job.result_ttl is None else job.result_ttl  # noqa
        if result_ttl == 0:
            job.delete()
            self.log.info('Result discarded immediately.')
        else:
            p = self.connection.pipeline()
            p.hset(job.key, 'result', pickled_rv)
            p.hset(job.key, 'status', job._status)
            if result_ttl > 0:
                p.expire(job.key, result_ttl)
                self.log.info('Result is kept for %d seconds.' % result_ttl)
            else:
                self.log.warning('Result will never expire, clean up result key manually.')
            p.execute()

        return True


    def handle_exception(self, job, *exc_info):
        """Walks the exception handler stack to delegate exception handling."""
        exc_string = ''.join(
                traceback.format_exception_only(*exc_info[:2]) +
                traceback.format_exception(*exc_info))
        self.log.error(exc_string)

        for handler in reversed(self._exc_handlers):
            self.log.debug('Invoking exception handler %s' % (handler,))
            fallthrough = handler(job, *exc_info)

            # Only handlers with explicit return values should disable further
            # exc handling, so interpret a None return value as True.
            if fallthrough is None:
                fallthrough = True

            if not fallthrough:
                break

    def move_to_failed_queue(self, job, *exc_info):
        """Default exception handler: move the job to the failed queue."""
        exc_string = ''.join(traceback.format_exception(*exc_info))
        self.log.warning('Moving job to %s queue.' % self.failed_queue.name)
        self.failed_queue.quarantine(job, exc_info=exc_string)

    def push_exc_handler(self, handler_func):
        """Pushes an exception handler onto the exc handler stack."""
        self._exc_handlers.append(handler_func)

    def pop_exc_handler(self):
        """Pops the latest exception handler off of the exc handler stack."""
        return self._exc_handlers.pop()
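
# A minimal usage sketch for the worker class above, hedged: it assumes the
# rq-style Queue/Worker API shown in this example and a default Redis
# connection; `report_failure` is a hypothetical custom exception handler.
from redis import Redis
from rq import Queue, Worker, use_connection

use_connection(Redis())


def report_failure(job, exc_type, exc_value, tb):
    # Returning True lets the default move-to-failed-queue handler run next.
    print('Job %s failed: %s' % (job.id, exc_value))
    return True


worker = Worker([Queue('default'), Queue('low')], exc_handler=report_failure)
worker.work(burst=False)  # blocks until a warm shutdown (SIGINT/SIGTERM) is requested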
Example #52
0
class Paxos(object):
    def __init__(self, transport, on_learn, on_prepare=None,
            on_stale=None, quorum_timeout=3,
            logger_group=None,
        ):
        self._logger = Logger('paxos')
        if logger_group is not None:
            logger_group.add_logger(self._logger)

        self.transport = transport
        self.on_learn = on_learn
        self.on_prepare = on_prepare
        self.on_stale = on_stale
        self.quorum_timeout = quorum_timeout

        self.id = 0
        self.max_seen_id = 0
        self.last_accepted_id = 0
        self._logger.debug('2 last_accepted_id=%(last_accepted_id)s' % self.__dict__)

        self.proposed_value = None
        self.deferred = None
        self.queue = deque() # queue of (value, deferred) to propose
        self._learn_queue = [] # sorted list with learn requests which come out of order

        # delayed calls for timeouts
        self._accepted_timeout = None
        self._acks_timeout = None
        self._waiting_to_learn_id = deque()

    def recv(self, message, client):
        message = shlex.split(message)
        command = getattr(self, message[0])
        command(client=client, *message[1:])

    def propose(self, value):
        deferred = Deferred()
        if self.proposed_value is None:
            self._start_paxos(value, deferred)
        else:
            self.queue.append((value, deferred))
            self._logger.debug('Request for %s was queued, queue size is %s (because we are proposing %s now)' % (
                    value,
                    len(self.queue),
                    self.proposed_value,
                )
            )
        return deferred

    def _start_paxos(self, value, deferred):
        """Starts paxos iteration proposing given value."""
        self.id = self.max_seen_id + 1
        self.proposed_value = value
        self.deferred = deferred

        self._num_acks_to_wait = self.transport.quorum_size

        def _timeout_callback():
            self._logger.info('+++ prepare timeout')
            # TODO sometimes self.deferred is None when this callback is called
            self.deferred.errback(PrepareTimeout())
            self.deferred = None
            self.proposed_value = None

        self._acks_timeout = reactor.callLater(self.quorum_timeout, _timeout_callback)
        self.transport.broadcast('paxos_prepare %s %s' % (self.id, self.last_accepted_id))

    def paxos_prepare(self, num, last_accepted_id, client):
        num = int(num)
        last_accepted_id = int(last_accepted_id)

        if last_accepted_id > self.last_accepted_id:
            # Move to the "stale" state
            self._logger.debug('stale: last_accepted_id(%s) > self.last_accepted_id(%s)' % (
                last_accepted_id, self.last_accepted_id
            ))
            self.on_stale(last_accepted_id)
        else:
            if num > self.max_seen_id:
                if self.on_prepare is not None:
                    self.on_prepare(num, client)

                self.max_seen_id = num
                self._send_to(client, 'paxos_ack %s' % num)

    def paxos_ack(self, num, client):
        num = int(num)
        if self.proposed_value is not None and num == self.id:
            self._num_acks_to_wait -= 1
            if self._num_acks_to_wait == 0:
                _stop_waiting(self._acks_timeout)

                self._num_accepts_to_wait = self.transport.quorum_size

                def _timeout_callback():
                    self._logger.info('+++ accept timeout')
                    self.deferred.errback(AcceptTimeout())
                    self.deferred = None
                    self.proposed_value = None

                self._accepted_timeout = reactor.callLater(
                    self.quorum_timeout,
                    _timeout_callback
                )
                self.transport.broadcast('paxos_accept %s "%s"' % (self.id, escape(self.proposed_value)))

    def paxos_accept(self, num, value, client):
        num = int(num)
        if num == self.max_seen_id:
            if self.id == num:
                # we have a deferred to return result in this round
                self._waiting_to_learn_id.append((num, self.deferred))
            else:
                # maybe we have a deferred, but it is for another Paxos round
                self._waiting_to_learn_id.append((num, None))
            self._send_to(client, 'paxos_accepted %s' % num)

    def paxos_accepted(self, num, client):
        num = int(num)
        if self.proposed_value is not None and num == self.id:
            self._num_accepts_to_wait -= 1
            if self._num_accepts_to_wait == 0:
                _stop_waiting(self._accepted_timeout)
                self.transport.broadcast('paxos_learn %s "%s"' % (self.id, escape(self.proposed_value)))

    def paxos_learn(self, num, value, client):
        self._logger.info('paxos.learn %s' % value)

        num = int(num)
        if self._waiting_to_learn_id and num == self._waiting_to_learn_id[0][0]:
            num, deferred = self._waiting_to_learn_id.popleft()

            try:
                result = self.on_learn(num, value, client)
            except Exception as e:
                self._logger.exception('paxos.learn %s' % value)
                result = e

            self.last_accepted_id = num
            self._logger.debug('1 last_accepted_id=%(last_accepted_id)s' % self.__dict__)

            if deferred is not None and value == self.proposed_value:
                # this works for current round coordinator only
                # because it must return result to the client
                # and to start a new round for next request

                if isinstance(result, Exception):
                    self._logger.warning('returning error from paxos.learn %s, %s' % (value, result))
                    deferred.errback(result)
                else:
                    self._logger.warning('returning success from paxos.learn %s' % value)
                    deferred.callback(result)

                self._logger.debug('queue size: %s' % len(self.queue))
                if self.queue:
                    # start new Paxos instance
                    # for next value from the queue
                    next_value, deferred = self.queue.pop()
                    self._logger.debug('next value from the queue: %s' % next_value)
                    self._start_paxos(next_value, deferred)
                else:
                    self.proposed_value = None
                    self.deferred = None

            if self._learn_queue:
                self._logger.debug('relearning remembered values')
                # clear queue because it will be filled again if needed
                queue, self._learn_queue = self._learn_queue, []
                for args in queue:
                    self.paxos_learn(*args)

        else:
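
# A minimal, hedged sketch of driving the Paxos helper above from a Twisted
# application.  `EchoTransport` and both callbacks are hypothetical; a real
# transport must expose `broadcast(message)`, `quorum_size` and deliver peer
# messages back into `paxos.recv(message, client)`.
def on_learn(num, value, client):
    print('learned value %r in round %s' % (value, num))
    return value


def on_stale(last_accepted_id):
    print('we are stale, need to catch up to id %s' % last_accepted_id)


paxos = Paxos(transport=EchoTransport(), on_learn=on_learn, on_stale=on_stale)


def on_accepted(result):
    print('proposal accepted with result %r' % result)


def on_failed(failure):
    print('proposal failed: %s' % failure)


deferred = paxos.propose('set key=value')
deferred.addCallbacks(on_accepted, on_failed)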
Example #53
0
import RPi.GPIO as GPIO
import signal
import ConfigParser
from pyomxplayer import OMXPlayer
from logbook import Logger
from logbook import SyslogHandler

log = Logger('Door Logger')
error_handler = SyslogHandler('videodoor', level='DEBUG')

with error_handler.applicationbound():

    Config = ConfigParser.ConfigParser()
    Config.read('/etc/videodoor/videodoor.ini')
    SensorPin = Config.getint('hardware','SensorPin')
    log.debug('setting up GPIO pin %i...' % SensorPin)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(SensorPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    log.debug('done.')

    log.debug('Setting up the omxplayer instance...')
    File = Config.get('video','File')
    Options = Config.get('video','Options')
    omx_status = False
    log.info('initializing videoplayer with file %s and options %s' % (File, Options))
    omx = OMXPlayer(File, Options)
    log.debug('done.')

    def start_video():
        global omx, omx_status
        if(omx_status):
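
    # A hedged sketch of how such a script typically continues: `stop_video`
    # mirrors `start_video` above, and the loop reacts to door-sensor edges.
    # The assumptions that the sensor reads LOW while the door is open and that
    # pyomxplayer's OMXPlayer exposes stop() may not match the real hardware.
    def stop_video():
        global omx, omx_status
        if omx_status:
            log.info('stopping playback')
            omx.stop()
            omx_status = False

    while True:
        GPIO.wait_for_edge(SensorPin, GPIO.BOTH)
        if GPIO.input(SensorPin) == GPIO.LOW:
            start_video()
        else:
            stop_video()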
Example #55
0
import os
import sys
import threading

from logbook import Logger, StreamHandler
from . import helpers
from .webtrader import WebTrader

StreamHandler(sys.stdout).push_application()
log = Logger(os.path.basename(__file__))

# Suppress the log messages produced by the heartbeat thread
debug_log = log.debug


def remove_heart_log(*args, **kwargs):
    if threading.current_thread() == threading.main_thread():
        debug_log(*args, **kwargs)

log.debug = remove_heart_log


class HTTrader(WebTrader):
    config_path = os.path.dirname(__file__) + '/config/ht.json'

    def __init__(self):
        super().__init__()
        self.account_config = None
        self.s = None

        self.__set_ip_and_mac()

    def __set_ip_and_mac(self):
        """获取本机IP和MAC地址"""
        # 获取ip
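
# The monkey-patching above silences heartbeat-thread output by wrapping
# log.debug.  A hedged alternative sketch achieves the same with a logbook
# handler filter, which leaves the Logger API untouched (the logger name is
# an assumption).
import sys
import threading

from logbook import Logger, StreamHandler


def main_thread_only(record, handler):
    # Drop records emitted from any thread other than the main thread.
    return record.thread == threading.main_thread().ident


StreamHandler(sys.stdout, filter=main_thread_only).push_application()
log = Logger('ht_trader')
log.debug('only logged when called from the main thread')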
Example #56
0
class BTgymServer(multiprocessing.Process):
    """Backtrader server class.

    Expects to receive dictionary, containing at least 'action' field.

    Control mode IN::

        dict(action=<control action, type=str>,), where control action is:
        '_reset' - rewinds backtrader engine and runs new episode;
        '_getstat' - retrieve episode results and statistics;
        '_stop' - server shut-down.

    Control mode OUT::

        <string message> - reports current server status;
        <statistic dict> - last run episode statistics.  NotImplemented.

        Within-episode signals:
        Episode mode IN:
        dict(action=<agent_action, type=str>,), where agent_action is:
        {'buy', 'sell', 'hold', 'close', '_done'} - agent or service actions; '_done' - stops current episode;

    Episode mode OUT::

        response  <tuple>: observation, <array> - observation of the current environment state,
                                                 could be any tensor; default is:
                                                 [4,m] array of <fl32>, where:
                                                 m - num. of last datafeed values,
                                                 4 - num. of data features (Lines);
                           reward, <any> - current portfolio statistics for environment reward estimation;
                           done, <bool> - episode termination flag;
                           info, <list> - auxiliary information.
    """
    data_server_response = None

    def __init__(
        self,
        cerebro=None,
        render=None,
        network_address=None,
        data_network_address=None,
        connect_timeout=90,
        log_level=None,
        task=0,
    ):
        """

        Args:
            cerebro:                backtrader.cerebro engine class.
            render:                 render class
            network_address:        environment communication, str
            data_network_address:   data communication, str
            connect_timeout:        seconds, int
            log_level:              int, logbook.level
        """

        super(BTgymServer, self).__init__()
        self.task = task
        self.log_level = log_level
        self.log = None
        self.process = None
        self.cerebro = cerebro
        self.network_address = network_address
        self.render = render
        self.data_network_address = data_network_address
        self.connect_timeout = connect_timeout  # server connection timeout in seconds.
        self.connect_timeout_step = 0.01

        self.trial_sample = None
        self.trial_stat = None
        self.dataset_stat = None

    @staticmethod
    def _comm_with_timeout(socket, message):
        """
        Exchanges messages via socket with timeout.

        Note:
            socket zmq.RCVTIMEO and zmq.SNDTIMEO should be set to some finite number of milliseconds

        Returns:
            dictionary:
                status: communication result;
                message: received message, if any.
        """
        response = dict(
            status='ok',
            message=None,
        )
        try:
            socket.send_pyobj(message)

        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'send_failed_due_to_connect_timeout'

            else:
                response['status'] = 'send_failed_for_unknown_reason'
            return response

        start = time.time()
        try:
            response['message'] = socket.recv_pyobj()
            response['time'] = time.time() - start

        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                response['status'] = 'receive_failed_due_to_connect_timeout'

            else:
                response['status'] = 'receive_failed_for_unknown_reason'
            return response

        return response

    def get_dataset_stat(self):
        data_server_response = self._comm_with_timeout(
            socket=self.data_socket, message={'ctrl': '_get_info'})
        if data_server_response['status'] == 'ok':
            self.log.debug(
                'Data_server @{} responded with dataset statistic in about {} seconds.'
                .format(self.data_network_address,
                        data_server_response['time']))

            return data_server_response['message']

        else:
            msg = 'BtgymServer_sampling_attempt: data_server @{} unreachable with status: <{}>.'. \
                format(self.data_network_address, data_server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

    def get_trial(self, **reset_kwargs):
        """

        Args:
            reset_kwargs:   dictionary of args to pass to parent data iterator

        Returns:
            trial_sample, trial_stat, dataset_stat
        """
        wait = 0
        while True:
            # Get new data subset:
            data_server_response = self._comm_with_timeout(
                socket=self.data_socket,
                message={
                    'ctrl': '_get_data',
                    'kwargs': reset_kwargs
                })
            if data_server_response['status'] == 'ok':
                self.log.debug(
                    'Data_server @{} responded in ~{:1.6f} seconds.'.format(
                        self.data_network_address,
                        data_server_response['time']))

            else:
                msg = 'BtgymServer_sampling_attempt: data_server @{} unreachable with status: <{}>.'. \
                    format(self.data_network_address, data_server_response['status'])
                self.log.error(msg)
                raise ConnectionError(msg)

            # Ready or not?
            try:
                assert 'Dataset not ready' in data_server_response['message'][
                    'ctrl']
                if wait <= self.wait_for_data_reset:
                    pause = random.random() * 2
                    time.sleep(pause)
                    wait += pause
                    self.log.info(
                        'Domain dataset not ready, wait time left: {:4.2f}s.'.
                        format(self.wait_for_data_reset - wait))
                else:
                    data_server_response = self._comm_with_timeout(
                        socket=self.data_socket, message={'ctrl': '_stop'})
                    self.socket.close()
                    self.context.destroy()
                    raise RuntimeError(
                        'Failed to assert Domain dataset is ready. Exiting.')

            except (AssertionError, KeyError) as e:
                break
        # Get trial instance:
        trial_sample = data_server_response['message']['sample']
        trial_stat = trial_sample.describe()
        trial_sample.reset()
        dataset_stat = data_server_response['message']['stat']
        origin = data_server_response['message']['origin']
        timestamp = data_server_response['message']['timestamp']

        return trial_sample, trial_stat, dataset_stat, origin, timestamp

    def get_trial_message(self):
        """
        Prepares a message containing the current trial instance, mimicking the data_server message protocol.
        Intended for serving requests from data_slave environment.

        Returns:
            dict containing trial instance, dataset statistic and origin key; or a dict containing a 'ctrl'
            response if the master dataset is not ready;
        """
        if self.trial_sample is not None:
            message = {
                'sample': self.trial_sample,
                'stat': self.dataset_stat,
                'origin': 'master_environment',
                'timestamp': self.get_global_time()
            }

        else:
            message = {'ctrl': 'Dataset not ready, hold on...'}
            self.log.debug('Sent to slave: ' + str(message))

        return message

    def get_global_time(self):
        """
        Asks the data server for the current dataset global_time.

        Returns:
            POSIX timestamp
        """
        data_server_response = self._comm_with_timeout(
            socket=self.data_socket, message={'ctrl': '_get_global_time'})
        if data_server_response['status'] == 'ok':
            pass

        else:
            msg = 'BtgymServer_sampling_attempt: data_server @{} unreachable with status: <{}>.'. \
                format(self.data_network_address, data_server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        return data_server_response['message']['timestamp']

    def run(self):
        """
        Server process runtime body. This method is invoked by env._start_server().
        """
        # Logging:
        from logbook import Logger, StreamHandler, WARNING
        import sys
        StreamHandler(sys.stdout).push_application()
        if self.log_level is None:
            self.log_level = WARNING
        self.log = Logger('BTgymServer_{}'.format(self.task),
                          level=self.log_level)

        self.process = multiprocessing.current_process()
        self.log.info('PID: {}'.format(self.process.pid))

        # Runtime Housekeeping:
        cerebro = None
        episode_result = dict()
        episode_sample = None

        # How long to wait for data_master to reset data:
        self.wait_for_data_reset = 300  # seconds

        connect_timeout = 60  # in seconds

        # Set up a comm. channel for server as ZMQ socket
        # to carry both service and data signal
        # !! Reminder: Since we use REQ/REP - messages do go in pairs !!
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.setsockopt(zmq.RCVTIMEO, -1)
        self.socket.setsockopt(zmq.SNDTIMEO, connect_timeout * 1000)
        self.socket.bind(self.network_address)

        self.data_context = zmq.Context()
        self.data_socket = self.data_context.socket(zmq.REQ)
        self.data_socket.setsockopt(zmq.RCVTIMEO, connect_timeout * 1000)
        self.data_socket.setsockopt(zmq.SNDTIMEO, connect_timeout * 1000)
        self.data_socket.connect(self.data_network_address)

        # Check connection:
        self.log.debug('Pinging data_server at: {} ...'.format(
            self.data_network_address))

        data_server_response = self._comm_with_timeout(
            socket=self.data_socket, message={'ctrl': 'ping!'})
        if data_server_response['status'] == 'ok':
            self.log.debug(
                'Data_server seems ready with response: <{}>'.format(
                    data_server_response['message']))

        else:
            msg = 'Data_server unreachable with status: <{}>.'.\
                format(data_server_response['status'])
            self.log.error(msg)
            raise ConnectionError(msg)

        # Init renderer:
        self.render.initialize_pyplot()

        # Mandatory DrawDown and auxiliary plotting observers to add to data-master strategy instance:
        # TODO: make plotters optional args
        if self.render.enabled:
            aux_observers = [bt.observers.DrawDown, Reward, Position, NormPnL]

        else:
            aux_observers = [bt.observers.DrawDown]

        # Server 'Control Mode' loop:
        for episode_number in itertools.count(0):
            while True:
                # Stuck here until '_reset' or '_stop':
                service_input = self.socket.recv_pyobj()
                msg = 'Control mode: received <{}>'.format(service_input)
                self.log.debug(msg)

                if 'ctrl' in service_input:
                    # It's time to exit:
                    if service_input['ctrl'] == '_stop':
                        # Server shutdown logic:
                        # send last run statistic, release comm channel and exit:
                        message = 'Exiting.'
                        self.log.info(message)
                        self.socket.send_pyobj(message)
                        self.socket.close()
                        self.context.destroy()
                        return None

                    # Start episode:
                    elif service_input['ctrl'] == '_reset':
                        message = 'Preparing new episode with kwargs: {}'.format(
                            service_input['kwargs'])
                        self.log.debug(message)
                        self.socket.send_pyobj(message)  # pairs '_reset'
                        break

                    # Retrieve statistic:
                    elif service_input['ctrl'] == '_getstat':
                        self.socket.send_pyobj(episode_result)
                        self.log.debug('Episode statistic sent.')

                    # Send episode rendering:
                    elif service_input[
                            'ctrl'] == '_render' and 'mode' in service_input.keys(
                            ):
                        # Just send what we got:
                        self.socket.send_pyobj(
                            self.render.render(service_input['mode']))
                        self.log.debug(
                            'Episode rendering for [{}] sent.'.format(
                                service_input['mode']))

                    # Serve data-dependent environment with trial instance:
                    elif service_input['ctrl'] == '_get_data':
                        message = 'Sending trial data to slave'
                        self.log.debug(message)
                        self.socket.send_pyobj(self.get_trial_message())

                    # Serve data-dependent environment with dataset statistic:
                    elif service_input['ctrl'] == '_get_info':
                        message = 'Sending dataset statistic to slave'
                        self.log.debug(message)
                        self.socket.send_pyobj(self.get_dataset_stat())

                    else:  # ignore any other input
                        # NOTE: response string must include 'ctrl' key
                        # for env.reset(), env.get_stat(), env.close() correct operation.
                        message = {
                            'ctrl':
                            'send control keys: <_reset>, <_getstat>, <_render>, <_stop>.'
                        }
                        self.log.debug('Control mode: sent: ' + str(message))
                        self.socket.send_pyobj(
                            message)  # pairs any other input

                else:
                    message = 'No <ctrl> key received:{}\nHint: forgot to call reset()?'.format(
                        msg)
                    self.log.debug(message)
                    self.socket.send_pyobj(message)

            # Got '_reset' signal -> prepare Cerebro subclass and run episode:
            start_time = time.time()
            cerebro = copy.deepcopy(self.cerebro)
            cerebro._socket = self.socket
            cerebro._data_socket = self.data_socket
            cerebro._log = self.log
            cerebro._render = self.render

            # Pass methods for serving capabilities:
            cerebro._get_data = self.get_trial_message
            cerebro._get_info = self.get_dataset_stat

            # Add auxiliary observers, if not already:
            for aux in aux_observers:
                is_added = False
                for observer in cerebro.observers:
                    if aux in observer:
                        is_added = True
                if not is_added:
                    cerebro.addobserver(aux)

            # Add communication utility:
            cerebro.addanalyzer(
                _BTgymAnalyzer,
                _name='_env_analyzer',
            )

            # Data preparation:
            # Parse args we got with _reset call:
            sample_config = dict(
                episode_config=copy.deepcopy(DataSampleConfig),
                trial_config=copy.deepcopy(DataSampleConfig))
            for key, config in sample_config.items():
                try:
                    config.update(service_input['kwargs'][key])

                except KeyError:
                    self.log.debug(
                        '_reset <{}> kwarg not found, using default values: {}'
                        .format(key, config))

            # Get new Trial from data_server if requested,
            # despite the built-in new/reuse data object sampling option, perform checks here to avoid
            # redundant traffic:
            if sample_config['trial_config'][
                    'get_new'] or self.trial_sample is None:
                self.log.info(
                    'Requesting new Trial sample with args: {}'.format(
                        sample_config['trial_config']))
                self.trial_sample, self.trial_stat, self.dataset_stat, origin, current_timestamp =\
                    self.get_trial(**sample_config['trial_config'])

                if origin == 'data_server':
                    self.trial_sample.set_logger(self.log_level, self.task)

                self.log.debug('Got new Trial: <{}>'.format(
                    self.trial_sample.filename))

            else:
                self.log.info('Reusing Trial <{}>'.format(
                    self.trial_sample.filename))
                current_timestamp = self.get_global_time()

            self.log.debug('current global_time: {}'.format(
                datetime.datetime.fromtimestamp(current_timestamp)))
            # Get episode:
            if sample_config['episode_config']['timestamp'] is None or\
                    sample_config['episode_config']['timestamp'] < current_timestamp:
                sample_config['episode_config'][
                    'timestamp'] = current_timestamp

            self.log.info('Requesting episode from <{}> with args: {}'.format(
                self.trial_sample.filename, sample_config['episode_config']))

            episode_sample = self.trial_sample.sample(
                **sample_config['episode_config'])
            self.log.debug('Got new Episode: <{}>'.format(
                episode_sample.filename))

            # Get episode data statistic and pass it to strategy params:
            cerebro.strats[0][0][2]['trial_stat'] = self.trial_stat
            cerebro.strats[0][0][2][
                'trial_metadata'] = self.trial_sample.metadata
            cerebro.strats[0][0][2]['dataset_stat'] = self.dataset_stat
            cerebro.strats[0][0][2]['episode_stat'] = episode_sample.describe()
            cerebro.strats[0][0][2]['metadata'] = episode_sample.metadata

            # Set nice broker cash plotting:
            cerebro.broker.set_shortcash(False)

            # Convert and add data to engine:
            feed = episode_sample.to_btfeed()
            if isinstance(feed, dict):
                for key, stream in feed.items():
                    cerebro.adddata(stream, name=key)

            else:
                cerebro.adddata(feed, name='base_asset')

            # Finally:
            episode = cerebro.run(stdstats=True,
                                  preload=False,
                                  oldbuysell=True)[0]

            # Update episode rendering:
            _ = self.render.render('just_render', cerebro=cerebro)
            _ = None

            # Recover that bloody analytics:
            analyzers_list = episode.analyzers.getnames()
            analyzers_list.remove('_env_analyzer')

            elapsed_time = timedelta(seconds=time.time() - start_time)
            self.log.debug('Episode elapsed time: {}.'.format(elapsed_time))

            episode_result['episode'] = episode_number
            episode_result['runtime'] = elapsed_time
            episode_result['length'] = len(episode.data.close)

            for name in analyzers_list:
                episode_result[name] = episode.analyzers.getbyname(
                    name).get_analysis()

            gc.collect()

        # Just in case -- we actually shouldn't get there except by some error:
        return None
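
# A hedged sketch of the client (environment) side of the control protocol the
# server above implements: a REQ socket paired with the server's REP socket,
# exchanging pickled dicts keyed by 'ctrl'.  The address is an assumption.
import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5500')

socket.send_pyobj({'ctrl': '_reset', 'kwargs': {}})
print(socket.recv_pyobj())   # status string, e.g. 'Preparing new episode ...'

socket.send_pyobj({'ctrl': '_getstat'})
print(socket.recv_pyobj())   # last episode statistics dict

socket.send_pyobj({'ctrl': '_stop'})
print(socket.recv_pyobj())   # 'Exiting.'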
Example #57
0
class Scheduler(object):
    scheduler_key = 'rq:scheduler'
    scheduled_jobs_key = 'rq:scheduler:scheduled_jobs'

    def __init__(self, queue_name='default', interval=60, connection=None):
        if connection is None:
            connection = get_current_connection()
        self.connection = connection
        self.queue_name = queue_name
        self._interval = interval
        self.log = Logger('scheduler')

    def register_birth(self):
        if self.connection.exists(self.scheduler_key) and \
                not self.connection.hexists(self.scheduler_key, 'death'):
            raise ValueError("There's already an active RQ scheduler")
        key = self.scheduler_key
        now = time.time()
        with self.connection.pipeline() as p:
            p.delete(key)
            p.hset(key, 'birth', now)
            p.execute()

    def register_death(self):
        """Registers its own death."""
        with self.connection.pipeline() as p:
            p.hset(self.scheduler_key, 'death', time.time())
            p.expire(self.scheduler_key, 60)
            p.execute()

    def _install_signal_handlers(self):
        """
        Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def stop(signum, frame):
            """
            Register scheduler's death and exit.
            """
            self.log.debug('Shutting down RQ scheduler...')
            self.register_death()
            raise SystemExit()

        signal.signal(signal.SIGINT, stop)
        signal.signal(signal.SIGTERM, stop)

    def _create_job(self, func, *args, **kwargs):
        """
        Creates an RQ job and saves it to Redis.
        """
        if func.__module__ == '__main__':
            raise ValueError(
                    'Functions from the __main__ module cannot be processed '
                    'by workers.')

        job = Job.create(func, *args, connection=self.connection, **kwargs)
        job.origin = self.queue_name
        job.save()
        return job

    def enqueue_at(self, scheduled_time, func, *args, **kwargs):
        """
        Pushes a job onto the scheduler queue. The scheduler queue is a Redis sorted
        set ordered by timestamp, which in this case is the job's scheduled execution time.

        Usage:
        
        from datetime import datetime
        from redis import Redis
        from rq.scheduler import Scheduler

        from foo import func

        redis = Redis()
        scheduler = Scheduler(queue_name='default', connection=redis)
        scheduler.enqueue_at(datetime(2020, 1, 1), func, 'argument', keyword='argument')
        """
        job = self._create_job(func, *args, **kwargs)
        self.connection.zadd(self.scheduled_jobs_key, job.id,
                             int(scheduled_time.strftime('%s')))
        return job

    # This is here for backwards compatibility purposes
    schedule = enqueue_at

    def enqueue_in(self, time_delta, func, *args, **kwargs):
        """
        Similar to ``enqueue_at``, but accepts a timedelta instead of datetime object.
        The job's scheduled execution time will be calculated by adding the timedelta
        to datetime.now().
        """
        job = self._create_job(func, *args, **kwargs)
        self.connection.zadd(self.scheduled_jobs_key, job.id,
                             int((datetime.now() + time_delta).strftime('%s')))
        return job

    def get_jobs_to_queue(self):
        """
        Returns a list of job instances that should be queued
        (score lower than current timestamp).
        """
        job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0, int(time.strftime('%s')))
        return [Job.fetch(job_id, connection=self.connection) for job_id in job_ids]

    def get_queue_for_job(self, job):
        """
        Returns a queue to put job into.
        """
        key = '{0}{1}'.format(Queue.redis_queue_namespace_prefix, job.origin)
        return Queue.from_queue_key(key, connection=self.connection)

    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))
        job.enqueued_at = datetime.now()
        job.save()
        queue = self.get_queue_for_job(job)
        queue.push_job_id(job.id)
        self.connection.zrem(self.scheduled_jobs_key, job.id)

    def enqueue_jobs(self):
        """
        Move scheduled jobs into queues. 
        """
        jobs = self.get_jobs_to_queue()
        for job in jobs:
            self.enqueue_job(job)
        return jobs

    def run(self):
        """
        Periodically check whether there's any job that should be put in the queue (score 
        lower than current time).
        """
        self.log.debug('Running RQ scheduler...')
        self.register_birth()
        self._install_signal_handlers()
        try:
            while True:
                self.enqueue_jobs()
                time.sleep(self._interval)
        finally:
            self.register_death()
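
# A hedged companion to the docstring example in `enqueue_at` above, showing
# `enqueue_in` and the polling loop.  `send_report` and its module are
# hypothetical; any importable function outside __main__ would do.
from datetime import timedelta

from redis import Redis

from my_app.tasks import send_report

scheduler = Scheduler(queue_name='default', interval=30, connection=Redis())
scheduler.enqueue_in(timedelta(minutes=5), send_report, 'weekly')
scheduler.run()  # polls every 30 seconds and moves due jobs onto their queues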
Example #58
0
import os

from eos import db
from eos.db import migration
from eos.db.saveddata.loadDefaultDatabaseValues import DefaultDatabaseValues
from eos.db.saveddata.databaseRepair import DatabaseCleanup

from logbook import Logger

pyfalog = Logger(__name__)

# Make sure the saveddata db exists
if config.savePath and not os.path.exists(config.savePath):
    os.mkdir(config.savePath)

if config.saveDB and os.path.isfile(config.saveDB):
    # If database exists, run migration after init'd database
    pyfalog.debug("Run database migration.")
    db.saveddata_meta.create_all()
    migration.update(db.saveddata_engine)
    # Import default database values
    # Import values that must exist otherwise Pyfa breaks
    pyfalog.debug("Import Required Database Values.")
    DefaultDatabaseValues.importRequiredDefaults()

    # Finds and fixes database corruption issues.
    pyfalog.debug("Starting database validation.")
    database_cleanup_instance = DatabaseCleanup()
    database_cleanup_instance.OrphanedCharacterSkills(db.saveddata_engine)
    database_cleanup_instance.OrphanedFitCharacterIDs(db.saveddata_engine)
    database_cleanup_instance.OrphanedFitDamagePatterns(db.saveddata_engine)
    database_cleanup_instance.NullDamagePatternNames(db.saveddata_engine)
    database_cleanup_instance.NullTargetResistNames(db.saveddata_engine)
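
# The module-level Logger(__name__) above only produces output once a logbook
# handler is active; a hedged sketch of wiring one up at application start-up
# (the log file path and level are assumptions).
from logbook import TimedRotatingFileHandler

log_handler = TimedRotatingFileHandler('pyfa.log', level='DEBUG', bubble=True)
log_handler.push_application()
pyfalog.info('saveddata database initialised')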
Example #59
0
                '\n--------END RESPONSE--------'
                % etree.tostring(response, pretty_print=True)
            )
        except PyUPSException as e:
            self.raise_user_error(unicode(e[0]))

        digest = ShipmentConfirm.extract_digest(response)

        shipment_accept = ShipmentAccept.shipment_accept_request_type(digest)

        shipment_accept_instance = carrier.ups_api_instance(call="accept")

        # Logging.
        logger.debug(
            'Making Shipment Accept Request for'
            'Shipment ID: {0} and Carrier ID: {1}'
            .format(self.id, self.carrier.id)
        )
        logger.debug(
            '--------SHIPMENT ACCEPT REQUEST--------\n%s'
            '\n--------END REQUEST--------'
            % etree.tostring(shipment_accept, pretty_print=True)
        )

        try:
            response = shipment_accept_instance.request(shipment_accept)

            # Logging.
            logger.debug(
                '--------SHIPMENT ACCEPT RESPONSE--------\n%s'
                '\n--------END RESPONSE--------'