Example #1
logger = logging.getLogger('MAIN')
logger.setLevel(logging.INFO)

# create a file handler
handler = logging.FileHandler(args.logging_file)
handler.setLevel(logging.INFO)

# create a logging format
formatter = logging.Formatter(
    '%(asctime)s - %(VCO_CUSTOMER_EDGE)s - %(funcName)s - %(lineno)s - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)

VCO_CUSTOMER_EDGE = 'MAIN'
local_logger = logging.LoggerAdapter(logging.getLogger('MAIN'),
                                     {'VCO_CUSTOMER_EDGE': VCO_CUSTOMER_EDGE})

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

cnx = mysql.connector.connect(host=cfg.mysql_prod.host,
                              database=cfg.mysql_prod.db,
                              user=cfg.mysql_prod.user,
                              password=cfg.mysql_prod.password)
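
The formatter/adapter pairing at the top of this example only works for records routed through the adapter: the format string names a custom VCO_CUSTOMER_EDGE field that plain LogRecords do not carry. A minimal, self-contained sketch of that pattern (the "site" field and logger names here are illustrative):

import logging

# The format string references a custom "site" field that ordinary LogRecords lack.
formatter = logging.Formatter("%(asctime)s - %(site)s - %(levelname)s - %(message)s")

handler = logging.StreamHandler()
handler.setFormatter(formatter)

base = logging.getLogger("demo")
base.setLevel(logging.INFO)
base.addHandler(handler)

# The adapter merges its extra dict into every record it emits, so %(site)s resolves.
logger = logging.LoggerAdapter(base, {"site": "MAIN"})
logger.info("hello")   # ... - MAIN - INFO - hello
# base.info("hello")   # would fail to format: no "site" attribute on the record
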
Example #2
class IoTManager:
    """Manages IoT communications to the Google cloud backend MQTT service."""

    # Initialize logger
    extra = {"console_name": "IoT", "file_name": "IoT"}
    logger = logging.getLogger("iot")
    logger = logging.LoggerAdapter(logger, extra)

    # Placeholder for the thread object.
    thread = None

    # Keep track of the previous values that we have published.
    # We only publish a value if it changes.
    prev_vars = None
    sentAboutJson = False
    last_status = datetime.datetime.utcnow()
    status_publish_freq_secs = 300

    def __init__(self, state, ref_recipe):
        """ Class constructor """
        self.iot = None
        self.state = state
        self.error = None
        self.ref_recipe = ref_recipe

        # Initialize our state.  These are filled in by the IoTPubSub class
        self.state.iot = {
            "error": self.error,
            "connected": "No",
            "received_message_count": 0,
            "published_message_count": 0,
        }

        self._stop_event = threading.Event()  # so we can stop this thread
        self.reset()

    def reset(self):
        try:
            # Pass in the callback that receives commands
            self.iot = IoTPubSub(self, self.command_received, self.state.iot)
        except Exception as e:
            self.iot = None
            self.error = str(e)
            # exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error("Couldn't create IoT connection: {}".format(e))
            # traceback.print_tb( exc_traceback, file=sys.stdout )

    def kill_iot_pubsub(self, msg):
        """Kills IoT pubsub."""
        self.iot = None
        self.error = msg
        self.logger.error("Killing IoTPubSub: {}".format(msg))

    def command_received(self, command, arg0, arg1):
        """Process commands received from the backend (UI). This is a callback that is 
        called by the IoTPubSub class when this device receives commands from the UI."""

        if self.iot is None:
            return

        try:
            if command == IoTPubSub.CMD_START:
                recipe_json = arg0
                recipe_dict = json.loads(arg0)

                # Make sure we have a valid recipe uuid
                if ("uuid" not in recipe_dict or None == recipe_dict["uuid"]
                        or 0 == len(recipe_dict["uuid"])):
                    self.logger.error("command_received: missing recipe UUID")
                    return
                recipe_uuid = recipe_dict["uuid"]

                # First stop any recipe that may be running
                self.ref_recipe.events.stop_recipe()

                # Put this recipe via recipe manager
                self.ref_recipe.events.create_or_update_recipe(recipe_json)

                # Start this recipe via recipe manager
                self.ref_recipe.events.start_recipe(recipe_uuid)

                # Record that we processed this command
                self.iot.publish_command_reply(command, recipe_json)
                return

            if command == IoTPubSub.CMD_STOP:
                self.ref_recipe.events.stop_recipe()
                self.iot.publish_command_reply(command, "")
                return

            self.logger.error(
                "command_received: Unknown command: {}".format(command))
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.critical("Exception in command_received(): %s" % e)
            traceback.print_tb(exc_traceback, file=sys.stdout)
            return False

    @property
    def error(self):
        """Gets error value."""
        return self._error

    @error.setter
    def error(self, value):
        """Safely updates recipe error in shared state."""
        self._error = value
        with threading.Lock():
            self.state.iot["error"] = value

    @property
    def connected(self):
        if self.iot is None:
            return False
        return self.iot.connected

    @connected.setter
    def connected(self, value):
        if self.iot is None:
            return
        self.iot.connected = value

    def publish_message(self, name, msg_json):
        """ Send a command reply. """
        if self.iot is None:
            return
        self.iot.publish_command_reply(name, msg_json)

    def spawn(self):
        self.logger.info("Spawning IoT thread")
        self.thread = threading.Thread(target=self.thread_proc)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.logger.info("Stopping IoT thread")
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def publish(self):
        if self.iot is None:
            return

        # Safely get vars dict
        vars_dict = get_nested_dict_safely(
            self.state.environment,
            ["reported_sensor_stats", "individual", "instantaneous"],
        )

        # Check if vars is empty, if so turn into a dict
        if vars_dict is None:
            vars_dict = {}

        # Keep a copy of the first set of values (usually None).
        if self.prev_vars is None:
            self.prev_vars = copy.deepcopy(vars_dict)

        # For each value, only publish the ones that have changed.
        for var in vars_dict:
            if self.prev_vars.get(var) != vars_dict[var]:
                self.prev_vars[var] = copy.deepcopy(vars_dict[var])
                self.iot.publish_env_var(var, vars_dict[var])

    def get_ip(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
        except:
            pass
        ip = s.getsockname()[0]
        s.close()
        return ip

    def thread_proc(self):
        while True:

            # Make sure we have a valid registration + device id
            # export DEVICE_ID=EDU-BD9BC8B7-f4-5e-ab-3f-07-fd
            device_id = ConnectUtilities.get_device_id_from_file()
            if device_id is None:
                time.sleep(15)
                self.logger.error("Missing device id file.")
                self.clean_up_images()  # don't fill the disk!
                continue
            os.environ["DEVICE_ID"] = device_id

            # Re-connect to IoT if we lose it (or never had it to begin with)
            if self.iot is None:
                time.sleep(15)
                self.reset()
                continue

            # Publish a boot message
            DEVICE_CONFIG_PATH = "data/config/device.txt"
            if not self.sentAboutJson:
                self.sentAboutJson = True
                try:
                    # Get device config
                    device = None
                    if os.path.exists(DEVICE_CONFIG_PATH):
                        with open(DEVICE_CONFIG_PATH) as f:
                            device = f.readline().strip()

                    about_dict = {
                        "package_version":
                        self.state.upgrade.get("current_version", "unknown"),
                        "device_config":
                        device,
                        "IP":
                        self.get_ip(),
                    }
                    about_json = json.dumps(about_dict)
                    self.iot.publish_command_reply("boot", about_json)

                except:
                    self._error = "Unable to send boot message."
                    self.logger.critical(self._error)

            # Publish status every 5 minutes
            secs_since_last_status = (datetime.datetime.utcnow() -
                                      self.last_status).total_seconds()
            if secs_since_last_status > self.status_publish_freq_secs:
                try:
                    self.last_status = datetime.datetime.utcnow()
                    status_dict = {}
                    status_dict["timestamp"] = time.strftime(
                        "%FT%XZ", time.gmtime())
                    status_dict["IP"] = self.get_ip()

                    # get the current version from the upgrade state
                    status_dict["package_version"] = self.state.upgrade.get(
                        "current_version", "unknown")

                    device = None
                    if os.path.exists(DEVICE_CONFIG_PATH):
                        with open(DEVICE_CONFIG_PATH) as f:
                            device = f.readline().strip()
                    status_dict["device_config"] = device

                    status_dict["status"] = self.state.resource.get(
                        "status", "")
                    status_dict["internet_connection"] = self.state.resource[
                        "internet_connection"]
                    status_dict["memory_available"] = self.state.resource[
                        "free_memory"]
                    status_dict["disk_available"] = self.state.resource[
                        "available_disk_space"]

                    status_dict["iot_status"] = self.state.iot["connected"]
                    status_dict["iot_received_message_count"] = self.state.iot[
                        "received_message_count"]
                    status_dict[
                        "iot_published_message_count"] = self.state.iot[
                            "published_message_count"]

                    status_dict["recipe_percent_complete"] = self.state.recipe[
                        "percent_complete"]
                    status_dict[
                        "recipe_percent_complete_string"] = self.state.recipe[
                            "percent_complete_string"]
                    status_dict[
                        "recipe_time_remaining_minutes"] = self.state.recipe[
                            "time_remaining_minutes"]
                    status_dict[
                        "recipe_time_remaining_string"] = self.state.recipe[
                            "time_remaining_string"]
                    status_dict[
                        "recipe_time_elapsed_string"] = self.state.recipe[
                            "time_elapsed_string"]

                    status_json = json.dumps(status_dict)
                    self.iot.publish_command_reply("status", status_json)
                except:
                    self._error = "Unable to send status message."
                    self.logger.critical(self._error)

            if self.stopped():
                break

            # Send and receive messages over IoT
            try:
                self.iot.process_network_events()
            except:
                pass

            # Check for images to publish
            try:
                image_file_list = glob.glob(IMAGE_DIR + "*.png")
                for image_file in image_file_list:

                    # Is this file open by a process? (fswebcam)
                    if 0 == os.system("lsof -f -- {} > /dev/null 2>&1".format(
                            image_file)):
                        continue  # Yes, so skip it and try the next one.

                    # 2018-06-15-T18:34:45Z_Camera-Top.png
                    fn1 = image_file.split("_")
                    fn2 = fn1[1]  # Camera-Top.png
                    fn3 = fn2.split(".")
                    camera_name = fn3[0]  # Camera-Top

                    # Get the file contents
                    f = open(image_file, "rb")
                    file_bytes = f.read()
                    f.close()

                    # If the size is < 200KB, then it is garbage we delete
                    # (based on the 1280x1024 average file size)
                    if len(file_bytes) < 200000:
                        os.remove(image_file)
                        continue

                    self.iot.publish_binary_image(camera_name, "png",
                                                  file_bytes)

                    # Check if stored directory exists, if not create it
                    if not os.path.isdir(IMAGE_DIR + "stored"):
                        os.mkdir(IMAGE_DIR + "stored")

                    # Move image from image directory once processed
                    stored_image_file = image_file.replace(
                        IMAGE_DIR, IMAGE_DIR + "stored/")
                    shutil.move(image_file, stored_image_file)

            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                self.logger.critical("Exception: {}".format(e))
                traceback.print_tb(exc_traceback, file=sys.stdout)

            # idle for a bit
            time.sleep(1)

    def clean_up_images(self):
        """If we are not registered for a long time, the camera peripheral will still 
        be taking pictures every hour by default.  So to avoid filling up the small 
        disk, we remove any images that build up."""
        try:
            image_file_list = glob.glob(IMAGE_DIR + "*.png")
            for image_file in image_file_list:
                # Is this file open by a process? (fswebcam)
                if 0 == os.system(
                        "lsof -f -- {} > /dev/null 2>&1".format(image_file)):
                    continue  # Yes, so skip it and try the next one.
                os.remove(image_file)
        except Exception as e:
            self.logger.error("clean_up_images: {}".format(e))
Example #3
File: bot.py Project: wrkzdev/DHV3
# Importing and creating a logger

import logging
import traceback

import time
from typing import Union

import cogs.helpers.aux_inits as inits
from cogs.helpers import checks
from cogs.helpers.context import CustomContext

base_logger = inits.init_logger()

extra = {"channelid": 0, "userid": 0}
logger = logging.LoggerAdapter(base_logger, extra)

logger.info("Starting the bot")

# Setting up asyncio to use uvloop if possible, a faster implementation of the event loop
import asyncio

try:
    import uvloop
except ImportError:
    logger.warning(
        "Using the not-so-fast default asyncio event loop. Consider installing uvloop."
    )
    pass
else:
    logger.info("Using the fast uvloop asyncio event loop")
Example #4
 def logger(self):
     logger = logging.getLogger(self.name)
     return logging.LoggerAdapter(logger, {'spider': self})
Example #5
 def logger(self, name=None):
     return logging.LoggerAdapter(logging.getLogger(name),
                                  {'actorAddress': self._addr})
Example #6
    def setup(self):
        self.env = ''
        if (
                # handle direct invocation of "nosetests"
                "test" in sys.argv[0] or
                # handle "setup.py test" and all permutations thereof.
                "setup.py" in sys.argv[0] and "test" in sys.argv[1:]):
            self.env = "unit_test"

        self.queues = queues.declare_queues(self)

        self.extension_subdomains = dict(
            simple="mobile",
            i="compact",
            api="api",
            rss="rss",
            xml="xml",
            json="json",
        )

        ################# PROVIDERS
        self.auth_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.auth",
            self.authentication_provider,
        )
        self.media_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.media",
            self.media_provider,
        )
        self.cdn_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.cdn",
            self.cdn_provider,
        )
        self.ticket_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.support",
            # TODO: fix this later, it refuses to pick up
            # g.config['ticket_provider'] value, so hardcoding for now.
            # really, the next uncommented line should be:
            #self.ticket_provider,
            # instead of:
            "zendesk",
        )
        self.image_resizing_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.image_resizing",
            self.image_resizing_provider,
        )
        self.email_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.email",
            self.email_provider,
        )
        self.startup_timer.intermediate("providers")

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = self.default_scheme + "://" + origin_prefix + self.domain

        self.trusted_domains = set([self.domain])
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.trusted_domains.add(https_url.hostname)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger('reddit')

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # set locations
        locations = pkg_resources.resource_stream(__name__,
                                                  "../data/locations.json")
        self.locations = json.loads(locations.read())

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print >> sys.stderr, (
                "Warning: g.media_domain == g.domain. " +
                "This may give untrusted content access to user cookies")
        if self.oauth_domain == self.domain:
            print >> sys.stderr, ("Warning: g.oauth_domain == g.domain. "
                                  "CORS requests to g.domain will be allowed")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        locale.setlocale(locale.LC_ALL, self.locale)

        # Pre-calculate ratelimit values
        self.RL_RESET_SECONDS = self.config["RL_RESET_MINUTES"] * 60
        self.RL_MAX_REQS = int(self.config["RL_AVG_REQ_PER_SEC"] *
                               self.RL_RESET_SECONDS)

        self.RL_OAUTH_RESET_SECONDS = self.config["RL_OAUTH_RESET_MINUTES"] * 60
        self.RL_OAUTH_MAX_REQS = int(self.config["RL_OAUTH_AVG_REQ_PER_SEC"] *
                                     self.RL_OAUTH_RESET_SECONDS)

        self.RL_LOGIN_MAX_REQS = int(self.config["RL_LOGIN_AVG_PER_SEC"] *
                                     self.RL_RESET_SECONDS)
        self.RL_LOGIN_IP_MAX_REQS = int(
            self.config["RL_LOGIN_IP_AVG_PER_SEC"] * self.RL_RESET_SECONDS)
        self.RL_SHARE_MAX_REQS = int(self.config["RL_SHARE_AVG_PER_SEC"] *
                                     self.RL_RESET_SECONDS)

        # Compile ratelimit regexs
        user_agent_ratelimit_regexes = {}
        for agent_re, limit in self.user_agent_ratelimit_regexes.iteritems():
            user_agent_ratelimit_regexes[re.compile(agent_re)] = limit
        self.user_agent_ratelimit_regexes = user_agent_ratelimit_regexes

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper, LiveConfig,
                                          LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts,
                                                  (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.secrets = fetch_secrets(self.zookeeper)
            self.throttles = LiveList(self.zookeeper,
                                      "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)

            # close our zk connection when the app shuts down
            SHUTDOWN_CALLBACKS.append(self.zookeeper.stop)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.optionxform = str
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.secrets = extract_secrets(parser)
            self.throttles = tuple()  # immutable since it's not real

        ################# PRIVILEGED USERS
        self.admins = PermissionFilteredEmployeeList(self.live_config,
                                                     type="admin")
        self.sponsors = PermissionFilteredEmployeeList(self.live_config,
                                                       type="sponsor")
        self.employees = PermissionFilteredEmployeeList(self.live_config,
                                                        type="employee")

        # Store which OAuth clients employees may use, the keys are just for
        # readability.
        self.employee_approved_clients = \
            self.live_config["employee_approved_clients"].values()

        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        memcaches = CMemcache(
            "main",
            self.memcaches,
            min_compress_len=1400,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        # a pool just used for @memoize results
        memoizecaches = CMemcache(
            "memoize",
            self.memoizecaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        # a pool just for srmember rels
        srmembercaches = CMemcache(
            "srmember",
            self.srmembercaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        # a pool just for rels
        relcaches = CMemcache(
            "rel",
            self.relcaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        ratelimitcaches = CMemcache(
            "ratelimit",
            self.ratelimitcaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        # a smaller pool of caches used only for distributed locks.
        self.lock_cache = CMemcache(
            "lock",
            self.lockcaches,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        permacache_memcaches = CMemcache(
            "perma",
            self.permacache_memcaches,
            min_compress_len=1400,
            num_clients=num_mc_clients,
            validators=[],
        )

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(
                "stale",
                self.stalecaches,
                num_clients=num_mc_clients,
                validators=[validate_size_error],
            )
        else:
            stalecaches = None

        # hardcache memcache pool
        hardcache_memcaches = CMemcache(
            "hardcache",
            self.hardcache_memcaches,
            binary=True,
            min_compress_len=1400,
            num_clients=num_mc_clients,
            validators=[validate_size_error],
        )

        self.startup_timer.intermediate("memcache")

        ################# MCROUTER
        self.mcrouter = Mcrouter(
            "mcrouter",
            self.mcrouter_addr,
            min_compress_len=1400,
            num_clients=1,
        )

        ################# THRIFT-BASED SERVICES
        activity_endpoint = self.config.get("activity_endpoint")
        if activity_endpoint:
            # make ActivityInfo objects rendercache-key friendly
            # TODO: figure out a more general solution for this if
            # we need to do this for other thrift-generated objects
            ActivityInfo.cache_key = lambda self, style: repr(self)

            activity_pool = ThriftConnectionPool(activity_endpoint,
                                                 timeout=0.1)
            self.baseplate.add_to_context(
                "activity_service",
                ThriftContextFactory(activity_pool, ActivityService.Client))

        self.startup_timer.intermediate("thrift")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
            StatsCollectingConnectionPool(keyspace,
                                          stats=self.stats,
                                          logging_name="main",
                                          server_list=self.cassandra_seeds,
                                          pool_size=self.cassandra_pool_size,
                                          timeout=4,
                                          max_retries=3,
                                          prefill=False),
        }

        permacache_cf = Permacache._setup_column_family(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
        )

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        cache_chains = {}
        localcache_cls = (SelfEmptyingCache
                          if self.running_as_script else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memcaches,
            )
        else:
            self.cache = CacheChain((localcache_cls(), memcaches))
        cache_chains.update(cache=self.cache)

        if stalecaches:
            self.thingcache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.mcrouter,
            )
        else:
            self.thingcache = CacheChain((localcache_cls(), self.mcrouter))
        cache_chains.update(thingcache=self.thingcache)

        def get_new_account_prefix_and_key(key, prefix=''):
            old_prefix = "Account_"
            new_prefix = "account:"

            if prefix:
                assert prefix == old_prefix
                return new_prefix, key
            else:
                key = str(key)
                assert key.startswith(old_prefix)
                account_id = key[len(old_prefix):]
                return '', new_prefix + account_id

        self.account_transitionalcache = TransitionalCache(
            original_cache=self.cache,
            replacement_cache=self.thingcache,
            read_original=True,
            key_transform=get_new_account_prefix_and_key,
        )

        if stalecaches:
            self.memoizecache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memoizecaches,
            )
        else:
            self.memoizecache = MemcacheChain(
                (localcache_cls(), memoizecaches))
        cache_chains.update(memoizecache=self.memoizecache)

        if stalecaches:
            self.srmembercache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                srmembercaches,
            )
        else:
            self.srmembercache = MemcacheChain(
                (localcache_cls(), srmembercaches))
        cache_chains.update(srmembercache=self.srmembercache)

        if stalecaches:
            self.relcache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                relcaches,
            )
        else:
            self.relcache = MemcacheChain((localcache_cls(), relcaches))
        cache_chains.update(relcache=self.relcache)

        self.ratelimitcache = MemcacheChain(
            (localcache_cls(), ratelimitcaches))
        cache_chains.update(ratelimitcache=self.ratelimitcache)

        # rendercache holds rendered partial templates.
        self.rendercache = MemcacheChain((
            localcache_cls(),
            self.mcrouter,
        ))
        cache_chains.update(rendercache=self.rendercache)

        # pagecaches hold fully rendered pages (includes comment panes)
        self.pagecache = MemcacheChain((
            localcache_cls(),
            self.mcrouter,
        ))
        cache_chains.update(pagecache=self.pagecache)

        # cassandra_local_cache is used for request-local caching in tdb_cassandra
        self.cassandra_local_cache = localcache_cls()
        cache_chains.update(cassandra_local_cache=self.cassandra_local_cache)

        if stalecaches:
            permacache_cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                permacache_memcaches,
            )
        else:
            permacache_cache = CacheChain(
                (localcache_cls(), permacache_memcaches), )
        cache_chains.update(permacache=permacache_cache)

        self.permacache = Permacache(
            permacache_cache,
            permacache_cf,
            lock_factory=self.make_lock,
        )

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), hardcache_memcaches, HardCache(self)),
            cache_negative_results=True,
        )
        cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them to be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                if isinstance(chain, TransitionalCache):
                    chain = chain.read_chain

                chain.reset()
                if isinstance(chain, LocalCache):
                    continue
                elif isinstance(chain, StaleCacheChain):
                    chain.stats = StaleCacheStats(self.stats, name)
                else:
                    chain.stats = CacheStats(self.stats, name)

        self.cache_chains = cache_chains

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        # Initialize the amqp module globals, start the worker, etc.
        r2.lib.amqp.initialize(self)

        self.events = EventQueue()

        self.startup_timer.intermediate("revisions")
Example #7
        datefmt=HA_DATEFMT)
    lfh.setFormatter(lfhformatter)
    log.addHandler(lfh)

## add debug logging to file
if HA_DEBUGLOG and HA_LOGFILE != HA_DEBUGLOG:
    dfh = logging.FileHandler(HA_DEBUGLOG)
    if HA_DEBUG == 0:
        dfh.setLevel(logging.WARNING)
    dfhformatter = logging.Formatter(
        '%(filename)s(%(OCF_RESOURCE_INSTANCE)s)[%(process)s]:\t%(asctime)s%(levelname)s: %(message)s',
        datefmt=HA_DATEFMT)
    dfh.setFormatter(dfhformatter)
    log.addHandler(dfh)

logger = logging.LoggerAdapter(
    log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE})

_exit_reason_set = False


def ocf_exit_reason(msg):
    """
	Print exit error string to stderr.

	Allows the OCF agent to provide a string describing
	why the exit code was returned.
	"""
    global _exit_reason_set
    cookie = env.get("OCF_EXIT_REASON_PREFIX", "ocf-exit-reason:")
    sys.stderr.write("{}{}\n".format(cookie, msg))
    sys.stderr.flush()
Example #8
async def spawn_duck(bot, channel, instance=None, ignore_event=False):
    if bot.can_spawn:
        if not ignore_event and bot.current_event['id'] == 5:
            if random.randint(0,
                              100) <= bot.current_event['ducks_cancel_chance']:
                bot.logger.debug(
                    f"A duck was canceled due to `connexion problems` (event)")
                return False

        if not ignore_event and bot.current_event['id'] == 1:
            if random.randint(
                    0, 100) <= bot.current_event['chance_for_second_duck']:
                await spawn_duck(bot, channel, ignore_event=True)

        if not instance:

            population = [
                ducks.Duck,
                ducks.SuperDuck,
                ducks.BabyDuck,
                ducks.MotherOfAllDucks,
            ]

            weights = [
                await bot.db.get_pref(channel.guild, "ducks_chance"),
                await bot.db.get_pref(
                    channel.guild,
                    "super_ducks_chance"),  # Modified below by event n°3
                await bot.db.get_pref(channel.guild, "baby_ducks_chance"),
                await bot.db.get_pref(channel.guild,
                                      "mother_of_all_ducks_chance"),
            ]

            if not ignore_event and bot.current_event['id'] == 3:
                weights[1] += int(
                    weights[1] *
                    bot.current_event['chance_added_for_super_duck'] / 100)

            if sum(weights) == 0:
                extra = {"channelid": channel.id, "userid": 0}
                logger = logging.LoggerAdapter(bot.base_logger, extra)
                logger.debug(
                    "A duck was ignored because all the weights were set to 0")
                # The owner doesn't want ducks to spawn
                return False

            type_ = random.choices(population, weights=weights, k=1)[0]

            instance = await type_.create(bot,
                                          channel,
                                          ignore_event=ignore_event)

        bot.ducks_spawned.append(instance)

        message = instance.discord_spawn_str

        if await bot.db.get_pref(channel.guild,
                                 "debug_show_ducks_class_on_spawn"):
            message = f"[{type(instance).__name__}] -- {message}"

        await bot.send_message(where=channel,
                               mention=False,
                               can_pm=False,
                               message=message)
    else:
        return False
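
The spawn logic above hinges on random.choices with per-guild weights, plus the sum(weights) == 0 guard (random.choices raises ValueError when the weights total zero). A tiny sketch with made-up weight values:

import random

population = ["Duck", "SuperDuck", "BabyDuck", "MotherOfAllDucks"]
weights = [80, 10, 5, 5]  # illustrative values; the bot reads these from per-guild prefs

if sum(weights) == 0:
    # Every weight is zero: nothing may spawn, and random.choices would raise ValueError.
    chosen = None
else:
    chosen = random.choices(population, weights=weights, k=1)[0]

print(chosen)  # usually "Duck" with these weights
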
Example #9
import json
import logging
import os
import threading
import time

logger = logging.LoggerAdapter(logging.getLogger("montreal"),
                               {"class": os.path.basename(__file__)})


class SensorListCreator(threading.Thread):
    def __init__(self, name, event, input_queue, output_queue, config):
        super(SensorListCreator, self).__init__()
        self.name = name
        self.event = event
        self.config = config
        self.input_queue = input_queue
        self.output_queue = output_queue
        logger = logging.LoggerAdapter(logging.getLogger("montreal"),
                                       {"class": os.path.basename(__file__)})

    def run(self):
        logger.info("Started {}".format(self.name))
        while not self.event.is_set():
            self.event.wait(5)
            sensors = {"sensors": {"buildings": {}}, "timestamp": {}}
            data = []
            while not self.input_queue.empty():
                sensor_data = json.loads(self.input_queue.get().replace(
                    "\'", "\""))
                sensors = self.__update_sensors(sensors, sensor_data)
Example #10
import logging
import os

import pki.nssdb
import pki.util
from lxml import etree

INSTANCE_BASE_DIR = '/var/lib/pki'
CONFIG_BASE_DIR = '/etc/pki'
LOG_BASE_DIR = '/var/log/pki'
REGISTRY_DIR = '/etc/sysconfig/pki'

SUBSYSTEM_TYPES = ['ca', 'kra', 'ocsp', 'tks', 'tps']
SUBSYSTEM_CLASSES = {}

SELFTEST_CRITICAL = 'critical'

logger = logging.LoggerAdapter(
    logging.getLogger(__name__),
    extra={'indent': ''})


class PKIServer(object):

    @classmethod
    def instances(cls):

        instances = []

        if not os.path.exists(os.path.join(REGISTRY_DIR, 'tomcat')):
            return instances

        for instance_name in os.listdir(pki.server.INSTANCE_BASE_DIR):
            instance = pki.server.PKIInstance(instance_name)
Example #11
    def __init__(
        self,
        ghost,
        user_agent=default_user_agent,
        wait_timeout=8,
        wait_callback=None,
        display=False,
        viewport_size=None,
        ignore_ssl_errors=True,
        plugins_enabled=False,
        java_enabled=False,
        javascript_enabled=True,
        download_images=True,
        show_scrollbars=True,
        exclude=None,
        network_access_manager_class=NetworkAccessManager,
        web_page_class=GhostWebPage,
        local_storage_enabled=True,
    ):
        self.ghost = ghost

        self.id = str(uuid.uuid4())

        self.logger = logging.LoggerAdapter(
            logger.getChild('session'),
            {'session': self.id},
        )
        self.logger.info("Starting new session")

        self.http_resources = []

        self.wait_timeout = wait_timeout
        self.wait_callback = wait_callback
        self.ignore_ssl_errors = ignore_ssl_errors
        self.loaded = True

        self.display = display

        self.popup_messages = []
        self.page = web_page_class(self.ghost._app, self)

        if network_access_manager_class is not None:
            self.page.setNetworkAccessManager(
                network_access_manager_class(exclude_regex=exclude))

        QWebSettings.setMaximumPagesInCache(0)
        QWebSettings.setObjectCacheCapacities(0, 0, 0)
        QWebSettings.globalSettings().setAttribute(
            QWebSettings.LocalStorageEnabled, local_storage_enabled)

        self.page.setForwardUnsupportedContent(True)
        self.page.settings().setAttribute(
            QWebSettings.AutoLoadImages, download_images)
        self.page.settings().setAttribute(
            QWebSettings.PluginsEnabled, plugins_enabled)
        self.page.settings().setAttribute(
            QWebSettings.JavaEnabled,
            java_enabled,
        )
        self.page.settings().setAttribute(
            QWebSettings.JavascriptEnabled, javascript_enabled)

        if not show_scrollbars:
            self.page.mainFrame().setScrollBarPolicy(
                Qt.Vertical,
                Qt.ScrollBarAlwaysOff,
            )
            self.page.mainFrame().setScrollBarPolicy(
                Qt.Horizontal,
                Qt.ScrollBarAlwaysOff,
            )

        # Page signals
        self.page.loadFinished.connect(self._page_loaded)
        self.page.loadStarted.connect(self._page_load_started)
        self.page.unsupportedContent.connect(self._unsupported_content)

        self.manager = self.page.networkAccessManager()
        self.manager.finished.connect(self._request_ended)
        self.manager.sslErrors.connect(self._on_manager_ssl_errors)

        # Cookie jar
        self.cookie_jar = QNetworkCookieJar()
        self.manager.setCookieJar(self.cookie_jar)

        # User Agent
        self.page.set_user_agent(user_agent)

        self.page.networkAccessManager().authenticationRequired\
            .connect(self._authenticate)
        self.page.networkAccessManager().proxyAuthenticationRequired\
            .connect(self._authenticate)

        self.main_frame = self.page.mainFrame()

        class GhostQWebView(QWebView):
            def sizeHint(self):
                return QSize(*viewport_size)

        self.webview = GhostQWebView()

        self.set_viewport_size(*viewport_size)

        if plugins_enabled:
            self.webview.settings().setAttribute(
                QWebSettings.PluginsEnabled, True)
        if java_enabled:
            self.webview.settings().setAttribute(
                QWebSettings.JavaEnabled, True)

        self.webview.setPage(self.page)

        if self.display:
            self.show()
Example #12
        'Without it, the boot record will always be flagged as suspicious. '
        'Defaults to {0}'.format(BOOTRECORD_WHITELIST_PATH),
        default=BOOTRECORD_WHITELIST_PATH,
        type=lambda x: x.decode(sys.getfilesystemencoding()))
    parser.add_argument(
        '--logLevel',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Show debug messages according to the level provided.')
    args = parser.parse_args()

    logging.basicConfig(
        level=getattr(logging, args.logLevel.upper()),
        format='%(levelname)-8s - [%(objectid)s] [%(stage)s] %(message)s')
    logger = logging.LoggerAdapter(logging.getLogger(__file__), {
        'objectid': None,
        'stage': 'main'
    })

    if args.type == 'VBR' and args.offset is None:
        logger.error('--offset is required when parsing VBR')
        sys.exit(1)

    if args.whitelist is not None:
        whitelist = initWhitelist(args.whitelist)
    else:
        whitelist = []

    for inputFile in args.input:
        logger.extra.update({'objectid': inputFile})
        with open(inputFile, 'rb') as f_input:
            if args.type == 'IMG':
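
The notable move in this example is logger.extra.update({'objectid': inputFile}): LoggerAdapter keeps a reference to its extra dict and re-attaches it on every call, so mutating it in place re-tags all subsequent records. A minimal sketch of that technique (the field names are illustrative):

import logging

logging.basicConfig(level=logging.INFO,
                    format='%(levelname)-8s - [%(objectid)s] %(message)s')
log = logging.LoggerAdapter(logging.getLogger(__name__), {'objectid': None})

for input_file in ['disk1.img', 'disk2.img']:
    # The adapter passes self.extra by reference, so this update affects
    # every record logged from here on.
    log.extra.update({'objectid': input_file})
    log.info('parsing')
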
Example #13
import base64
import logging

from hvac import Client

logger = logging.LoggerAdapter(logging.getLogger(__name__),
                               {'STAGE': 'Transit encryption'})


class Transit:
    client = None
    encryption_key = None
    mount = None

    def __init__(self, client, encryption_key, mount):
        self.client = client
        self.encryption_key = encryption_key
        self.mount = mount
        logger.info(
            f'Starting transit encryption with key={encryption_key} mount={mount}'
        )

    def encrypt(self, data):
        # logger.debug(f'Trying to base64 encode\n{data}')

        try:
            data_b64 = base64.b64encode(data.encode())
            data_sb64 = str(data_b64, "utf-8")
            # logger.debug(f'data in b64:\n{data_sb64}')
        except Exception as err:
            logger.exception(f'Error: {err}')
Example #14
    def __init__(self,
                 name,
                 config=None,
                 onAccount=None,
                 onOrderMatched=None,
                 onOrderPlaced=None,
                 onMarketUpdate=None,
                 onUpdateCallOrder=None,
                 ontick=None,
                 bitshares_instance=None,
                 *args,
                 **kwargs):

        # BitShares instance
        self.bitshares = bitshares_instance or shared_bitshares_instance()

        # Storage
        Storage.__init__(self, name)

        # Events
        Events.__init__(self)

        if ontick:
            self.ontick += ontick
        if onMarketUpdate:
            self.onMarketUpdate += onMarketUpdate
        if onAccount:
            self.onAccount += onAccount
        if onOrderMatched:
            self.onOrderMatched += onOrderMatched
        if onOrderPlaced:
            self.onOrderPlaced += onOrderPlaced
        if onUpdateCallOrder:
            self.onUpdateCallOrder += onUpdateCallOrder

        # Redirect this event to also call order placed and order matched
        self.onMarketUpdate += self._callbackPlaceFillOrders

        if config:
            self.config = config
        else:
            self.config = config = Config.get_worker_config_file(name)

        # Get worker's parameters from the config
        self.worker = config["workers"][name]

        # Get Bitshares account and market for this worker
        self._account = Account(self.worker["account"],
                                full=True,
                                bitshares_instance=self.bitshares)

        self._market = Market(config["workers"][name]["market"],
                              bitshares_instance=self.bitshares)

        # Recheck flag - Tell the strategy to check for updated orders
        self.recheck_orders = False

        # Count of orders to be fetched from the API
        self.fetch_depth = 8

        # Set fee asset
        fee_asset_symbol = self.worker.get('fee_asset')

        if fee_asset_symbol:
            try:
                self.fee_asset = Asset(fee_asset_symbol,
                                       bitshares_instance=self.bitshares)
            except bitshares.exceptions.AssetDoesNotExistsException:
                self.fee_asset = Asset('1.3.0',
                                       bitshares_instance=self.bitshares)
        else:
            # If there is no fee asset, use BTS
            self.fee_asset = Asset('1.3.0', bitshares_instance=self.bitshares)

        # CER cache
        self.core_exchange_rate = None

        # Ticker
        self.ticker = self.market.ticker

        # Settings for bitshares instance
        self.bitshares.bundle = bool(self.worker.get("bundle", False))

        # Disabled flag - this flag can be flipped to True by a worker and will be reset to False after reset only
        self.disabled = False

        # Order expiration time in seconds
        self.expiration = 60 * 60 * 24 * 365 * 5

        # buy/sell actions will return order id by default
        self.returnOrderId = 'head'

        # A private logger that adds worker identify data to the LogRecord
        self.log = logging.LoggerAdapter(
            logging.getLogger('dexbot.per_worker'), {
                'worker_name': name,
                'account': self.worker['account'],
                'market': self.worker['market'],
                'is_disabled': lambda: self.disabled
            })

        self.worker_market = self.worker["market"]

        self.orders_log = logging.LoggerAdapter(
            logging.getLogger('dexbot.orders_log'), {})
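
Note that 'is_disabled' above is a lambda, not a value: logging attaches extra entries to the LogRecord as-is, so something downstream has to call it. One way a custom Filter could resolve such an entry (an illustrative sketch, not necessarily how dexbot consumes it):

import logging

class WorkerStateFilter(logging.Filter):
    def filter(self, record):
        # The adapter copied its extra dict onto the record verbatim, so the
        # attribute may still be a callable here; resolve it before formatting.
        is_disabled = getattr(record, "is_disabled", None)
        if callable(is_disabled):
            record.is_disabled = is_disabled()
        return True

logging.getLogger("dexbot.per_worker").addFilter(WorkerStateFilter())
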
Example #15
    def filter(self, record):
        return not any(f.filter(record) for f in self.blacklist)


logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
    path = C.DEFAULT_LOG_PATH
    if path and (os.path.exists(path)
                 and os.access(path, os.W_OK)) or os.access(
                     os.path.dirname(path), os.W_OK):
        logging.basicConfig(
            filename=path,
            level=logging.INFO,
            format='%(asctime)s p=%(user)s u=%(process)d | %(message)s')
        logger = logging.LoggerAdapter(logging.getLogger('ansible'),
                                       {'user': getpass.getuser()})
        for handler in logging.root.handlers:
            handler.addFilter(
                FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
    else:
        print(
            "[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n"
            % path,
            file=sys.stderr)

# map color to log levels
color_to_log_level = {
    C.COLOR_ERROR: logging.ERROR,
    C.COLOR_WARN: logging.WARNING,
    C.COLOR_OK: logging.INFO,
    C.COLOR_SKIP: logging.WARNING,
Example #16
 def config_logger(self, format=FORMAT):
     logging.basicConfig(format=format)
     self._logger = logging.getLogger(self._logger_name)
     self._logger = logging.LoggerAdapter(self._logger, EXTRA)
     self._logger.setLevel(self._logger_level)
Example #17
 def GetConfig(self, request):
     cfg = msgpack.unpackb(request.task.config, raw=False)
     logger = logging.LoggerAdapter(self.log, {'tid': request.task.id})
     cfg['logger'] = logger
     return cfg
Example #18
parser.add_argument('--vhost', dest='vhost', help='Add additional "vhost" field to all log records. This can be used to differentiate between virtual hosts.')
args = parser.parse_args()

"""The list of expected fields is hard-coded. Please feel free to change it
As specified above, this requires the following line in apache configuration:

    CustomLog "||/path/to/accesslog2gelf.py" "%V %h %u \"%r\" %>s %b \"%{Referer}i\""

"""
regexp = r'^(?P<host>\S+) (?P<ipaddr>\S+) (?P<username>\S+) "(?P<request>[^"]*)" (?P<status>\S+) (?P<size>\S+) "(?P<referer>[^"]*)"$'

baserecord = {}
if args.vhost: baserecord['vhost'] = args.vhost

logger = logging.getLogger(args.facility)
logger.setLevel(logging.DEBUG)
logger.addHandler(graypy.GELFHandler(args.host, int(args.port), debugging_fields=False))

for line in iter(sys.stdin.readline, b''):
    matches = re.search(regexp, line)
    if matches:
        record = baserecord
        record.update(matches.groupdict())
        adapter = logging.LoggerAdapter(logging.getLogger(args.facility), record)
        """Default output message format is also hard-coded"""
        if args.vhost:
            adapter.info('%s %s (%s) "%s" %s %s "%s"' % tuple(record[f] for f in ["ipaddr", "vhost", "host", "request", "status", "size", "referer"]))
        else:
            adapter.info('%s %s "%s" %s %s "%s"' % tuple(record[f] for f in ["ipaddr", "host", "request", "status", "size", "referer"]))
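
As a quick sanity check of the hard-coded field list, here is what a made-up line in the CustomLog format above yields when run through the regexp:

import re

regexp = r'^(?P<host>\S+) (?P<ipaddr>\S+) (?P<username>\S+) "(?P<request>[^"]*)" (?P<status>\S+) (?P<size>\S+) "(?P<referer>[^"]*)"$'

line = 'example.com 203.0.113.7 - "GET / HTTP/1.1" 200 1234 "-"'
print(re.search(regexp, line).groupdict())
# {'host': 'example.com', 'ipaddr': '203.0.113.7', 'username': '-',
#  'request': 'GET / HTTP/1.1', 'status': '200', 'size': '1234', 'referer': '-'}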

Example #19
async def background_loop(bot):
    bot.logger.debug("Hello from the BG loop, waiting to be ready")
    await bot.wait_until_ready()
    bot.logger.debug("Hello from the BG loop, now ready")
    await event_gen(bot)
    planday = 0
    last_iter = int(time.time())
    last_hour = 0

    try:
        while not bot.is_closed() and bot.can_spawn:
            # bot.logger.debug("Looping")
            now = int(last_iter + 1)
            thisDay = now - (now % DAY)
            seconds_left = int(DAY - (now - thisDay))

            if int((now - 1) / DAY) != planday:
                # database.giveBack(logger)
                if planday == 0:
                    new_day = False
                else:
                    new_day = True
                planday = int(int(now - 1) / DAY)
                await planifie(bot, new_day=new_day)
                last_iter = int(time.time())

            if last_hour < int(int(now / 60) / 60):
                last_hour = int(int(now / 60) / 60)

            if int(now) % 60 == 0:
                bot.logger.info("Current stoners: {canards}".format(**{"canards": len(bot.stoners_spawned)}))

            if int(now) % 3600 == 0:
                await event_gen(bot)

            thishour = int((now % DAY) / HOUR)
            for channel in list(bot.stoners_planning.keys()):

                # This is where the spawn logic happens:
                # If the ducks never sleep, do as before, i.e. pick a random number between 0 and the
                # number of seconds remaining with randrange, and compare it against the number of
                # ducks still left to spawn today.
                #
                # On the other hand, if we are on a server where the ducks can sleep (i.e.
                # sleeping_stoners_start != sleeping_stoners_stop), for example:
                # 00:00 |-----==========---------| 23:59 (= when the ducks sleep)
                # then we just shrink seconds_left by the number of sleeping hours (in seconds),
                # which raises the probability that a duck spawns during the rest of the day.
                sdstart = await bot.db.get_pref(channel, "sleeping_stoners_start")
                sdstop = await bot.db.get_pref(channel, "sleeping_stoners_stop")
                currently_sleeping = False

                if sdstart != sdstop:  # In that case, the ducks might be sleeping!

                    # logger.debug("This hour is {v} UTC".format(v=thishour))
                    # OK, so count the total number of hours / seconds during which the ducks sleep
                    if sdstart < sdstop:  # 00:00 |-----==========---------| 23:59
                        if thishour < sdstop:
                            sdseconds = (sdstop - sdstart) * HOUR
                        else:
                            sdseconds = 0
                        if sdstart <= thishour < sdstop:
                            currently_sleeping = True
                    else:  # 00:00 |====--------------======| 23:59
                        sdseconds = (24 - sdstart) * HOUR  # No, we don't count the other seconds, since they will already have gone by
                        if thishour >= sdstart or thishour < sdstop:
                            currently_sleeping = True
                else:
                    sdseconds = 0

                if not currently_sleeping:
                    sseconds_left = seconds_left - sdseconds  # Don't change seconds_left, it's used by other channels
                    if sseconds_left <= 0:
                        extra = {"channelid": channel.id, "userid": 0}
                        logger = logging.LoggerAdapter(bot.base_logger, extra)
                        logger.warning(f"Huh, sseconds_left est à {sseconds_left}... C'est problématique.\n"
                                       f"sdstart={sdstart}, sdstop={sdstop}, thishour={thishour}, sdseconds={sdseconds}, seconds_left={seconds_left}")
                        sseconds_left = 1

                    try:
                        if random.randrange(0, sseconds_left) < bot.stoners_planning[channel]:
                            bot.stoners_planning[channel] -= 1
                            await spawn_stoner(bot, channel)
                    except KeyError:  # Race condition
                        # for channel in list(commons.stoners_planned.keys()): <= channel not deleted, so in this list
                        #    if random.randrange(0, seconds_left) < commons.stoners_planned[channel]: <= Channel had been deleted, so keyerror
                        pass

            for stoner in bot.stoners_spawned:
                if stoner.staying_until < now:  # Stoner leaving
                    _ = bot._
                    stoner.logger.debug(f"A stoner is leaving: {stoner}")

                    try:
                        if stoner.discord_leave_str:
                            await bot.send_message(where=stoner.channel, can_pm=False, mention=False, message=stoner.discord_leave_str)
                    except Exception as e:
                        stoner.logger.debug(f"I couldn't get a stoner to leave : {stoner} failed with {e}")

                    try:
                        bot.stoners_spawned.remove(stoner)
                    except ValueError:
                        stoner.logger.debug(f"Race condiction on removing {stoner}")

                        pass

            n = datetime.datetime.now()

            april_fools = n.day == 1 and n.month == 4

            # Fake stoners
            if april_fools:
                random_channel = random.choice(list(bot.stoners_planning.keys()))
                emoji = random.choice([":blowfish:", "🦞", ":shark:", ":octopus:", ":dolphin:" , ":squid:",  ":whale:", ":tropical_fish:", ":whale2:", "❥᷁)͜͡˒ ⋊"])
                try:
                    await bot.send_message(where=random_channel, can_pm=False, mention=False, message=f"-,..,.-'\`'°-,_,.-'\`'° {emoji} < **G**lub **G**lub")
                except Exception:
                    bot.logger.exception("Couldn't send an April Fools' stoner")


            now = time.time()
            # bot.logger.debug("On schedule : " + str(last_iter + 1 - now))
            bot.loop_latency = last_iter + 1 - now

            if last_iter + 1 <= now:
                if last_iter + 1 <= now - 5:
                    bot.logger.warning("Running behind schedule ({s} seconds)... Server overloaded or clock changed?".format(s=str(float(float(last_iter + 1) - int(now)))))
            else:
                await asyncio.sleep(last_iter + 1 - now)

            last_iter += 1
    except KeyboardInterrupt:
        raise
    except Exception as e:
        bot.logger.exception("Fatal Exception")
        raise e
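# The spawn check above relies on `random.randrange(0, sseconds_left) < planned`: each second
# has roughly a planned/seconds_left chance of spawning, so the planned count is spread evenly
# over the seconds left in the day, and subtracting the sleeping window raises the odds for the
# awake hours. A minimal, self-contained simulation of that idea (names like `simulate_day`
# and the parameter values are illustrative, not from the bot):
import random

DAY = 86400
HOUR = 3600

def simulate_day(planned=5, sleep_start=2, sleep_stop=10, seed=42):
    random.seed(seed)
    remaining = planned
    spawn_times = []
    for now in range(DAY):
        seconds_left = DAY - now
        hour = now // HOUR
        if sleep_start <= hour < sleep_stop:
            continue  # asleep: never spawn
        # Subtract the sleeping hours still ahead so the awake seconds carry all the probability.
        sleep_seconds = (sleep_stop - sleep_start) * HOUR if hour < sleep_stop else 0
        effective_left = max(seconds_left - sleep_seconds, 1)
        if remaining and random.randrange(0, effective_left) < remaining:
            remaining -= 1
            spawn_times.append(now)
    return spawn_times

print(simulate_day())  # e.g. a handful of timestamps scattered over the awake hours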
Example #20
0
def lambda_handler(event, context):
    """
    Callback handler for Lambda.

    Expected event structure:
    {
        "Records": [
            {
                "eventTime": "1970-01-01T00:00:00.000Z",
                "eventName": "ObjectCreated:Put",
                "eventSource": "aws:s3",
                "s3": {
                    "bucket": { "name": "source_bucket" },
                    "object": { "key": "StdError.gz" }
                }
            }
        ]
    }
    """
    json_logging.update_context(
        aws_request_id=context.aws_request_id,
        function_name=context.function_name,
        function_version=context.function_version,
        log_stream_name=context.log_stream_name,
    )
    for i, event_data in enumerate(event["Records"]):
        event_logger = logging.LoggerAdapter(
            logger, extra={"event_id": f"{context.aws_request_id}.{i}"}
        )
        bucket_name = event_data["s3"]["bucket"]["name"]
        object_key = urllib.parse.unquote_plus(event_data["s3"]["object"]["key"])
        event_logger.info(
            "Processing event: "
            "index={i}, source={eventSource}, name={eventName}, time={eventTime}".format(
                i=i, **event_data
            ),
            extra={
                "event.source": event_data["eventSource"],
                "event.name": event_data["eventName"],
                "event.time": event_data["eventTime"],
            },
        )
        file_uri = f"s3://{bucket_name}/{object_key}"
        if not (
            (object_key.startswith("_logs/") or "/logs/" in object_key)
            and object_key.endswith(("StdError.gz", "stderr.gz"))
        ):
            event_logger.info(f"Object is not a log file: {file_uri}")
            continue
        event_logger.info(f"Looking for log records in {file_uri}")

        processed = compile.load_records([file_uri])
        try:
            host, port = config.get_es_endpoint(bucket_name=bucket_name)
            es = config.connect_to_es(host, port, use_auth=True)
            if not config.exists_index_template(es):
                config.put_index_template(es)
        except botocore.exceptions.ClientError as exc:
            event_logger.exception(f"Failed in initial connection: {exc!s}")
            # Let the lambda crash and try again later.
            raise

        try:
            index_records(es, processed, event_logger)
        except parse.NoRecordsFoundError:
            event_logger.info("Failed to find log records in object '{}'".format(file_uri))
            continue
        except botocore.exceptions.ClientError as exc:
            error_code = exc.response["Error"]["Code"]
            event_logger.warning(f"Error code {error_code} for object '{file_uri}'")
            continue

        event_logger.info("Indexed log records successfully.", extra={"log_file_uri": file_uri})
Example #21
0
import json
import logging
import os
import threading

from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY, GaugeMetricFamily

logger = logging.LoggerAdapter(logging.getLogger("sensiot"),
                               {"class": os.path.basename(__file__)})


class PrometheusWriter(threading.Thread):
    def __init__(self, name, event, queue, config):
        super(PrometheusWriter, self).__init__()
        self.name = name
        self.event = event
        self.queue = queue
        self.config = config

        logger.info("{} initialized successfully".format(self.name))

    def run(self):
        # FIXME: outdated data is held!
        logger.info("Started {}".format(self.name))
        collectors = {}
        start_http_server(self.config['port'])
        while not self.event.is_set():
            self.event.wait(10)
            while not self.queue.empty():
                data = json.loads(self.queue.get())
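# The run() body above is cut off before the queue data is turned into metrics. As a rough,
# hypothetical sketch of the usual prometheus_client pattern for this (the "sensor"/"value"
# field names are invented for illustration), a custom collector yields a GaugeMetricFamily
# built from the latest readings:
from prometheus_client.core import REGISTRY, GaugeMetricFamily

class SensorCollector:
    def __init__(self):
        self.latest = {}  # sensor name -> last value seen

    def update(self, data):
        self.latest[data["sensor"]] = float(data["value"])

    def collect(self):
        gauge = GaugeMetricFamily("sensor_value", "Last value per sensor", labels=["sensor"])
        for sensor, value in self.latest.items():
            gauge.add_metric([sensor], value)
        yield gauge

collector = SensorCollector()
REGISTRY.register(collector)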
Example #22
0
    def __init__(self,
                 name,
                 config=None,
                 onAccount=None,
                 onOrderMatched=None,
                 onOrderPlaced=None,
                 onMarketUpdate=None,
                 onUpdateCallOrder=None,
                 ontick=None,
                 bitshares_instance=None,
                 *args,
                 **kwargs):
        # BitShares instance
        self.bitshares = bitshares_instance or shared_bitshares_instance()

        # Storage
        Storage.__init__(self, name)

        # Statemachine
        StateMachine.__init__(self, name)

        # Events
        Events.__init__(self)

        if ontick:
            self.ontick += ontick
        if onMarketUpdate:
            self.onMarketUpdate += onMarketUpdate
        if onAccount:
            self.onAccount += onAccount
        if onOrderMatched:
            self.onOrderMatched += onOrderMatched
        if onOrderPlaced:
            self.onOrderPlaced += onOrderPlaced
        if onUpdateCallOrder:
            self.onUpdateCallOrder += onUpdateCallOrder

        # Redirect this event to also call order placed and order matched
        self.onMarketUpdate += self._callbackPlaceFillOrders

        if config:
            self.config = config
        else:
            self.config = config = Config.get_worker_config_file(name)

        self.worker = config["workers"][name]
        self._account = Account(self.worker["account"],
                                full=True,
                                bitshares_instance=self.bitshares)
        self._market = Market(config["workers"][name]["market"],
                              bitshares_instance=self.bitshares)

        # Recheck flag - Tell the strategy to check for updated orders
        self.recheck_orders = False

        # Settings for bitshares instance
        self.bitshares.bundle = bool(self.worker.get("bundle", False))

        # Disabled flag - this flag can be flipped to True by a worker and
        # will be reset to False after reset only
        self.disabled = False

        # A private logger that adds worker identity data to the LogRecord
        self.log = logging.LoggerAdapter(
            logging.getLogger('dexbot.per_worker'), {
                'worker_name': name,
                'account': self.worker['account'],
                'market': self.worker['market'],
                'is_disabled': lambda: self.disabled
            })
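# Note on the 'is_disabled' lambda above: LoggerAdapter copies its extra dict onto each
# LogRecord as-is, so a format string containing %(is_disabled)s would print the lambda's
# repr unless something evaluates it first. A hedged sketch of one way to do that with a
# handler filter (this filter is illustrative, not part of dexbot):
import logging

class ResolveCallablesFilter(logging.Filter):
    def filter(self, record):
        # Replace callable record attributes (except the message machinery) with their value.
        for key, value in list(vars(record).items()):
            if key not in ("msg", "args", "exc_info") and callable(value):
                setattr(record, key, value())
        return True

# Usage: handler.addFilter(ResolveCallablesFilter()) on whichever handler formats is_disabled.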
Example #23
0
 def getLogger(self, obj):
     # is this atomic enough for threads? (fine if we're running in a coroutine)
     i, self.max_ident = self.max_ident + 1, self.max_ident + 1
     adapter = logging.LoggerAdapter(logging.getLogger(__name__),
                                     dict(label=("%s%d" % (obj, i))))
     return adapter
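# A small, self-contained sketch (class and format names assumed, not from the original
# project) of how the counter-based factory above might be wired up so the per-object label
# shows in the output:
import logging

logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(label)s %(message)s")

class AdapterFactory:
    def __init__(self):
        self.max_ident = 0

    def getLogger(self, obj):
        self.max_ident += 1
        return logging.LoggerAdapter(logging.getLogger(__name__),
                                     dict(label="%s%d" % (obj, self.max_ident)))

factory = AdapterFactory()
factory.getLogger("worker").info("started")  # -> INFO worker1 started
factory.getLogger("worker").info("started")  # -> INFO worker2 started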
Example #24
0
from json import loads, JSONEncoder, JSONDecoder
from yaml.constructor import ConstructorError
from orderedattrdict import AttrDict, DefaultAttrDict
from slugify import slugify
from errno import EACCES, EPERM

ERROR_SHARING_VIOLATION = 32  # from winerror.ERROR_SHARING_VIOLATION

# gramex.config.app_log is the default logger used by all of gramex
# If it's not there, create one.
logging.basicConfig()
app_log = logging.getLogger('gramex')

# app_log_extra has additional parameters that may be used by the logger
app_log_extra = {'port': 'PORT'}
app_log = logging.LoggerAdapter(app_log, app_log_extra)

# sqlalchemy.create_engine requires an encoding= that must be an str across
# Python 2 and Python 3. Expose this for other modules to use
str_utf8 = str('utf-8')  # noqa

# Common slug patterns
slug = AttrDict(
    # Python modules must be lowercase, with letters, numbers or _, separated by _
    module=lambda s: slugify(
        s, lowercase=True, regex_pattern=r'[^a-z0-9_]+', separator='_'),
    # Allow files to contain ASCII characters except
    #   - spaces
    #   - wildcards: * or ?
    #   - quotes: " or '
    #   - directory or drive separators: / or \ or :
Example #25
0
    DummyOptimizer,
    EmbeddingHolder,
    allocate_shared_tensor,
    create_pool,
    fast_approx_rand,
    get_async_result,
    get_num_workers,
    hide_distributed_logging,
    round_up_to_nearest_multiple,
    split_almost_equally,
    tag_logs_with_process_name,
)


logger = logging.getLogger("torchbiggraph")
dist_logger = logging.LoggerAdapter(logger, {"distributed": True})


class Trainer(AbstractBatchProcessor):
    def __init__(
        self,
        model_optimizer: Optimizer,
        loss_fn: AbstractLossFunction,
        relation_weights: List[float],
    ) -> None:
        super().__init__(loss_fn, relation_weights)
        self.model_optimizer = model_optimizer
        self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}
        self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}

    def _process_one_batch(
Example #26
0
# Obtain the working directory and the required directories
WORKING_DIR = os.path.abspath(
    os.path.join(os.getcwd(), os.path.dirname(sys.argv[0])))
CONF_DIR = ("%s/conf" % WORKING_DIR)
LOGS_DIR = ("%s/logs" % WORKING_DIR)
if not os.path.isdir(CONF_DIR): sys.exit("Configuration directory missing")
if not os.path.isdir(LOGS_DIR): os.makedirs(LOGS_DIR)
if not os.path.isfile("%s/logging.ini" % CONF_DIR):
    sys.exit(
        "The logging configuration file (logging.ini) is missing! Provide it and try again"
    )
logging.LOG_FILE = ("%s/graderclient.log" % LOGS_DIR)
logging.config.fileConfig("%s/logging.ini" % CONF_DIR)
logger = logging.getLogger("grader_client")
extra = {'VERSION': VERSION}
logger = logging.LoggerAdapter(logger, extra)
#
#    Ensure the log files are still writeable by the service_user
#
uid = pwd.getpwnam(sys.argv[1]).pw_uid
gid = grp.getgrnam(sys.argv[1]).gr_gid
os.chown("%s/graderclient.log" % LOGS_DIR, uid, gid)

SERV_IP = ""
PORT = 58500
LAB = 0
UserID = 0
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
global keep_commune
keep_commune = True
global user_, user_hdir
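# The logging.ini itself is not shown. A hedged sketch of a minimal compatible file
# (illustrative only): the formatter has to reference %(VERSION)s because the code above wraps
# the logger in a LoggerAdapter supplying that key, and the handler args may name LOG_FILE
# because fileConfig evaluates them in the logging module's namespace, where the script has
# just set logging.LOG_FILE.
MINIMAL_LOGGING_INI = """
[loggers]
keys = root, grader_client

[handlers]
keys = file

[formatters]
keys = default

[logger_root]
level = WARNING
handlers =

[logger_grader_client]
level = INFO
handlers = file
qualname = grader_client
propagate = 0

[handler_file]
class = FileHandler
formatter = default
args = (LOG_FILE, 'a')

[formatter_default]
format = %(asctime)s - %(VERSION)s - %(levelname)s - %(message)s
"""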
Example #27
0
from chat.models import Message, Room, RoomUsers, Subscription, SubscriptionMessages, MessageHistory, \
 UploadedFile, Image, get_milliseconds, UserProfile, Channel, User
from chat.py2_3 import str_type, quote
from chat.settings import ALL_ROOM_ID, REDIS_PORT, GIPHY_URL, GIPHY_REGEX, FIREBASE_URL, REDIS_HOST, \
 REDIS_DB
from chat.tornado.constants import VarNames, HandlerNames, Actions, RedisPrefix, WebRtcRedisStates, \
 UserSettingsVarNames, UserProfileVarNames
from chat.tornado.message_creator import WebRtcMessageCreator, MessagesCreator
from chat.utils import get_max_key, validate_edit_message, \
 get_message_images_videos, update_symbols, up_files_to_img, evaluate, check_user, http_client

# from pywebpush import webpush

parent_logger = logging.getLogger(__name__)
base_logger = logging.LoggerAdapter(parent_logger, {
    'id': 0,
    'ip': '000.000.000.000'
})

# TODO https://github.com/leporo/tornado-redis#connection-pool-support
# CONNECTION_POOL = tornadoredis.ConnectionPool(
# max_connections=500,
# wait_for_available=True)

GIPHY_API_KEY = getattr(settings, "GIPHY_API_KEY", None)
FIREBASE_API_KEY = getattr(settings, "FIREBASE_API_KEY", None)


class MessagesHandler(MessagesCreator):
    def __init__(self, *args, **kwargs):
        self.closed_channels = None
        super(MessagesHandler, self).__init__()
Example #28
0
 def core(self):
     return logging.LoggerAdapter(logging.getLogger('core'),
                                  {"user_id": get_current_user_id()})
Example #29
0
File: bot.py Project: wrkzdev/DHV3
    async def send_message(self,
                           ctx: context.CustomContext = None,
                           from_: discord.Member = None,
                           where: discord.TextChannel = None,
                           message: str = "",
                           embed: discord.Embed = None,
                           can_pm: bool = True,
                           force_pm: bool = False,
                           mention=True,
                           try_: int = 1,
                           return_message: bool = False):

        if not return_message:

            async def send_m():
                return await self.send_message(ctx=ctx,
                                               from_=from_,
                                               where=where,
                                               message=message,
                                               embed=embed,
                                               can_pm=can_pm,
                                               force_pm=force_pm,
                                               mention=mention,
                                               try_=try_,
                                               return_message=True)

            return asyncio.ensure_future(send_m())

        s = time.time()

        original_message = message
        if 10000 > len(message) > 1900:
            message = message.split("\n")
            current_message = 0
            to_send = ""
            for line in message:
                line = line + "\n"
                if len(to_send) + len(line) <= 1800:
                    to_send += line
                else:
                    if "```" in to_send:
                        # TODO: Check if number of ``` match.
                        to_send += "```"
                        line = "```" + line

                    await self.send_message(ctx=ctx,
                                            from_=from_,
                                            where=where,
                                            message=to_send,
                                            embed=embed,
                                            can_pm=can_pm,
                                            force_pm=force_pm,
                                            mention=False,
                                            try_=try_,
                                            return_message=True)  #
                    # Return message at true to ensure order
                    to_send = line

            m = await self.send_message(ctx=ctx,
                                        from_=from_,
                                        where=where,
                                        message=to_send,
                                        embed=embed,
                                        can_pm=can_pm,
                                        force_pm=force_pm,
                                        mention=False,
                                        try_=try_,
                                        return_message=True)
            return m

        if ctx:
            from_ = ctx.message.author
            where = ctx.channel
            logger = ctx.logger
        else:
            extra = {
                "channelid": where.id if where else 0,
                "userid": from_.id if from_ else 0
            }
            logger = logging.LoggerAdapter(bot.base_logger, extra)

        # bot.logger.debug(f"Long message took : {time.time() - s}.")

        if where:  # Where is a TextChannel
            if force_pm or (can_pm and await self.db.get_pref(
                    where.guild, "pm_most_messages")):
                if from_:  # If I have someone to PM
                    where = await from_.create_dm()
                    permissions = True
                else:
                    logger.warning(
                        f"I want to PM this message, but I can't since I don't have a from_ User\n"
                        f"ctx={ctx}, from_={from_}, where={where},\n"
                        f"message : {message}")
                    permissions = True  # TODO: Check perms

            else:
                permissions = True  # TODO: Check perms
                if mention and from_:
                    message = f"{from_.mention} > {message}"

        else:  # Where will be a DMChannel
            if from_:
                where = await from_.create_dm()
                permissions = True
            else:
                logger.error(
                    "Can't send the message: don't know where to send it")
                raise TypeError(
                    f"Need a `ctx`, `to` or `where`, but ctx={ctx} | from_={from_} | where={where} given"
                )

        # bot.logger.debug(f"Where took : {time.time() - s}.")

        if not permissions:
            logger.warning("No permissions to speak here")
            return False

        # Where can be a TextChannel or a DMChannel
        # from_ is a Member or None
        # ctx can be a Context or None

        try:
            m = await where.send(message, embed=embed)
            return m
        except discord.errors.Forbidden:
            logger.warning(
                f"Could not send {message} to channel : no I/O permissions")
            return False
        except discord.errors.NotFound:
            logger.warning(
                f"Could not send {message} to channel : channel not found.")
            return False
        except Exception as e:
            if try_ >= 3:
                logger.warning(
                    f"Could not send {message} to channel after 3 tries")
                logger.exception("Exception for not sending is :")
                await bot.log(level=15,
                              title="Error when sending message (x3)",
                              message=f"See bot console for exception ({e})",
                              where=ctx)

                return False
            else:
                return await self.send_message(ctx=ctx,
                                               from_=from_,
                                               where=where,
                                               message=original_message,
                                               embed=embed,
                                               can_pm=can_pm,
                                               force_pm=force_pm,
                                               mention=mention,
                                               try_=try_ + 1,
                                               return_message=return_message)
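# The 1900-character branch in send_message splits long messages on newlines and re-opens
# code blocks across chunks. A stand-alone sketch of that idea (the helper name and the
# 1800-character budget mirror the code above; it is illustrative, not the bot's actual API):
def split_for_discord(message, limit=1800):
    chunks, current = [], ""
    for line in message.split("\n"):
        line += "\n"
        if len(current) + len(line) <= limit:
            current += line
        else:
            if "```" in current:
                # Close the code block in this chunk and re-open it in the next one.
                current += "```"
                line = "```" + line
            chunks.append(current)
            current = line
    if current:
        chunks.append(current)
    return chunks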
Example #30
0
def get_ticks(market, period):
    chart_data = {}
    # Fetch the pre-built candle data
    res = requests.get(
        "https://bittrex.com/Api/v2.0/pub/market/GetTicks?marketName=" +
        market + "&tickInterval=" + period,
        verify=Config.SECURE).json()
    if not res['success']:
        market_log = log.get_logger(market)
        market_log = logging.LoggerAdapter(market_log,
                                           extra={'log_name': market})
        market_log.warning(str(res))
        if res['message'] == 'INVALID_MARKET':
            market_log.warning("""
                   *******************************************************
                   * INVALID PAIR {pair}                                  *
                   *******************************************************
                """.format(pair=market))
        return []

    for item in res['result']:
        dt_obj = datetime.strptime(item['T'], '%Y-%m-%dT%H:%M:%S')
        ts = int(time.mktime(dt_obj.timetuple()))
        if ts not in chart_data:
            chart_data[ts] = {
                'open': float(item['O']),
                'close': float(item['C']),
                'high': float(item['H']),
                'low': float(item['L'])
            }

    # Fill in what's still missing from the recent trade history
    res = requests.get(
        "https://bittrex.com/api/v1.1/public/getmarkethistory?market=" +
        market,
        verify=Config.SECURE).json()

    for trade in reversed(res['result']):
        try:
            dt_obj = datetime.strptime(trade['TimeStamp'],
                                       '%Y-%m-%dT%H:%M:%S.%f')
        except ValueError:
            dt_obj = datetime.strptime(trade['TimeStamp'], '%Y-%m-%dT%H:%M:%S')
        ts = int((time.mktime(dt_obj.timetuple()) /
                  1800)) * 1800  # round down to 30 minutes
        if ts not in chart_data:
            chart_data[ts] = {'open': 0, 'close': 0, 'high': 0, 'low': 0}

        chart_data[ts]['close'] = float(trade['Price'])

        if not chart_data[ts]['open']:
            chart_data[ts]['open'] = float(trade['Price'])

        if not chart_data[ts]['high'] or chart_data[ts]['high'] < float(
                trade['Price']):
            chart_data[ts]['high'] = float(trade['Price'])

        if not chart_data[ts]['low'] or chart_data[ts]['low'] > float(
                trade['Price']):
            chart_data[ts]['low'] = float(trade['Price'])

    return chart_data
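# A compact, self-contained sketch of the candle-building step used above: bucket each trade
# into a 30-minute slot and keep open/high/low/close per bucket (the trade tuples below are
# made up for illustration):
def build_candles(trades, bucket_seconds=1800):
    candles = {}
    for ts, price in trades:  # trades are expected in chronological order
        bucket = int(ts // bucket_seconds) * bucket_seconds
        candle = candles.setdefault(bucket, {"open": price, "high": price,
                                             "low": price, "close": price})
        candle["close"] = price
        candle["high"] = max(candle["high"], price)
        candle["low"] = min(candle["low"], price)
    return candles

print(build_candles([(0, 10.0), (60, 12.5), (1900, 9.0)]))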