Example #1
    def run(self, name, *args, **kwargs):
        """
            Runs an inline script hook.

            Returns:
                The return value of the method.
                None, if the script does not provide the method.

            Raises:
                ScriptException if there was an exception.
        """
        if self.ns is None:
            raise ScriptException("Script not loaded.")
        f = self.ns.get(name)
        if f:
            try:
                return f(self.ctx, *args, **kwargs)
            except Exception:
                six.reraise(
                    ScriptException,
                    ScriptException.from_exception_context(),
                    sys.exc_info()[2]
                )
        else:
            return None
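
A minimal, self-contained sketch of the wrap-and-reraise pattern used by run(); ScriptError and hook below are hypothetical stand-ins for ScriptException and an inline hook, not part of the original code:

import sys

import six

class ScriptError(Exception):
    """Hypothetical stand-in for ScriptException."""

def hook(ctx):
    return 1 / 0  # simulate a failing inline hook

try:
    hook(None)
except Exception:
    try:
        six.reraise(ScriptError, ScriptError("hook failed"), sys.exc_info()[2])
    except ScriptError as e:
        # The traceback still points at the ZeroDivisionError inside hook().
        print(e)
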
Example #2
    def append_conf(self, **__):
        """Modify given ``conf.py`` file from a whitelisted user's project."""
        try:
            self.version.get_conf_py_path()
        except ProjectImportError:
            master_doc = self.create_index(extension='rst')
            self._write_config(master_doc=master_doc)

        try:
            outfile_path = self.project.conf_file(self.version.slug)
            outfile = codecs.open(outfile_path, encoding='utf-8', mode='a')
        except (ProjectImportError, IOError):
            trace = sys.exc_info()[2]
            six.reraise(ProjectImportError,
                        ProjectImportError('Conf file not found'), trace)

        # Append config to project conf file
        tmpl = template_loader.get_template('doc_builder/conf.py.tmpl')
        rendered = tmpl.render(self.get_config_params())

        with outfile:
            outfile.write("\n")
            outfile.write(rendered)

        # Print the contents of conf.py in order to make the rendered
        # configfile visible in the build logs
        self.run(
            'cat', os.path.relpath(outfile_path,
                                   self.project.checkout_path(self.version.slug)),
            cwd=self.project.checkout_path(self.version.slug),
        )
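
For reference, six.reraise expects the exception class first, then the instance (or None, in which case the class is called with no arguments), then the traceback to attach. A small self-contained sketch of that calling convention:

import sys

import six

try:
    open("/no/such/conf.py")
except IOError:
    trace = sys.exc_info()[2]
    try:
        # Swap the original IOError for a domain-level error, but keep the
        # traceback pointing at the failing open() call.
        six.reraise(ValueError, ValueError("Conf file not found"), trace)
    except ValueError as exc:
        print(exc)
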
Example #3
    def __start_keystone_session(
            self, retries=3, ca_cert=None, insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()
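
Stripped of the keystone specifics, the retry-then-reraise idiom above reduces to a sketch like this (retry and attempts are illustrative names):

import sys

import six

def retry(func, attempts=3):
    exc_info = (None, None, None)
    for _ in range(attempts):
        try:
            return func()
        except Exception:
            exc_info = sys.exc_info()
    # All attempts failed: re-raise the last exception, traceback intact.
    six.reraise(*exc_info)

print(retry(lambda: "ok"))  # succeeds on the first attempt
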
Example #4
def reraise_as(new_exception_or_type):
    """
    Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py
    >>> try:
    >>>     do_something_crazy()
    >>> except Exception:
    >>>     reraise_as(UnhandledException)
    """
    __traceback_hide__ = True  # NOQA

    e_type, e_value, e_traceback = sys.exc_info()

    if inspect.isclass(new_exception_or_type):
        new_type = new_exception_or_type
        new_exception = new_exception_or_type()
    else:
        new_type = type(new_exception_or_type)
        new_exception = new_exception_or_type

    new_exception.__cause__ = e_value

    try:
        six.reraise(new_type, new_exception, e_traceback)
    finally:
        del e_traceback
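
A usage sketch for reraise_as, assuming the function above and its imports (sys, inspect, six) are in scope; UnhandledException is a hypothetical example type:

class UnhandledException(Exception):
    pass

try:
    try:
        {}["missing"]
    except Exception:
        reraise_as(UnhandledException)
except UnhandledException as exc:
    # The original KeyError is preserved as the cause.
    print(repr(exc.__cause__))
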
Example #5
    def start_response(status, headers, exc_info=None):
        if task.wroteResponseHeader() and not exc_info:
            raise AssertionError("start_response called a second time "
                                 "without providing exc_info.")
        if exc_info:
            try:
                if task.wroteResponseHeader():
                    # higher levels will catch and handle raised exception:
                    # 1. "service" method in httptask.py
                    # 2. "service" method in severchannelbase.py
                    # 3. "handlerThread" method in taskthreads.py
                    six.reraise(*exc_info)
                else:
                    # As per WSGI spec existing headers must be cleared
                    task.accumulated_headers = None
                    task.response_headers = {}
            finally:
                exc_info = None
        # Prepare the headers for output
        status, reason = re.match('([0-9]*) (.*)', status).groups()
        task.setResponseStatus(status, reason)
        task.appendResponseHeaders(['%s: %s' % i for i in headers])

        # Return the write method used to write the response data.
        return fakeWrite
Example #6
def sync(loop, func, *args, **kwargs):
    """ Run coroutine in loop running in separate thread """
    if not loop._running:
        try:
            return loop.run_sync(lambda: func(*args, **kwargs))
        except RuntimeError:  # loop already running
            pass

    from threading import Event
    e = Event()
    result = [None]
    error = [False]
    traceback = [False]

    @gen.coroutine
    def f():
        try:
            result[0] = yield gen.maybe_future(func(*args, **kwargs))
        except Exception as exc:
            logger.exception(exc)
            result[0] = exc
            error[0] = exc
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback[0] = exc_traceback
        finally:
            e.set()

    a = loop.add_callback(f)
    while not e.is_set():
        e.wait(1000000)
    if error[0]:
        six.reraise(type(error[0]), error[0], traceback[0])
    else:
        return result[0]
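
The error transport inside sync() boils down to capturing sys.exc_info() in the worker thread and re-raising it in the caller; a self-contained sketch of just that part:

import sys
import threading

import six

result, error = [None], [None]

def worker():
    try:
        result[0] = 1 / 0  # simulate a failing coroutine body
    except Exception:
        error[0] = sys.exc_info()

t = threading.Thread(target=worker)
t.start()
t.join()

try:
    if error[0]:
        six.reraise(*error[0])
except ZeroDivisionError as exc:
    print("re-raised in the calling thread:", exc)
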
Example #7
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        try:
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)
            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except webob.exc.WSGIHTTPException as e:
            exc_info = sys.exc_info()
            translated = translate_exception(request, e)
            six.reraise(type(translated), translated, exc_info[2])

        try:
            response = webob.Response(request=request)
            self.dispatch(self.serializer, action, response, action_result)
            return response
        except webob.exc.WSGIHTTPException as e:
            return translate_exception(request, e)
        except webob.exc.HTTPException as e:
            return e
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result
Example #8
 def ignore_conflict_and_not_found(self, ex):
     """Raises the exception unless it is a conflict or not-found."""
     if self.is_conflict(ex) or self.is_not_found(ex):
         return
     else:
         exc_info = sys.exc_info()
         six.reraise(*exc_info)
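
Inside an active except block, six.reraise(*sys.exc_info()) behaves like a bare raise and keeps the original traceback. A short sketch of the selective-swallow pattern, with ignore_key_errors as a hypothetical analogue of the method above:

import sys

import six

def ignore_key_errors(ex):
    if isinstance(ex, KeyError):
        return  # swallow, like the conflict/not-found case above
    six.reraise(*sys.exc_info())

try:
    try:
        raise ValueError("boom")
    except Exception as ex:
        ignore_key_errors(ex)
except ValueError as exc:
    print("propagated:", exc)
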
Example #9
 def setUp(self):
     self._tap0 = Tap()
     self._tap1 = Tap()
     self._tap2 = Tap()
     self._bridge0 = Bridge('src-')
     self._bridge1 = Bridge('target-')
     self._bridge2 = Bridge('target2-')
     self._devices = [self._tap0, self._tap1, self._tap2,
                      self._bridge0, self._bridge1, self._bridge2]
      # If setUp raises, tearDown is not called, so we should either succeed,
      # or fail without leaving junk around.
     cleanup = []
     try:
         for iface in self._devices:
             iface.addDevice()
             cleanup.append(iface)
         self._bridge0.addIf(self._tap0.devName)
         self._bridge1.addIf(self._tap1.devName)
         self._bridge2.addIf(self._tap2.devName)
     except:
         t, v, tb = sys.exc_info()
         for iface in cleanup:
             try:
                 iface.delDevice()
             except Exception:
                 self.log.exception("Error removing device %s" % iface)
         six.reraise(t, v, tb)
Example #10
 def setUpClass(cls):
     # It should never be overridden by descendants
     if hasattr(super(BaseTestCase, cls), 'setUpClass'):
         super(BaseTestCase, cls).setUpClass()
     cls.setUpClassCalled = True
     # Stack of (name, callable) to be invoked in reverse order at teardown
     cls.teardowns = []
     # All the configuration checks that may generate a skip
     cls.skip_checks()
     try:
         # Allocation of all required credentials and client managers
         cls.teardowns.append(('credentials', cls.clear_isolated_creds))
         cls.setup_credentials()
         # Shortcuts to clients
         cls.setup_clients()
         # Additional class-wide test resources
         cls.teardowns.append(('resources', cls.resource_cleanup))
         cls.resource_setup()
     except Exception:
         etype, value, trace = sys.exc_info()
         LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                  etype, cls.__name__))
         cls.tearDownClass()
         try:
             six.reraise(etype, value, trace)
         finally:
             del trace  # to avoid circular refs
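
The del trace in the finally block breaks the reference cycle between the current frame and the traceback object; a distilled sketch of the idiom:

import sys

import six

try:
    raise RuntimeError("setUpClass failed")
except Exception:
    etype, value, trace = sys.exc_info()
    try:
        six.reraise(etype, value, trace)
    except RuntimeError:
        pass  # whatever handling would happen upstream
    finally:
        del trace  # avoid a frame <-> traceback reference cycle
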
Example #11
def import_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    
    try:
        from django.utils.module_loading import import_string
        return import_string(dotted_path)
    except ImportError:
        pass
        
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        msg = "%s doesn't look like a module path" % dotted_path
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])

    module = import_module(module_path)

    try:
        return getattr(module, class_name)
    except AttributeError:
        msg = 'Module "%s" does not define a "%s" attribute/class' % (
            dotted_path, class_name)
        six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
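
A usage sketch, assuming the import_string fallback above is in scope along with its imports (sys, six, and importlib.import_module):

cls = import_string("collections.OrderedDict")
print(cls is __import__("collections").OrderedDict)  # True

try:
    import_string("collections.NoSuchName")
except ImportError as exc:
    print(exc)  # reports the missing attribute; traceback is preserved
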
Example #12
  def input_elements(self, instruction_id, expected_targets):
    """
    Generator to retrieve elements for an instruction_id
    input_elements should be called only once for an instruction_id

    Args:
      instruction_id(str): instruction_id for which data is read
      expected_targets(collection): expected targets
    """
    received = self._receiving_queue(instruction_id)
    done_targets = []
    try:
      while len(done_targets) < len(expected_targets):
        try:
          data = received.get(timeout=1)
        except queue.Empty:
          if self._exc_info:
            t, v, tb = self._exc_info
            six.reraise(t, v, tb)
        else:
          if not data.data and data.target in expected_targets:
            done_targets.append(data.target)
          else:
            assert data.target not in done_targets
            yield data
    finally:
      # Instruction ids are not reusable, so clean up the queue once we are
      # done with an instruction_id.
      self._clean_receiving_queue(instruction_id)
Example #13
    def write(self, stream):
        # now, pack everything in
        crc_fields = []
        for name, field in self.ordered_fields:
            try:
                if isinstance(field, CRCField):
                    crc_offset = stream.tell()
                    field.pack(stream)
                    crc_fields.append((field, crc_offset))
                else:
                    field.pack(stream)
            except SuitcaseException:
                raise  # just reraise the same exception object
            except Exception:
                # keep the original traceback information, see
                # http://stackoverflow.com/questions/3847503/wrapping-exceptions-in-python
                exc_value = SuitcasePackException("Unexpected exception during pack of %r" % name)
                six.reraise(type(exc_value), exc_value, sys.exc_info()[2])

        # if there is a crc value, seek back to the field and
        # pack it with the right value
        if len(crc_fields) > 0:
            data = stream.getvalue()
            for field, offset in crc_fields:
                stream.seek(offset)
                checksum_data = self.crc_field.packed_checksum(data)
                stream.write(checksum_data)
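
The except chain above lets domain exceptions pass through unchanged and wraps everything else while keeping the traceback; a self-contained sketch with PackError as a hypothetical stand-in for SuitcasePackException:

import sys

import six

class PackError(Exception):
    """Hypothetical stand-in for SuitcasePackException."""

def pack_field(value):
    try:
        return value.to_bytes(1, "big")
    except PackError:
        raise  # domain errors propagate unchanged
    except Exception:
        exc = PackError("Unexpected exception during pack of %r" % value)
        six.reraise(type(exc), exc, sys.exc_info()[2])

try:
    pack_field("not an int")
except PackError as exc:
    print(exc)
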
Example #14
def MultiProcFailCheck():
    """ Wrap this around code that you want to globally fail if it fails
    on any MPI process in MPI_WORLD.  If not running under MPI, don't
    handle any exceptions.
    """
    if MPI is None:
        yield
    else:
        try:
            yield
        except:
            exc_type, exc_val, exc_tb = sys.exc_info()
            if exc_val is not None:
                fail = True
            else:
                fail = False

            fails = MPI.COMM_WORLD.allgather(fail)

            if fail or not any(fails):
                six.reraise(exc_type, exc_val, exc_tb)
            else:
                for i, f in enumerate(fails):
                    if f:
                        raise RuntimeError("a test failed in (at least) rank %d" % i)
Example #15
    def xpath(self, query):
        """
        Find nodes matching the xpath ``query`` and return the result as a
        :class:`SelectorList` instance with all elements flattened. List
        elements implement :class:`Selector` interface too.

        ``query`` is a string containing the XPATH query to apply.
        """
        try:
            xpathev = self.root.xpath
        except AttributeError:
            return self.selectorlist_cls([])

        try:
            result = xpathev(query, namespaces=self.namespaces,
                             smart_strings=self._lxml_smart_strings)
        except etree.XPathError as exc:
            msg = u"XPath error: %s in %s" % (exc, query)
            msg = msg if six.PY3 else msg.encode('unicode_escape')
            six.reraise(ValueError, ValueError(msg), sys.exc_info()[2])

        if type(result) is not list:
            result = [result]

        result = [self.__class__(root=x, _expr=query,
                                 namespaces=self.namespaces,
                                 type=self.type)
                  for x in result]
        return self.selectorlist_cls(result)
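
The translation of etree.XPathError into ValueError keeps the traceback of the original failure; a self-contained sketch, with XPathError as a hypothetical stand-in:

import sys

import six

class XPathError(Exception):
    """Hypothetical stand-in for etree.XPathError."""

def evaluate(query):
    raise XPathError("Invalid expression")

query = "//*[bad("
try:
    evaluate(query)
except XPathError as exc:
    msg = u"XPath error: %s in %s" % (exc, query)
    try:
        six.reraise(ValueError, ValueError(msg), sys.exc_info()[2])
    except ValueError as err:
        print(err)
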
Example #16
def cleanup_action(cleanup_func):
    """
    Context manager to carry out a given
    cleanup action after carrying out a set
    of tasks, or when an exception occurs.
    If any errors occur during the cleanup
    action, those are ignored, and the original
    traceback is preserved.

    :param cleanup_func: This function is called
    if an exception occurs or at the end of the
    context block. Any exceptions raised by
    cleanup_func are ignored.
    Usage:
        with cleanup_action(lambda: print("Oops!")):
            do_something()
    """
    try:
        yield
    except Exception:
        ex_class, ex_val, ex_traceback = sys.exc_info()
        try:
            cleanup_func()
        except Exception as e:
            print("Error during exception cleanup: {0}".format(e))
        reraise(ex_class, ex_val, ex_traceback)
    cleanup_func()
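
Like Example #14, cleanup_action is a generator and is presumably decorated with contextlib.contextmanager in its original module, with reraise being six.reraise. A usage sketch under those assumptions:

from contextlib import contextmanager

managed = contextmanager(cleanup_action)  # skip if already decorated

try:
    with managed(lambda: print("cleaning up")):
        raise RuntimeError("task failed")
except RuntimeError as exc:
    print("original exception preserved:", exc)
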
Example #17
    def _check_failure_status(self):
        """ Check the status of command failures. Raise exceptions as necessary

        The failure status property is used by the various asynchronous
        command execution threads which interface with the
        remote browser manager processes. If a failure status is found, the
        appropriate steps are taken to gracefully close the infrastructure
        """
        self.logger.debug("Checking command failure status indicator...")
        if self.failure_status:
            self.logger.debug(
                "TaskManager failure status set, halting command execution.")
            self._cleanup_before_fail()
            if self.failure_status['ErrorType'] == 'ExceedCommandFailureLimit':
                raise CommandExecutionError(
                    "TaskManager exceeded maximum consecutive command "
                    "execution failures.",
                    self.failure_status['CommandSequence']
                )
            elif (self.failure_status['ErrorType'] == ("ExceedLaunch"
                                                       "FailureLimit")):
                raise CommandExecutionError(
                    "TaskManager failed to launch browser within allowable "
                    "failure limit.", self.failure_status['CommandSequence']
                )
            if self.failure_status['ErrorType'] == 'CriticalChildException':
                reraise(*pickle.loads(self.failure_status['Exception']))
Example #18
def grin_main(argv=None):
    try:
        if argv is None:
            # Look at the GRIN_ARGS environment variable for more arguments.
            env_args = shlex.split(os.getenv("GRIN_ARGS", ""))
            argv = [sys.argv[0]] + env_args + sys.argv[1:]
        parser = get_grin_arg_parser()
        args = parser.parse_args(argv[1:])
        if args.context is not None:
            args.before_context = args.context
            args.after_context = args.context
        args.use_color = args.force_color or (
            not args.no_color and sys.stdout.isatty() and (os.environ.get("TERM") != "dumb")
        )

        regex = get_regex(args)
        g = GrepText(regex, args)
        openers = dict(text=open, gzip=gzip.open)
        for filename, kind in get_filenames(args):
            report = g.grep_a_file(filename, opener=openers[kind])
            sys.stdout.write(report)
    except KeyboardInterrupt:
        raise SystemExit(0)
    except IOError as e:
        if "Broken pipe" in str(e):
            # The user is probably piping to a pager like less(1) and has
            # exited it. Just exit.
            raise SystemExit(0)
        reraise(IOError, e)
Example #19
    def connect(self):
        if self._mount.isMounted():
            return

        self.validate()

        fileUtils.createdir(self._getLocalPath())

        try:
            self._mount.mount(self.options, self._vfsType, cgroup=self.CGROUP)
        except MountError:
            t, v, tb = sys.exc_info()
            try:
                os.rmdir(self._getLocalPath())
            except OSError as e:
                self.log.warn("Error removing mountpoint directory %r: %s",
                              self._getLocalPath(), e)
            six.reraise(t, v, tb)
        else:
            try:
                fileSD.validateDirAccess(
                    self.getMountObj().getRecord().fs_file)
            except se.StorageServerAccessPermissionError:
                t, v, tb = sys.exc_info()
                try:
                    self.disconnect()
                except OSError:
                    self.log.exception("Error disconnecting")
                six.reraise(t, v, tb)
Example #20
    def _run_request(self, url, method, headers, body):
        """Run the http request and decode output.

        The call to make the request will catch a WSGIAppError from
        wsgi_intercept so that the real traceback from a catastrophic
        error in the intercepted app can be examined.
        """

        try:
            response, content = self.http.request(
                url,
                method=method,
                headers=headers,
                body=body
            )
        except wsgi_intercept.WSGIAppError as exc:
            # Extract and re-raise the wrapped exception.
            six.reraise(exc.exception_type, exc.exception_value,
                        exc.traceback)

        # Set headers and location attributes for follow on requests
        self.response = response
        if 'location' in response:
            self.location = response['location']

        # Decode and store response
        decoded_output = utils.decode_content(response, content)
        if (decoded_output and
                'application/json' in response.get('content-type', '')):
            self.json_data = json.loads(decoded_output)
        self.output = decoded_output
Example #21
    def _save(self, name, content):
        # Taken from django.core.files.storage.FileSystemStorage._save
        # Need to allow overwrite
        full_path = self.path(name)
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                if self.directory_permissions_mode is not None:
                    # os.makedirs applies the global umask, so we reset it,
                    # for consistency with file_permissions_mode behavior.
                    old_umask = os.umask(0)
                    try:
                        os.makedirs(directory, self.directory_permissions_mode)
                    finally:
                        os.umask(old_umask)
                else:
                    os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    six.reraise(*sys.exc_info())

        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        with open(full_path, 'w') as f:
            for chunk in content.chunks():
                f.write(chunk)

        if self.file_permissions_mode is not None:
            os.chmod(full_path, self.file_permissions_mode)

        return name
Example #22
    def value(self):
        """Get parameter value.

        If this cached value is None and this serialized value is not None,
        calculate the new value from the serialized one.

        :return: parameter value.
        :raises: TypeError if serialized value is not an instance of self
            ptype. ParserError if parsing step raised an error.
        """

        result = self._value

        if result is None and self._svalue is not None:

            try:
                result = self._value = self.resolve()

            except Exception:
                reraise(
                    Parameter.Error,
                    Parameter.Error('Call the method "resolve" first.'),
                    sys.exc_info()[2]  # keep the original traceback
                )

        return result
Example #23
def run_with_timeout_and_stack(request, timeout):
    '''
    Interrupts evaluation after a given time period and provides a suitable
    stack environment.
    '''

    # only use set_thread_stack_size if max recursion depth was changed via the environment variable
    # MATHICS_MAX_RECURSION_DEPTH. if it is set, we always use a thread, even if timeout is None, in
    # order to be able to set the thread stack size.

    if MAX_RECURSION_DEPTH > settings.DEFAULT_MAX_RECURSION_DEPTH:
        set_thread_stack_size(python_stack_size(MAX_RECURSION_DEPTH))
    elif timeout is None:
        return request()

    queue = Queue(maxsize=1)   # stores the result or exception
    thread = Thread(target=_thread_target, args=(request, queue))
    thread.start()

    thread.join(timeout)
    if thread.is_alive():
        raise TimeoutInterrupt()

    success, result = queue.get()
    if success:
        return result
    else:
        six.reraise(*result)
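
The helper _thread_target is not shown in this excerpt. A plausible reconstruction, producing the (success, result) tuples the caller unpacks, where the failure branch carries sys.exc_info() for six.reraise:

import sys

def _thread_target(request, queue):
    # Hypothetical sketch; the real helper may differ.
    try:
        result = request()
    except BaseException:
        queue.put((False, sys.exc_info()))
    else:
        queue.put((True, result))
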
Example #24
    def add_paths(self, paths=None):
        """
        Adds the paths defined in the specification as endpoints

        :type paths: list
        """
        paths = paths or self.specification.get('paths', dict())
        for path, methods in paths.items():
            logger.debug('Adding %s%s...', self.base_url, path)

            # search for parameters definitions in the path level
            # http://swagger.io/specification/#pathItemObject
            path_parameters = methods.get('parameters', [])

            # TODO Error handling
            for method, endpoint in methods.items():
                if method == 'parameters':
                    continue
                try:
                    self.add_operation(method, path, endpoint, path_parameters)
                except Exception:  # pylint: disable= W0703
                    url = '{base_url}{path}'.format(base_url=self.base_url,
                                                    path=path)
                    error_msg = 'Failed to add operation for {method} {url}'.format(
                        method=method.upper(),
                        url=url)
                    if self.debug:
                        logger.exception(error_msg)
                    else:
                        logger.error(error_msg)
                        six.reraise(*sys.exc_info())
Example #25
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.msg_fmt % kwargs

            except Exception:
                exc_info = sys.exc_info()
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))  # noqa

                if CONF.fatal_exception_format_errors:
                    six.reraise(*exc_info)
                else:
                    # at least get the core message out if something happened
                    message = self.msg_fmt

        self.message = message
        super(JacketException, self).__init__(message)
Example #26
    def set_data_for_a_field(self, model_class, __instance, __field, locators, persist_dependencies=True, **kwargs):
        if __field.name in kwargs:
            config = kwargs[__field.name]
            try:
                data = self._process_field_with_customized_fixture(__instance, __field, config, persist_dependencies)
            except PendingField:
                return # ignore this field for a while.
            except Exception as e:
                six.reraise(InvalidConfigurationError, InvalidConfigurationError(get_unique_field_name(__field), e), sys.exc_info()[2])
        else:
            data = self._process_field_with_default_fixture(__field, model_class, persist_dependencies, locators)

        if is_file_field(__field) and data:
            django_file = data
            if isinstance(django_file, File):
                setattr(__instance, __field.name, data.name) # set the attribute
                if django_file.file.mode != 'rb':
                    django_file.file.close() # this file may be open in another mode, for example, in a+b
                    opened_file = open(django_file.file.name, 'rb') # to save the file it must be open in rb mode
                    django_file.file = opened_file # we update the reference to the rb mode opened file
                getattr(__instance, __field.name).save(django_file.name, django_file) # save the file into the file storage system
                django_file.close()
            else: # string (saving just a name in the file field, without saving the file to the storage system)
                setattr(__instance, __field.name, data) # Model.field = data
        else:
            if self.debug_mode:
                LOGGER.debug('%s.%s = %s' % (get_unique_model_name(model_class), __field.name, data))
            setattr(__instance, __field.name, data) # Model.field = data
        self.fields_processed.append(__field.name)
Example #27
def load_from_dir(source_directory, filename='manifest.yaml'):
    if not os.path.isdir(source_directory) or not os.path.exists(
            source_directory):
        raise e.PackageLoadError('Invalid package directory')
    full_path = os.path.join(source_directory, filename)
    if not os.path.isfile(full_path):
        raise e.PackageLoadError('Unable to find package manifest')

    try:
        with open(full_path) as stream:
            content = yaml.safe_load(stream)
    except Exception as ex:
        trace = sys.exc_info()[2]
        six.reraise(
            e.PackageLoadError,
            e.PackageLoadError("Unable to load due to '{0}'".format(ex)),
            trace)
    else:
        format_spec = str(content.get('Format') or 'MuranoPL/1.0')
        if format_spec[0].isdigit():
            format_spec = 'MuranoPL/' + format_spec
        plugin_loader = get_plugin_loader()
        handler = plugin_loader.get_package_handler(format_spec)
        if handler is None:
            raise e.PackageFormatError(
                'Unsupported format {0}'.format(format_spec))
        return handler(source_directory, content)
Example #28
    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                try:
                    future = self.queue.get(block=True)
                    inputs = future.get(timeout=30)
                    self.queue.task_done()
                except mp.TimeoutError:
                    idx = future.idx
                    warnings.warn(
                        'The input {} could not be retrieved.'
                        ' It could be because a worker has died.'.format(idx),
                        UserWarning)
                    inputs = self.sequence[idx]
                if inputs is not None:
                    yield inputs
        except Exception:
            self.stop()
            six.reraise(*sys.exc_info())
Example #29
        def wrapped(*args, **kwargs):
            sleep_time = self._sleep_factor
            exc_info = None

            for attempt in range(self._count):
                if attempt != 0:
                    LOG.warning(_LW('Retrying failed call to %(func)s, '
                                    'attempt %(attempt)i.'),
                                {'func': func_name,
                                 'attempt': attempt})
                try:
                    return fun(*args, **kwargs)
                except self._exceptions:
                    exc_info = sys.exc_info()

                if attempt != self._count - 1:
                    if self._sleep_mechanism == self.SLEEP_NONE:
                        continue
                    elif self._sleep_mechanism == self.SLEEP_INCREMENT:
                        time.sleep(sleep_time)
                        sleep_time += self._sleep_factor
                    elif self._sleep_mechanism == self.SLEEP_DOUBLE:
                        time.sleep(sleep_time)
                        sleep_time *= 2
                    else:
                        raise ValueError('Unknown sleep mechanism: %r'
                                         % self._sleep_mechanism)

            six.reraise(exc_info[0], exc_info[1], exc_info[2])
Example #30
    def _establish_tls_with_client(self):
        self.log("Establish TLS with client", "debug")
        cert, key, chain_file = self._find_cert()

        try:
            self.client_conn.convert_to_ssl(
                cert, key,
                method=self.config.openssl_method_client,
                options=self.config.openssl_options_client,
                cipher_list=self.config.ciphers_client,
                dhparams=self.config.certstore.dhparams,
                chain_file=chain_file,
                alpn_select_callback=self.__alpn_select_callback,
            )
            # Some TLS clients will not fail the handshake,
            # but will immediately throw an "unexpected eof" error on the first read.
            # The reason for this might be difficult to find, so we try to peek here to see if it
            # raises an error.
            self.client_conn.rfile.peek(1)
        except TlsException as e:
            six.reraise(
                ClientHandshakeException,
                ClientHandshakeException(
                    "Cannot establish TLS with client (sni: {sni}): {e}".format(
                        sni=self.client_sni, e=repr(e)
                    ),
                    self.client_sni or repr(self.server_conn.address)
                ),
                sys.exc_info()[2]
            )
Example #31
    def _load(self):
        """
        Load data from the sqlite database file.

        Load the metadata from the sqlite file, populating the
        `format_version`, `parameters`, and `unknowns` attributes of this
        CaseReader.

        The `iterations` table is read to load the keys which identify
        the individual cases/iterations from the recorded file.
        """
        self.driver_cases = DriverCases(self.filename, self.format_version,
                                        self._abs2prom, self._abs2meta,
                                        self._prom2abs, self._var_settings)
        self.driver_derivative_cases = DriverDerivativeCases(
            self.filename, self.format_version, self._abs2prom, self._abs2meta,
            self._prom2abs)
        self.system_cases = SystemCases(self.filename, self.format_version,
                                        self._abs2prom, self._abs2meta,
                                        self._prom2abs)
        self.solver_cases = SolverCases(self.filename, self.format_version,
                                        self._abs2prom, self._abs2meta,
                                        self._prom2abs)
        self.problem_cases = ProblemCases(self.filename, self.format_version,
                                          self._abs2prom, self._abs2meta,
                                          self._prom2abs)

        if self.format_version in range(1, format_version + 1):
            with sqlite3.connect(self.filename) as con:

                # Read in iterations from Drivers, Systems, Problems, and Solvers
                cur = con.cursor()
                cur.execute(
                    "SELECT iteration_coordinate FROM driver_iterations ORDER BY id ASC"
                )
                rows = cur.fetchall()
                self.driver_cases._case_keys = [coord[0] for coord in rows]
                self.driver_cases.num_cases = len(self.driver_cases._case_keys)

                try:
                    cur.execute(
                        "SELECT iteration_coordinate FROM driver_derivatives "
                        "ORDER BY id ASC")
                    rows = cur.fetchall()
                    dcase = self.driver_derivative_cases
                    dcase._case_keys = [coord[0] for coord in rows]
                    dcase.num_cases = len(dcase._case_keys)

                except sqlite3.OperationalError as err:
                    # Cases recorded in version 1 won't have a derivatives table.
                    if self.format_version >= 2:
                        reraise(*sys.exc_info())

                cur.execute(
                    "SELECT iteration_coordinate FROM system_iterations ORDER BY id ASC"
                )
                rows = cur.fetchall()
                self.system_cases._case_keys = [coord[0] for coord in rows]
                self.system_cases.num_cases = len(self.system_cases._case_keys)

                cur.execute(
                    "SELECT iteration_coordinate FROM solver_iterations ORDER BY id ASC"
                )
                rows = cur.fetchall()
                self.solver_cases._case_keys = [coord[0] for coord in rows]
                self.solver_cases.num_cases = len(self.solver_cases._case_keys)

                try:
                    cur.execute(
                        "SELECT case_name FROM problem_cases ORDER BY id ASC")
                    rows = cur.fetchall()
                    self.problem_cases._case_keys = [
                        coord[0] for coord in rows
                    ]
                    self.problem_cases.num_cases = len(
                        self.problem_cases._case_keys)

                except sqlite3.OperationalError as err:
                    # Cases recorded in some early iterations of version 1 won't have a problem
                    # table.
                    if self.format_version >= 2:
                        reraise(*sys.exc_info())

                # Read in metadata for Drivers, Systems, and Solvers
                cur.execute("SELECT model_viewer_data FROM driver_metadata")
                row = cur.fetchone()
                if row is not None:
                    if self.format_version >= 3:
                        self.driver_metadata = json.loads(row[0])
                    elif self.format_version in (1, 2):
                        if PY2:
                            self.driver_metadata = pickle.loads(str(row[0]))
                        if PY3:
                            self.driver_metadata = pickle.loads(row[0])

                cur.execute(
                    "SELECT id, scaling_factors, component_metadata FROM system_metadata"
                )
                for row in cur:
                    id = row[0]
                    self.system_metadata[id] = {}

                    if PY2:
                        self.system_metadata[id][
                            'scaling_factors'] = pickle.loads(str(row[1]))
                        self.system_metadata[id][
                            'component_options'] = pickle.loads(str(row[2]))
                    if PY3:
                        self.system_metadata[id][
                            'scaling_factors'] = pickle.loads(row[1])
                        self.system_metadata[id][
                            'component_options'] = pickle.loads(row[2])

                cur.execute(
                    "SELECT id, solver_options, solver_class FROM solver_metadata"
                )
                for row in cur:
                    id = row[0]
                    if PY2:
                        solver_options = pickle.loads(str(row[1]))
                    if PY3:
                        solver_options = pickle.loads(row[1])
                    solver_class = row[2]
                    self.solver_metadata[id] = {
                        'solver_options': solver_options,
                        'solver_class': solver_class,
                    }
            con.close()
        else:
            raise ValueError('SqliteCaseReader encountered an unhandled '
                             'format version: {0}'.format(self.format_version))
Example #32
 def __exit__(self, exc_type, exc_val, exc_tb):
     self.close()
     if exc_type is not None:
         six.reraise(exc_type, exc_val, exc_tb)
Example #33
 def raise_(self):
     six.reraise(self.type, self.value, self.traceback)
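
This one-liner supports a capture-now, raise-later pattern; a self-contained sketch with a hypothetical Captured holder exposing the same type/value/traceback attributes:

import sys

import six

class Captured(object):
    def __init__(self):
        self.type, self.value, self.traceback = sys.exc_info()

    def raise_(self):
        six.reraise(self.type, self.value, self.traceback)

try:
    raise KeyError("missing")
except KeyError:
    captured = Captured()

try:
    captured.raise_()  # re-raised later, outside the original except block
except KeyError as exc:
    print("deferred re-raise:", exc)
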
Example #34
    def _step(self, action):
        assert self.state.color == self.player_color

        # If already terminal, then don't do anything
        if self.done:
            return self.state.board.encode(), 0., True, {'state': self.state}

        # If resigned, then we're done
        if action == _resign_action(self.board_size):
            self.done = True
            return self.state.board.encode(), -1., True, {'state': self.state}

        # Play
        #self.prev_state_stack.append( self.state )
        prev_state = self.state
        try:
            self.state, logged_move = self.state.act(action)
            self.moves_log.append(logged_move)
        except pachi_py.IllegalMove:
            if self.illegal_move_mode == 'raise':
                six.reraise(*sys.exc_info())
            elif self.illegal_move_mode == 'lose':
                # Automatic loss on illegal move
                self.done = True
                return self.state.board.encode(), -1., True, {
                    'state': self.state
                }
            else:
                raise error.Error('Unsupported illegal move action: {}'.format(\
                                    self.illegal_move_mode))

        # Opponent play
        if not self.state.board.is_terminal:
            #            self.state, opponent_resigned = self._exec_opponent_play(self.state, \
            #                            self.prev_state_stack[-1], action)
            self.state, opponent_resigned = self._exec_opponent_play(self.state, \
                            prev_state, action)
            # After opponent play, we should be back to the original color
            assert self.state.color == self.player_color

            # If the opponent resigns, then the agent wins
            if opponent_resigned:
                self.done = True
                return self.state.board.encode(), 1., True, {
                    'state': self.state
                }

        # Reward: if nonterminal, then the reward is 0
        if not self.state.board.is_terminal:
            self.done = False
            return self.state.board.encode(), 0., False, {'state': self.state}

        # We're in a terminal state. Reward is 1 if won, -1 if lost
        assert self.state.board.is_terminal
        self.done = True
        white_wins = self.state.board.official_score > 0
        black_wins = self.state.board.official_score < 0
        player_wins = (white_wins and self.player_color == pachi_py.WHITE) or \
                (black_wins and self.player_color == pachi_py.BLACK)
        reward = 1. if player_wins else -1. if (
            white_wins or black_wins) else 0.
        return self.state.board.encode(), reward, True, {'state': self.state}
Example #35
    def run(self):
        """
        Execute pyOptsparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]):
            with RecordingDebugging(optimizer, self.iter_count, self) as rec:
                # Initial Run
                model._solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if coloring_mod._use_sparsity:
            if self.options['dynamic_simul_derivs']:
                coloring_mod.dynamic_simul_coloring(self, run_model=optimizer not in run_required,
                                                    do_sparsity=True)
            elif self.options['dynamic_derivs_sparsity']:
                coloring_mod.dynamic_sparsity(self)

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in iteritems(con_meta) if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for jacdct in itervalues(_lin_jacs):
                for n, subjac in iteritems(jacdct):
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}

        # Add all equality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError, give it a readable message,
            # but raise with the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            reraise(ImportError, ImportError(msg), sys.exc_info()[2])

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self.options['optimizer'], self.iter_count, self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        return self.fail
Example #36
    def result(self, throw_except=True, internal_storage=None):
        """
        Return the value returned by the call.
        If the call raised an exception, this method will raise the same
        exception. If the future is cancelled before completing, then
        CancelledError will be raised.

        :param throw_except: Reraise exception if call raised. Default true.
        :param internal_storage: Storage handler to poll cloud storage. Default None.
        :return: Result of the call.
        :raises CancelledError: If the job is cancelled before completed.
        :raises TimeoutError: If job is not complete after `timeout` seconds.
        """
        if self._state == ResponseFuture.State.New:
            raise ValueError("task not yet invoked")

        if self._state == ResponseFuture.State.Success:
            return self._return_val

        if self._state == ResponseFuture.State.Futures:
            return self._new_futures

        if self._state == ResponseFuture.State.Error:
            if throw_except:
                reraise(*self._exception)
            else:
                raise FunctionException(self.executor_id, self.job_id,
                                        self.activation_id, self._exception)

        if internal_storage is None:
            internal_storage = InternalStorage(
                storage_config=self.storage_config)

        self.status(throw_except=throw_except,
                    internal_storage=internal_storage)

        if not self.produce_output:
            self._set_state(ResponseFuture.State.Success)

        if self._state == ResponseFuture.State.Success:
            return self._return_val

        if self._state == ResponseFuture.State.Futures:
            return self._new_futures

        call_output_time = time.time()
        call_output = internal_storage.get_call_output(self.executor_id,
                                                       self.job_id,
                                                       self.call_id)
        self.output_query_count += 1

        while call_output is None and self.output_query_count < self.GET_RESULT_MAX_RETRIES:
            time.sleep(self.GET_RESULT_SLEEP_SECS)
            call_output = internal_storage.get_call_output(
                self.executor_id, self.job_id, self.call_id)
            self.output_query_count += 1

        if call_output is None:
            if throw_except:
                raise Exception('Unable to get the output from call {} - '
                                'Activation ID: {}'.format(
                                    self.call_id, self.activation_id))
            else:
                self._set_state(ResponseFuture.State.Error)
                return None

        call_output = pickle.loads(call_output)
        call_output_time_done = time.time()
        self._call_output = call_output

        self._call_metadata[
            'download_output_time'] = call_output_time_done - call_output_time
        self._call_metadata['output_query_count'] = self.output_query_count
        self._call_metadata[
            'download_output_timestamp'] = call_output_time_done

        log_msg = (
            'ExecutorID {} | JobID {} - Got output from call {} - Activation '
            'ID: {}'.format(self.executor_id, self.job_id, self.call_id,
                            self.activation_id))
        logger.info(log_msg)

        function_result = call_output['result']

        if isinstance(function_result, ResponseFuture) or \
           (type(function_result) == list and len(function_result) > 0 and isinstance(function_result[0], ResponseFuture)):
            self._new_futures = [
                function_result
            ] if type(function_result) == ResponseFuture else function_result
            self._set_state(ResponseFuture.State.Futures)
            self._call_metadata['status_done_timestamp'] = self._call_metadata[
                'download_output_timestamp']
            del self._call_metadata['download_output_timestamp']
            return self._new_futures

        else:
            self._return_val = function_result
            self._set_state(ResponseFuture.State.Success)
            return self._return_val
Example #37
    def status(self, throw_except=True, internal_storage=None):
        """
        Return the status returned by the call.
        If the call raised an exception, this method will raise the same
        exception. If the future is cancelled before completing, then
        CancelledError will be raised.

        :param throw_except: Reraise exception if call raised. Default true.
        :param internal_storage: Storage handler to poll cloud storage. Default None.
        :return: Result of the call.
        :raises CancelledError: If the job is cancelled before completed.
        :raises TimeoutError: If job is not complete after `timeout` seconds.
        """
        if self._state == ResponseFuture.State.New:
            raise ValueError("task not yet invoked")

        if self._state in [
                ResponseFuture.State.Ready, ResponseFuture.State.Success
        ]:
            return self._call_status

        if internal_storage is None:
            internal_storage = InternalStorage(self.storage_config)

        if self._call_status is None:
            check_storage_path(internal_storage.get_storage_config(),
                               self.storage_path)
            self._call_status = internal_storage.get_call_status(
                self.executor_id, self.job_id, self.call_id)
            self.status_query_count += 1

            while self._call_status is None:
                time.sleep(self.GET_RESULT_SLEEP_SECS)
                self._call_status = internal_storage.get_call_status(
                    self.executor_id, self.job_id, self.call_id)
                self.status_query_count += 1

        self.activation_id = self._call_status['activation_id']

        if self._call_status['type'] == '__init__':
            self._set_state(ResponseFuture.State.Running)
            return self._call_status

        self._call_metadata['host_submit_time'] = self._call_status[
            'host_submit_time']
        self._call_metadata['status_done_timestamp'] = time.time()
        self._call_metadata['status_query_count'] = self.status_query_count

        total_time = format(
            round(
                self._call_status['end_time'] -
                self._call_status['start_time'], 2), '.2f')

        if self._call_status['exception']:
            # the action handler/jobrunner/function had an exception
            self._set_state(ResponseFuture.State.Error)
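            # exc_info is presumably stored as the repr of the pickled bytes,
            # so eval() recovers the bytes object before unpickling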
            self._exception = pickle.loads(eval(self._call_status['exc_info']))
            msg = None

            if not self._call_status.get('exc_pickle_fail', False):
                exception_args = self._exception[1].args
                if exception_args and exception_args[0] == "WRONGVERSION":
                    msg = "PyWren version mismatch: remote library is version {}, local " \
                          "library is version {}".format(exception_args[2], exception_args[3])

                elif exception_args and exception_args[0] == "OUTATIME":
                    msg = "Process ran out of time and was killed"

                elif exception_args and exception_args[0] == "OUTOFMEMORY":
                    msg = "Process exceeded maximum memory and was killed"
            else:
                fault = Exception(self._exception['exc_value'])
                self._exception = (Exception, fault,
                                   self._exception['exc_traceback'])

            if throw_except:
                reraise(*self._exception)
            raise FunctionException(self.executor_id, self.job_id,
                                    self.activation_id, self._exception, msg)

        log_msg = (
            'ExecutorID {} | JobID {} - Got status from call {} - Activation '
            'ID: {} - Time: {} seconds'.format(self.executor_id, self.job_id,
                                               self.call_id,
                                               self.activation_id,
                                               str(total_time)))
        logger.info(log_msg)
        self._set_state(ResponseFuture.State.Ready)

        if not self._call_status['result']:
            self._set_state(ResponseFuture.State.Success)
            self.produce_output = False

        if 'new_futures' in self._call_status:
            self.result(throw_except=throw_except,
                        internal_storage=internal_storage)

        return self._call_status
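
For context, a minimal sketch of the capture-and-reraise protocol the method above relies on, with hypothetical helper names (`remote_call`, `raise_remote`). Note that traceback objects are not picklable, so only the type/value pair travels between worker and client:

```python
import pickle
import sys

import six


def remote_call():
    # worker side: capture type and value on failure (the traceback object
    # itself cannot be pickled, so it is dropped here)
    try:
        raise ValueError("boom")
    except Exception:
        return {'exception': True,
                'exc_info': pickle.dumps(sys.exc_info()[:2])}


def raise_remote(status, throw_except=True):
    # client side: rebuild the remote exception and optionally re-raise it
    exc_type, exc_value = pickle.loads(status['exc_info'])
    if throw_except:
        six.reraise(exc_type, exc_value, None)
    return exc_type, exc_value
```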
Exemple #38
0
    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"******",
                        "password":"******"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        if auth is None:
            raise exception.ValidationError(attribute='auth',
                                            target='request body')

        if "token" in auth:
            # Try to authenticate using a token
            auth_info = self._authenticate_token(context, auth)
        else:
            # Try external authentication
            try:
                auth_info = self._authenticate_external(context, auth)
            except ExternalAuthNotApplicable:
                # Try local authentication
                auth_info = self._authenticate_local(context, auth)

        user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info
        # Validate that the auth info is valid and nothing is disabled
        try:
            self.identity_api.assert_user_enabled(user_id=user_ref['id'],
                                                  user=user_ref)
            if tenant_ref:
                self.resource_api.assert_project_enabled(
                    project_id=tenant_ref['id'], project=tenant_ref)
        except AssertionError as e:
            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
                        sys.exc_info()[2])
        # NOTE(morganfainberg): Make sure the data is in correct form since it
        # might be consumed external to Keystone and this is a v2.0 controller.
        # The user_ref is encoded into the auth_token_data which is returned as
        # part of the token data. The token provider doesn't care about the
        # format.
        user_ref = self.v3_to_v2_user(user_ref)
        if tenant_ref:
            tenant_ref = self.v3_to_v2_project(tenant_ref)

        auth_token_data = self._get_auth_token_data(user_ref, tenant_ref,
                                                    metadata_ref, expiry,
                                                    audit_id)

        if tenant_ref:
            catalog_ref = self.catalog_api.get_catalog(user_ref['id'],
                                                       tenant_ref['id'])
        else:
            catalog_ref = {}

        auth_token_data['id'] = 'placeholder'
        if bind:
            auth_token_data['bind'] = bind

        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            role_ref = self.role_api.get_role(role_id)
            roles_ref.append(dict(name=role_ref['name']))

        (token_id, token_data) = self.token_provider_api.issue_v2_token(
            auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)

        # NOTE(wanghong): We consume a trust use only when we are using trusts
        # and have successfully issued a token.
        if CONF.trust.enabled and 'trust_id' in auth:
            self.trust_api.consume_use(auth['trust_id'])

        return token_data
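
The `AssertionError` handling above follows a common six idiom: translate a low-level exception into a domain-specific one while keeping the original traceback. A stripped-down sketch (the class and helper here are hypothetical):

```python
import sys

import six


class Unauthorized(Exception):
    pass


def check_enabled(user):
    assert user.get('enabled'), 'user %s is disabled' % user.get('id')


try:
    check_enabled({'id': 'u1', 'enabled': False})
except AssertionError as e:
    # the caller sees Unauthorized, but the traceback still points at
    # the failed assertion inside check_enabled
    six.reraise(Unauthorized, Unauthorized(e), sys.exc_info()[2])
```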
Exemple #39
0
 def _raise(exc):
     if six.PY2 and isinstance(exc, tuple):
         (exc_type, value, traceback) = exc
         six.reraise(exc_type, value, traceback)
     else:
         raise exc
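
A usage sketch for `_raise`: on Python 2 the caller captures the full `exc_info` triple so the traceback survives, while on Python 3 the exception object already carries `__traceback__`.

```python
import sys

import six

try:
    1 / 0
except ZeroDivisionError:
    captured = sys.exc_info() if six.PY2 else sys.exc_info()[1]

_raise(captured)  # re-raises the ZeroDivisionError with its original traceback
```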
Exemple #40
0
def handle(request,
           message=None,
           redirect=None,
           ignore=False,
           escalate=False,
           log_level=None,
           force_log=None):
    """Centralized error handling for Horizon.

    Because Horizon consumes so many different APIs with completely
    different ``Exception`` types, it's necessary to have a centralized
    place for handling exceptions which may be raised.

    Exceptions are roughly divided into 3 types:

    #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
       problems. These result in being logged out and sent to the login screen.
    #. ``NOT_FOUND``: Errors resulting from objects which could not be
       located via the API. These generally result in a user-facing error
       message, but are otherwise returned to the normal code flow. Optionally
       a redirect value may be passed to the error handler so users are
       returned to a different view than the one requested in addition to the
       error message.
    #. RECOVERABLE: Generic API errors which generate a user-facing message
       but drop directly back to the regular code flow.

    All other exceptions bubble the stack as normal unless the ``ignore``
    argument is passed in as ``True``, in which case only unrecognized
    errors are bubbled.

    If the exception is not re-raised, an appropriate wrapper exception
    class indicating the type of exception that was encountered will be
    returned.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    log_method = getattr(LOG, log_level or "exception")
    force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
    force_silence = getattr(exc_value, "silence_logging", False)

    # Because the same exception may travel through this method more than
    # once (if it's re-raised) we may want to treat it differently
    # the second time (e.g. no user messages/logging).
    handled = issubclass(exc_type, HandledException)
    wrap = False

    # Restore our original exception information, but re-wrap it at the end
    if handled:
        exc_type, exc_value, exc_traceback = exc_value.wrapped
        wrap = True

    log_entry = encoding.force_text(exc_value)

    user_message = ""
    # We trust messages from our own exceptions
    if issubclass(exc_type, HorizonException):
        user_message = log_entry
    # If the message has a placeholder for the exception, fill it in
    elif message and "%(exc)s" in message:
        user_message = encoding.force_text(message) % {"exc": log_entry}
    elif message:
        user_message = encoding.force_text(message)

    for exc_handler in HANDLE_EXC_METHODS:
        if issubclass(exc_type, exc_handler['exc']):
            if exc_handler['set_wrap']:
                wrap = True
            handler = exc_handler['handler']
            ret = handler(request, user_message, redirect, ignore,
                          exc_handler.get('escalate',
                                          escalate), handled, force_silence,
                          force_log, log_method, log_entry, log_level)
            if ret:
                return ret  # return to normal code flow

    # If we've gotten here, time to wrap and/or raise our exception.
    if wrap:
        raise HandledException([exc_type, exc_value, exc_traceback])

    # assume exceptions handled in the code that pass in a message are already
    # handled appropriately and treat as recoverable
    if message:
        ret = handle_recoverable(request, user_message, redirect, ignore,
                                 escalate, handled, force_silence, force_log,
                                 log_method, log_entry, log_level)
        if ret:
            return ret

    six.reraise(exc_type, exc_value, exc_traceback)
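
A hypothetical view showing how `handle` is typically invoked (the API call and redirect target are made up): the `%(exc)s` placeholder in the message is filled with the exception text, recognized errors become user-facing messages, and anything unrecognized is re-raised by the `six.reraise` at the end of `handle`.

```python
def instance_list(request):
    try:
        return api.nova.server_list(request)
    except Exception:
        handle(request,
               message="Unable to retrieve instances: %(exc)s",
               redirect="/project/overview/")
```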
Exemple #41
0
  def batch_jacobian(self,
                     target,
                     source,
                     unconnected_gradients=UnconnectedGradients.NONE,
                     parallel_iterations=None,
                     experimental_use_pfor=True):
    """Computes and stacks per-example jacobians.

    See http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant for the
    definition of a Jacobian.  This function is essentially an efficient
    implementation of the following:
    `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.

    Note that compared to `GradientTape.jacobian`, which computes the gradient
    of each output value w.r.t. each input value, this function is useful when
    `target[i,...]` is independent of `source[j,...]` for `j != i`. This
    independence assumption allows more efficient computation than
    `GradientTape.jacobian`: the output, as well as intermediate activations,
    is lower dimensional and avoids a bunch of redundant zeros which would
    otherwise appear in the jacobian computation.

    Example usage:
    ```python
    with tf.GradientTape() as g:
      x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
      g.watch(x)
      y = x * x
    batch_jacobian = g.batch_jacobian(y, x)
    # batch_jacobian is [[[2,  0], [0,  4]], [[6,  0], [0,  8]]]
    ```

    Args:
      target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].
        `target[i,...]` should only depend on `source[i,...]`.
      source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].
      unconnected_gradients: a value which can either hold 'none' or 'zero' and
        alters the value which will be returned if the target and sources are
        unconnected. The possible values and effects are detailed in
        'UnconnectedGradients' and it defaults to 'none'.
      parallel_iterations: A knob to control how many iterations are dispatched
        in parallel. This knob can be used to control the total memory usage.
      experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else
        uses a tf.while_loop.

    Returns:
      A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
      is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked
      per-example jacobians.

    Raises:
      RuntimeError: If called on a non-persistent tape with eager execution
        enabled and without enabling experimental_use_pfor.
      ValueError: If vectorization of jacobian computation fails or if first
        dimension of `target` and `source` do not match.
    """
    target_shape = target.shape
    if target_shape.rank is None:
      dim = tensor_shape.Dimension(None)
    else:
      dim = target_shape.dims[0]
    if not (target_shape.with_rank_at_least(2) and
            source.shape.with_rank_at_least(2) and
            dim.is_compatible_with(source.shape[0])):
      raise ValueError(
          "Need first dimension of target shape (%s) and "
          "source shape (%s) to match." % (target.shape, source.shape))
    if target_shape.is_fully_defined():
      batch_size = int(target_shape[0])
      target_row_size = target_shape.num_elements() // batch_size
    else:
      target_shape = array_ops.shape(target)
      batch_size = target_shape[0]
      target_row_size = array_ops.size(target) // batch_size
    source_shape = array_ops.shape(source)
    # Flatten target to 2-D.
    # Note that we push and pop the tape here and below. This is needed since we
    # need gradients through the enclosed operations.
    self._push_tape()
    with ops.control_dependencies(
        [check_ops.assert_equal(batch_size, source_shape[0])]):
      target = array_ops.reshape(target, [batch_size, target_row_size])
    self._pop_tape()

    def loop_fn(i):
      self._push_tape()
      y = array_ops.gather(target, i, axis=1)
      self._pop_tape()
      return self.gradient(y, source,
                           unconnected_gradients=unconnected_gradients)

    if experimental_use_pfor:
      try:
        output = pfor_ops.pfor(loop_fn, target_row_size,
                               parallel_iterations=parallel_iterations)
      except ValueError as err:
        six.reraise(
            ValueError,
            ValueError(
                str(err) + "\nEncountered an exception while vectorizing the "
                "batch_jacobian computation. Vectorization can be disabled by "
                "setting experimental_use_pfor to False."),
            sys.exc_info()[2])
    else:
      if context.executing_eagerly() and not self._persistent:
        raise RuntimeError(
            "GradientTape must be created with persistent=True"
            " to compute the batch_jacobian with eager execution enabled and "
            " with experimental_use_pfor set to False.")
      output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,
                                 parallel_iterations=parallel_iterations)
    if output is None:
      return None
    output = array_ops.reshape(output,
                               [target_row_size, batch_size, -1])
    output = array_ops.transpose(output, [1, 0, 2])
    new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
    return array_ops.reshape(output, new_shape)
Exemple #42
0
 def join(self, timeout=None):
     threading.Thread.join(self, timeout)
     if self._exc_info:
         six.reraise(self._exc_info[0], self._exc_info[1],
                     self._exc_info[2])
Exemple #43
0
    def func(self, **kwargs):

        # Convert data source, if necessary
        is_user_source = kwargs.get('source', None) is not None
        if is_user_source:
            source = kwargs['source']
            if not isinstance(source, ColumnarDataSource):
                try:
                    # try converting the source to ColumnDataSource
                    source = ColumnDataSource(source)
                except ValueError as err:
                    msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format(
                        curr_type=str(type(source)), err=err)
                    reraise(ValueError, ValueError(msg), sys.exc_info()[2])

                # update kwargs so downstream code uses the converted source
                kwargs['source'] = source

        # Process legend kwargs and remove legend before we get going
        legend_item_label = _get_legend_item_label(kwargs)

        # Need to check if user source is present before _pop_renderer_args
        renderer_kws = _pop_renderer_args(kwargs)
        source = renderer_kws['data_source']

        # Assign global_alpha from alpha if glyph type is an image
        if 'alpha' in kwargs and glyphclass.__name__ in ('Image', 'ImageRGBA',
                                                         'ImageURL'):
            kwargs['global_alpha'] = kwargs['alpha']

        # handle the main glyph, need to process literals
        glyph_ca = _pop_colors_and_alpha(glyphclass, kwargs)
        _process_sequence_literals(glyphclass, kwargs, source, is_user_source)
        _process_sequence_literals(glyphclass, glyph_ca, source,
                                   is_user_source)

        # handle the nonselection glyph, we always set one
        nsglyph_ca = _pop_colors_and_alpha(glyphclass,
                                           kwargs,
                                           prefix='nonselection_',
                                           default_alpha=0.1)

        # handle the selection glyph, if any properties were given
        if any(x.startswith('selection_') for x in kwargs):
            sglyph_ca = _pop_colors_and_alpha(glyphclass,
                                              kwargs,
                                              prefix='selection_')
        else:
            sglyph_ca = None

        # handle the hover glyph, if any properties were given
        if any(x.startswith('hover_') for x in kwargs):
            hglyph_ca = _pop_colors_and_alpha(glyphclass,
                                              kwargs,
                                              prefix='hover_')
        else:
            hglyph_ca = None

        # handle the mute glyph, if any properties were given
        if any(x.startswith('muted_') for x in kwargs):
            mglyph_ca = _pop_colors_and_alpha(glyphclass,
                                              kwargs,
                                              prefix='muted_')
        else:
            mglyph_ca = None

        glyph = _make_glyph(glyphclass, kwargs, glyph_ca)
        nsglyph = _make_glyph(glyphclass, kwargs, nsglyph_ca)
        sglyph = _make_glyph(glyphclass, kwargs, sglyph_ca)
        hglyph = _make_glyph(glyphclass, kwargs, hglyph_ca)
        mglyph = _make_glyph(glyphclass, kwargs, mglyph_ca)

        glyph_renderer = GlyphRenderer(glyph=glyph,
                                       nonselection_glyph=nsglyph,
                                       selection_glyph=sglyph,
                                       hover_glyph=hglyph,
                                       muted_glyph=mglyph,
                                       **renderer_kws)

        if legend_item_label:
            _update_legend(self, legend_item_label, glyph_renderer)

        self.renderers.append(glyph_renderer)

        return glyph_renderer
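
A hypothetical usage of the auto-conversion path above: a pandas DataFrame passed as `source` is converted to a `ColumnDataSource`, while an inconvertible object surfaces the wrapped `ValueError` with the original traceback.

```python
import pandas as pd
from bokeh.plotting import figure

df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
p = figure()
p.circle(x='x', y='y', source=df)  # converted to a ColumnDataSource internally
```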
Exemple #44
0
  def jacobian(self,
               target,
               sources,
               unconnected_gradients=UnconnectedGradients.NONE,
               parallel_iterations=None,
               experimental_use_pfor=True):
    """Computes the jacobian using operations recorded in context of this tape.

    See http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant for the
    definition of a Jacobian.

    Example usage:

    ```python
    with tf.GradientTape() as g:
      x  = tf.constant([1.0, 2.0])
      g.watch(x)
      y = x * x
    jacobian = g.jacobian(y, x)
    # jacobian value is [[2., 0.], [0., 4.]]
    ```

    Args:
      target: Tensor to be differentiated.
      sources: a list or nested structure of Tensors or Variables. `target`
        will be differentiated against elements in `sources`.
      unconnected_gradients: a value which can either hold 'none' or 'zero' and
        alters the value which will be returned if the target and sources are
        unconnected. The possible values and effects are detailed in
        'UnconnectedGradients' and it defaults to 'none'.
      parallel_iterations: A knob to control how many iterations are dispatched
        in parallel. This knob can be used to control the total memory usage.
      experimental_use_pfor: If true, vectorizes the jacobian computation. Else
        falls back to a sequential while_loop. Vectorization can sometimes fail
        or lead to excessive memory usage. This option can be used to disable
        vectorization in such cases.

    Returns:
      a list or nested structure of Tensors (or IndexedSlices, or None),
      one for each element in `sources`. Returned structure is the same as
      the structure of `sources`.

    Raises:
      RuntimeError: If called on a non-persistent tape with eager execution
        enabled and without enabling experimental_use_pfor.
      ValueError: If vectorization of jacobian computation fails.
    """
    flat_sources = nest.flatten(sources)
    target_static_shape = target.shape
    target_shape = array_ops.shape(target)
    # Note that we push and pop the tape here and below. This is needed since we
    # need gradients through the enclosed operations.
    self._push_tape()
    target = array_ops.reshape(target, [-1])
    self._pop_tape()

    def loop_fn(i):
      self._push_tape()
      y = array_ops.gather(target, i)
      self._pop_tape()
      return self.gradient(y, flat_sources,
                           unconnected_gradients=unconnected_gradients)

    try:
      target_size = int(target.shape[0])
    except TypeError:
      target_size = array_ops.shape(target)[0]

    if experimental_use_pfor:
      try:
        output = pfor_ops.pfor(loop_fn, target_size,
                               parallel_iterations=parallel_iterations)
      except ValueError as err:
        six.reraise(
            ValueError,
            ValueError(
                str(err) + "\nEncountered an exception while vectorizing the "
                "jacobian computation. Vectorization can be disabled by setting"
                " experimental_use_pfor to False."),
            sys.exc_info()[2])
    else:
      if context.executing_eagerly() and not self._persistent:
        raise RuntimeError(
            "GradientTape must be created with persistent=True"
            " to compute the jacobian with eager execution enabled and with "
            " experimental_use_pfor set to False.")
      output = pfor_ops.for_loop(
          loop_fn, [target.dtype] * len(flat_sources), target_size,
          parallel_iterations=parallel_iterations)

    for i, out in enumerate(output):
      if out is not None:
        new_shape = array_ops.concat(
            [target_shape, array_ops.shape(out)[1:]], axis=0)
        out = array_ops.reshape(out, new_shape)
        if context.executing_eagerly():
          out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))
      output[i] = out

    return nest.pack_sequence_as(sources, output)
Exemple #45
0
 def runTest(self):
     if self.exc_info is None or any(x is None for x in self.exc_info):
         self.fail("could not import %s" % self.module)
     else:
         six.reraise(*self.exc_info)
Exemple #46
0
 def null_technical_500_response(request, exc_type, exc_value, tb):
     six.reraise(exc_type, exc_value, tb)
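
Hooks like this are usually installed in test setups; a sketch, on the assumption that Django's debug view is monkeypatched so the test client re-raises the original exception instead of rendering an error page:

```python
import django.views.debug

# let exceptions propagate to the test runner instead of becoming HTML
django.views.debug.technical_500_response = null_technical_500_response
```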
Exemple #47
0
def dtw(X=None, Y=None, C=None, metric='euclidean', step_sizes_sigma=None,
        weights_add=None, weights_mul=None, subseq=False, backtrack=True,
        global_constraints=False, band_rad=0.25):
    '''Dynamic time warping (DTW).

    This function performs a DTW and path backtracking on two sequences.
    We follow the nomenclature and algorithmic approach as described in [1]_.

    .. [1] Meinard Mueller
           Fundamentals of Music Processing — Audio, Analysis, Algorithms, Applications
           Springer Verlag, ISBN: 978-3-319-21944-8, 2015.

    Parameters
    ----------
    X : np.ndarray [shape=(K, N)]
        audio feature matrix (e.g., chroma features)

    Y : np.ndarray [shape=(K, M)]
        audio feature matrix (e.g., chroma features)

    C : np.ndarray [shape=(N, M)]
        Precomputed distance matrix. If supplied, X and Y must not be supplied and
        ``metric`` will be ignored.

    metric : str
        Identifier for the cost function as documented
        in `scipy.spatial.distance.cdist()`

    step_sizes_sigma : np.ndarray [shape=[n, 2]]
        Specifies allowed step sizes as used by the dtw.

    weights_add : np.ndarray [shape=[n, ]]
        Additive weights to penalize certain step sizes.

    weights_mul : np.ndarray [shape=[n, ]]
        Multiplicative weights to penalize certain step sizes.

    subseq : binary
        Enable subsequence DTW, e.g., for retrieval tasks.

    backtrack : binary
        Enable backtracking in accumulated cost matrix.

    global_constraints : binary
        Applies global constraints to the cost matrix ``C`` (Sakoe-Chiba band).

    band_rad : float
        The Sakoe-Chiba band radius (1/2 of the width) will be
        ``int(radius*min(C.shape))``.

    Returns
    -------
    D : np.ndarray [shape=(N,M)]
        accumulated cost matrix.
        D[N,M] is the total alignment cost.
        When doing subsequence DTW, D[N,:] indicates a matching function.

    wp : np.ndarray [shape=(N,2)]
        Warping path with index pairs.
        Each row of the array contains an index pair (n, m).
        Only returned when ``backtrack`` is True.

    Raises
    ------
    ParameterError
        If you are doing diagonal matching and Y is shorter than X, if an
        incompatible combination of X, Y, and C is supplied, or if your
        input dimensions are otherwise incompatible.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=10, duration=15)
    >>> X = librosa.feature.chroma_cens(y=y, sr=sr)
    >>> noise = np.random.rand(X.shape[0], 200)
    >>> Y = np.concatenate((noise, noise, X, noise), axis=1)
    >>> D, wp = librosa.sequence.dtw(X, Y, subseq=True)
    >>> plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(D, x_axis='frames', y_axis='frames')
    >>> plt.title('Database excerpt')
    >>> plt.plot(wp[:, 1], wp[:, 0], label='Optimal path', color='y')
    >>> plt.legend()
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(D[-1, :] / wp.shape[0])
    >>> plt.xlim([0, Y.shape[1]])
    >>> plt.ylim([0, 2])
    >>> plt.title('Matching cost function')
    >>> plt.tight_layout()
    '''
    # Default Parameters
    if step_sizes_sigma is None:
        step_sizes_sigma = np.array([[1, 1], [0, 1], [1, 0]])
    if weights_add is None:
        weights_add = np.zeros(len(step_sizes_sigma))
    if weights_mul is None:
        weights_mul = np.ones(len(step_sizes_sigma))

    if len(step_sizes_sigma) != len(weights_add):
        raise ParameterError('len(weights_add) must be equal to len(step_sizes_sigma)')
    if len(step_sizes_sigma) != len(weights_mul):
        raise ParameterError('len(weights_mul) must be equal to len(step_sizes_sigma)')

    if C is None and (X is None or Y is None):
        raise ParameterError('If C is not supplied, both X and Y must be supplied')
    if C is not None and (X is not None or Y is not None):
        raise ParameterError('If C is supplied, both X and Y must not be supplied')

    # calculate pair-wise distances, unless already supplied.
    if C is None:
        # take care of dimensions
        X = np.atleast_2d(X)
        Y = np.atleast_2d(Y)

        try:
            C = cdist(X.T, Y.T, metric=metric)
        except ValueError as e:
            msg = ('scipy.spatial.distance.cdist returned an error.\n'
                   'Please provide your input in the form X.shape=(K, N) and Y.shape=(K, M).\n'
                   '1-dimensional sequences should be reshaped to X.shape=(1, N) and Y.shape=(1, M).')
            six.reraise(ParameterError, ParameterError(msg), sys.exc_info()[2])

        # for subsequence matching:
        # if N > M, Y can be a subsequence of X
        if subseq and (X.shape[1] > Y.shape[1]):
            C = C.T

    C = np.atleast_2d(C)

    # if diagonal matching, Y has to be longer than X
    # (X simply cannot be contained in Y)
    if np.array_equal(step_sizes_sigma, np.array([[1, 1]])) and (C.shape[0] > C.shape[1]):
        raise ParameterError('For diagonal matching: Y.shape[1] >= X.shape[1] '
                             '(C.shape[1] >= C.shape[0])')

    max_0 = step_sizes_sigma[:, 0].max()
    max_1 = step_sizes_sigma[:, 1].max()

    if global_constraints:
        # Apply global constraints to the cost matrix
        fill_off_diagonal(C, band_rad, value=np.inf)

    # initialize whole matrix with infinity values
    D = np.ones(C.shape + np.array([max_0, max_1])) * np.inf

    # set starting point to C[0, 0]
    D[max_0, max_1] = C[0, 0]

    if subseq:
        D[max_0, max_1:] = C[0, :]

    # initialize step matrix with -1
    # will be filled in calc_accu_cost() with indices from step_sizes_sigma
    D_steps = -1 * np.ones(D.shape, dtype=int)

    # calculate accumulated cost matrix
    D, D_steps = __dtw_calc_accu_cost(C, D, D_steps,
                                      step_sizes_sigma,
                                      weights_mul, weights_add,
                                      max_0, max_1)

    # delete infinity rows and columns
    D = D[max_0:, max_1:]
    D_steps = D_steps[max_0:, max_1:]

    if backtrack:
        if subseq:
            # search for global minimum in last row of D-matrix
            wp_end_idx = np.argmin(D[-1, :]) + 1
            wp = __dtw_backtracking(D_steps[:, :wp_end_idx], step_sizes_sigma)
        else:
            # perform warping path backtracking
            wp = __dtw_backtracking(D_steps, step_sizes_sigma)

        wp = np.asarray(wp, dtype=int)

        # since we transposed in the beginning, we have to adjust the index pairs back
        if subseq and (X.shape[1] > Y.shape[1]):
            wp = np.fliplr(wp)

        return D, wp
    else:
        return D
Exemple #48
0
def _graph(node_source, edge_source, **kwargs):

    if not isinstance(node_source, ColumnarDataSource):
        try:
            # try converting the source to ColumnDataSource
            node_source = ColumnDataSource(node_source)
        except ValueError as err:
            msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format(
                curr_type=str(type(node_source)), err=err)
            reraise(ValueError, ValueError(msg), sys.exc_info()[2])

    if not isinstance(edge_source, ColumnarDataSource):
        try:
            # try converting the source to ColumnDataSource
            edge_source = ColumnDataSource(edge_source)
        except ValueError as err:
            msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format(
                curr_type=str(type(edge_source)), err=err)
            reraise(ValueError, ValueError(msg), sys.exc_info()[2])

    ## node stuff
    if any(x.startswith('node_selection_') for x in kwargs):
        snode_ca = _pop_colors_and_alpha(Circle,
                                         kwargs,
                                         prefix="node_selection_")
    else:
        snode_ca = None

    if any(x.startswith('node_hover_') for x in kwargs):
        hnode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_hover_")
    else:
        hnode_ca = None

    if any(x.startswith('node_muted_') for x in kwargs):
        mnode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_muted_")
    else:
        mnode_ca = None

    nsnode_ca = _pop_colors_and_alpha(Circle,
                                      kwargs,
                                      prefix="node_nonselection_")
    node_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_")

    ## edge stuff
    if any(x.startswith('edge_selection_') for x in kwargs):
        sedge_ca = _pop_colors_and_alpha(MultiLine,
                                         kwargs,
                                         prefix="edge_selection_")
    else:
        sedge_ca = None

    if any(x.startswith('edge_hover_') for x in kwargs):
        hedge_ca = _pop_colors_and_alpha(MultiLine,
                                         kwargs,
                                         prefix="edge_hover_")
    else:
        hedge_ca = None

    if any(x.startswith('edge_muted_') for x in kwargs):
        medge_ca = _pop_colors_and_alpha(MultiLine,
                                         kwargs,
                                         prefix="edge_muted_")
    else:
        medge_ca = None

    nsedge_ca = _pop_colors_and_alpha(MultiLine,
                                      kwargs,
                                      prefix="edge_nonselection_")
    edge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_")

    ## node stuff
    def _strip_prefix(key, prefix):
        # note: str.lstrip strips a *character set*, not a prefix, and would
        # mangle keys such as 'name'; remove the prefix explicitly instead
        return key[len(prefix):] if key.startswith(prefix) else key

    node_kwargs = {
        _strip_prefix(k, 'node_'): v
        for k, v in kwargs.copy().items()
        if _strip_prefix(k, 'node_') in Circle.properties()
    }

    node_glyph = _make_glyph(Circle, node_kwargs, node_ca)
    nsnode_glyph = _make_glyph(Circle, node_kwargs, nsnode_ca)
    snode_glyph = _make_glyph(Circle, node_kwargs, snode_ca)
    hnode_glyph = _make_glyph(Circle, node_kwargs, hnode_ca)
    mnode_glyph = _make_glyph(Circle, node_kwargs, mnode_ca)

    node_renderer = GlyphRenderer(glyph=node_glyph,
                                  nonselection_glyph=nsnode_glyph,
                                  selection_glyph=snode_glyph,
                                  hover_glyph=hnode_glyph,
                                  muted_glyph=mnode_glyph,
                                  data_source=node_source)

    ## edge stuff
    edge_kwargs = {
        _strip_prefix(k, 'edge_'): v
        for k, v in kwargs.copy().items()
        if _strip_prefix(k, 'edge_') in MultiLine.properties()
    }

    edge_glyph = _make_glyph(MultiLine, edge_kwargs, edge_ca)
    nsedge_glyph = _make_glyph(MultiLine, edge_kwargs, nsedge_ca)
    sedge_glyph = _make_glyph(MultiLine, edge_kwargs, sedge_ca)
    hedge_glyph = _make_glyph(MultiLine, edge_kwargs, hedge_ca)
    medge_glyph = _make_glyph(MultiLine, edge_kwargs, medge_ca)

    edge_renderer = GlyphRenderer(glyph=edge_glyph,
                                  nonselection_glyph=nsedge_glyph,
                                  selection_glyph=sedge_glyph,
                                  hover_glyph=hedge_glyph,
                                  muted_glyph=medge_glyph,
                                  data_source=edge_source)

    _RENDERER_ARGS = [
        'name', 'level', 'visible', 'x_range_name', 'y_range_name',
        'selection_policy', 'inspection_policy'
    ]

    renderer_kwargs = {
        attr: kwargs.pop(attr)
        for attr in _RENDERER_ARGS if attr in kwargs
    }

    renderer_kwargs["node_renderer"] = node_renderer
    renderer_kwargs["edge_renderer"] = edge_renderer

    return renderer_kwargs
Exemple #49
0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
from dm_control._render import base
from dm_control._render import executor
import six

# Re-raise any exceptions that occur during module import as `ImportError`s.
# This simplifies the conditional imports in `render/__init__.py`.
try:
    import glfw  # pylint: disable=g-import-not-at-top
except (ImportError, IOError, OSError) as exc:
    _, exc, tb = sys.exc_info()
    six.reraise(ImportError, ImportError(str(exc)), tb)
try:
    glfw.init()
except glfw.GLFWError as exc:
    _, exc, tb = sys.exc_info()
    six.reraise(ImportError, ImportError(str(exc)), tb)


class GLFWContext(base.ContextBase):
    """An OpenGL context backed by GLFW."""
    def __init__(self, max_width, max_height):
        # GLFWContext always uses `PassthroughRenderExecutor` rather than offloading
        # rendering calls to a separate thread because GLFW can only be safely used
        # from the main thread.
        super(GLFWContext, self).__init__(max_width, max_height,
                                          executor.PassthroughRenderExecutor)
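
Converting every import-time failure to `ImportError`, as above, lets a dispatcher probe backends with a single except clause. A minimal sketch with a hypothetical helper:

```python
def import_first_available(module_names):
    # try each rendering backend in order; the uniform ImportError contract
    # above is what makes this loop possible
    for name in module_names:
        try:
            return __import__(name)
        except ImportError:
            continue
    raise ImportError('no usable rendering backend found')
```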
Exemple #50
0
 def _exception(self, e_type, e_value, e_traceback):
     if self.logger:
         self.logger.exception('[%s] Raised exception: %s' %
                               (self, e_value))
     else:
         six.reraise(e_type, e_value, e_traceback)
Exemple #51
0
    def _get_data(self):
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices are generated infinitely
            # for each worker until it raises StopIteration, but a worker
            # raising StopIteration discards a batch of indices that is
            # counted in _send_idx without increasing _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    info = self._task_infos[self._rcvd_idx]
                    if len(info) == 3 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: when _rcvd_idx catches up with _send_idx, one of
                    #       the following holds:
                    #       1. all 2 * num_workers batches have been loaded
                    #          and stored in _blocking_queue
                    #       2. all data is drained
                    #       we need to let _thread block on _data_queue
                    #       get_data so it does not occupy CPU; otherwise it
                    #       may steal CPU time from the running model
                    # NOTE: in persistent workers mode, do not check data
                    #       drained here, simply let it go to _data_queue
                    #       reading to get _ResumeIteration
                    if not self._persistent_workers:
                        # NOTE: _rcvd_idx and _send_idx only record batches
                        #       among workers; even if those are drained, there
                        #       may still be data in the blocking queue
                        if self._batches_outstanding < len(self._places):
                            return None

            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 3:
                info = self._task_infos.pop(self._rcvd_idx)
                self._structure_infos.append(info[2])
                return info[1]

            try:
                # [ avoid hang ]: the main process may block at _reader.read_next
                # on KeyboardInterrupt, so we make the following tradeoff:
                # 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL(5s) by
                #    default; if KeyboardInterrupt blocks, failed workers will be
                #    checked and a RuntimeError raised to quit DataLoader in the
                #    timeout exception handling.
                # 2. if getting data times out and all workers are alive,
                #    continue to get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue

                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                "pids: {}".format(len(failed_workers), pids))

                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue

                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker gets StopIteration, we shut down this
                    # worker; note that the batch indices which triggered
                    # StopIteration are discarded, so the outstanding batch
                    # number should be decreased and new indices should be
                    # put for the other workers that may still be working.
                    if self._persistent_workers:
                        self._worker_status[data.worker_id] = False
                    else:
                        self._shutdown_worker(data.worker_id)
                        self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue

                idx, batch, structure = data

                if isinstance(idx, _ResumeIteration) and batch is None \
                        and structure is None:
                    return idx

                if isinstance(batch, _WorkerException):
                    self._exit_thread_unexpectedly()
                    batch.reraise()

                if idx == self._rcvd_idx:
                    del self._task_infos[idx]
                    self._structure_infos.append(structure)
                    return batch
                else:
                    self._task_infos[idx] += (batch, structure)
                    continue
Exemple #52
0
 def reraise(self):
     """Re-raise captured exception."""
     if self._exc_info:
         six.reraise(*self._exc_info)
     else:
         raise exc.WrappedFailure([self])
Exemple #53
0
 def wrapper(*args, **kwargs):
     try:
         method(*args, **kwargs)
     except Exception as e:
         six.reraise(exception_class, exception_class(str(e)),
                     sys.exc_info()[2])
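
Wrappers like the one above are usually produced by a decorator factory. A hypothetical stand-alone version (`converts_exceptions_to` is illustrative, not from any library shown here):

```python
import functools
import sys

import six


def converts_exceptions_to(exception_class):
    # any exception raised by the wrapped callable is re-raised as
    # exception_class, with the original traceback attached
    def decorator(method):
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except Exception as e:
                six.reraise(exception_class, exception_class(str(e)),
                            sys.exc_info()[2])
        return wrapper
    return decorator
```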
Exemple #54
0
    def __next__(self):
        if in_profiler_mode():
            trace_event = profiler.RecordEvent(
                name="_DataLoaderIterMultiProcess",
                event_type=profiler.TracerEventType.Dataloader)
            trace_event.begin()
        try:
            benchmark().check_if_need_record(self)
            benchmark().before_reader()
            # _batches_outstanding here records the total number of batches
            # between _try_put_indices and data output; this value should be
            # _outstanding_capacity while data is not drained. If
            # _batches_outstanding is less than the number of _places, there
            # is not enough data to generate the next output, so close the
            # blocking_queue and set _thread_done_event here; py_reader will
            # raise StopIteration, and workers and indices_queues are ended
            # in the StopIteration handling
            if self._batches_outstanding < len(self._places):
                if self._persistent_workers:
                    raise StopIteration
                else:
                    self._thread_done_event.set()
                    self._blocking_queue.close()

            if in_dygraph_mode():
                data = core.eager.read_next_tensor_list(
                    self._reader.read_next_list()[0])
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if _in_legacy_dygraph():
                    data = self._reader.read_next_var_list()
                    data = _restore_batch(data, self._structure_infos.pop(0))
                else:
                    if self._return_list:
                        data = self._reader.read_next_list()
                        for i in range(len(data)):
                            data[i] = data[i]._move_to_list()
                        data = [
                            _restore_batch(d, s)
                            for d, s in zip(data, self._structure_infos[:len(
                                self._places)])
                        ]
                        self._structure_infos = self._structure_infos[len(
                            self._places):]
                        # static graph organizes data on multiple devices with
                        # a list; if the place number is 1 there is only 1
                        # device, so extract the data from the list to be
                        # compatible with dygraph mode
                        if len(self._places) == 1:
                            data = data[0]
                    else:
                        data = self._reader.read_next()
            self._on_output_batch()
            benchmark().after_reader()
            return data
        except StopIteration:
            if not self._persistent_workers:
                self._reader.shutdown()
                self._try_shutdown_all()
            six.reraise(*sys.exc_info())
        finally:
            if in_profiler_mode():
                trace_event.end()
Exemple #55
0
 def null_technical_500_response(request,
                                 exc_type,
                                 exc_value,
                                 tb,
                                 status_code=500):
     six.reraise(exc_type, exc_value, tb)
Exemple #56
0
 def _wrap_log_exception(self):
     exc_info = sys.exc_info()
     NovaExceptionReraiseFormatError.real_log_exception(self)
     six.reraise(*exc_info)
Exemple #57
0
 def raise_requested_exception(self):
     """If an exception has been passed to `request_stop`, this raises it."""
     with self._lock:
         if self._exc_info_to_raise:
             six.reraise(*self._exc_info_to_raise)
Exemple #58
0
 def wrapper(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except messaging.ExpectedException as e:
         six.reraise(*e.exc_info)
Exemple #59
0
    def join(self,
             threads=None,
             stop_grace_period_secs=120,
             ignore_live_threads=False):
        """Wait for threads to terminate.

    This call blocks until a set of threads have terminated.  The set of threads
    is the union of the threads passed in the `threads` argument and the list
    of threads that registered with the coordinator by calling
    `Coordinator.register_thread()`.

    After the threads stop, if an `exc_info` was passed to `request_stop`, that
    exception is re-raised.

    Grace period handling: When `request_stop()` is called, threads are given
    'stop_grace_period_secs' seconds to terminate.  If any of them is still
    alive after that period expires, a `RuntimeError` is raised.  Note that if
    an `exc_info` was passed to `request_stop()` then it is raised instead of
    that `RuntimeError`.

    Args:
      threads: List of `threading.Threads`. The started threads to join in
        addition to the registered threads.
      stop_grace_period_secs: Number of seconds given to threads to stop after
        `request_stop()` has been called.
      ignore_live_threads: If `False`, raises an error if any of the threads are
        still alive after `stop_grace_period_secs`.

    Raises:
      RuntimeError: If any thread is still alive after `request_stop()`
        is called and the grace period expires.
    """
        # Threads registered after this call will not be joined.
        with self._lock:
            if threads is None:
                threads = self._registered_threads
            else:
                threads = self._registered_threads.union(set(threads))
            # Copy the set into a list to avoid race conditions where a new thread
            # is added while we are waiting.
            threads = list(threads)

        # Wait for all threads to stop or for request_stop() to be called.
        while any(t.is_alive()
                  for t in threads) and not self.wait_for_stop(1.0):
            pass

        # If any thread is still alive, wait for the grace period to expire.
        # By the time this check is executed, threads may still be shutting down,
        # so we add a sleep of increasing duration to give them a chance to shut
        # down without losing too many cycles.
        # The sleep duration is limited to the remaining grace duration.
        stop_wait_secs = 0.001
        while any(t.is_alive()
                  for t in threads) and stop_grace_period_secs >= 0.0:
            time.sleep(stop_wait_secs)
            stop_grace_period_secs -= stop_wait_secs
            stop_wait_secs = 2 * stop_wait_secs
            # Keep the waiting period within sane bounds.
            # The minimum value is to avoid decreasing stop_wait_secs to a value
            # that could cause stop_grace_period_secs to remain unchanged.
            stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs),
                                 0.001)

        # List the threads still alive after the grace period.
        stragglers = [t.name for t in threads if t.is_alive()]

        # Terminate with an exception if appropriate.
        with self._lock:
            self._joined = True
            self._registered_threads = set()
            if self._exc_info_to_raise:
                six.reraise(*self._exc_info_to_raise)
            elif stragglers:
                if ignore_live_threads:
                    logging.info(
                        "Coordinator stopped with threads still running: %s",
                        " ".join(stragglers))
                else:
                    raise RuntimeError(
                        "Coordinator stopped with threads still running: %s" %
                        " ".join(stragglers))
Exemple #60
0
 def get(self):
     six.reraise(*self.exc_info)
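
Finally, a self-contained sketch of the pattern this one-liner belongs to: capture `sys.exc_info()` in a worker thread, then re-raise it in the caller after `join()` (the class is illustrative, not from a specific library).

```python
import sys
import threading

import six


class WorkerThread(threading.Thread):
    def __init__(self, target):
        super(WorkerThread, self).__init__()
        self._target_fn = target
        self.exc_info = None

    def run(self):
        try:
            self._target_fn()
        except Exception:
            # exceptions cannot propagate across threads; stash the triple
            self.exc_info = sys.exc_info()

    def get(self):
        # call after join(): re-raise anything the thread captured
        if self.exc_info is not None:
            six.reraise(*self.exc_info)
```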