def testSomeTrades(self):
        strat = self.__createStrategy()
        stratAnalyzer = trades.Trades()
        strat.attach_analyzer(stratAnalyzer)

        # Winning trade
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 0),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.BUY,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 16),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.SELL,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
        # Losing trade
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 30),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.BUY,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 31),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.SELL,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
        # Winning trade
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 38),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.BUY,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 42),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.SELL,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.26
        # Open trade.
        strat.addOrder(buildUTCDateTime(2011, 1, 3, 15, 47),
                       strat.get_broker().create_market_order,
                       broker.Order.Action.BUY,
                       TradesAnalyzerTestCase.TestInstrument, 1)  # 127.34
        strat.run()

        self.assertTrue(
            round(strat.get_broker().get_cash(), 2) == round(
                1000 + (127.16 - 127.14) + (127.16 - 127.2) +
                (127.26 - 127.16) - 127.34, 2))

        self.assertTrue(stratAnalyzer.get_count() == 3)
        self.assertTrue(stratAnalyzer.get_even_count() == 0)
        self.assertTrue(round(stratAnalyzer.get_all().mean(), 2) == 0.03)
        self.assertTrue(round(stratAnalyzer.get_all().std(ddof=1), 2) == 0.07)
        self.assertTrue(round(stratAnalyzer.get_all().std(ddof=0), 2) == 0.06)

        self.assertTrue(stratAnalyzer.get_profitable_count() == 2)
        self.assertTrue(round(stratAnalyzer.get_profits().mean(), 2) == 0.06)
        self.assertTrue(
            round(stratAnalyzer.get_profits().std(ddof=1), 2) == 0.06)
        self.assertTrue(
            round(stratAnalyzer.get_profits().std(ddof=0), 2) == 0.04)

        self.assertTrue(stratAnalyzer.get_unprofitable_count() == 1)
        self.assertTrue(round(stratAnalyzer.get_losses().mean(), 2) == -0.04)
        if version.LooseVersion(
                numpy.__version__) >= version.LooseVersion("1.6.2"):
            self.assertTrue(math.isnan(stratAnalyzer.get_losses().std(ddof=1)))
        else:
            self.assertTrue(stratAnalyzer.get_losses().std(ddof=1) == 0)
        self.assertTrue(stratAnalyzer.get_losses().std(ddof=0) == 0)
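The numpy version branch at the end exists because the sample standard deviation of the single losing trade divides by n - 1 = 0. A minimal sketch of the behavior the test encodes, assuming only numpy:

import numpy
# One losing trade: ddof=1 divides by n - 1 = 0, so numpy >= 1.6.2 returns
# NaN (older releases returned 0); ddof=0 divides by n and gives 0.0.
losses = numpy.array([127.16 - 127.2])
print(losses.std(ddof=1))  # nan (with a RuntimeWarning) on modern numpy
print(losses.std(ddof=0))  # 0.0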
Example #2
import unittest

from neo.io.blackrockio import BlackrockIO

from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.tools import assert_neo_object_is_compliant
from neo.test.rawiotest.test_blackrockrawio import TestBlackrockRawIO

# check scipy
try:
    from distutils import version
    import scipy.io
    import scipy.version
except ImportError as err:
    HAVE_SCIPY = False
    SCIPY_ERR = err
else:
    if version.LooseVersion(scipy.version.version) < '0.8':
        HAVE_SCIPY = False
        SCIPY_ERR = ImportError("your scipy version is too old to support " +
                                "MatlabIO, you need at least 0.8. " +
                                "You have %s" % scipy.version.version)
    else:
        HAVE_SCIPY = True
        SCIPY_ERR = None


class CommonTests(BaseTestIO, unittest.TestCase):
    ioclass = BlackrockIO
    entities_to_download = ['blackrock']
    entities_to_test = [
        'blackrock/FileSpec2.3001', 'blackrock/blackrock_2_1/l101210-001'
    ]
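A side note on the guard above: LooseVersion coerces a bare string on the right-hand side of a comparison, so comparing against '0.8' directly is equivalent to wrapping it. A minimal check, assuming only distutils:

from distutils import version
# Both forms are equivalent; the bare string is coerced to LooseVersion.
assert version.LooseVersion('0.7.2') < '0.8'
assert version.LooseVersion('0.7.2') < version.LooseVersion('0.8')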
Example #3
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True

#try:
#    import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True

if not _python24:
    raise ImportError('matplotlib requires Python 2.4 or later')


import numpy
from distutils import version
expected_version = version.LooseVersion(__version__numpy__)
found_version = version.LooseVersion(numpy.__version__)
if not found_version >= expected_version:
    raise ImportError(
        'numpy %s or later is required; you have %s' % (
            __version__numpy__, numpy.__version__))
del version


def is_string_like(obj):
    # Arrays have a shape attribute and are never treated as strings.
    if hasattr(obj, 'shape'):
        return 0
    # Duck typing: only string-like objects support concatenation with ''.
    try:
        obj + ''
    except (TypeError, ValueError):
        return 0
    return 1

Example #4
def check_tensorflow_version():
    min_tf_version = "1.4.0-dev20171024"
    if (version.LooseVersion(tf.__version__) <
            version.LooseVersion(min_tf_version)):
        raise EnvironmentError("Tensorflow version must >= %s" %
                               min_tf_version)
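One gotcha with the dev-tagged minimum above, sketched here assuming only distutils: LooseVersion splits the "-dev" suffix into extra components, so the final "1.4.0" release compares lower than the dev build it supersedes.

from distutils import version
# "1.4.0" parses to [1, 4, 0]; "1.4.0-dev20171024" has extra components,
# and with an equal prefix the shorter (final-release) list is "smaller".
assert version.LooseVersion("1.4.0") < version.LooseVersion("1.4.0-dev20171024")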
Example #5
def is_upgrade_required(current_version, available_version):
    # NOTE(johngarbutt): agent version numbers are four part,
    # so we need to use the loose version to compare them
    current = version.LooseVersion(current_version)
    available = version.LooseVersion(available_version)
    return available > current
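A quick spot-check of the comparison, using hypothetical four-part agent versions as the NOTE describes:

# Four-part numbers like these are rejected by StrictVersion but fine here.
assert is_upgrade_required('6.0.0.1', '6.0.0.2')
assert not is_upgrade_required('6.0.0.2', '6.0.0.2')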
Example #6
def is_nsxv_dhcp_binding_supported(nsx_version):
    return (
        (version.LooseVersion(nsx_version) >= version.LooseVersion('6.3.3')) or
        (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2.8')
         and version.LooseVersion(nsx_version) < version.LooseVersion('6.3')))
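Spot-checks of the two version ranges above, with hypothetical inputs:

# Supported: >= 6.3.3, or in the window [6.2.8, 6.3).
assert is_nsxv_dhcp_binding_supported('6.3.3')
assert is_nsxv_dhcp_binding_supported('6.2.8')
assert not is_nsxv_dhcp_binding_supported('6.3.0')  # below 6.3.3, not below 6.3
assert not is_nsxv_dhcp_binding_supported('6.2.7')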
Example #7
def check_tensorflow_version():
  # LINT.IfChange
  min_tf_version = PARAM.min_TF_version
  # LINT.ThenChange(<pwd>/nmt/copy.bara.sky)
  if not (version.LooseVersion(tf.__version__) == version.LooseVersion(min_tf_version)):
    raise EnvironmentError("Tensorflow version must be '%s'" % min_tf_version)
Example #8
    def Push(self,
             local,
             remote,
             sync=False,
             timeout=DEFAULT_SUPER_LONG_TIMEOUT,
             retries=DEFAULT_RETRIES):
        """Pushes a file from the host to the device.

    Args:
      local: Path on the host filesystem.
      remote: Path on the device filesystem.
      sync: (optional) Whether to only push files that are newer on the host.
        Not supported when using adb prior to 1.0.39.
      timeout: (optional) Timeout per try in seconds.
      retries: (optional) Number of retries to attempt.

    Raises:
      AdbVersionError if sync=True with versions of adb prior to 1.0.39.
    """
        VerifyLocalFileExists(local)

        if (du_version.LooseVersion(self.Version()) <
                du_version.LooseVersion('1.0.36')):

            # Different versions of adb handle pushing a directory to an existing
            # directory differently.

            # In the version packaged with the M SDK, 1.0.32, the following push:
            #   foo/bar -> /sdcard/foo/bar
            # where bar is an existing directory both on the host and the device
            # results in the contents of bar/ on the host being pushed to bar/ on
            # the device, i.e.
            #   foo/bar/A -> /sdcard/foo/bar/A
            #   foo/bar/B -> /sdcard/foo/bar/B
            #   ... etc.

            # In the version packaged with the N SDK, 1.0.36, the same push under
            # the same conditions results in a second bar/ directory being created
            # underneath the first bar/ directory on the device, i.e.
            #   foo/bar/A -> /sdcard/foo/bar/bar/A
            #   foo/bar/B -> /sdcard/foo/bar/bar/B
            #   ... etc.

            # In order to provide a consistent interface to clients, we check
            # whether the target is an existing directory on the device and,
            # if so, modify the target passed to adb to emulate the behavior
            # on 1.0.36 and above.

            # Note that this behavior may have started before 1.0.36; that's simply
            # the earliest version we've confirmed thus far.

            try:
                self.Shell('test -d %s' % remote,
                           timeout=timeout,
                           retries=retries)
                remote = posixpath.join(remote, posixpath.basename(local))
            except device_errors.AdbShellCommandFailedError:
                # The target directory doesn't exist on the device, so we can use it
                # without modification.
                pass

        push_cmd = ['push']

        if sync:
            push_cmd += ['--sync']
            if (du_version.LooseVersion(self.Version()) <
                    du_version.LooseVersion('1.0.39')):
                # The --sync flag for `adb push` is a relatively recent addition.
                # We're not sure exactly which release first contained it, but it
                # exists at least as far back as 1.0.39.
                raise device_errors.AdbVersionError(
                    push_cmd,
                    desc='--sync not supported',
                    actual_version=self.Version(),
                    min_version='1.0.39')

        push_cmd += [local, remote]

        self._RunDeviceAdbCmd(push_cmd, timeout, retries)
Example #9
 def _version_key(version_tuple):
     """version_tuple: typing.Tuple[str, int])"""
     # FIXME figure out why importing typing causes RecursionError
     return (version.LooseVersion(version_tuple[0]), version_tuple[1])
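A hypothetical standalone use of the key, showing why LooseVersion beats plain string sorting:

pairs = [("1.10", 2), ("1.9", 5), ("1.10", 1)]
# Components compare numerically, so 1.9 sorts before 1.10 (a plain string
# sort would put "1.10" first).
assert sorted(pairs, key=_version_key) == [("1.9", 5), ("1.10", 1), ("1.10", 2)]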
Example #10
import distutils.version as vers
import pytest


import astropy.version as astrov
from astropy.utils.data import get_pkg_data_filename


from ..read import CRTFParser, read_crtf
from ..write import crtf_objects_to_string
from ..core import CRTFRegionParserError

_ASTROPY_MINVERSION = vers.LooseVersion('1.1')
_ASTROPY_VERSION = vers.LooseVersion(astrov.version)

implemented_region_types = ('ellipse', 'circle', 'rectangle', 'poly', 'point', 'text', 'symbol')


def test_global_parser():
    """
    Checks that the global_parser does what's expected.
    """
    global_test_str = "global coord=B1950_VLA, frame=BARY, corr=[I, Q], color=blue"
    global_parser = CRTFParser(global_test_str)
    assert dict(global_parser.global_meta) == {'coord': 'B1950_VLA', 'frame': 'BARY',
                                               'corr': ['I', 'Q'], 'color': 'blue'}


def test_valid_crtf_line():
    """
Example #11
def import_scoped_meta_graph_with_return_elements(
        meta_graph_or_file,
        clear_devices=False,
        graph=None,
        import_scope=None,
        input_map=None,
        unbound_inputs_col_name="unbound_inputs",
        restore_collections_predicate=(lambda key: True),
        return_elements=None):
    """Imports graph from `MetaGraphDef` and returns vars and return elements.

  This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
  it constructs a protocol buffer from the file content. The function
  then adds all the nodes from the `graph_def` field to the
  current graph, recreates the desired collections, and returns a dictionary of
  all the Variables imported into the name scope.

  In combination with `export_scoped_meta_graph()`, this function can be used to

  * Serialize a graph along with other Python objects such as `QueueRunner`,
    `Variable` into a `MetaGraphDef`.

  * Restart training from a saved graph and checkpoints.

  * Run inference from a saved graph and checkpoints.

  Args:
    meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
      the path) containing a `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      from graph_def. Default false.
    graph: The `Graph` to import into. If `None`, use the default graph.
    import_scope: Optional `string`. Name scope into which to import the
      subgraph. If `None`, the graph is imported to the root name scope.
    input_map: A dictionary mapping input names (as strings) in `graph_def` to
      `Tensor` objects. The values of the named input tensors in the imported
      graph will be re-mapped to the respective `Tensor` values.
    unbound_inputs_col_name: Collection name for looking up unbound inputs.
    restore_collections_predicate: a predicate on collection names. A collection
      named c (i.e. whose key is c) will be restored iff
      1) `restore_collections_predicate(c)` is True, and
      2) `c != unbound_inputs_col_name`.
    return_elements:  A list of strings containing operation names in the
      `MetaGraphDef` that will be returned as `Operation` objects; and/or
      tensor names in `MetaGraphDef` that will be returned as `Tensor` objects.

  Returns:
    A tuple of (
      dictionary of all the `Variables` imported into the name scope,
      list of `Operation` or `Tensor` objects from the `return_elements` list).

  Raises:
    ValueError: If the graph_def contains unbound inputs.

  """
    if context.executing_eagerly():
        raise ValueError(
            "Exporting/importing meta graphs is not supported when "
            "eager execution is enabled.")
    if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
        meta_graph_def = meta_graph_or_file
    else:
        meta_graph_def = read_meta_graph_file(meta_graph_or_file)

    if unbound_inputs_col_name:
        for key, col_def in meta_graph_def.collection_def.items():
            if key == unbound_inputs_col_name:
                kind = col_def.WhichOneof("kind")
                field = getattr(col_def, kind)
                if field.value and (not input_map or sorted(
                    [compat.as_str(v)
                     for v in field.value]) != sorted(input_map)):
                    raise ValueError(
                        "Graph contains unbound inputs: %s. Must "
                        "provide these inputs through input_map." % ",".join([
                            compat.as_str(v) for v in field.value
                            if not input_map or v not in input_map
                        ]))
                break

    # Sets graph to default graph if it's not passed in.
    graph = graph or ops.get_default_graph()

    # Gathers the list of nodes we are interested in.
    with graph.as_default():
        producer_op_list = None
        if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
            producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
        input_graph_def = meta_graph_def.graph_def
        # Remove all the explicit device specifications for this node. This helps to
        # make the graph more portable.
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""

        scope_to_prepend_to_names = graph.unique_name(import_scope or "",
                                                      mark_as_used=False)

        imported_return_elements = importer.import_graph_def(
            input_graph_def,
            name=(import_scope or scope_to_prepend_to_names),
            input_map=input_map,
            producer_op_list=producer_op_list,
            return_elements=return_elements)

        # TensorFlow versions before 1.9 (not inclusive) exported SavedModels
        # without a VariableDef.trainable field set.
        tf_version = meta_graph_def.meta_info_def.tensorflow_version
        if not tf_version:
            variables_have_trainable = True
        else:
            variables_have_trainable = (
                distutils_version.LooseVersion(tf_version) >=
                distutils_version.LooseVersion("1.9"))

        # Sort collections so we see TRAINABLE_VARIABLES first and can default these
        # variables to trainable if the value is not set in their VariableDef.
        sorted_collections = []
        if ops.GraphKeys.TRAINABLE_VARIABLES in meta_graph_def.collection_def:
            sorted_collections.append((ops.GraphKeys.TRAINABLE_VARIABLES,
                                       meta_graph_def.collection_def[
                                           ops.GraphKeys.TRAINABLE_VARIABLES]))
        for key, value in sorted(meta_graph_def.collection_def.items()):
            if key != ops.GraphKeys.TRAINABLE_VARIABLES:
                sorted_collections.append((key, value))

        # Restores all the other collections.
        variable_objects = {}
        for key, col_def in sorted_collections:
            # Don't add unbound_inputs to the new graph.
            if key == unbound_inputs_col_name:
                continue
            if not restore_collections_predicate(key):
                continue

            kind = col_def.WhichOneof("kind")
            if kind is None:
                logging.error(
                    "Cannot identify data type for collection %s. Skipping.",
                    key)
                continue
            from_proto = ops.get_from_proto_function(key)

            # Temporary change to allow the TFMA evaluator to read metric variables
            # saved as a bytes list.
            # TODO(kathywu): Remove this hack once cl/248406059 has been submitted.
            if key == ops.GraphKeys.METRIC_VARIABLES:
                # Metric variables will use the same proto functions as GLOBAL_VARIABLES
                from_proto = ops.get_from_proto_function(
                    ops.GraphKeys.GLOBAL_VARIABLES)
            if from_proto and kind == "bytes_list":
                proto_type = ops.get_collection_proto_type(key)
                if key in ops.GraphKeys._VARIABLE_COLLECTIONS:  # pylint: disable=protected-access
                    for value in col_def.bytes_list.value:
                        variable = variable_objects.get(value, None)
                        if variable is None:
                            proto = proto_type()
                            proto.ParseFromString(value)
                            if not variables_have_trainable:
                                # If the VariableDef proto does not contain a "trainable"
                                # property because it was exported before that property was
                                # added, we default it to whether the variable is in the
                                # TRAINABLE_VARIABLES collection. We've sorted
                                # TRAINABLE_VARIABLES to be first, so trainable variables will
                                # be created from that collection.
                                proto.trainable = (
                                    key == ops.GraphKeys.TRAINABLE_VARIABLES)
                            variable = from_proto(
                                proto, import_scope=scope_to_prepend_to_names)
                            variable_objects[value] = variable
                        graph.add_to_collection(key, variable)
                else:
                    for value in col_def.bytes_list.value:
                        proto = proto_type()
                        proto.ParseFromString(value)
                        graph.add_to_collection(
                            key,
                            from_proto(proto,
                                       import_scope=scope_to_prepend_to_names))
            else:
                field = getattr(col_def, kind)
                if key in _COMPAT_COLLECTION_LIST:
                    logging.warning(
                        "The saved meta_graph is possibly from an older release:\n"
                        "'%s' collection should be of type 'byte_list', but instead "
                        "is of type '%s'.", key, kind)
                if kind == "node_list":
                    for value in field.value:
                        col_op = graph.as_graph_element(
                            ops.prepend_name_scope(value,
                                                   scope_to_prepend_to_names))
                        graph.add_to_collection(key, col_op)
                elif kind == "int64_list":
                    # NOTE(opensource): This force conversion is to work around the fact
                    # that Python2 distinguishes between int and long, while Python3 has
                    # only int.
                    for value in field.value:
                        graph.add_to_collection(key, int(value))
                else:
                    for value in field.value:
                        graph.add_to_collection(
                            key,
                            ops.prepend_name_scope(value,
                                                   scope_to_prepend_to_names))

        var_list = {}
        variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                         scope=scope_to_prepend_to_names)
        for v in variables:
            var_list[ops.strip_name_scope(v.name,
                                          scope_to_prepend_to_names)] = v

    return var_list, imported_return_elements
Example #12
class RedisDriver(coordination.CoordinationDriverCachedRunWatchers):
    """Redis provides a few nice benefits that act as a poormans zookeeper.

    It **is** fully functional and implements all of the coordination
    driver API(s). It stores data into `redis`_ using the provided `redis`_
    API(s) using `msgpack`_ encoded values as needed.

    - Durability (when setup with `AOF`_ mode).
    - Consistent; note that this is still restricted to only
      one redis server. Without the recently released redis (alpha)
      clustering, running more than one server will not be consistent when
      partitions or failures occur (even the redis clustering docs state it
      is not a fully AP or CP solution, which means even with it there will
      still be *potential* inconsistencies).
    - Master/slave failover (when setup with redis `sentinel`_), giving
      some notion of HA (values *can* be lost when a failover transition
      occurs).

    To use a `sentinel`_ the connection URI must point to the sentinel server.
    At connection time the sentinel will be asked for the current IP and port
    of the master and then connect there. The connection URI for sentinel
    should be written as follows::

      redis://<sentinel host>:<sentinel port>?sentinel=<master name>

    Additional sentinel hosts are listed with multiple ``sentinel_fallback``
    parameters as follows::

        redis://<sentinel host>:<sentinel port>?sentinel=<master name>&
          sentinel_fallback=<other sentinel host>:<sentinel port>&
          sentinel_fallback=<other sentinel host>:<sentinel port>&
          sentinel_fallback=<other sentinel host>:<sentinel port>

    Further resources/links:

    - http://redis.io/
    - http://redis.io/topics/sentinel
    - http://redis.io/topics/cluster-spec

    Note that this client will itself retry on transaction failure (when they
    keys being watched have changed underneath the current transaction).
    Currently the number of attempts that are tried is infinite (this might
    be addressed in https://github.com/andymccurdy/redis-py/issues/566 when
    that gets worked on). See http://redis.io/topics/transactions for more
    information on this topic.

    General recommendations/usage considerations:

    - When used for locks, run in AOF mode and think carefully about how
      your redis deployment handles losing a server (the clustering support
      is supposed to aid in losing servers, but it is also of unknown
      reliability and is relatively new, so use at your own risk).

    .. _redis: http://redis.io/
    .. _msgpack: http://msgpack.org/
    .. _sentinel: http://redis.io/topics/sentinel
    .. _AOF: http://redis.io/topics/persistence
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    MIN_VERSION = version.LooseVersion("2.6.0")
    """
    The min redis version that this driver requires to operate with...
    """

    GROUP_EXISTS = b'__created__'
    """
    Redis deletes dictionaries that have no keys in them, which means the
    key will disappear which means we can't tell the difference between
    a group not existing and a group being empty without this key being
    saved...
    """

    #: Value used (with group exists key) to keep a group from disappearing.
    GROUP_EXISTS_VALUE = b'1'

    #: Default namespace for keys when none is provided.
    DEFAULT_NAMESPACE = b'_tooz'

    NAMESPACE_SEP = b':'
    """
    Separator that is used to combine a key with the namespace (to get
    the **actual** key that will be used).
    """

    DEFAULT_ENCODING = 'utf8'
    """
    This is for python3.x, which behaves differently when returning
    binary types or unicode types (redis appears to use binary internally);
    to stick with a common way of doing this, make all the things
    binary (with this default encoding if one is not given and a unicode
    string is provided).
    """

    CLIENT_ARGS = frozenset([
        'db',
        'encoding',
        'retry_on_timeout',
        'socket_keepalive',
        'socket_timeout',
        'ssl',
        'ssl_certfile',
        'ssl_keyfile',
        'sentinel',
        'sentinel_fallback',
    ])
    """
    Keys that we allow to proxy from the coordinator configuration into the
    redis client (used to configure the redis client internals so that
    it works as you expect/want it to).

    See: http://redis-py.readthedocs.org/en/latest/#redis.Redis

    See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py
    """

    #: Client arguments that are expected/allowed to be lists.
    CLIENT_LIST_ARGS = frozenset([
        'sentinel_fallback',
    ])

    #: Client arguments that are expected to be boolean convertible.
    CLIENT_BOOL_ARGS = frozenset([
        'retry_on_timeout',
        'ssl',
    ])

    #: Client arguments that are expected to be int convertible.
    CLIENT_INT_ARGS = frozenset([
        'db',
        'socket_keepalive',
        'socket_timeout',
    ])

    #: Default socket timeout to use when none is provided.
    CLIENT_DEFAULT_SOCKET_TO = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"Not dead!"

    SCRIPTS = {
        'create_group':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 1 then
    return 0
end
redis.call("sadd", all_groups_key, no_namespaced_group_key)
redis.call("hset", namespaced_group_key,
           "${group_existence_key}", "${group_existence_value}")
return 1
""",
        'delete_group':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 0 then
    return -1
end
if redis.call("sismember", all_groups_key, no_namespaced_group_key) == 0 then
    return -2
end
if redis.call("hlen", namespaced_group_key) > 1 then
    return -3
end
-- First remove from the set (then delete the group); if the set removal
-- fails, at least the group will still exist (and can be fixed manually)...
if redis.call("srem", all_groups_key, no_namespaced_group_key) == 0 then
    return -4
end
redis.call("del", namespaced_group_key)
return 1
""",
        'update_capabilities':
        """
-- Extract *all* the variables (so we can easily know what they are)...
local group_key = KEYS[1]
local member_id = ARGV[1]
local caps = ARGV[2]
if redis.call("exists", group_key) == 0 then
    return -1
end
if redis.call("hexists", group_key, member_id) == 0 then
    return -2
end
redis.call("hset", group_key, member_id, caps)
return 1
""",
    }
    """`Lua`_ **template** scripts that will be used by various methods (they
    are turned into real scripts and loaded on call into the :func:`.start`
    method).

    .. _Lua: http://www.lua.org
    """
    def __init__(self, member_id, parsed_url, options):
        super(RedisDriver, self).__init__(member_id)
        options = utils.collapse(options, exclude=self.CLIENT_LIST_ARGS)
        self._parsed_url = parsed_url
        self._options = options
        self._encoding = options.get('encoding', self.DEFAULT_ENCODING)
        timeout = options.get('timeout', self.CLIENT_DEFAULT_SOCKET_TO)
        self.timeout = int(timeout)
        self.membership_timeout = float(
            options.get('membership_timeout', timeout))
        lock_timeout = options.get('lock_timeout', self.timeout)
        self.lock_timeout = int(lock_timeout)
        namespace = options.get('namespace', self.DEFAULT_NAMESPACE)
        self._namespace = utils.to_binary(namespace, encoding=self._encoding)
        self._group_prefix = self._namespace + b"_group"
        self._beat_prefix = self._namespace + b"_beats"
        self._groups = self._namespace + b"_groups"
        self._client = None
        self._acquired_locks = set()
        self._executor = utils.ProxyExecutor.build("Redis", options)
        self._started = False
        self._server_info = {}
        self._scripts = {}

    def _check_fetch_redis_version(self, geq_version, not_existent=True):
        if isinstance(geq_version, six.string_types):
            desired_version = version.LooseVersion(geq_version)
        elif isinstance(geq_version, version.LooseVersion):
            desired_version = geq_version
        else:
            raise TypeError("Version check expects a string/version type")
        try:
            redis_version = version.LooseVersion(
                self._server_info['redis_version'])
        except KeyError:
            return (not_existent, None)
        else:
            if redis_version < desired_version:
                return (False, redis_version)
            else:
                return (True, redis_version)

    @property
    def namespace(self):
        return self._namespace

    @property
    def running(self):
        return self._started

    def get_lock(self, name):
        return RedisLock(self, self._client, name, self.lock_timeout)

    _dumps = staticmethod(utils.dumps)
    _loads = staticmethod(utils.loads)

    @classmethod
    def _make_client(cls, parsed_url, options, default_socket_timeout):
        kwargs = {}
        if parsed_url.hostname:
            kwargs['host'] = parsed_url.hostname
            if parsed_url.port:
                kwargs['port'] = parsed_url.port
        else:
            if not parsed_url.path:
                raise ValueError("Expected socket path in parsed urls path")
            kwargs['unix_socket_path'] = parsed_url.path
        if parsed_url.password:
            kwargs['password'] = parsed_url.password
        for a in cls.CLIENT_ARGS:
            if a not in options:
                continue
            if a in cls.CLIENT_BOOL_ARGS:
                v = strutils.bool_from_string(options[a])
            elif a in cls.CLIENT_LIST_ARGS:
                v = options[a]
            elif a in cls.CLIENT_INT_ARGS:
                v = int(options[a])
            else:
                v = options[a]
            kwargs[a] = v
        if 'socket_timeout' not in kwargs:
            kwargs['socket_timeout'] = default_socket_timeout

        # Ask the sentinel for the current master if there is a
        # sentinel arg.
        if 'sentinel' in kwargs:
            sentinel_hosts = [
                tuple(fallback.split(':'))
                for fallback in kwargs.get('sentinel_fallback', [])
            ]
            sentinel_hosts.insert(0, (kwargs['host'], kwargs['port']))
            sentinel_server = sentinel.Sentinel(
                sentinel_hosts, socket_timeout=kwargs['socket_timeout'])
            sentinel_name = kwargs['sentinel']
            del kwargs['sentinel']
            if 'sentinel_fallback' in kwargs:
                del kwargs['sentinel_fallback']
            master_client = sentinel_server.master_for(sentinel_name, **kwargs)
            # The master_client is a redis.StrictRedis using a
            # Sentinel managed connection pool.
            return master_client
        return redis.StrictRedis(**kwargs)

    def _start(self):
        self._executor.start()
        try:
            self._client = self._make_client(self._parsed_url, self._options,
                                             self.timeout)
        except exceptions.RedisError as e:
            utils.raise_with_cause(coordination.ToozConnectionError,
                                   encodeutils.exception_to_unicode(e),
                                   cause=e)
        else:
            # Ensure that the server is alive and not dead, this does not
            # ensure the server will always be alive, but does ensure that it
            # at least is alive once...
            with _translate_failures():
                self._server_info = self._client.info()
            # Validate we have a good enough redis version we are connected
            # to so that the basic set of features we support will actually
            # work (instead of blowing up).
            new_enough, redis_version = self._check_fetch_redis_version(
                self.MIN_VERSION)
            if not new_enough:
                raise tooz.NotImplemented("Redis version greater than or"
                                          " equal to '%s' is required"
                                          " to use this driver; '%s' is"
                                          " being used which is not new"
                                          " enough" %
                                          (self.MIN_VERSION, redis_version))
            tpl_params = {
                'group_existence_value': self.GROUP_EXISTS_VALUE,
                'group_existence_key': self.GROUP_EXISTS,
            }
            # For py3.x ensure these are unicode since the string template
            # replacement will expect unicode (and we don't want b'' as a
            # prefix which will happen in py3.x if this is not done).
            for (k, v) in six.iteritems(tpl_params.copy()):
                if isinstance(v, six.binary_type):
                    v = v.decode('ascii')
                tpl_params[k] = v
            prepared_scripts = {}
            for name, raw_script_tpl in six.iteritems(self.SCRIPTS):
                script_tpl = string.Template(raw_script_tpl)
                script = script_tpl.substitute(**tpl_params)
                prepared_scripts[name] = self._client.register_script(script)
            self._scripts = prepared_scripts
            self.heartbeat()
            self._started = True

    def _encode_beat_id(self, member_id):
        member_id = utils.to_binary(member_id, encoding=self._encoding)
        return self.NAMESPACE_SEP.join([self._beat_prefix, member_id])

    def _encode_member_id(self, member_id):
        member_id = utils.to_binary(member_id, encoding=self._encoding)
        if member_id == self.GROUP_EXISTS:
            raise ValueError("Not allowed to use private keys as a member id")
        return member_id

    def _decode_member_id(self, member_id):
        return utils.to_binary(member_id, encoding=self._encoding)

    def _encode_group_leader(self, group_id):
        group_id = utils.to_binary(group_id, encoding=self._encoding)
        return b"leader_of_" + group_id

    def _encode_group_id(self, group_id, apply_namespace=True):
        group_id = utils.to_binary(group_id, encoding=self._encoding)
        if not apply_namespace:
            return group_id
        return self.NAMESPACE_SEP.join([self._group_prefix, group_id])

    def _decode_group_id(self, group_id):
        return utils.to_binary(group_id, encoding=self._encoding)

    def heartbeat(self):
        with _translate_failures():
            beat_id = self._encode_beat_id(self._member_id)
            expiry_ms = max(0, int(self.membership_timeout * 1000.0))
            self._client.psetex(beat_id,
                                time_ms=expiry_ms,
                                value=self.STILL_ALIVE)
        for lock in self._acquired_locks.copy():
            try:
                lock.heartbeat()
            except tooz.ToozError:
                LOG.warning("Unable to heartbeat lock '%s'",
                            lock,
                            exc_info=True)
        return min(self.lock_timeout, self.membership_timeout)

    def _stop(self):
        while self._acquired_locks:
            lock = self._acquired_locks.pop()
            try:
                lock.release()
            except tooz.ToozError:
                LOG.warning("Unable to release lock '%s'", lock, exc_info=True)
        self._executor.stop()
        if self._client is not None:
            # Make sure we no longer exist...
            beat_id = self._encode_beat_id(self._member_id)
            try:
                # NOTE(harlowja): this will delete nothing if the key doesn't
                # exist in the first place, which is fine/expected/desired...
                with _translate_failures():
                    self._client.delete(beat_id)
            except tooz.ToozError:
                LOG.warning("Unable to delete heartbeat key '%s'",
                            beat_id,
                            exc_info=True)
            self._client = None
        self._server_info = {}
        self._scripts.clear()
        self._started = False

    def _submit(self, cb, *args, **kwargs):
        if not self._started:
            raise tooz.ToozError("Redis driver has not been started")
        return self._executor.submit(cb, *args, **kwargs)

    def _get_script(self, script_key):
        try:
            return self._scripts[script_key]
        except KeyError:
            raise tooz.ToozError("Redis driver has not been started")

    def create_group(self, group_id):
        script = self._get_script('create_group')

        def _create_group(script):
            encoded_group = self._encode_group_id(group_id)
            keys = [
                encoded_group,
                self._groups,
            ]
            args = [
                self._encode_group_id(group_id, apply_namespace=False),
            ]
            result = script(keys=keys, args=args)
            result = strutils.bool_from_string(result)
            if not result:
                raise coordination.GroupAlreadyExist(group_id)

        return RedisFutureResult(self._submit(_create_group, script))

    def update_capabilities(self, group_id, capabilities):
        script = self._get_script('update_capabilities')

        def _update_capabilities(script):
            keys = [
                self._encode_group_id(group_id),
            ]
            args = [
                self._encode_member_id(self._member_id),
                self._dumps(capabilities),
            ]
            result = int(script(keys=keys, args=args))
            if result == -1:
                raise coordination.GroupNotCreated(group_id)
            if result == -2:
                raise coordination.MemberNotJoined(group_id, self._member_id)

        return RedisFutureResult(self._submit(_update_capabilities, script))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(self._member_id)

        def _leave_group(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            p.multi()
            p.hdel(encoded_group, encoded_member_id)
            c = p.execute()[0]
            if c == 0:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            else:
                self._joined_groups.discard(group_id)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _leave_group,
                         encoded_group,
                         value_from_callable=True))

    def get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        def _get_members(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            potential_members = set()
            for m in p.hkeys(encoded_group):
                m = self._decode_member_id(m)
                if m != self.GROUP_EXISTS:
                    potential_members.add(m)
            if not potential_members:
                return set()
            # Ok now we need to see which members have passed away...
            gone_members = set()
            member_values = p.mget(
                compat_map(self._encode_beat_id, potential_members))
            for (potential_member,
                 value) in compat_zip(potential_members, member_values):
                # Always preserve self (just in case we haven't heartbeated
                # while this call/s was being made...), this does *not* prevent
                # another client from removing this though...
                if potential_member == self._member_id:
                    continue
                if not value:
                    gone_members.add(potential_member)
            # Trash all the members that no longer are with us... RIP...
            if gone_members:
                p.multi()
                encoded_gone_members = list(
                    self._encode_member_id(m) for m in gone_members)
                p.hdel(encoded_group, *encoded_gone_members)
                p.execute()
                return set(m for m in potential_members
                           if m not in gone_members)
            return potential_members

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _get_members,
                         encoded_group,
                         value_from_callable=True))

    def get_member_capabilities(self, group_id, member_id):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(member_id)

        def _get_member_capabilities(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            capabilities = p.hget(encoded_group, encoded_member_id)
            if capabilities is None:
                raise coordination.MemberNotJoined(group_id, member_id)
            return self._loads(capabilities)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _get_member_capabilities,
                         encoded_group,
                         value_from_callable=True))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)
        encoded_member_id = self._encode_member_id(self._member_id)

        def _join_group(p):
            if not p.exists(encoded_group):
                raise coordination.GroupNotCreated(group_id)
            p.multi()
            p.hset(encoded_group, encoded_member_id, self._dumps(capabilities))
            c = p.execute()[0]
            if c == 0:
                # Field already exists...
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            else:
                self._joined_groups.add(group_id)

        return RedisFutureResult(
            self._submit(self._client.transaction,
                         _join_group,
                         encoded_group,
                         value_from_callable=True))

    def delete_group(self, group_id):
        script = self._get_script('delete_group')

        def _delete_group(script):
            keys = [
                self._encode_group_id(group_id),
                self._groups,
            ]
            args = [
                self._encode_group_id(group_id, apply_namespace=False),
            ]
            result = int(script(keys=keys, args=args))
            if result in (-1, -2):
                raise coordination.GroupNotCreated(group_id)
            if result == -3:
                raise coordination.GroupNotEmpty(group_id)
            if result == -4:
                raise tooz.ToozError("Unable to remove '%s' key"
                                     " from set located at '%s'" %
                                     (args[0], keys[-1]))
            if result != 1:
                raise tooz.ToozError("Internal error, unable"
                                     " to complete group '%s' removal" %
                                     (group_id))

        return RedisFutureResult(self._submit(_delete_group, script))

    def _destroy_group(self, group_id):
        """Should only be used in tests..."""
        self._client.delete(self._encode_group_id(group_id))

    def get_groups(self):
        def _get_groups():
            results = []
            for g in self._client.smembers(self._groups):
                results.append(self._decode_group_id(g))
            return results

        return RedisFutureResult(self._submit(_get_groups))

    def _get_leader_lock(self, group_id):
        name = self._encode_group_leader(group_id)
        return self.get_lock(name)

    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(group_id,
                                                     self._member_id))

    def run_watchers(self, timeout=None):
        result = super(RedisDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
Example #13
 def _get_version(self):
     for host in self.cassandra_helper.cluster.metadata.all_hosts():
         return version.LooseVersion(host.release_version)
     return None
Example #14
    def testSomeTrades_Position(self):
        strat = self.__createStrategy()
        stratAnalyzer = trades.Trades()
        strat.attach_analyzer(stratAnalyzer)

        # Winning trade
        strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 0),
                          strat.enter_long,
                          TradesAnalyzerTestCase.TestInstrument, 1)  # 127.14
        strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 16),
                         strat.exit_position)  # 127.16
        # Losing trade
        strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 30),
                          strat.enter_long,
                          TradesAnalyzerTestCase.TestInstrument, 1)  # 127.2
        strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 31),
                         strat.exit_position)  # 127.16
        # Winning trade
        strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 38),
                          strat.enter_long,
                          TradesAnalyzerTestCase.TestInstrument, 1)  # 127.16
        strat.addPosExit(buildUTCDateTime(2011, 1, 3, 15, 42),
                         strat.exit_position)  # 127.26
        # Open trade (left unclosed)
        strat.addPosEntry(buildUTCDateTime(2011, 1, 3, 15, 47),
                          strat.enter_long,
                          TradesAnalyzerTestCase.TestInstrument, 1)  # 127.34
        strat.run()

        self.assertTrue(
            round(strat.get_broker().get_cash(), 2) == round(
                1000 + (127.16 - 127.14) + (127.16 - 127.2) +
                (127.26 - 127.16) - 127.34, 2))

        self.assertTrue(stratAnalyzer.get_count() == 3)
        self.assertTrue(stratAnalyzer.get_even_count() == 0)
        self.assertTrue(round(stratAnalyzer.get_all().mean(), 2) == 0.03)
        self.assertTrue(round(stratAnalyzer.get_all().std(ddof=1), 2) == 0.07)
        self.assertTrue(round(stratAnalyzer.get_all().std(ddof=0), 2) == 0.06)

        self.assertTrue(stratAnalyzer.get_profitable_count() == 2)
        self.assertTrue(round(stratAnalyzer.get_profits().mean(), 2) == 0.06)
        self.assertTrue(
            round(stratAnalyzer.get_profits().std(ddof=1), 2) == 0.06)
        self.assertTrue(
            round(stratAnalyzer.get_profits().std(ddof=0), 2) == 0.04)
        self.assertEqual(stratAnalyzer.get_positive_returns()[0],
                         (127.16 - 127.14) / 127.14)
        self.assertEqual(stratAnalyzer.get_positive_returns()[1],
                         (127.26 - 127.16) / 127.16)

        self.assertTrue(stratAnalyzer.get_unprofitable_count() == 1)
        self.assertTrue(round(stratAnalyzer.get_losses().mean(), 2) == -0.04)
        if version.LooseVersion(
                numpy.__version__) >= version.LooseVersion("1.6.2"):
            self.assertTrue(math.isnan(stratAnalyzer.get_losses().std(ddof=1)))
        else:
            self.assertTrue(stratAnalyzer.get_losses().std(ddof=1) == 0)
        self.assertTrue(stratAnalyzer.get_losses().std(ddof=0) == 0)
        self.assertEqual(stratAnalyzer.get_negative_returns()[0],
                         (127.16 - 127.2) / 127.2)
Example #15
def is_nsx_version_2_1_0(nsx_version):
    return (version.LooseVersion(nsx_version) >= version.LooseVersion(
        v3_const.NSX_VERSION_2_1_0))
Example #16
def is_min_version(found, minversion):
    """
    Returns whether *found* is a version at least as high as *minversion*.
    """
    return version.LooseVersion(found) >= version.LooseVersion(minversion)
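A quick check with hypothetical versions, showing why this beats comparing raw strings:

assert is_min_version('1.10.2', '1.9')      # components compare numerically
assert not is_min_version('1.9', '1.10.2')
# A plain string comparison would get the first case wrong:
assert not ('1.10.2' >= '1.9')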
Example #17
def is_nsxv_version_6_3(nsx_version):
    return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.3'))
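Note that patch releases pass the check: with an equal prefix, the longer version list compares greater. Hypothetical inputs:

assert is_nsxv_version_6_3('6.3.0')   # [6, 3, 0] >= [6, 3]
assert not is_nsxv_version_6_3('6.2.9')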
Example #18
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# TEST_UNICODE_LITERALS

import sys

from distutils import version
import numpy as np

from ...tests.helper import pytest, catch_warnings
from ... import table
from ...table import Row
from ...utils.exceptions import AstropyDeprecationWarning
from .conftest import MaskedTable

numpy_lt_1p8 = version.LooseVersion(
    np.__version__) < version.LooseVersion('1.8')


def test_masked_row_with_object_col():
    """
    Numpy < 1.8 has a bug in masked arrays that prevents accessing a row if
    there is a column with object type.
    """
    t = table.Table([[1]], dtypes=['O'], masked=True)
    if numpy_lt_1p8:
        with pytest.raises(ValueError):
            t['col0'].mask = False
            t[0]
        with pytest.raises(ValueError):
            t['col0'].mask = True
            t[0]
Example #19
            }
        }]
    },
    {
        "desc": "add loadbalancer resource type",
        "type": "create_resource_type",
        "resource_type": "loadbalancer",
        "data": [{
            "attributes": {}
        }]
    },
]

# NOTE(sileht): We use LooseVersion because pbr can generate version strings
# like 9.0.1.dev226 that are invalid for StrictVersion
REQUIRED_VERSION = version.LooseVersion("4.2.0")


def upgrade_resource_types(conf):
    gnocchi = get_gnocchiclient(conf)

    gnocchi_version = version.LooseVersion(gnocchi.build.get())
    if gnocchi_version < REQUIRED_VERSION:
        raise Exception("required gnocchi version is %s, got %s" %
                        (REQUIRED_VERSION, gnocchi_version))

    for name, attributes in resources_initial.items():
        try:
            gnocchi.resource_type.get(name=name)
        except (gnocchi_exc.ResourceTypeNotFound, gnocchi_exc.NotFound):
            rt = {'name': name, 'attributes': attributes}
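To illustrate the NOTE above, a sketch assuming only distutils: StrictVersion rejects pbr-style dev versions, while LooseVersion parses them.

from distutils import version
try:
    version.StrictVersion("9.0.1.dev226")
except ValueError:
    print("StrictVersion rejects 9.0.1.dev226")
print(version.LooseVersion("9.0.1.dev226"))  # parses fine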
Example #20
import pyevolve.GenomeBase
import pyevolve.G1DList
import pyevolve.Mutators
import pyevolve.Initializators

import networkx as nx
from distutils import version

if version.LooseVersion(nx.__version__) < version.LooseVersion("1.5"):
    raise RuntimeError("Networkx should have at least version 1.5")

import random
import exceptions
import math
from collections import defaultdict


class NodeRepresentation():
    '''
    An object instance of NodeRepresentation is created on the fly by
    GraphGenome to hold compact information about a single node. It has no
    other use apart from being a representation.
    '''
    def __init__(self):
        self.number = None
        self.type_id = None
        self.out_edges = {}
        self.params = None

    def __repr__(self):
Example #21
import re

import numpy as np

from ....utils import OrderedDict
from ....tests.helper import pytest
from ... import ascii
from ....table import Table
from ....units import Unit
from distutils import version

from .common import (raises, assert_equal, assert_almost_equal, assert_true,
                     setup_function, teardown_function)

_NUMPY_VERSION = version.LooseVersion(np.__version__)


def test_convert_overflow():
    """
    Test reading an extremely large integer, which falls through to
    string due to an overflow error (#2234).
    """
    # Before Numpy 1.6 the exception from np.array(['1' * 10000], dtype=np.int)
    # is exactly the same as np.array(['abc'], dtype=np.int).  In this case
    # it falls through to float, so we just accept this as a known issue for
    # numpy < 1.6.
    expected_kind = (
        'f', ) if _NUMPY_VERSION < version.LooseVersion('1.6') else ('S', 'U')
    dat = ascii.read(['a', '1' * 10000], format='basic', guess=False)
    assert dat['a'].dtype.kind in expected_kind
Example #22
from datetime import datetime
from distutils import version
import re

import numpy as np
import quantities as pq

# check scipy
try:
    import scipy.io
    import scipy.version
except ImportError as err:
    HAVE_SCIPY = False
    SCIPY_ERR = err
else:
    if version.LooseVersion(scipy.version.version) < '0.12.0':
        HAVE_SCIPY = False
        SCIPY_ERR = ImportError("your scipy version is too old to support " +
                                "MatlabIO, you need at least 0.12.0. " +
                                "You have %s" % scipy.version.version)
    else:
        HAVE_SCIPY = True
        SCIPY_ERR = None

from neo.io.baseio import BaseIO
from neo.core import (Block, Segment, AnalogSignal, Event, Epoch, SpikeTrain,
                      objectnames, class_by_name)

classname_lower_to_upper = {}
for k in objectnames:
    classname_lower_to_upper[k.lower()] = k
Example #23
def chk_ci_version():
    v = version.LooseVersion(
        pkg_resources.get_distribution('cloud-init').version)
    return v >= version.LooseVersion('0.6.0')
Example #24
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BoltOn Method for privacy."""

from distutils import version
import sys

import tensorflow.compat.v1 as tf

if version.LooseVersion(tf.__version__) < version.LooseVersion("2.0.0"):
  raise ImportError("Please upgrade your version "
                    "of tensorflow from: {0} to at least 2.0.0 to "
                    "use privacy/bolt_on".format(
                        version.LooseVersion(tf.__version__)))
if hasattr(sys, "skip_tf_privacy_import"):  # Useful for standalone scripts.
  pass
else:
  from tensorflow_privacy.privacy.bolt_on.models import BoltOnModel  # pylint: disable=g-import-not-at-top
  from tensorflow_privacy.privacy.bolt_on.optimizers import BoltOn  # pylint: disable=g-import-not-at-top
  from tensorflow_privacy.privacy.bolt_on.losses import StrongConvexHuber  # pylint: disable=g-import-not-at-top
  from tensorflow_privacy.privacy.bolt_on.losses import StrongConvexBinaryCrossentropy  # pylint: disable=g-import-not-at-top
Example #25
def is_before_4_1(ver):
    return version.LooseVersion(ver) < version.LooseVersion('4.1')
Example #26
 def UseGCPAuthProvider(cluster):
     return (
         cluster.currentMainVersion
         and dist_version.LooseVersion(cluster.currentMainVersion) >=
         dist_version.LooseVersion(MIN_GCP_AUTH_PROVIDER_VERSION) and
         not properties.VALUES.container.use_client_certificate.GetBool())
Example #27
try:
    import montage
    # A bare `raise` here has no active exception, so it raises an error
    # that the bare `except` below catches; a montage module without
    # reproject_hdu is therefore treated as not installed.
    if not hasattr(montage, 'reproject_hdu'):
        raise
    montage_installed = True
except:
    montage_installed = False

try:
    from pyavm import AVM
    avm_installed = True
except:
    avm_installed = False

if montage_installed:
    if version.LooseVersion(
            montage.__version__) < version.LooseVersion('0.9.2'):
        warnings.warn(
            "Python-montage installation is not recent enough (version 0.9.2 or later is required). Disabling Montage-related functionality."
        )
        montage_installed = False

from . import image_util
from . import math_util


def _data_stretch(image, vmin=None, vmax=None, pmin=0.25, pmax=99.75,
                  stretch='linear', vmid=None, exponent=2):

    min_auto = not math_util.isnumeric(vmin)
    max_auto = not math_util.isnumeric(vmax)
Example #28
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function  # For print debugging with python 2 or 3

from distutils import version

import numpy as np

from ...tests.helper import pytest
from ... import table
from ...table import Column
from ...utils import OrderedDict

numpy_lt_1p5 = version.LooseVersion(
    np.__version__) < version.LooseVersion('1.5')


class MaskedTable(table.Table):
    def __init__(self, *args, **kwargs):
        kwargs['masked'] = True
        table.Table.__init__(self, *args, **kwargs)


# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False] if numpy_lt_1p5 else [False, True])
def table_type(request):
    return MaskedTable if request.param else table.Table


@pytest.mark.usefixtures('table_type')
class BaseInitFrom():
Example #29
def LooseVersion(vstring):
    # Our development version is something like '0.10.9+aac7bfc'.
    # This wrapper just ignores the git commit id after the '+'.
    vstring = vstring.split("+")[0]
    return version.LooseVersion(vstring)
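A quick check of the wrapper with hypothetical version strings:

# The local-build suffix after '+' is stripped before comparing.
assert LooseVersion('0.10.9+aac7bfc') == LooseVersion('0.10.9')
assert LooseVersion('0.10.9+aac7bfc') < LooseVersion('0.10.10')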
Example #30
from .baseio import BaseIO
from .tools import create_many_to_one_relationship
from tables import NoSuchNodeError as NSNE
import tables as tb
import numpy as np
import quantities as pq
import logging
import uuid

import tables

logger = logging.getLogger("Neo")

# version checking
from distutils import version
if version.LooseVersion(tables.__version__) < '2.2':
    raise ImportError(
        "your pytables version is too old to support NeoHdf5IO; "
        "you need at least 2.2, you have %s" % tables.__version__)
"""
SETTINGS:
filename:       the full path to the HDF5 file.
cascade:        If 'True' all children are retrieved when get(object) is called.
lazy:           If 'True' data (arrays) is retrieved when get(object) is called.
"""
settings = {'filename': "neo.h5", 'cascade': True, 'lazy': True}


def _func_wrapper(func):
    try:
        return func