Example #1
from distutils.version import StrictVersion

import rest_framework
from rest_framework import serializers
from rest_framework import fields

if StrictVersion(rest_framework.__version__) < StrictVersion("3.0.0"):

    class BooleanField(serializers.BooleanField):
        """
        Version of BooleanField which handles fields which are 1,0
        """
        def to_native(self, value):
            if isinstance(value, int) and value in [0, 1]:
                return bool(value)
            else:
                return super(BooleanField, self).to_native(value)
else:
    # rest-framework 3 booleanfield handles 0, 1
    BooleanField = fields.BooleanField

if StrictVersion(rest_framework.__version__) < StrictVersion("3.0.0"):

    class UuidField(serializers.CharField):
        """
        For strings like Ceph service UUIDs and Ceph cluster FSIDs
        """
        type_name = "UuidField"
        type_label = "uuid string"
else:
    # rest-framework 3 has a built-in UUID field.
    UuidField = fields.UUIDField
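
Both branches above follow the same compatibility idiom: compare versions once at import time, then bind a single name the rest of the module uses. A minimal, standard-library-only sketch of the idiom (pick_impl and the threshold are illustrative, not part of the example above):

from distutils.version import StrictVersion

def pick_impl(version, threshold="3.0.0"):
    """Choose between a legacy shim and the native implementation."""
    if StrictVersion(version) < StrictVersion(threshold):
        return "legacy shim"
    return "native implementation"

assert pick_impl("2.4.1") == "legacy shim"
assert pick_impl("3.1.0") == "native implementation"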
Example #2
from distutils.version import StrictVersion
from collections import defaultdict
from PIL import Image
from HTML_utils import *
from OCR_utils import *
from OD_utils import *
from selenium import webdriver
import os
import sys
import time

import tensorflow as tf

# This is needed since the notebook is stored in the object_detection folder.
root = os.getcwd()
sys.path.append(os.path.join(root,'models','research'))

from object_detection.utils import ops as utils_ops

if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
  raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')

from utils import label_map_util
from utils import visualization_utils as vis_util



# Path of model to be loaded.
MODEL_NAME = 'models/research/opgraph_dir/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + 'frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('/home/archisha/prototype-2.0/version2/models/research/object_detection/data', 'mscoco_label_map.pbtxt')
Example #3
        self.log.write("Un-Block;" + "{:.5f}".format(time.time()) + ";\r\n")
        self.log.flush()

    def run_test(self):
        time.sleep(2)
        self.block()
        time.sleep(0.5)
        self.unblock()
        time.sleep(2)
        print('########### TEST DONE ############')


if __name__ == '__main__':
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    if (StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0")):
        Qt.QApplication.setGraphicsSystem(gr.prefs().get_string(
            'qtgui', 'style', 'raster'))
    qapp = Qt.QApplication(sys.argv)
    tb = rrrm_test_client()
    tb.start()
    my_test = rrrm_test(tb, qapp)
    per_thread = threading.Thread(target=my_test.run_test)
    per_thread.start()
    tb.show()

    def quitting():
        tb.stop()
        tb.wait()

    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
Example #4
def multihead_attention(queries, 
                        keys, 
                        num_units=None, 
                        num_heads=8, 
                        dropout_rate=0,
                        is_training=True,
                        causality=False,
                        key_masks=None,
                        query_masks=None,
                        scope="multihead_attention",
                        cache=None,
                        reuse=None):
    '''Applies multihead attention.

    Args:
      queries: A 3d tensor with shape of [N, T_q, C_q].
      keys: A 3d tensor with shape of [N, T_k, C_k].
      num_units: A scalar. Attention size.
      num_heads: An int. Number of heads.
      dropout_rate: A floating point number.
      is_training: Boolean. Controller of mechanism for dropout.
      causality: Boolean. If true, units that reference the future are masked.
      key_masks: Optional key mask of shape [N, T_k]; derived from `keys`
        when None.
      query_masks: Optional query mask of shape [N, T_q]; derived from
        `queries` when None.
      scope: Optional scope for `variable_scope`.
      cache: Optional dict holding previously computed keys ("k") and
        values ("v") for incremental decoding.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A pair (outputs, alignments): a 3d tensor with shape of (N, T_q, C),
      and a list of num_heads attention tensors of shape (N, T_q, T_k).
    '''

    with tf.variable_scope(scope, reuse=reuse):
        # Set the fall back option for num_units
        if num_units is None:
            num_units = queries.get_shape().as_list()[-1]

        # Linear projections
        Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu) # (N, T_q, C)
        K = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
        V = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)

        if cache is not None:
            K = cache["k"] = tf.concat([cache["k"], K], axis=1)
            V = cache["v"] = tf.concat([cache["v"], V], axis=1)

        # Split and concat
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)

        # Multiplication
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)

        # Scale
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)

        # Key Masking
        if cache is None:
          if key_masks is None:
            key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
          key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
          key_masks = tf.tile(tf.expand_dims(key_masks, 1),
                              [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)

          paddings = tf.ones_like(outputs)*(-2**32+1)
          outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) # (h*N, T_q, T_k)

        # Causality = Future blinding
        if causality and cache is None:
            diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)

            from distutils.version import StrictVersion

            if StrictVersion(tf.__version__) >= StrictVersion('1.5'):
              tf_lower_triangular = tf.linalg.LinearOperatorLowerTriangular
            else:
              tf_lower_triangular = tf.contrib.linalg.LinearOperatorTriL
            tril = tf_lower_triangular(diag_vals).to_dense() # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)

            paddings = tf.ones_like(masks)*(-2**32+1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)

        # Activation
        outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)

        alignments = tf.split(outputs, num_heads, axis=0) #  [ h x tensor(N, T_q, T_k) ]
         
        # Query Masking
        if cache is None:
          if query_masks is None:
            query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
          query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
          query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
          outputs *= query_masks # broadcasting. (h*N, T_q, T_k)
          
        # Dropouts
        outputs = tf.layers.dropout(outputs,
                                    rate=dropout_rate,
                                    training=tf.convert_to_tensor(is_training))
               
        # Weighted sum
        outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)
        
        # Restore shape
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2) # (N, T_q, C)
              
        # Residual connection
        outputs += queries
              
        # Normalize
        outputs = normalize(outputs) # (N, T_q, C)
 
    return outputs, alignments
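
A hypothetical call site for the function above, assuming TF 1.x graph mode and the normalize() helper it references; shapes follow the docstring:

# Hypothetical usage sketch (not part of the original file):
queries = tf.placeholder(tf.float32, [None, 10, 512])  # (N, T_q, C_q)
keys = tf.placeholder(tf.float32, [None, 12, 512])     # (N, T_k, C_k)
outputs, alignments = multihead_attention(
    queries, keys, num_units=512, num_heads=8,
    dropout_rate=0.1, is_training=True, causality=False)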
Example #5
from distutils.version import StrictVersion

from django import get_version, template
from django.contrib.messages import constants
from django.template.base import FilterExpression

import asteval

# pylint: disable=C0103

register = template.Library()

NoneFilterExpression = FilterExpression("None", None)
FalseFilterExpression = FilterExpression("False", None)
TrueFilterExpression = FilterExpression("True", None)

MESSAGE_LEVELS = {
    constants.ERROR: 'alert',
}

IS_DJANGO_1_11_OR_ABOVE = StrictVersion(
    get_version()) >= StrictVersion('1.11.0')


@register.filter
def message_class(msg):
    '''
    Return the foundation alert class for a message level.
    '''
    try:
        return MESSAGE_LEVELS[msg.level]
    except KeyError:
        try:
            return constants.DEFAULT_TAGS[msg.level]
        except KeyError:
            return msg.level
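
For context, a filter registered this way is used from a template roughly as follows; the load name is hypothetical, since it depends on this module's filename:

# Hypothetical template usage (load name depends on the module filename):
#   {% load foundation_tags %}
#   <div class="callout {{ message|message_class }}">{{ message }}</div>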
Example #6
import os

import eventlet
import eventlet.debug
import eventlet.hubs
import eventlet.patcher

# Assumed to be swift.common.utils, which provides get_hub():
from swift.common import utils

eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)

from swiftclient import get_auth, http_connection

has_insecure = False
try:
    from swiftclient import __version__ as client_version
    # Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
    client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
    # Pre-PBR we had version, not __version__. Anyhow...
    client_version = '1.2'
from distutils.version import StrictVersion
if StrictVersion(client_version) >= StrictVersion('2.0'):
    has_insecure = True


config = {}
web_front_end = None
normalized_urls = None

# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None]
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None]
swift_test_tenant = ['', '', '']
swift_test_perm = ['', '', '']
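
The three-component truncation at the top of this example exists because StrictVersion only accepts versions of the form N.N[.N] with an optional aN/bN pre-release tag, and raises ValueError for anything else. A standalone illustration, standard library only:

from distutils.version import StrictVersion

raw = '2.0.3.68.ga99c2ff'            # PBR-style dev version string
try:
    StrictVersion(raw)               # four numeric parts + local tag: rejected
except ValueError:
    pass

print(StrictVersion('.'.join(raw.split('.')[:3])))   # 2.0.3 -- accepted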
Example #7
from distutils.version import StrictVersion

from ._base import Descriptor
from ._graph_matrix import DistanceMatrix as D
from ._matrix_attributes import SM1, methods, get_method

__all__ = ("DistanceMatrix", )

_version_remove_SM1_D = StrictVersion("1.1.0")


class DistanceMatrix(Descriptor):
    r"""distance matrix descriptor.

    :type type: str
    :param type: :ref:`matrix_aggregating_methods`
    """

    since = "1.0.0"
    __slots__ = ("_type", )
    explicit_hydrogens = False

    def description(self):
        return "{} from distance matrix".format(self._type.description())

    @classmethod
    def preset(cls, version):
        if version >= _version_remove_SM1_D:
            return (cls(m) for m in methods if m != SM1)
        else:
            return map(cls, methods)
Example #8
        try:
            os.remove('rattletrap-binaries/.DS_Store')
        except FileNotFoundError:
            pass

        current_binaries = os.listdir('rattletrap-binaries')

        current_version = '0.0.0'

        if len(current_binaries) > 0:
            current_version = current_binaries[0].split('-')[1]

        should_update = (StrictVersion(rattletrap_release['name'])
                         > StrictVersion(current_version))

        print('GH: {}. RLR: {}. Update? {}'.format(
            rattletrap_release['name'],
            current_version,
            'Yes' if should_update else 'No'
        ))

        if should_update:
            if rattletrap_release['name'] in version_blacklist:
                print('Skipping this version.')
            else:
                # Download the latest version.
                for file in current_binaries:
                    os.remove('rattletrap-binaries/{}'.format(file))

                for asset in rattletrap_release['assets']:
                    if 'windows' in asset['browser_download_url']:
                        continue

                    print('Downloading {}'.format(asset['name']))
Example #9
import unittest

import numpy
import onnx
from distutils.version import StrictVersion
from sklearn.feature_extraction.text import TfidfVectorizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import StringTensorType
# Assumed local test helper from the skl2onnx test suite:
from test_utils import dump_data_and_model


class TestSklearnTfidfVectorizerRegex(unittest.TestCase):

    def get_options(self):
        return {TfidfVectorizer: {"tokenexp": ""}}

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11_word4(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(
            1, 1), norm=None, token_pattern="[a-zA-Z]{1,4}")
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11Regex4-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11_empty_string(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            '',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        # TfidfVectorizer in onnxruntime fails with empty strings
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11EmptyStringRegex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) "
                          "<= StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11_out_vocabulary(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        corpus = numpy.array([
            'AZZ ZZ This is the first document.',
            'BZZ ZZ This document is the second document.',
            'ZZZ ZZ And this is the third one.',
            'WZZ ZZ Is this the first document?',
        ]).reshape((4, 1))
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11OutVocabRegex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer22(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(2, 2), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer22Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer12(self):
        corpus = numpy.array([
            'AA AA',
            'AA AA BB',
        ]).reshape((2, 1))
        vect = TfidfVectorizer(ngram_range=(1, 2), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer12SRegex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer122(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 2), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer12Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer12_normL1(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 2), norm='l1')
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))])
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer12L1Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer12_normL2(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 2), norm='l2')
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer12L2Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer13(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 3), norm=None)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=self.get_options())
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer13Regex-OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11parenthesis_class(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the (first) document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
        vect.fit(corpus.ravel())
        extra = {TfidfVectorizer: {'separators': [
                                        ' ', '[.]', '\\?', ',', ';',
                                        ':', '\\!', '\\(', '\\)'
                                    ],
                                   'tokenexp': None}}
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=extra)
        self.assertTrue(model_onnx is not None)
        # This test depends on this issue:
        # https://github.com/Microsoft/onnxruntime/issues/957.
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11ParenthesisClassRegex-"
                     "OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")

    @unittest.skipIf(
        StrictVersion(onnx.__version__) < StrictVersion("1.4.1"),
        reason="Requires opset 9.")
    def test_model_tfidf_vectorizer11_idparenthesis_id(self):
        corpus = numpy.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the (first) document?',
        ]).reshape((4, 1))
        vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
        vect.fit(corpus.ravel())

        extra = {id(vect): {"sep2": [' ', '.', '?', ',', ';', ':',
                                     '!', '(', ')'],
                            'regex': None}}
        try:
            convert_sklearn(vect, 'TfidfVectorizer',
                            [('input', StringTensorType([1, 1]))],
                            options=extra)
        except RuntimeError:
            pass

        extra = {id(vect): {"separators": [
                                ' ', '[.]', '\\?', ',', ';', ':',
                                '\\!', '\\(', '\\)'
                            ],
                            "tokenexp": None}}
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                     [('input', StringTensorType([1, 1]))],
                                     options=extra)
        self.assertTrue(model_onnx is not None)
        # This test depends on this issue:
        # https://github.com/Microsoft/onnxruntime/issues/957.
        dump_data_and_model(
            corpus, vect, model_onnx,
            basename="SklearnTfidfVectorizer11ParenthesisIdRegex-"
                     "OneOff-SklCol",
            allow_failure="StrictVersion(onnxruntime.__version__) <= "
                          "StrictVersion('0.4.0')")
Example #10
import ldap
import ldap.sasl
import urllib
import re
from distutils.version import StrictVersion

from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
from nss_cache.sources import source

IS_LDAP24_OR_NEWER = StrictVersion(ldap.__version__) >= StrictVersion('2.4')

# ldap.LDAP_CONTROL_PAGE_OID is unavailable on some systems, so we define it here
LDAP_CONTROL_PAGE_OID = '1.2.840.113556.1.4.319'


def RegisterImplementation(registration_callback):
    registration_callback(LdapSource)


def makeSimplePagedResultsControl(page_size):
    # The API for this is different on older versions of python-ldap, so we need
    # to handle this case.
    if IS_LDAP24_OR_NEWER:
        return ldap.controls.SimplePagedResultsControl(True,
                                                       size=page_size,
Example #11
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = '*****@*****.**'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''

REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
    warnings.warn(
        "You are using version {0} of pip. " \
        "However, version {1} is recommended.".format(
            _PIP_VERSION, REQUIRED_PIP_VERSION
        )
    )

REQUIRED_CYTHON_VERSION = '0.28.1'
try:
    _CYTHON_VERSION = get_distribution('cython').version
    if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
        warnings.warn(
            "You are using version {0} of cython. " \
            "However, version {1} is recommended.".format(
                _CYTHON_VERSION, REQUIRED_CYTHON_VERSION
Example #12
from distutils.version import StrictVersion

VERSION = StrictVersion('3.2.3')
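
For reference, since every example on this page leans on it: StrictVersion compares components numerically rather than lexically, treats a missing patch component as 0, and orders aN/bN pre-releases before the corresponding final release. A quick check:

from distutils.version import StrictVersion

assert StrictVersion('1.5.1') < StrictVersion('1.5.2b2')   # numeric comparison
assert StrictVersion('1.5.2b2') < StrictVersion('1.5.2')   # pre-release < final
assert StrictVersion('3.2') == StrictVersion('3.2.0')      # missing patch == 0
assert StrictVersion('0.10.0') > StrictVersion('0.9.9')    # 10 > 9, not '1' < '9'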
Example #13
    @classmethod
    def get_allow_cost_estimate(cls, version: Optional[str] = None) -> bool:
        return version is not None and StrictVersion(version) >= StrictVersion(
            "0.319")
Example #14
def skip_if_server_version_lt(min_version):
    check = StrictVersion(get_version()) < StrictVersion(min_version)
    return pytest.mark.skipif(check, reason="")
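
A hypothetical way the marker above would be applied in a test module; the minimum version and test body are illustrative:

@skip_if_server_version_lt('2.8.0')
def test_feature_added_in_2_8():
    ...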
Example #15
import os
import itertools
from distutils.version import StrictVersion

import numpy as np
# Assumed LSST stack import providing the Butler used below:
import lsst.daf.persistence as dafPersist


def tractFindVisits(rerun,
                    tract,
                    filter='HSC-I',
                    patch=None,
                    dataDir='/lustre/Subaru/SSP/rerun/'):
    """Return the list of input Visits to coadd."""
    butler = dafPersist.Butler(os.path.join(dataDir, rerun))

    pipeVersion = dafPersist.eupsVersions.EupsVersions().versions['hscPipe']
    if StrictVersion(pipeVersion) >= StrictVersion('3.9.0'):
        coaddData = "deepCoadd_calexp"
    else:
        coaddData = "deepCoadd"

    if patch != '':
        """
        Only 1 Patch is required
        """
        coadd = butler.get(coaddData,
                           dataId={
                               "tract": tract,
                               "patch": patch,
                               "filter": filter
                           },
                           immediate=True)
        ccdInputs = coadd.getInfo().getCoaddInputs().ccds
        visits = np.unique(ccdInputs.get("visit"))
        print "\n# Visits for Tract=%d Filter=%s Patch=%s\n" % (tract, filter,
                                                                patch)
    else:
        """
        Go through all the possible patches
        """
        visits = np.empty([0], dtype=int)
        for pa in itertools.combinations_with_replacement((np.arange(9)), 2):
            patch = str(pa[0]) + ',' + str(pa[1])
            try:
                coadd = butler.get(coaddData,
                                   dataId={
                                       "tract": tract,
                                       "patch": patch,
                                       "filter": filter
                                   },
                                   immediate=True)
            except Exception:
                continue
            ccdInputs = coadd.getInfo().getCoaddInputs().ccds
            vTemp = np.unique(ccdInputs.get("visit"))
            visits = np.unique(np.append(visits, vTemp))

        for pa in itertools.combinations_with_replacement((np.arange(9)), 2):
            patch = str(pa[1]) + ',' + str(pa[0])
            try:
                coadd = butler.get(coaddData,
                                   dataId={
                                       "tract": tract,
                                       "patch": patch,
                                       "filter": filter
                                   },
                                   immediate=True)
            except Exception:
                continue
            ccdInputs = coadd.getInfo().getCoaddInputs().ccds
            vTemp = np.unique(ccdInputs.get("visit"))
            visits = np.unique(np.append(visits, vTemp))

        print "\n# Input visits for Tract=%d Filter=%s\n" % (tract, filter)

    line = ''
    print " # Input CCDs includes %d Visits\n" % len(visits)
    for vv in visits:
        line = line + str(vv) + '^'

    print(line[:-1] + '\n')

    return visits
Example #16
def process_ha_config_upgrade(hass: HomeAssistant) -> None:
    """Upgrade configuration if necessary.

    This method needs to run in an executor.
    """
    version_path = hass.config.path(VERSION_FILE)

    try:
        with open(version_path, 'rt') as inp:
            conf_version = inp.readline().strip()
    except FileNotFoundError:
        # Last version to not have this file
        conf_version = '0.7.7'

    if conf_version == __version__:
        return

    _LOGGER.info("Upgrading configuration directory from %s to %s",
                 conf_version, __version__)

    version_obj = StrictVersion(conf_version)

    if version_obj < StrictVersion('0.50'):
        # 0.50 introduced persistent deps dir.
        lib_path = hass.config.path('deps')
        if os.path.isdir(lib_path):
            shutil.rmtree(lib_path)

    if version_obj < StrictVersion('0.92'):
        # 0.92 moved google/tts.py to google_translate/tts.py
        config_path = find_config_file(hass.config.config_dir)
        assert config_path is not None

        with open(config_path, 'rt', encoding='utf-8') as config_file:
            config_raw = config_file.read()

        if TTS_PRE_92 in config_raw:
            _LOGGER.info("Migrating google tts to google_translate tts")
            config_raw = config_raw.replace(TTS_PRE_92, TTS_92)
            try:
                with open(config_path, 'wt', encoding='utf-8') as config_file:
                    config_file.write(config_raw)
            except IOError:
                _LOGGER.exception("Migrating to google_translate tts failed")

    if version_obj < StrictVersion('0.94.0b6') and is_docker_env():
        # In 0.94 we no longer install packages inside the deps folder when
        # running inside a Docker container.
        lib_path = hass.config.path('deps')
        if os.path.isdir(lib_path):
            shutil.rmtree(lib_path)

    with open(version_path, 'wt') as outp:
        outp.write(__version__)

    _LOGGER.debug("Migrating old system configuration files to new locations")
    for oldf, newf in FILE_MIGRATION:
        if os.path.isfile(hass.config.path(oldf)):
            _LOGGER.info("Migrating %s to %s", oldf, newf)
            os.rename(hass.config.path(oldf), hass.config.path(newf))
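
Worth noting for the version gates above: StrictVersion understands the b pre-release tag, so a configuration written by a 0.94 beta is ordered correctly against the '0.94.0b6' threshold, and comparisons are component-wise rather than lexical:

from distutils.version import StrictVersion

assert StrictVersion('0.94.0b6') < StrictVersion('0.94.0')  # beta sorts before final
assert StrictVersion('0.7.7') < StrictVersion('0.50')       # 7 < 50, component-wise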
Example #17
    def __init__(self, fc=436e6, mx=4, my=4, n=2):
        gr.top_block.__init__(self, "Live Test")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("Live Test")
        qtgui.util.check_set_qss()
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)

        self.settings = Qt.QSettings("GNU Radio", "multiplesignal_livetest")

        try:
            if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
                self.restoreGeometry(
                    self.settings.value("geometry").toByteArray())
            else:
                self.restoreGeometry(self.settings.value("geometry"))
        except:
            pass

        ##################################################
        # Parameters
        ##################################################
        self.fc = fc
        self.mx = mx
        self.my = my
        self.n = n

        ##################################################
        # Variables
        ##################################################
        self.const_2 = const_2 = digital.constellation_16qam().base()
        self.const_1 = const_1 = digital.constellation_qpsk().base()
        self.theta_2 = theta_2 = 70
        self.theta_1 = theta_1 = 45
        self.samp_rate = samp_rate = 150000
        self.phi_2 = phi_2 = -25
        self.phi_1 = phi_1 = 70
        self.noise_voltage_2 = noise_voltage_2 = 0.05
        self.noise_voltage_1 = noise_voltage_1 = 0.05
        self.element_separation = element_separation = 0
        self.bps_2 = bps_2 = const_2.bits_per_symbol()
        self.bps_1 = bps_1 = const_1.bits_per_symbol()

        ##################################################
        # Blocks
        ##################################################
        self._theta_2_range = Range(0, 90, 1, 70, 200)
        self._theta_2_win = RangeWidget(self._theta_2_range, self.set_theta_2,
                                        'Elevation 2', "counter_slider", float)
        self.top_grid_layout.addWidget(self._theta_2_win, 3, 0, 1, 1)
        for r in range(3, 4):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 1):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._theta_1_range = Range(0, 90, 1, 45, 200)
        self._theta_1_win = RangeWidget(self._theta_1_range, self.set_theta_1,
                                        'Elevation 1', "counter_slider", float)
        self.top_grid_layout.addWidget(self._theta_1_win, 1, 0, 1, 1)
        for r in range(1, 2):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 1):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._phi_2_range = Range(-180, 180, 1, -25, 200)
        self._phi_2_win = RangeWidget(self._phi_2_range, self.set_phi_2,
                                      'Azimut 2', "counter_slider", float)
        self.top_grid_layout.addWidget(self._phi_2_win, 3, 1, 1, 1)
        for r in range(3, 4):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(1, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._phi_1_range = Range(-180, 180, 1, 70, 200)
        self._phi_1_win = RangeWidget(self._phi_1_range, self.set_phi_1,
                                      'Azimut 1', "counter_slider", float)
        self.top_grid_layout.addWidget(self._phi_1_win, 1, 1, 1, 1)
        for r in range(1, 2):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(1, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._noise_voltage_2_range = Range(0, 10, 0.001, 0.05, 200)
        self._noise_voltage_2_win = RangeWidget(self._noise_voltage_2_range,
                                                self.set_noise_voltage_2,
                                                'Noise Voltage 2',
                                                "counter_slider", float)
        self.top_grid_layout.addWidget(self._noise_voltage_2_win, 4, 0, 1, 2)
        for r in range(4, 5):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._noise_voltage_1_range = Range(0, 10, 0.001, 0.05, 200)
        self._noise_voltage_1_win = RangeWidget(self._noise_voltage_1_range,
                                                self.set_noise_voltage_1,
                                                'Noise Voltage 1',
                                                "counter_slider", float)
        self.top_grid_layout.addWidget(self._noise_voltage_1_win, 2, 0, 1, 2)
        for r in range(2, 3):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._element_separation_range = Range(0, 10, 0.01, 0, 200)
        self._element_separation_win = RangeWidget(
            self._element_separation_range, self.set_element_separation,
            'Element Separation', "counter_slider", float)
        self.top_grid_layout.addWidget(self._element_separation_win, 5, 0, 1,
                                       2)
        for r in range(5, 6):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self.qtgui_const_sink_x_0_0 = qtgui.const_sink_c(
            1024,  #size
            "",  #name
            1  #number of inputs
        )
        self.qtgui_const_sink_x_0_0.set_update_time(0.10)
        self.qtgui_const_sink_x_0_0.set_y_axis(-2, 2)
        self.qtgui_const_sink_x_0_0.set_x_axis(-2, 2)
        self.qtgui_const_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE,
                                                     qtgui.TRIG_SLOPE_POS, 0.0,
                                                     0, "")
        self.qtgui_const_sink_x_0_0.enable_autoscale(False)
        self.qtgui_const_sink_x_0_0.enable_grid(False)
        self.qtgui_const_sink_x_0_0.enable_axis_labels(True)

        labels = ['', '', '', '', '', '', '', '', '', '']
        widths = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        colors = [
            "blue", "red", "red", "red", "red", "red", "red", "red", "red",
            "red"
        ]
        styles = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        markers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

        for i in range(1):
            if len(labels[i]) == 0:
                self.qtgui_const_sink_x_0_0.set_line_label(
                    i, "Data {0}".format(i))
            else:
                self.qtgui_const_sink_x_0_0.set_line_label(i, labels[i])
            self.qtgui_const_sink_x_0_0.set_line_width(i, widths[i])
            self.qtgui_const_sink_x_0_0.set_line_color(i, colors[i])
            self.qtgui_const_sink_x_0_0.set_line_style(i, styles[i])
            self.qtgui_const_sink_x_0_0.set_line_marker(i, markers[i])
            self.qtgui_const_sink_x_0_0.set_line_alpha(i, alphas[i])

        self._qtgui_const_sink_x_0_0_win = sip.wrapinstance(
            self.qtgui_const_sink_x_0_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_0_win, 6, 1,
                                       1, 1)
        for r in range(6, 7):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(1, 2):
            self.top_grid_layout.setColumnStretch(c, 1)
        self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
            1024,  #size
            "",  #name
            1  #number of inputs
        )
        self.qtgui_const_sink_x_0.set_update_time(0.10)
        self.qtgui_const_sink_x_0.set_y_axis(-2, 2)
        self.qtgui_const_sink_x_0.set_x_axis(-2, 2)
        self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE,
                                                   qtgui.TRIG_SLOPE_POS, 0.0,
                                                   0, "")
        self.qtgui_const_sink_x_0.enable_autoscale(False)
        self.qtgui_const_sink_x_0.enable_grid(False)
        self.qtgui_const_sink_x_0.enable_axis_labels(True)

        labels = ['', '', '', '', '', '', '', '', '', '']
        widths = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        colors = [
            "blue", "red", "red", "red", "red", "red", "red", "red", "red",
            "red"
        ]
        styles = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        markers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

        for i in range(1):
            if len(labels[i]) == 0:
                self.qtgui_const_sink_x_0.set_line_label(
                    i, "Data {0}".format(i))
            else:
                self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
            self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
            self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])

        self._qtgui_const_sink_x_0_win = sip.wrapinstance(
            self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_const_sink_x_0_win, 6, 0, 1,
                                       1)
        for r in range(6, 7):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 1):
            self.top_grid_layout.setColumnStretch(c, 1)
        self.digital_constellation_decoder_cb_0_0 = digital.constellation_decoder_cb(
            const_2.base())
        self.digital_constellation_decoder_cb_0 = digital.constellation_decoder_cb(
            const_1.base())
        self.digital_chunks_to_symbols_xx_0_0 = digital.chunks_to_symbols_bc(
            const_2.points(), 1)
        self.digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bc(
            const_1.points(), 1)
        self.blocks_unpacked_to_packed_xx_0_0 = blocks.unpacked_to_packed_bb(
            bps_2, gr.GR_MSB_FIRST)
        self.blocks_unpacked_to_packed_xx_0 = blocks.unpacked_to_packed_bb(
            bps_1, gr.GR_MSB_FIRST)
        self.blocks_throttle_0_0 = blocks.throttle(gr.sizeof_char * 1,
                                                   samp_rate, True)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_char * 1, samp_rate,
                                                 True)
        self.blocks_packed_to_unpacked_xx_0_0 = blocks.packed_to_unpacked_bb(
            bps_2, gr.GR_MSB_FIRST)
        self.blocks_packed_to_unpacked_xx_0 = blocks.packed_to_unpacked_bb(
            bps_1, gr.GR_MSB_FIRST)
        self.blocks_null_sink_1 = blocks.null_sink(gr.sizeof_char * 1)
        self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_char * 1)
        self.blocks_add_xx_1 = blocks.add_vcc(mx * my)
        self.blocks_add_xx_0_0 = blocks.add_vcc(mx * my)
        self.blocks_add_xx_0 = blocks.add_vcc(mx * my)
        self.beamforming_randomsampler_0 = beamforming.randomsampler(
            mx * my, 8)
        self.beamforming_phasedarray_0_0 = beamforming.phasedarray(
            mx, my, theta_2, phi_2, 436e6, (299792458 / (2 * fc)),
            element_separation)
        self.beamforming_phasedarray_0 = beamforming.phasedarray(
            mx, my, theta_1, phi_1, 436e6, (299792458 / (2 * fc)),
            element_separation)
        self.beamforming_doaesprit_py_cf_0 = beamforming.doaesprit_py_cf(
            mx, my, fc, (299792458 / (2 * fc)), n, 128)
        self.beamforming_beamformer_0_0 = beamforming.beamformer(mx, my, 1)
        self.beamforming_beamformer_0 = beamforming.beamformer(mx, my, 0)
        self.analog_vectornoise_source_0_0 = analog.vectornoise_source(
            noise_voltage_2, mx * my)
        self.analog_vectornoise_source_0 = analog.vectornoise_source(
            noise_voltage_1, mx * my)
        self.analog_random_source_x_0_0 = blocks.vector_source_b(
            list(map(int, numpy.random.randint(0, 256, 100))), True)
        self.analog_random_source_x_0 = blocks.vector_source_b(
            list(map(int, numpy.random.randint(0, 256, 1000))), True)

        ##################################################
        # Connections
        ##################################################
        self.msg_connect((self.beamforming_doaesprit_py_cf_0, 'doa_port'),
                         (self.beamforming_beamformer_0, 'doa_port'))
        self.msg_connect((self.beamforming_doaesprit_py_cf_0, 'doa_port'),
                         (self.beamforming_beamformer_0_0, 'doa_port'))
        self.connect((self.analog_random_source_x_0, 0),
                     (self.blocks_throttle_0, 0))
        self.connect((self.analog_random_source_x_0_0, 0),
                     (self.blocks_throttle_0_0, 0))
        self.connect((self.analog_vectornoise_source_0, 0),
                     (self.blocks_add_xx_0, 1))
        self.connect((self.analog_vectornoise_source_0_0, 0),
                     (self.blocks_add_xx_0_0, 1))
        self.connect((self.beamforming_beamformer_0, 0),
                     (self.digital_constellation_decoder_cb_0, 0))
        self.connect((self.beamforming_beamformer_0, 0),
                     (self.qtgui_const_sink_x_0, 0))
        self.connect((self.beamforming_beamformer_0_0, 0),
                     (self.digital_constellation_decoder_cb_0_0, 0))
        self.connect((self.beamforming_beamformer_0_0, 0),
                     (self.qtgui_const_sink_x_0_0, 0))
        self.connect((self.beamforming_phasedarray_0, 0),
                     (self.blocks_add_xx_0, 0))
        self.connect((self.beamforming_phasedarray_0_0, 0),
                     (self.blocks_add_xx_0_0, 0))
        self.connect((self.beamforming_randomsampler_0, 0),
                     (self.beamforming_doaesprit_py_cf_0, 0))
        self.connect((self.blocks_add_xx_0, 0), (self.blocks_add_xx_1, 0))
        self.connect((self.blocks_add_xx_0_0, 0), (self.blocks_add_xx_1, 1))
        self.connect((self.blocks_add_xx_1, 0),
                     (self.beamforming_beamformer_0, 0))
        self.connect((self.blocks_add_xx_1, 0),
                     (self.beamforming_beamformer_0_0, 0))
        self.connect((self.blocks_add_xx_1, 0),
                     (self.beamforming_randomsampler_0, 0))
        self.connect((self.blocks_packed_to_unpacked_xx_0, 0),
                     (self.digital_chunks_to_symbols_xx_0, 0))
        self.connect((self.blocks_packed_to_unpacked_xx_0_0, 0),
                     (self.digital_chunks_to_symbols_xx_0_0, 0))
        self.connect((self.blocks_throttle_0, 0),
                     (self.blocks_packed_to_unpacked_xx_0, 0))
        self.connect((self.blocks_throttle_0_0, 0),
                     (self.blocks_packed_to_unpacked_xx_0_0, 0))
        self.connect((self.blocks_unpacked_to_packed_xx_0, 0),
                     (self.blocks_null_sink_0, 0))
        self.connect((self.blocks_unpacked_to_packed_xx_0_0, 0),
                     (self.blocks_null_sink_1, 0))
        self.connect((self.digital_chunks_to_symbols_xx_0, 0),
                     (self.beamforming_phasedarray_0, 0))
        self.connect((self.digital_chunks_to_symbols_xx_0_0, 0),
                     (self.beamforming_phasedarray_0_0, 0))
        self.connect((self.digital_constellation_decoder_cb_0, 0),
                     (self.blocks_unpacked_to_packed_xx_0, 0))
        self.connect((self.digital_constellation_decoder_cb_0_0, 0),
                     (self.blocks_unpacked_to_packed_xx_0_0, 0))
Example #18
import time
import numpy as np
from MEArec.tools import *
import MEAutility as mu
import shutil
import yaml
import os
from distutils.version import StrictVersion
from pathlib import Path
from joblib import Parallel, delayed, cpu_count
from MEArec.simulate_cells import compute_eap_for_cell_model, compute_eap_based_on_tempgen

if StrictVersion(yaml.__version__) >= StrictVersion('5.0.0'):
    use_loader = True
else:
    use_loader = False
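

# A plausible consumer of the use_loader flag above -- an assumption, not part
# of the original file: PyYAML >= 5 expects an explicit Loader for yaml.load.
def _load_params(path):
    with open(path) as f:
        if use_loader:
            return yaml.load(f, Loader=yaml.FullLoader)
        return yaml.load(f)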


def simulate_cell_templates(i, simulate_script, tot, cell_model, model_folder,
                            intraonly, params, verbose):
    model_folder = Path(model_folder)
    print(f"Starting {i + 1}")
    print(f'\n\n {cell_model} {i + 1}/{tot}\n\n')
    os.system(
        f'python {simulate_script} {i} {str(model_folder / cell_model)} {intraonly} {params} {verbose}'
    )
    print(f"Exiting {i + 1}")


class TemplateGenerator:
    """
Example #19
        },
    },
]

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# This allows the tests to be run against Django 1.8 through current.
# MIDDLEWARE_CLASSES has been deprecated since Django 1.10 and was completely
# removed in Django 2.0.
if StrictVersion(django.get_version()) < StrictVersion('1.10'):
    MIDDLEWARE_CLASSES = MIDDLEWARE

MEDIA_ROOT = ''
MEDIA_URL = '/'
STATIC_ROOT = ''
STATIC_URL = '/'

MODERNRPC_METHODS_MODULES = [
    'testsite.rpc_methods_stub.generic',
    'testsite.rpc_methods_stub.specific_types',
    'testsite.rpc_methods_stub.specific_protocol',
    'testsite.rpc_methods_stub.with_authentication',
]

LOGGING = {
Example #20
from qtpy import QtWidgets, QtCore, QtGui
import flika
from flika import global_vars as g
from flika.window import Window
from flika.utils.io import tifffile
from flika.process.file_ import get_permutation_tuple
from flika.utils.misc import open_file_gui
import pyqtgraph as pg
import time
import os
from os import listdir
from os.path import expanduser, isfile, join
from distutils.version import StrictVersion

flika_version = flika.__version__
if StrictVersion(flika_version) < StrictVersion('0.2.23'):
    from flika.process.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox
else:
    from flika.utils.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox


class VolumeSlider(BaseProcess):
    def __init__(self):
        super().__init__()
        self.numberOfTimeSlices = 0
        self.displayedTimeSlice = 0

        return

    def startVolumeSlider(self):
        #get volume arrays
Example #21
    def run_sampler(self):
        # set the step method
        pymc3, STEP_METHODS, floatX = self._import_external_sampler()
        step_methods = {m.__name__.lower(): m.__name__ for m in STEP_METHODS}
        if 'step' in self._kwargs:
            self.step_method = self._kwargs.pop('step')

            # 'step' could be a dictionary of methods for different parameters,
            # so check for this
            if self.step_method is None:
                pass
            elif isinstance(self.step_method, (dict, OrderedDict)):
                for key in self.step_method:
                    if key not in self._search_parameter_keys:
                        raise ValueError(
                            "Setting a step method for an unknown parameter '{}'"
                            .format(key))
                    else:
                        # check if using a compound step (a list of step
                        # methods for a particular parameter)
                        if isinstance(self.step_method[key], list):
                            sms = self.step_method[key]
                        else:
                            sms = [self.step_method[key]]
                        for sm in sms:
                            if sm.lower() not in step_methods:
                                raise ValueError(
                                    "Using invalid step method '{}'".format(
                                        self.step_method[key]))
            else:
                # check if using a compound step (a list of step
                # methods for a particular parameter)
                if isinstance(self.step_method, list):
                    sms = self.step_method
                else:
                    sms = [self.step_method]

                for i in range(len(sms)):
                    if sms[i].lower() not in step_methods:
                        raise ValueError(
                            "Using invalid step method '{}'".format(sms[i]))
        else:
            self.step_method = None

        # initialise the PyMC3 model
        self.pymc3_model = pymc3.Model()

        # set the prior
        self.set_prior()

        # if a custom log_likelihood function requires a `sampler` argument
        # then use that log_likelihood function, with the assumption that it
        # takes in a Pymc3 Sampler, with a pymc3_model attribute, and defines
        # the likelihood within that context manager
        likeargs = infer_args_from_method(self.likelihood.log_likelihood)
        if 'sampler' in likeargs:
            self.likelihood.log_likelihood(sampler=self)
        else:
            # set the likelihood function from predefined functions
            self.set_likelihood()

        # get the step method keyword arguments
        step_kwargs = self.kwargs.pop("step_kwargs")
        if step_kwargs is not None:
            # remove all individual default step kwargs if passed together using
            # step_kwargs keywords
            for key in self.default_step_kwargs:
                self.kwargs.pop(key)
        else:
            # remove any None default step keywords and place others in step_kwargs
            step_kwargs = {}
            for key in self.default_step_kwargs:
                if self.kwargs[key] is None:
                    self.kwargs.pop(key)
                else:
                    step_kwargs[key] = self.kwargs.pop(key)

        nuts_kwargs = self.kwargs.pop("nuts_kwargs")
        if nuts_kwargs is not None:
            # remove all individual default nuts kwargs if passed together using
            # nuts_kwargs keywords
            for key in self.default_nuts_kwargs:
                self.kwargs.pop(key)
        else:
            # remove any None default nuts keywords and place others in nut_kwargs
            nuts_kwargs = {}
            for key in self.default_nuts_kwargs:
                if self.kwargs[key] is None:
                    self.kwargs.pop(key)
                else:
                    nuts_kwargs[key] = self.kwargs.pop(key)
        methodslist = []

        # set the step method
        if isinstance(self.step_method, (dict, OrderedDict)):
            # create list of step methods (any not given will default to NUTS)
            self.kwargs['step'] = []
            with self.pymc3_model:
                for key in self.step_method:
                    # check for a compound step list
                    if isinstance(self.step_method[key], list):
                        for sms in self.step_method[key]:
                            curmethod = sms.lower()
                            methodslist.append(curmethod)
                            nuts_kwargs = self._create_nuts_kwargs(
                                curmethod, key, nuts_kwargs, pymc3,
                                step_kwargs, step_methods)
                    else:
                        curmethod = self.step_method[key].lower()
                        methodslist.append(curmethod)
                        nuts_kwargs = self._create_nuts_kwargs(
                            curmethod, key, nuts_kwargs, pymc3, step_kwargs,
                            step_methods)
        else:
            with self.pymc3_model:
                # check for a compound step list
                if isinstance(self.step_method, list):
                    compound = []
                    for sms in self.step_method:
                        curmethod = sms.lower()
                        methodslist.append(curmethod)
                        args, nuts_kwargs = self._create_args_and_nuts_kwargs(
                            curmethod, nuts_kwargs, step_kwargs)
                        compound.append(
                            pymc3.__dict__[step_methods[curmethod]](**args))
                        self.kwargs['step'] = compound
                else:
                    self.kwargs['step'] = None
                    if self.step_method is not None:
                        curmethod = self.step_method.lower()
                        methodslist.append(curmethod)
                        args, nuts_kwargs = self._create_args_and_nuts_kwargs(
                            curmethod, nuts_kwargs, step_kwargs)
                        self.kwargs['step'] = pymc3.__dict__[
                            step_methods[curmethod]](**args)
                    else:
                        # re-add step_kwargs if no step methods are set
                        if len(step_kwargs) > 0 and StrictVersion(
                                pymc3.__version__) < StrictVersion("3.7"):
                            self.kwargs['step_kwargs'] = step_kwargs

        # check whether only NUTS step method has been assigned
        if np.all([sm.lower() == 'nuts' for sm in methodslist]):
            # in this case we can let PyMC3 autoinitialise NUTS, so remove
            # the step methods and re-add nuts_kwargs
            self.kwargs['step'] = None

            if len(nuts_kwargs) > 0 and StrictVersion(
                    pymc3.__version__) < StrictVersion("3.7"):
                self.kwargs['nuts_kwargs'] = nuts_kwargs
            elif len(nuts_kwargs) > 0:
                # add NUTS kwargs to standard kwargs
                self.kwargs.update(nuts_kwargs)

        with self.pymc3_model:
            # perform the sampling
            trace = pymc3.sample(**self.kwargs)

        nparams = len([
            key for key in self.priors.keys()
            if not isinstance(self.priors[key], DeltaFunction)
        ])
        nsamples = len(trace) * self.chains

        self.result.samples = np.zeros((nsamples, nparams))
        count = 0
        for key in self.priors.keys():
            if not isinstance(self.priors[key],
                              DeltaFunction):  # ignore DeltaFunction variables
                if not isinstance(self.priors[key], MultivariateGaussian):
                    self.result.samples[:, count] = trace[key]
                else:
                    # get multivariate Gaussian samples
                    priorset = self.multivariate_normal_sets[key]['set']
                    index = self.multivariate_normal_sets[key]['index']
                    self.result.samples[:, count] = trace[priorset][:, index]
                count += 1
        self.result.sampler_output = np.nan
        self.calculate_autocorrelation(self.result.samples)
        self.result.log_evidence = np.nan
        self.result.log_evidence_err = np.nan
        self.calc_likelihood_count()
        return self.result
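# A minimal sketch (not from the sampler above) of the version gate it relies
# on: PyMC3 releases before 3.7 accepted a separate ``nuts_kwargs`` argument
# to ``pymc3.sample``, while later releases expect those options merged into
# the ordinary keyword arguments. ``merge_nuts_kwargs`` is a hypothetical
# helper name.
def merge_nuts_kwargs(kwargs, nuts_kwargs):
    from distutils.version import StrictVersion
    import pymc3
    if not nuts_kwargs:
        return kwargs
    if StrictVersion(pymc3.__version__) < StrictVersion("3.7"):
        # old interface: pymc3.sample(..., nuts_kwargs={...})
        kwargs["nuts_kwargs"] = nuts_kwargs
    else:
        # new interface: NUTS options go directly into the sample() kwargs
        kwargs.update(nuts_kwargs)
    return kwargs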
Exemplo n.º 22
0
def deploy(
    model_uri,
    workspace,
    deployment_config=None,
    service_name=None,
    model_name=None,
    tags=None,
    mlflow_home=None,
    synchronous=True,
):
    """
    Register an MLflow model with Azure ML and deploy a webservice to Azure Container Instances
    (ACI) or Azure Kubernetes Service (AKS).

    The deployed service will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param workspace: The AzureML workspace in which to deploy the service. This is an
                      `azureml.core.Workspace` object.
    :param deployment_config: The configuration for the Azure web service. This configuration
                              allows you to specify the resources the webservice will use and
                              the compute cluster it will be deployed in. If unspecified, the web
                              service will be deployed into an Azure Container Instance. This is
                              an `azureml.core.DeploymentConfig` object. For more information, see
                              `<https://docs.microsoft.com/python/api/azureml-core/
                              azureml.core.webservice.aks.aksservicedeploymentconfiguration>`_ and
                              `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml
                              .core.webservice.aci.aciservicedeploymentconfiguration>`_
    :param service_name: The name to assign the Azure Machine Learning webservice that will be
                         created. If unspecified, a unique name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Model and Deployment that will be created.
                 These tags are added to a set of default tags that include the model URI,
                 among others. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py>`_.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param synchronous: If ``True``, this method blocks until the deployment process
                        terminates before returning. If ``False``, the method returns immediately,
                        but the deployed service will not be available until the asynchronous
                        deployment process completes. Use the
                        ``azureml.core.Webservice.wait_for_deployment()`` function to wait
                        for the deployment process to complete.
    :return: A tuple containing the following elements in order:

             - An ``azureml.core.webservice.Webservice`` object containing metadata for the
               new service.
             - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Create an Azure Container Instance webservice for an MLflow model
        azure_service, azure_model = mlflow.azureml.deploy(model_uri="<model_uri>",
                                                           service_name="<deployment-name>",
                                                           workspace=azure_workspace,
                                                           synchronous=True)
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.model import Model as AzureModel, InferenceConfig
    from azureml.core import Environment as AzureEnvironment
    from azureml.core import VERSION as AZUREML_VERSION
    from azureml.core.webservice import AciWebservice

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, model = _load_pyfunc_conf_with_model(
        model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    run_id = None
    run_id_tag = None
    try:
        run_id = model.run_id
        run_id_tag = run_id
    except AttributeError:
        run_id = str(uuid.uuid4())
    if model_python_version is not None and StrictVersion(
            model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=
            ("Azure ML can only deploy models trained in Python 3 and above. See"
             " the following MLflow GitHub issue for a thorough explanation of this"
             " limitation and a workaround to enable support for deploying models"
             " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"
             ),
            error_code=INVALID_PARAMETER_VALUE,
        )

    tags = _build_tags(
        model_uri=model_uri,
        model_python_version=model_python_version,
        user_tags=tags,
        run_id=run_id_tag,
    )

    if service_name is None:
        service_name = _get_mlflow_azure_name(run_id)
    if model_name is None:
        model_name = _get_mlflow_azure_name(run_id)

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path,
                               dst=model_directory_path),
        )

        registered_model = AzureModel.register(workspace=workspace,
                                               model_path=tmp_model_path,
                                               model_name=model_name,
                                               tags=tags)

        _logger.info(
            "Registered an Azure Model with name: `%s` and version: `%s`",
            registered_model.name,
            registered_model.version,
        )

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path,
                                 azure_model=registered_model)

        environment = None
        if pyfunc.ENV in model_pyfunc_conf:
            environment = AzureEnvironment.from_conda_specification(
                _get_mlflow_azure_name(run_id),
                os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]),
            )
        else:
            environment = AzureEnvironment(_get_mlflow_azure_name(run_id))

        if mlflow_home is not None:
            path = tmp.path("dist")
            _logger.info("Bulding temporary MLFlow wheel in %s", path)
            wheel = _create_mlflow_wheel(mlflow_home, path)
            whl_url = AzureEnvironment.add_private_pip_wheel(
                workspace=workspace, file_path=wheel, exist_ok=True)
            environment.python.conda_dependencies.add_pip_package(whl_url)
        else:
            environment.python.conda_dependencies.add_pip_package(
                "mlflow=={}".format(mlflow_version))

        # AzureML requires azureml-defaults to be installed to include
        # flask for the inference server.
        environment.python.conda_dependencies.add_pip_package(
            "azureml-defaults=={}".format(AZUREML_VERSION))

        inference_config = InferenceConfig(entry_script=execution_script_path,
                                           environment=environment)

        if deployment_config is not None:
            if deployment_config.tags is not None:
                # We want more narrowly-scoped tags to win on merge
                tags.update(deployment_config.tags)
            deployment_config.tags = tags
        else:
            deployment_config = AciWebservice.deploy_configuration(tags=tags)

        webservice = AzureModel.deploy(
            workspace=workspace,
            name=service_name,
            models=[registered_model],
            inference_config=inference_config,
            deployment_config=deployment_config,
        )
        _logger.info("Deploying an Azure Webservice with name: `%s`",
                     webservice.name)
        if synchronous:
            webservice.wait_for_deployment(show_output=True)
        return webservice, registered_model
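# Both deploy() above and build_image() below repeat the same Python 3 guard;
# a possible refactoring (not in the original source) would hoist it into a
# shared helper. ``_validate_model_python_version`` is a hypothetical name,
# and the sketch assumes MlflowException and INVALID_PARAMETER_VALUE are
# imported as in the surrounding module.
def _validate_model_python_version(model_python_version):
    if model_python_version is not None and StrictVersion(
            model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=("Azure ML can only deploy models trained in Python 3 and"
                     " above. See https://github.com/mlflow/mlflow/issues/668"),
            error_code=INVALID_PARAMETER_VALUE,
        )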
Exemplo n.º 23
0
def get_python_version():
    return StrictVersion('.'.join(map(str, sys.version_info[:3])))
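# Hypothetical usage: because get_python_version() returns a StrictVersion,
# the result can be compared directly against other versions.
if get_python_version() < StrictVersion("3.0.0"):
    raise RuntimeError("Python 3 or later is required")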
Exemplo n.º 24
0
def build_image(
    model_uri,
    workspace,
    image_name=None,
    model_name=None,
    mlflow_home=None,
    description=None,
    tags=None,
    synchronous=True,
):
    """
    Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment.
    The resulting image can be deployed as a web service to Azure Container Instances (ACI) or
    Azure Kubernetes Service (AKS).

    The resulting Azure ML ContainerImage will contain a webserver that processes model queries.
    For information about the input data formats accepted by this webserver, see the
    :ref:`MLflow deployment tools documentation <azureml_deployment>`.

    :param model_uri: The location, in URI format, of the MLflow model used to build the Azure
                      ML deployment image. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param image_name: The name to assign the Azure Container Image that will be created. If
                       unspecified, a unique image name will be generated.
    :param model_name: The name to assign the Azure Model that will be created. If unspecified,
                       a unique model name will be generated.
    :param workspace: The AzureML workspace in which to build the image. This is an
                      `azureml.core.Workspace` object.
    :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the
                        image will install MLflow from this directory. Otherwise, it will install
                        MLflow from pip.
    :param description: A string description to associate with the Azure Container Image and the
                        Azure Model that will be created. For more information, see
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.image.container.containerimageconfig?view=azure-ml-py>`_ and
                        `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                        azureml.core.model.model?view=azure-ml-py#register>`_.
    :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to
                 associate with the Azure Container Image and the Azure Model that will be created.
                 These tags are added to a set of default tags that include the model URI,
                 among others. For more information, see
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.image.container.containerimageconfig?view=azure-ml-py>`_ and
                 `<https://docs.microsoft.com/en-us/python/api/azureml-core/
                 azureml.core.model.model?view=azure-ml-py#register>`_.
    :param synchronous: If ``True``, this method blocks until the image creation procedure
                        terminates before returning. If ``False``, the method returns immediately,
                        but the returned image will not be available until the asynchronous
                        creation process completes. Use the
                        ``azureml.core.Image.wait_for_creation()`` function to wait for the creation
                        process to complete.
    :return: A tuple containing the following elements in order:

             - An ``azureml.core.image.ContainerImage`` object containing metadata for the new image.
             - An ``azureml.core.model.Model`` object containing metadata for the new model.

    .. code-block:: python
        :caption: Example

        import mlflow.azureml
        from azureml.core import Workspace
        from azureml.core.webservice import AciWebservice, Webservice

        # Load or create an Azure ML Workspace
        workspace_name = "<Name of your Azure ML workspace>"
        subscription_id = "<Your Azure subscription ID>"
        resource_group = "<Name of the Azure resource group in which to create Azure ML resources>"
        location = "<Name of the Azure location (region) in which to create Azure ML resources>"
        azure_workspace = Workspace.create(name=workspace_name,
                                           subscription_id=subscription_id,
                                           resource_group=resource_group,
                                           location=location,
                                           create_resource_group=True,
                                           exist_ok=True)

        # Build an Azure ML Container Image for an MLflow model
        azure_image, azure_model = mlflow.azureml.build_image(model_uri="<model_uri>",
                                                              workspace=azure_workspace,
                                                              synchronous=True)
        # If your image build failed, you can access build logs at the following URI:
        print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri))

        # Deploy the image to Azure Container Instances (ACI) for real-time serving
        webservice_deployment_config = AciWebservice.deploy_configuration()
        webservice = Webservice.deploy_from_image(
                            image=azure_image, workspace=azure_workspace, name="<deployment-name>")
        webservice.wait_for_deployment()
    """
    # The Azure ML SDK is only compatible with Python 3. However, the `mlflow.azureml` module should
    # still be accessible for import from Python 2. Therefore, we will only import from the SDK
    # upon method invocation.
    # pylint: disable=import-error
    from azureml.core.image import ContainerImage
    from azureml.core.model import Model as AzureModel

    absolute_model_path = _download_artifact_from_uri(model_uri)

    model_pyfunc_conf, _ = _load_pyfunc_conf_with_model(
        model_path=absolute_model_path)
    model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None)
    if model_python_version is not None and StrictVersion(
            model_python_version) < StrictVersion("3.0.0"):
        raise MlflowException(
            message=
            ("Azure ML can only deploy models trained in Python 3 and above. See"
             " the following MLflow GitHub issue for a thorough explanation of this"
             " limitation and a workaround to enable support for deploying models"
             " trained in Python 2: https://github.com/mlflow/mlflow/issues/668"
             ),
            error_code=INVALID_PARAMETER_VALUE,
        )

    tags = _build_tags(model_uri=model_uri,
                       model_python_version=model_python_version,
                       user_tags=tags)

    if image_name is None:
        image_name = _get_mlflow_azure_resource_name()
    if model_name is None:
        model_name = _get_mlflow_azure_resource_name()

    with TempDir(chdr=True) as tmp:
        model_directory_path = tmp.path("model")
        tmp_model_path = os.path.join(
            model_directory_path,
            _copy_file_or_tree(src=absolute_model_path,
                               dst=model_directory_path),
        )

        registered_model = AzureModel.register(
            workspace=workspace,
            model_path=tmp_model_path,
            model_name=model_name,
            tags=tags,
            description=description,
        )
        _logger.info(
            "Registered an Azure Model with name: `%s` and version: `%s`",
            registered_model.name,
            registered_model.version,
        )

        # Create an execution script (entry point) for the image's model server. Azure ML requires
        # the container's execution script to be located in the current working directory during
        # image creation, so we create the execution script as a temporary file in the current
        # working directory.
        execution_script_path = tmp.path("execution_script.py")
        _create_execution_script(output_path=execution_script_path,
                                 azure_model=registered_model)
        # Azure ML copies the execution script into the image's application root directory by
        # prepending "/var/azureml-app" to the specified script path. The script is then executed
        # by referencing its path relative to the "/var/azureml-app" directory. Unfortunately,
        # if the script path is an absolute path, Azure ML attempts to reference it directly,
        # resulting in a failure. To circumvent this problem, we provide Azure ML with the relative
        # script path. Because the execution script was created in the current working directory,
        # this relative path is the script path's base name.
        execution_script_path = os.path.basename(execution_script_path)

        if mlflow_home is not None:
            _logger.info(
                "Copying the specified mlflow_home directory: `%s` to a temporary location for"
                " container creation",
                mlflow_home,
            )
            mlflow_home = os.path.join(
                tmp.path(),
                _copy_project(src_path=mlflow_home, dst_path=tmp.path()))
            image_file_dependencies = [mlflow_home]
        else:
            image_file_dependencies = None
        dockerfile_path = tmp.path("Dockerfile")
        _create_dockerfile(output_path=dockerfile_path,
                           mlflow_path=mlflow_home)

        conda_env_path = None
        if pyfunc.ENV in model_pyfunc_conf:
            conda_env_path = os.path.join(tmp_model_path,
                                          model_pyfunc_conf[pyfunc.ENV])

        image_configuration = ContainerImage.image_configuration(
            execution_script=execution_script_path,
            runtime="python",
            docker_file=dockerfile_path,
            dependencies=image_file_dependencies,
            conda_file=conda_env_path,
            description=description,
            tags=tags,
        )
        image = ContainerImage.create(
            workspace=workspace,
            name=image_name,
            image_config=image_configuration,
            models=[registered_model],
        )
        _logger.info(
            "Building an Azure Container Image with name: `%s` and version: `%s`",
            image.name,
            image.version,
        )
        if synchronous:
            image.wait_for_creation(show_output=True)
        return image, registered_model
Exemplo n.º 25
0
    def __init__(self, verbose=0, dry_run=0, force=0):

        distutils.cygwinccompiler.CygwinCCompiler.__init__(
            self, verbose, dry_run, force)

        # we need to support gcc versions like 3.2 which don't match the
        # standard get_versions method's regex
        if self.gcc_version is None:
            import re
            p = subprocess.Popen(['gcc', '-dumpversion'],
                                 shell=True,
                                 stdout=subprocess.PIPE)
            out_string = p.stdout.read().decode()  # decode bytes so re.search works on Python 3
            p.stdout.close()
            result = re.search(r'(\d+\.\d+)', out_string)
            if result:
                self.gcc_version = StrictVersion(result.group(1))

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if self.linker_dll == 'dllwrap':
            # Commented out '--driver-name g++' part that fixes weird
            #   g++.exe: g++: No such file or directory
            # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
            # If the --driver-name part is required for some environment
            # then make the inclusion of this part specific to that
            # environment.
            self.linker = 'dllwrap'  #  --driver-name g++'
        elif self.linker_dll == 'gcc':
            self.linker = 'g++'

        # **changes: eric jones 4/11/01
        # 1. Check for import library on Windows.  Build if it doesn't exist.

        build_import_library()

        # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
        msvcr_success = build_msvcr_library()
        msvcr_dbg_success = build_msvcr_library(debug=True)
        if msvcr_success or msvcr_dbg_success:
            # add preprocessor statement for using customized msvcr lib
            self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')

        # Define the MSVC version as hint for MinGW
        msvcr_version = msvc_runtime_version()
        if msvcr_version:
            self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)

        # MS_WIN64 should be defined when building for amd64 on windows,
        # but python headers define it only for MS compilers, which has all
        # kinds of bad consequences, like using Py_InitModule4 instead of
        # Py_InitModule4_64, etc. So we add it here
        if get_build_architecture() == 'AMD64':
            if self.gcc_version < "4.0":
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
                    compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'
                    ' -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g -mno-cygwin',
                    linker_so='gcc -g -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
                    compiler_so=
                    'gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g',
                    linker_so='gcc -g -shared')
        else:
            if self.gcc_version <= "3.0.0":
                self.set_executables(
                    compiler='gcc -mno-cygwin -O2 -w',
                    compiler_so='gcc -mno-cygwin -mdll -O2 -w'
                    ' -Wstrict-prototypes',
                    linker_exe='g++ -mno-cygwin',
                    linker_so='%s -mno-cygwin -mdll -static %s' %
                    (self.linker, entry_point))
            elif self.gcc_version < "4.0":
                self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
                                     compiler_so='gcc -mno-cygwin -O2 -Wall'
                                     ' -Wstrict-prototypes',
                                     linker_exe='g++ -mno-cygwin',
                                     linker_so='g++ -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(
                    compiler='gcc -O2 -Wall',
                    compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
                    linker_exe='g++ ',
                    linker_so='g++ -shared')
        # added for python2.3 support
        # we can't pass it through set_executables because pre 2.2 would fail
        self.compiler_cxx = ['g++']

        # Maybe we should also append -mthreads, but then the finished dlls
        # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
        # thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        #self.dll_libraries=[]
        return
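# The gcc_version checks above rely on StrictVersion comparing directly
# against plain version strings; a minimal demonstration:
from distutils.version import StrictVersion
assert StrictVersion("2.91.57") <= "2.91.57"
assert StrictVersion("3.4.5") < "4.0"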
Exemplo n.º 26
0
    def setVersion(self, vstring):
        self.version = StrictVersion(vstring)
Exemplo n.º 27
0
try:
    from os.path import samefile
except ImportError:
    # Windows does not have the samefile function
    from os import stat

    def samefile(file1, file2):
        return stat(file1) == stat(file2)
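# Note: comparing os.stat() results only approximates os.path.samefile();
# on filesystems where st_ino is 0 (older Windows), two distinct files with
# identical metadata would compare equal.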


from PIL import Image
try:
    import h5py
except ImportError:
    h5py_available = False
else:
    h5py_available = StrictVersion(h5py.__version__) >= StrictVersion('2.2.1')
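# Note: this gates on h5py >= 2.2.1 instead of failing at import time, so
# HDF5-dependent code paths can presumably check ``h5py_available`` later.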

import sima.misc
from sima.motion._motion import _align_frame
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    from sima.misc.tifffile import TiffFileWriter


class Sequence(with_metaclass(ABCMeta, object)):
    """Object containing data from sequentially acquired imaging data.

    Sequences are created with a call to the create method.

    >>> from sima import Sequence
    >>> from sima.misc import example_hdf5
Exemplo n.º 28
0
                       QgsVectorLayer,
                       QgsProject,
                       QgsMultiRenderChecker,
                       QgsSingleSymbolRenderer,
                       QgsProperty
                       )
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath

# Convenience instances in case you need them
# (not used in this test)
start_app()

TEST_DATA_DIR = unitTestDataPath()

if StrictVersion(PYQT_VERSION_STR) < StrictVersion('5.7'):
    from qgis.PyQt.QtCore import pyqtWrapperType
    EXPECTED_TYPE = pyqtWrapperType
else:
    EXPECTED_TYPE = type(QObject)
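# Note: PyQt5 releases before 5.7 exposed ``pyqtWrapperType`` as the
# metaclass of QObject wrappers; from 5.7 onward the same metaclass is
# reached via plain ``type(QObject)``, hence the StrictVersion gate above.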


class TestQgsSymbolLayer(unittest.TestCase):

    """
     This class tests the SIP bindings for QgsSymbolLayer descendants.
     Every class is tested using its createFromSld implementation.
     An exception is made for:
     - QgsLinePatternFillSymbolLayer, whose createFromSld implementation
         returns NULL
     - QgsPointPatternFillSymbolLayer, whose createFromSld implementation
Exemplo n.º 29
0
    def execute(self, nets, vlan_ids, **kwargs):
        self.remove_previous_task()

        task = Task(name=consts.TASK_NAMES.check_networks,
                    cluster=self.cluster)

        if len([n for n in self.cluster.nodes if n.online]) < 2:
            task.status = consts.TASK_STATUSES.error
            task.progress = 100
            task.message = ('At least two online nodes are required to be '
                            'in the environment for network verification.')
            db().add(task)
            db().commit()
            return task

        if len(self.cluster.node_groups) > 1:
            task.status = consts.TASK_STATUSES.error
            task.progress = 100
            task.message = ('Network verification is disabled for '
                            'environments containing more than one node '
                            'group.')
            db().add(task)
            db().commit()
            return task

        if self.cluster.status in self._blocking_statuses:
            task.status = consts.TASK_STATUSES.error
            task.progress = 100
            task.message = (
                "Environment is not ready to run network verification "
                "because it is in '{0}' state.".format(self.cluster.status))
            db().add(task)
            db().commit()
            return task

        db().add(task)
        db().commit()

        self._call_silently(task,
                            tasks.CheckNetworksTask,
                            data=nets,
                            check_all_parameters=True)
        db().refresh(task)

        if task.status != consts.TASK_STATUSES.error:
            # this one is connected with UI issues - we need to distinguish
            # whether the error happened inside nailgun or somewhere in the
            # orchestrator, and the UI does that by task name.
            task.name = consts.TASK_NAMES.verify_networks
            verify_task = tasks.VerifyNetworksTask(task, vlan_ids)

            if tasks.CheckDhcpTask.enabled(self.cluster):
                dhcp_subtask = objects.task.Task.create_subtask(
                    task, name=consts.TASK_NAMES.check_dhcp)
                verify_task.add_subtask(
                    tasks.CheckDhcpTask(dhcp_subtask, vlan_ids))

            if tasks.MulticastVerificationTask.enabled(self.cluster):
                multicast = objects.task.Task.create_subtask(
                    task, name=consts.TASK_NAMES.multicast_verification)
                verify_task.add_subtask(
                    tasks.MulticastVerificationTask(multicast))

            # remote connectivity checks have existed since Fuel 6.1,
            # so we should not create those tasks for older environments
            if StrictVersion(self.cluster.release.environment_version) >= \
                    StrictVersion(consts.FUEL_REMOTE_REPOS):

                # repo connectivity check via default gateway
                repo_check_task = objects.task.Task.create_subtask(
                    task, name=consts.TASK_NAMES.check_repo_availability)
                verify_task.add_subtask(
                    tasks.CheckRepoAvailability(repo_check_task, vlan_ids))

                # repo connectivity check via external gateway
                conf, errors = tasks.CheckRepoAvailabilityWithSetup.get_config(
                    self.cluster)
                # if there is no conf, there are no nodes on which
                # we need to set up the network
                if conf:
                    repo_check_task = objects.task.Task.create_subtask(
                        task,
                        consts.TASK_NAMES.check_repo_availability_with_setup)
                    verify_task.add_subtask(
                        tasks.CheckRepoAvailabilityWithSetup(
                            repo_check_task, conf))

                if errors:
                    notifier.notify("warning", '\n'.join(errors),
                                    self.cluster.id)

            db().commit()
            self._call_silently(task, verify_task)

        return task
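# A minimal sketch (hypothetical values) of the release gate used in
# execute() above: remote-repo connectivity checks exist since Fuel 6.1,
# so older environments skip those subtasks.
from distutils.version import StrictVersion
environment_version = "6.0"   # e.g. cluster.release.environment_version
FUEL_REMOTE_REPOS = "6.1"     # e.g. consts.FUEL_REMOTE_REPOS
assert not (StrictVersion(environment_version) >= StrictVersion(FUEL_REMOTE_REPOS))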
Exemplo n.º 30
0
    def __init__(self):
        gr.top_block.__init__(self, "AM")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("AM")
        qtgui.util.check_set_qss()
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)

        self.settings = Qt.QSettings("GNU Radio", "am")

        try:
            if StrictVersion(Qt.qVersion()) < StrictVersion("5.0.0"):
                self.restoreGeometry(self.settings.value("geometry").toByteArray())
            else:
                self.restoreGeometry(self.settings.value("geometry"))
        except:
            pass
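        # Note: under Qt 4 the stored geometry comes back wrapped in a
        # QVariant and needs toByteArray(); Qt 5 returns the QByteArray
        # directly, which is what the StrictVersion gate above selects for.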

        ##################################################
        # Variables
        ##################################################
        self.signal_offset = signal_offset = 1
        self.samp_rate = samp_rate = 288000
        self.in_gain = in_gain = 0.5

        ##################################################
        # Blocks
        ##################################################
        self._signal_offset_range = Range(0, 2, 0.1, 1, 200)
        self._signal_offset_win = RangeWidget(self._signal_offset_range, self.set_signal_offset, 'Signal Offset', "counter_slider", float)
        self.top_grid_layout.addWidget(self._signal_offset_win)
        self._in_gain_range = Range(0, 2, 0.1, 0.5, 200)
        self._in_gain_win = RangeWidget(self._in_gain_range, self.set_in_gain, 'Input gain', "counter_slider", float)
        self.top_grid_layout.addWidget(self._in_gain_win)
        self.qtgui_time_sink_x_0_1 = qtgui.time_sink_f(
            2048, #size
            samp_rate, #samp_rate
            'Wave Form', #name
            2 #number of inputs
        )
        self.qtgui_time_sink_x_0_1.set_update_time(0.10)
        self.qtgui_time_sink_x_0_1.set_y_axis(-0.5, 0.5)

        self.qtgui_time_sink_x_0_1.set_y_label('Amplitude', "")

        self.qtgui_time_sink_x_0_1.enable_tags(True)
        self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0_1.enable_autoscale(False)
        self.qtgui_time_sink_x_0_1.enable_grid(False)
        self.qtgui_time_sink_x_0_1.enable_axis_labels(True)
        self.qtgui_time_sink_x_0_1.enable_control_panel(False)
        self.qtgui_time_sink_x_0_1.enable_stem_plot(False)


        labels = ['Original', 'Demodulated', 'Signal 3', 'Signal 4', 'Signal 5',
            'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']
        widths = [1, 1, 1, 1, 1,
            1, 1, 1, 1, 1]
        colors = ['blue', 'black', 'green', 'black', 'cyan',
            'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
            1.0, 1.0, 1.0, 1.0, 1.0]
        styles = [1, 1, 1, 1, 1,
            1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
            -1, -1, -1, -1, -1]


        for i in range(2):
            if len(labels[i]) == 0:
                self.qtgui_time_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_time_sink_x_0_1.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0_1.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0_1.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0_1.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0_1.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0_1.set_line_alpha(i, alphas[i])

        self._qtgui_time_sink_x_0_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_1_win)
        self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
            2048, #size
            samp_rate, #samp_rate
            'Modulated', #name
            1 #number of inputs
        )
        self.qtgui_time_sink_x_0.set_update_time(0.10)
        self.qtgui_time_sink_x_0.set_y_axis(-4, 4)

        self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")

        self.qtgui_time_sink_x_0.enable_tags(True)
        self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0.enable_grid(False)
        self.qtgui_time_sink_x_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0.enable_control_panel(False)
        self.qtgui_time_sink_x_0.enable_stem_plot(False)


        labels = ['Modulated', 'Signal 2', 'Signal 3', 'Signal 4', 'Signal 5',
            'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']
        widths = [1, 1, 1, 1, 1,
            1, 1, 1, 1, 1]
        colors = ['blue', 'red', 'green', 'black', 'cyan',
            'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
            1.0, 1.0, 1.0, 1.0, 1.0]
        styles = [1, 1, 1, 1, 1,
            1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
            -1, -1, -1, -1, -1]


        for i in range(1):
            if len(labels[i]) == 0:
                self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])

        self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win)
        self.low_pass_filter_0 = filter.fir_filter_fff(
            1,
            firdes.low_pass(
                1,
                samp_rate,
                22000,
                100,
                firdes.WIN_HAMMING,
                6.76))
        self.blocks_wavfile_source_0 = blocks.wavfile_source('/home/irodrigu/Proyectos/sc-clases/2020-2021/P1/src/songs/Lofi.wav', True)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_float*1, samp_rate,True)
        self.blocks_sub_xx_0 = blocks.sub_ff(1)
        self.blocks_null_source_0 = blocks.null_source(gr.sizeof_float*1)
        self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
        self.blocks_multiply_const_vxx_0_0_0 = blocks.multiply_const_ff(in_gain)
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_ff(1)
        self.blocks_max_xx_0 = blocks.max_ff(1, 1)
        self.blocks_add_xx_0 = blocks.add_vff(1)
        self.band_pass_filter_0 = filter.fir_filter_fff(
            1,
            firdes.band_pass(
                1,
                samp_rate,
                85000,
                115000,
                100,
                firdes.WIN_HAMMING,
                6.76))
        self.audio_sink_0 = audio.sink(48000, 'pulse', True)
        self.analog_sig_source_x_0_0 = analog.sig_source_f(samp_rate, analog.GR_COS_WAVE, 100000, 3, 0, 0)
        self.analog_const_source_x_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, signal_offset)
        self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, signal_offset)
        self.analog_agc_xx_0 = analog.agc_ff(6e-8, 0, 1.0)
        self.analog_agc_xx_0.set_max_gain(65536)



        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_agc_xx_0, 0), (self.audio_sink_0, 0))
        self.connect((self.analog_agc_xx_0, 0), (self.qtgui_time_sink_x_0_1, 1))
        self.connect((self.analog_const_source_x_0, 0), (self.blocks_add_xx_0, 0))
        self.connect((self.analog_const_source_x_0_0, 0), (self.blocks_sub_xx_0, 1))
        self.connect((self.analog_sig_source_x_0_0, 0), (self.blocks_multiply_xx_0, 1))
        self.connect((self.band_pass_filter_0, 0), (self.blocks_max_xx_0, 0))
        self.connect((self.blocks_add_xx_0, 0), (self.blocks_multiply_xx_0, 0))
        self.connect((self.blocks_max_xx_0, 0), (self.low_pass_filter_0, 0))
        self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_time_sink_x_0_1, 0))
        self.connect((self.blocks_multiply_const_vxx_0_0_0, 0), (self.blocks_throttle_0, 0))
        self.connect((self.blocks_multiply_xx_0, 0), (self.band_pass_filter_0, 0))
        self.connect((self.blocks_multiply_xx_0, 0), (self.qtgui_time_sink_x_0, 0))
        self.connect((self.blocks_null_source_0, 0), (self.blocks_max_xx_0, 1))
        self.connect((self.blocks_sub_xx_0, 0), (self.analog_agc_xx_0, 0))
        self.connect((self.blocks_throttle_0, 0), (self.blocks_add_xx_0, 1))
        self.connect((self.blocks_wavfile_source_0, 0), (self.blocks_multiply_const_vxx_0, 0))
        self.connect((self.blocks_wavfile_source_0, 0), (self.blocks_multiply_const_vxx_0_0_0, 0))
        self.connect((self.low_pass_filter_0, 0), (self.blocks_sub_xx_0, 0))