import sys
from distutils.version import LooseVersion


def compare(current, operator, reference):
    # if len(sys.argv) < 4:
    #     script_name = sys.argv[0]
    #     print('Usage:')
    #     print('{} current-version operator reference-version'.format(script_name))
    #     print('{} v0.10.29 \'>=\' v0.10.20'.format(script_name))
    #     print('{} v0.10.29 \'<\' v0.12.00'.format(script_name))
    #     sys.exit(1)

    current = LooseVersion(current.replace('v', ''))
    reference = LooseVersion(reference.replace('v', ''))

    if operator == '>=':
        if not current >= reference:
            sys.exit(1)
    elif operator == '>':
        if not current > reference:
            sys.exit(1)
    elif operator == '<=':
        if not current <= reference:
            sys.exit(1)
    elif operator == '<':
        if not current < reference:
            sys.exit(1)
    else:
        print('Unknown operator {}'.format(operator))
        sys.exit(1)
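
A minimal usage sketch (hedged): compare() signals failure by exiting, so SystemExit is trapped here to observe the result in-process.

try:
    compare('v0.10.29', '>=', 'v0.10.20')
    print('comparison held')
except SystemExit:
    print('comparison failed')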
Example #2
File: util.py Project: MekliCZ/positron
 def __init__(self, version):
     # Can't use super, LooseVersion's base class is not a new-style class.
     LooseVersion.__init__(self, version)
     # Take the first three integer components, stopping at the first
     # non-integer and padding the rest with zeroes.
     (self.major, self.minor, self.patch) = list(itertools.chain(
         itertools.takewhile(lambda x:isinstance(x, int), self.version),
         (0, 0, 0)))[:3]
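
A hedged sketch of the padding behaviour; the subclass name PaddedVersion is hypothetical, since the snippet shows only the __init__.

import itertools
from distutils.version import LooseVersion

class PaddedVersion(LooseVersion):
    def __init__(self, version):
        LooseVersion.__init__(self, version)
        (self.major, self.minor, self.patch) = list(itertools.chain(
            itertools.takewhile(lambda x: isinstance(x, int), self.version),
            (0, 0, 0)))[:3]

v = PaddedVersion('1.2rc3')
print(v.major, v.minor, v.patch)  # 1 2 0 -- 'rc' stops the integer run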
Example #3
def _clean_freebayes_fmt_cl():
    """For FreeBayes pre 1.1.0, need to remove problematic DPR FORMAT annotation.

    This can be removed after bcbio 1.0.1 release which will default to
    FreeBayes 1.1.0
    """
    version = subprocess.check_output(["freebayes", "--version"])
    version = LooseVersion(version.split("version:")[-1].strip().replace("v", ""))
    return "bcftools annotate -x FMT/DPR |" if version < LooseVersion("1.1.0") else ""
Example #4
File: versions.py Project: sigma/emacs-ci
 def isCompatibleVersionNumber(self, version):
     v = LooseVersion(version).version
     ref = LooseVersion(self._version).version
     try:
         for cpt in ref:
             head = v.pop(0)
             if cpt != head:
                 return False
         return True
     except IndexError:
         return False
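
The same prefix-matching idea as a standalone sketch (the module-level function below is a hypothetical rewrite of the method above):

from distutils.version import LooseVersion

def is_compatible(candidate, reference):
    v = LooseVersion(candidate).version
    for cpt in LooseVersion(reference).version:
        if not v or v.pop(0) != cpt:
            return False
    return True

print(is_compatible('24.5.1', '24.5'))  # True: any 24.5.x satisfies 24.5
print(is_compatible('24.6.0', '24.5'))  # False: second component differs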
Example #5
File: objects.py Project: darvid/borealis
 def __init__(self, string):
     match = re.match(self.version_re, string)
     if not match:
         raise Exception("invalid version string format")
     LooseVersion.__init__(self, string)
     self.epoch = match.group(1) or 0
     self.version = match.group(2)
     # someone please inform foobnix's maintainer that the letter "o" should
     # never, ever, ever, *ever* be used to represent zero.
     if match.group(3) == "o":
         self.release = 0
     else:
         self.release = int(match.group(3)) if match.group(3) else 1
Example #6
File: version.py Project: harshach/kafka
    def __init__(self, version_string):
        self.is_dev = (version_string.lower() == "dev")
        if self.is_dev:
            version_string = kafkatest_version()

            # Drop dev suffix if present
            dev_suffix_index = version_string.find(".dev")
            if dev_suffix_index >= 0:
                version_string = version_string[:dev_suffix_index]

        # Don't use the form super(...).__init__(...) because
        # LooseVersion is an "old style" python class
        LooseVersion.__init__(self, version_string)
Example #7
def bump_version(ctx, major=False, minor=False, micro=False):
    """
    Bump the version number in the VERSION file.
    """

    from distutils.version import LooseVersion

    if not (major or minor or micro):
        micro = True

    with open('VERSION') as F:
        vdata = F.read()
        version = LooseVersion(vdata).version
        version = dict(enumerate(version))

    # Fix major
    if major:
        version[0] += 1
        for k, v in version.items():
            if isinstance(v, int) and not k == 0:
                version[k] = 0

    # Fix minor
    minor_idx = 1
    if minor:
        while isinstance(version.get(minor_idx), str):
            minor_idx += 1
        version[minor_idx] = version.get(minor_idx, 0) + 1

        for k, v in version.items():
            if isinstance(v, int) and k > minor_idx:
                version[k] = 0

    # Fix micro
    micro_idx = minor_idx + 1
    if micro:
        while isinstance(version.get(micro_idx), str):
            micro_idx += 1
        version[micro_idx] = version.get(micro_idx, 0) + 1

        for k in list(version):
            if k > micro_idx:
                del version[k]

    # Reconstruct version string
    vstring = ''
    for (i, v) in sorted(version.items()):
        if i and isinstance(v, int) and isinstance(version[i - 1], int):
            vstring += '.%s' % v
        else:
            vstring += str(v)
    vstring += '\n'

    # Save version
    with open('VERSION', 'w') as F:
        F.write(vstring)
    print('Version bumped from %s to %s' % (vdata.strip(), vstring.strip()))
    return vstring
Example #8
    def __init__(self, version_string):
        self.is_trunk = (version_string.lower() == "trunk")
        if self.is_trunk:
            # Since "trunk" may actually be a branch that is not trunk,
            # use kafkatest_version() for comparison purposes,
            # and track whether we're in "trunk" with a flag
            version_string = kafkatest_version()

            # Drop dev suffix if present
            dev_suffix_index = version_string.find(".dev")
            if dev_suffix_index >= 0:
                version_string = version_string[:dev_suffix_index]

        # Don't use the form super(...).__init__(...) because
        # LooseVersion is an "old style" python class
        LooseVersion.__init__(self, version_string)
Example #9
    def __init__(self, source=None):
        PwebFormatter.__init__(self, source)
        pandoc_ver = False

        try:
            pandoc = Popen(["pandoc", "--version"], stdin=PIPE, stdout=PIPE)
            pandoc_ver = pandoc.communicate()[0].decode('utf-8').split("\n")[0]
            pandoc_ver = LooseVersion(pandoc_ver.split(" ")[1])
        except Exception:
            pandoc_ver = LooseVersion("0.0.1")
            print("Error in trying to detect pandoc version")

        if pandoc_ver < LooseVersion("1.16.0"):
            self.new_pandoc = False
            print("Your pandoc version is below 1.16, not setting figure size and id")
        else:
            self.new_pandoc = True
Example #10
    def deprecated(self, msg, cur_ver, max_ver, depth=2, exception=None, *args, **kwargs):
        """
        Log deprecation message, throw error if current version is passed given threshold.

        Checks only major/minor version numbers (MAJ.MIN.x) by default, controlled by 'depth' argument.
        """
        loose_cv = LooseVersion(cur_ver)
        loose_mv = LooseVersion(max_ver)

        loose_cv.version = loose_cv.version[:depth]
        loose_mv.version = loose_mv.version[:depth]

        if loose_cv >= loose_mv:
            self.raiseException("DEPRECATED (since v%s) functionality used: %s" % (max_ver, msg), exception=exception)
        else:
            deprecation_msg = "Deprecated functionality, will no longer work in v%s: %s" % (max_ver, msg)
            self.warning(deprecation_msg)
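
A hedged illustration of the depth truncation: with the default depth=2, only MAJ.MIN take part in the comparison.

from distutils.version import LooseVersion

loose_cv = LooseVersion('4.8.1')
loose_mv = LooseVersion('4.9')
loose_cv.version = loose_cv.version[:2]  # [4, 8]
loose_mv.version = loose_mv.version[:2]  # [4, 9]
print(loose_cv >= loose_mv)  # False: not yet past the removal threshold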
Example #11
 def version_tuple(self):
     """Return a tuple in the format of django.VERSION."""
     version = self.version.replace('-', '').replace('_', '')
     version = LooseVersion(version).version
     if len(version) == 2:
         version.append(0)
     if not isinstance(version[2], int):
         version.insert(2, 0)
     if len(version) == 3:
         version.append('final')
     if version[3] not in ('alpha', 'beta', 'rc', 'final'):
         version[3] = {'a': 'alpha', 'b': 'beta', 'c': 'rc'}[version[3]]
     if len(version) == 4:
         version.append(0)
     return tuple(version)
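
For reference, a short illustration of the LooseVersion tokenization this method relies on: digit runs and letter runs become separate list items.

from distutils.version import LooseVersion

print(LooseVersion('1.11').version)     # [1, 11]
print(LooseVersion('1.11rc1').version)  # [1, 11, 'rc', 1]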
Example #12
 def version_tuple(self):
     """Return a tuple in the format of django.VERSION."""
     version = self.version.replace("-", "").replace("_", "")
     version = LooseVersion(version).version
     if len(version) == 2:
         version.append(0)
     if not isinstance(version[2], int):
         version.insert(2, 0)
     if len(version) == 3:
         version.append("final")
     if version[3] not in ("alpha", "beta", "rc", "final"):
         version[3] = {"a": "alpha", "b": "beta", "c": "rc"}[version[3]]
     if len(version) == 4:
         version.append(0)
     return tuple(version)
Example #13
def _cmp(self, other):
    if isinstance(other, str):
        other = LooseVersion(other)

    stypes = map(lambda c: str if isinstance(c, str) else int, self.version)
    otypes = map(lambda c: str if isinstance(c, str) else int, other.version)
    
    for i, (stype, otype) in enumerate(zip(stypes, otypes)):
        if stype == str and otype == int:
            other.version[i] = str(other.version[i])
        if stype == int and otype == str:
            self.version[i] = str(self.version[i])
    
    if self.version == other.version:
        return 0
    if self.version < other.version:
        return -1
    if self.version > other.version:
        return 1
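
The coercion above matters on Python 3, where comparing an int component with a str component raises; a hedged demonstration with plain LooseVersion:

from distutils.version import LooseVersion

a = LooseVersion('1.0a')   # .version == [1, 0, 'a']
b = LooseVersion('1.0.1')  # .version == [1, 0, 1]
try:
    a < b
except TypeError as exc:
    print('plain LooseVersion fails on Python 3:', exc)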
Example #14
    def __init__(self, vstring=None, v_prefix=None):
        self._v_prefix = v_prefix

        if isinstance(vstring, (list, tuple)):
            type_ = type(vstring)
            vstring = '.'.join(str(i) for i in vstring)
        else:
            type_ = list

        vstring = vstring.strip()

        if vstring.startswith('v'):
            vstring = vstring[1:]
            if vstring.startswith('!'):
                raise ValueError('Invalid use of epoch')
            if v_prefix is not False:
                self._v_prefix = True

        # Can not use super(..) on Python 2.7
        LooseVersion.__init__(self, vstring)
        if self._v_prefix:
            self.vstring = 'v' + self.vstring
        if len(self.version) > 1 and self.version[1] == '!':
            self._epoch = self.version[0]
            if not isinstance(self._epoch, int) or len(self.version) < 3:
                raise ValueError('Invalid use of epoch')

        # Normalise to lower case
        self.version = [
            x if isinstance(x, int) else x.lower() for x in self.version
            if x not in ('-', '_')]

        if self.version[-1] != '*' and not isinstance(self.version[-1], int):
            self.version += (0, )

        if type_ is tuple:
            self.version = tuple(self.version)

        self._final = None
        self._previous = None
Example #15
File: dependencies.py Project: H1d3r/cobra
 def __init__(self, vstring=None):
     self.vstring = vstring
     self.version = []
     LooseVersion.__init__(self, vstring=vstring)
Example #16
File: utils.py Project: semyont/dask
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.api.types import is_categorical_dtype, is_scalar
try:
    from pandas.api.types import is_datetime64tz_dtype
except ImportError:
    # pandas < 0.19.2
    from pandas.core.common import is_datetime64tz_dtype

from ..core import get_deps
from ..local import get_sync
from ..utils import asciitable, is_arraylike


PANDAS_VERSION = LooseVersion(pd.__version__)


def shard_df_on_index(df, divisions):
    """ Shard a DataFrame by ranges on its index

    Examples
    --------

    >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
    >>> df
        a  b
    0   0  5
    1  10  4
    2  20  3
    3  30  2
Example #17
def test_tensorflow_version():
    # Check TensorFlow Version
    assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), \
        'Please use TensorFlow version 1.0 or newer.  You are using {}'.format(tf.__version__)
    print('TensorFlow Version: {}'.format(tf.__version__))
Example #18
 def __init__(self, vstring=None):
     LooseVersion.__init__(self, vstring=vstring)
     self.version = tuple(map(str, self.version))
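
Stringifying every component sidesteps the mixed int/str TypeError under Python 3, at the cost of numeric ordering; a hedged sketch:

from distutils.version import LooseVersion

print(tuple(map(str, LooseVersion('1.10.2').version)))  # ('1', '10', '2')
# Caveat: as strings, '10' sorts before '9', so purely numeric ordering is lost.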
Example #19
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Python-Tempo'
copyright = u'2015, Andrew Pashkin'
author = u'Andrew Pashkin'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
_version = LooseVersion()

with open(os.path.join(
  os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, 'VERSION'
)) as f:
    _version.parse(f.read().strip())

version = '.'.join([str(e) for e in _version.version][:2])
# The full version, including alpha/beta/rc tags.
release = _version.vstring

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
Example #20
  # Get the flags passed by the driver and look for -dwarf-version.
  cmd = f'{llvm_config.use_llvm_tool("clang")} -g -xc  -c - -v -### --target={triple}'
  stderr = subprocess.run(cmd.split(), stderr=subprocess.PIPE).stderr.decode()
  match = re.search(r'-dwarf-version=(\d+)', stderr)
  if match is None:
    print("Cannot determine default dwarf version", file=sys.stderr)
    return None
  return match.group(1)

# Some cross-project-tests use gdb, but not all versions of gdb are compatible
# with clang's dwarf. Add feature `gdb-clang-incompatibility` to signal that
# there's an incompatibility between clang's default dwarf version for this
# platform and the installed gdb version.
dwarf_version_string = get_clang_default_dwarf_version_string(config.host_triple)
gdb_version_string = get_gdb_version_string()
if dwarf_version_string and gdb_version_string:
  if int(dwarf_version_string) >= 5:
    if LooseVersion(gdb_version_string) < LooseVersion('10.1'):
      # Example for llgdb-tests, which use lldb on darwin but gdb elsewhere:
      # XFAIL: !system-darwin && gdb-clang-incompatibility
      config.available_features.add('gdb-clang-incompatibility')
      print("XFAIL some tests: use gdb version >= 10.1 to restore test coverage", file=sys.stderr)

llvm_config.feature_config(
    [('--build-mode', {'Debug|RelWithDebInfo': 'debug-info'})]
)

# Allow 'REQUIRES: XXX-registered-target' in tests.
for arch in config.targets_to_build:
    config.available_features.add(arch.lower() + '-registered-target')
Example #21
def make_lammps_input(ensemble,
                      conf_file,
                      graphs,
                      nsteps,
                      dt,
                      neidelay,
                      trj_freq,
                      mass_map,
                      temp,
                      jdata,
                      tau_t=0.1,
                      pres=None,
                      tau_p=0.5,
                      pka_e=None,
                      ele_temp_f=None,
                      ele_temp_a=None,
                      max_seed=1000000,
                      nopbc=False,
                      deepmd_version='0.1'):
    if (ele_temp_f is not None or ele_temp_a
            is not None) and LooseVersion(deepmd_version) < LooseVersion('1'):
        raise RuntimeError(
            'the electron temperature is only supported by deepmd-kit >= 1.0.0, please upgrade your deepmd-kit'
        )
    if ele_temp_f is not None and ele_temp_a is not None:
        raise RuntimeError(
            'the frame style ele_temp and atom style ele_temp should not be set at the same time'
        )
    ret = "variable        NSTEPS          equal %d\n" % nsteps
    ret += "variable        THERMO_FREQ     equal %d\n" % trj_freq
    ret += "variable        DUMP_FREQ       equal %d\n" % trj_freq
    ret += "variable        TEMP            equal %f\n" % temp
    if ele_temp_f is not None:
        ret += "variable        ELE_TEMP        equal %f\n" % ele_temp_f
    if ele_temp_a is not None:
        ret += "variable        ELE_TEMP        equal %f\n" % ele_temp_a
    ret += "variable        PRES            equal %f\n" % pres
    ret += "variable        TAU_T           equal %f\n" % tau_t
    ret += "variable        TAU_P           equal %f\n" % tau_p
    ret += "\n"
    ret += "units           metal\n"
    if nopbc:
        ret += "boundary        f f f\n"
    else:
        ret += "boundary        p p p\n"
    ret += "atom_style      atomic\n"
    ret += "\n"
    ret += "neighbor        1.0 bin\n"
    if neidelay is not None:
        ret += "neigh_modify    delay %d\n" % neidelay
    ret += "\n"
    ret += "box          tilt large\n"
    ret += "if \"${restart} > 0\" then \"read_restart dpgen.restart.*\" else \"read_data %s\"\n" % conf_file
    ret += "change_box   all triclinic\n"
    for jj in range(len(mass_map)):
        ret += "mass            %d %f\n" % (jj + 1, mass_map[jj])
    graph_list = ""
    for ii in graphs:
        graph_list += ii + " "
    if LooseVersion(deepmd_version) < LooseVersion('1'):
        # 0.x
        ret += "pair_style      deepmd %s ${THERMO_FREQ} model_devi.out\n" % graph_list
    else:
        # 1.x
        keywords = ""
        if jdata.get('use_clusters', False):
            keywords += "atomic "
        if jdata.get('use_relative', False):
            keywords += "relative %s " % jdata['epsilon']
        if jdata.get('use_relative_v', False):
            keywords += "relative_v %s " % jdata['epsilon_v']
        if ele_temp_f is not None:
            keywords += "fparam ${ELE_TEMP}"
        if ele_temp_a is not None:
            keywords += "aparam ${ELE_TEMP}"
        ret += "pair_style      deepmd %s out_freq ${THERMO_FREQ} out_file model_devi.out %s\n" % (
            graph_list, keywords)
    ret += "pair_coeff      \n"
    ret += "\n"
    ret += "thermo_style    custom step temp pe ke etotal press vol lx ly lz xy xz yz\n"
    ret += "thermo          ${THERMO_FREQ}\n"
    ret += "dump            1 all custom ${DUMP_FREQ} traj/*.lammpstrj id type x y z fx fy fz\n"
    ret += "restart         10000 dpgen.restart\n"
    ret += "\n"
    if pka_e is None:
        ret += "if \"${restart} == 0\" then \"velocity        all create ${TEMP} %d\"" % (
            random.randrange(max_seed - 1) + 1)
    else:
        sys = dpdata.System(conf_file, fmt='lammps/lmp')
        sys_data = sys.data
        pka_mass = mass_map[sys_data['atom_types'][0] - 1]
        pka_vn = pka_e * pc.electron_volt / \
                 (0.5 * pka_mass * 1e-3 / pc.Avogadro * (pc.angstrom / pc.pico) ** 2)
        pka_vn = np.sqrt(pka_vn)
        print(pka_vn)
        pka_vec = _sample_sphere()
        pka_vec *= pka_vn
        ret += 'group           first id 1\n'
        ret += 'if \"${restart} == 0\" then \"velocity        first set %f %f %f\"\n' % (
            pka_vec[0], pka_vec[1], pka_vec[2])
        ret += 'fix	       2 all momentum 1 linear 1 1 1\n'
    ret += "\n"
    if ensemble.split('-')[0] == 'npt':
        assert (pres is not None)
        if nopbc:
            raise RuntimeError('ensemble %s is conflicting with nopbc' %
                               ensemble)
    if ensemble == "npt" or ensemble == "npt-i" or ensemble == "npt-iso":
        ret += "fix             1 all npt temp ${TEMP} ${TEMP} ${TAU_T} iso ${PRES} ${PRES} ${TAU_P}\n"
    elif ensemble == 'npt-a' or ensemble == 'npt-aniso':
        ret += "fix             1 all npt temp ${TEMP} ${TEMP} ${TAU_T} aniso ${PRES} ${PRES} ${TAU_P}\n"
    elif ensemble == 'npt-t' or ensemble == 'npt-tri':
        ret += "fix             1 all npt temp ${TEMP} ${TEMP} ${TAU_T} tri ${PRES} ${PRES} ${TAU_P}\n"
    elif ensemble == "nvt":
        ret += "fix             1 all nvt temp ${TEMP} ${TEMP} ${TAU_T}\n"
    elif ensemble == 'nve':
        ret += "fix             1 all nve\n"
    else:
        raise RuntimeError("unknown ensemble " + ensemble)
    if nopbc:
        ret += "velocity        all zero linear\n"
        ret += "fix             fm all momentum 1 linear 1 1 1\n"
    ret += "\n"
    ret += "timestep        %f\n" % dt
    ret += "run             ${NSTEPS} upto\n"
    return ret
Example #22
 def check_bigiq_version(self):
     version = bigiq_version(self.client)
     if LooseVersion(version) >= LooseVersion('6.1.0'):
         raise F5ModuleError(
             'Module supports only BIGIQ version 6.0.x or lower.'
         )
Example #23
"""
Attacks for TensorFlow Eager
"""
from distutils.version import LooseVersion

import numpy as np
import tensorflow as tf

from cleverhans import attacks
from cleverhans import utils
from cleverhans.model import CallableModelWrapper, wrapper_warning
from cleverhans.model import Model
from cleverhans.loss import LossCrossEntropy

_logger = utils.create_logger("cleverhans.attacks_tfe")

if LooseVersion(tf.__version__) < LooseVersion('1.8.0'):
    error_msg = ('For eager execution, '
                 'use Tensorflow version greater than 1.8.0.')
    raise ValueError(error_msg)


class Attack(attacks.Attack):
    """
    Abstract base class for all eager attack classes.
    :param model: An instance of the cleverhans.model.Model class.
    :param back: The backend to use. Inherited from AttackBase class.
    :param dtypestr: datatype of the input data samples and crafted
                     adversarial attacks.
    """

    def __init__(self, model, dtypestr='float32'):
Example #24
File: sparse.py Project: wangxiaoyunNV/dgl
import torch as th
from distutils.version import LooseVersion
from ...base import is_all, ALL
from ...sparse import _gspmm, _gspmm_hetero, _gsddmm, _gsddmm_hetero, _segment_reduce, _bwd_segment_cmp, _edge_softmax_forward, _edge_softmax_backward
from ...sparse import _csrmm, _csrsum, _csrmask, _scatter_add, _update_grad_minmax_hetero
from ...sparse import _gather_mm, _gather_mm_scatter, _segment_mm, _segment_mm_backward_B
from ...heterograph_index import create_unitgraph_from_csr

if LooseVersion(th.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import custom_fwd, custom_bwd
else:
    import functools
    # PyTorch gained native automatic mixed precision in 1.6; define no-op
    # custom_fwd and custom_bwd wrappers to stay compatible with PyTorch 1.5.
    def custom_fwd(**kwargs):
        def custom_fwd_inner(fwd):
            @functools.wraps(fwd)
            def decorate_fwd(*args, **kwargs):
                return fwd(*args, **kwargs)
            return decorate_fwd
        return custom_fwd_inner

    def custom_bwd(bwd):
        @functools.wraps(bwd)
        def decorate_bwd(*args, **kwargs):
            return bwd(*args, **kwargs)
        return decorate_bwd
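
A hedged usage sketch: with either branch above, an autograd Function (MyOp below is hypothetical) is decorated the same way regardless of the installed PyTorch version.

class MyOp(th.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=th.float16)  # kwarg is ignored by the fallback shim
    def forward(ctx, x):
        return x * 2

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_out):
        return grad_out * 2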
Example #25
#!/usr/bin/env python
import json
import cv2
import tensorflow.contrib.slim as slim
import datetime
import random
import time
import string
import argparse
import os
import threading
from scipy import misc
import tensorflow as tf
import numpy as np
from distutils.version import LooseVersion
if LooseVersion(tf.__version__) >= LooseVersion('1.0'):
    rnn_cell = tf.contrib.rnn
else:
    try:
        from tensorflow.models.rnn import rnn_cell
    except ImportError:
        rnn_cell = tf.nn.rnn_cell
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

random.seed(0)
np.random.seed(0)

from utils import train_utils, googlenet_load, tf_concat

Example #26
    for _setup, QT_API in _candidates:
        try:
            _setup()
        except ImportError:
            continue
        break
    else:
        raise ImportError("Failed to import any qt binding")
else:  # We should not get there.
    raise AssertionError("Unexpected QT_API: {}".format(QT_API))


# Fixes issues with Big Sur
# https://bugreports.qt.io/browse/QTBUG-87014, fixed in qt 5.15.2
if (sys.platform == 'darwin' and
        LooseVersion(platform.mac_ver()[0]) >= LooseVersion("10.16") and
        LooseVersion(QtCore.qVersion()) < LooseVersion("5.15.2") and
        "QT_MAC_WANTS_LAYER" not in os.environ):
    os.environ["QT_MAC_WANTS_LAYER"] = "1"


# These globals are only defined for backcompatibility purposes.
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
           pyqt5=(QT_API_PYQT5, 5), pyside2=(QT_API_PYSIDE2, 5))

QT_RC_MAJOR_VERSION = int(QtCore.qVersion().split(".")[0])

if QT_RC_MAJOR_VERSION == 4:
    _api.warn_deprecated("3.3", name="support for Qt4")

Example #27
import os
import unittest

import mxnet as mx

from distutils.version import LooseVersion

from mxnet.base import MXNetError
from mxnet.test_utils import almost_equal, same

import horovod.mxnet as hvd

has_gpu = mx.context.num_gpus() > 0

ccl_supported_types = set(['int32', 'int64', 'float32', 'float64'])

# MXNet 1.4.x will kill test MPI process if error occurs during operation enqueue. Skip
# those tests for versions earlier than 1.5.0.
_skip_enqueue_errors = LooseVersion(mx.__version__) < LooseVersion('1.5.0')

class MXTests(unittest.TestCase):
    """
    Tests for ops in horovod.mxnet.
    """

    def _current_context(self):
        if has_gpu:
            return mx.gpu(hvd.local_rank())
        else:
            return mx.current_context()

    def filter_supported_types(self, types):
        if 'CCL_ROOT' in os.environ:
            types = [t for t in types if t in ccl_supported_types]
        return types
Example #28
 def version_sorter(x):
     return LooseVersion(x) if key is None else LooseVersion(key(x))
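
A hedged usage sketch, assuming version_sorter and its closure variable key are in scope (key=None here):

key = None
tags = ['v1.10', 'v1.9', 'v1.2']
print(sorted(tags, key=version_sorter))  # ['v1.2', 'v1.9', 'v1.10']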
Example #29
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
import pickle 
  
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer.  You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))


def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # TODO: Implement function
    #   Use tf.saved_model.loader.load to load the model and weights
    vgg_tag = 'vgg16'
    vgg_input_tensor_name = 'image_input:0'
    vgg_keep_prob_tensor_name = 'keep_prob:0'
Example #30
    def __init__(self, client):

        super(ContainerManager, self).__init__()

        self.client = client
        self.project_src = None
        self.files = None
        self.project_name = None
        self.state = None
        self.definition = None
        self.hostname_check = None
        self.timeout = None
        self.remove_images = None
        self.remove_orphans = None
        self.remove_volumes = None
        self.stopped = None
        self.restarted = None
        self.recreate = None
        self.build = None
        self.dependencies = None
        self.services = None
        self.scale = None
        self.debug = None
        self.pull = None
        self.nocache = None

        for key, value in client.module.params.items():
            setattr(self, key, value)

        self.check_mode = client.check_mode

        if not self.debug:
            self.debug = client.module._debug

        self.options = dict()
        self.options.update(self._get_auth_options())
        self.options[u'--skip-hostname-check'] = (not self.hostname_check)

        if self.project_name:
            self.options[u'--project-name'] = self.project_name

        if self.files:
            self.options[u'--file'] = self.files

        if not HAS_COMPOSE:
            self.client.fail(
                "Unable to load docker-compose. Try `pip install docker-compose`. Error: %s"
                % HAS_COMPOSE_EXC)

        if LooseVersion(compose_version) < LooseVersion(
                MINIMUM_COMPOSE_VERSION):
            self.client.fail(
                "Found docker-compose version %s. Minimum required version is %s. "
                "Upgrade docker-compose to a min version of %s." %
                (compose_version, MINIMUM_COMPOSE_VERSION,
                 MINIMUM_COMPOSE_VERSION))

        self.log("options: ")
        self.log(self.options, pretty_print=True)

        if self.definition:
            if not HAS_YAML:
                self.client.fail(
                    "Unable to load yaml. Try `pip install PyYAML`. Error: %s"
                    % HAS_YAML_EXC)

            if not self.project_name:
                self.client.fail(
                    "Parameter error - project_name required when providing definition."
                )

            self.project_src = tempfile.mkdtemp(prefix="ansible")
            compose_file = os.path.join(self.project_src, "docker-compose.yml")
            try:
                self.log('writing: ')
                self.log(yaml.dump(self.definition, default_flow_style=False))
                with open(compose_file, 'w') as f:
                    f.write(
                        yaml.dump(self.definition, default_flow_style=False))
            except Exception as exc:
                self.client.fail("Error writing to %s - %s" %
                                 (compose_file, str(exc)))
        else:
            if not self.project_src:
                self.client.fail("Parameter error - project_src required.")

        try:
            self.log("project_src: %s" % self.project_src)
            self.project = project_from_options(self.project_src, self.options)
        except Exception as exc:
            self.client.fail("Configuration error - %s" % str(exc))
Example #31
def main():
    """Check the installation."""

    python_version = sys.version.replace("\n", " ")
    print(f"[x] python={python_version}")

    print()
    print("Python modules:")
    try:
        import torch

        print(f"[x] torch={torch.__version__}")

        if torch.cuda.is_available():
            print(f"[x] torch cuda={torch.version.cuda}")
        else:
            print("[ ] torch cuda")

        if torch.backends.cudnn.is_available():
            print(f"[x] torch cudnn={torch.backends.cudnn.version()}")
        else:
            print("[ ] torch cudnn")

        if torch.distributed.is_nccl_available():
            print("[x] torch nccl")
        else:
            print("[ ] torch nccl")

    except ImportError:
        print("[ ] torch")

    try:
        import chainer

        print(f"[x] chainer={chainer.__version__}")
        if LooseVersion(chainer.__version__) != LooseVersion("6.0.0"):
            print(f"Warning! chainer={chainer.__version__} is not supported. "
                  "Supported version is 6.0.0")

        if chainer.backends.cuda.available:
            print("[x] chainer cuda")
        else:
            print("[ ] chainer cuda")

        if chainer.backends.cuda.cudnn_enabled:
            print("[x] chainer cudnn")
        else:
            print("[ ] chainer cudnn")

    except ImportError:
        print("[ ] chainer")

    try:
        import cupy

        print(f"[x] cupy={cupy.__version__}")
        try:
            from cupy.cuda import nccl  # NOQA

            print("[x] cupy nccl")
        except ImportError:
            print("[ ] cupy nccl")
    except ImportError:
        print("[ ] cupy")

    to_install = []
    for name, versions, installer in module_list:
        try:
            m = importlib.import_module(name)
            if hasattr(m, "__version__"):
                version = m.__version__
                print(f"[x] {name}={version}")
                if versions is not None and version not in versions:
                    print(f"Warning! {name}={version} is not supported. "
                          f"Supported versions are {versions}")
            else:
                print(f"[x] {name}")
        except ImportError:
            print(f"[ ] {name}")
            if installer is not None:
                to_install.append(f"Use '{installer}' to install {name}")

    print()
    print("Executables:")
    for name, installer in executable_list:
        if shutil.which(name) is not None:
            print(f"[x] {name}")
        else:
            print(f"[ ] {name}")
            if installer is not None:
                to_install.append(f"Use '{installer}' to install {name}")

    print()
    print("INFO:")
    for m in to_install:
        print(m)
Example #32
    def forward(self, hs_pad, hlens, ys_pad, strm_idx=0, tgt_lang_ids=None):
        """Decoder forward

        :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
                                    [in multi-encoder case,
                                    list of torch.Tensor, [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]
        :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
                                    [in multi-encoder case, list of torch.Tensor, [(B), (B), ..., ]
        :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
        :param int strm_idx: stream index indicates the index of decoding stream.
        :param torch.Tensor tgt_lang_ids: batch of target language id tensor (B, 1)
        :return: attention loss value
        :rtype: torch.Tensor
        :return: accuracy
        :rtype: float
        """
        # to support multiple encoder asr mode, in single encoder mode, convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlens = [hlens]

        # TODO(kan-bayashi): need to make more smart way
        ys = [y[y != self.ignore_id] for y in ys_pad]  # parse padded ys
        # attention index for the attention module
        # in SPA (speaker parallel attention), att_idx is used to select attention module. In other cases, it is 0.
        att_idx = min(strm_idx, len(self.att) - 1)

        # hlens should be list of list of integer
        hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]

        self.loss = None
        # prepare input and output word sequences with sos/eos IDs
        eos = ys[0].new([self.eos])
        sos = ys[0].new([self.sos])
        if self.replace_sos:
            ys_in = [
                torch.cat([idx, y], dim=0) for idx, y in zip(tgt_lang_ids, ys)
            ]
        else:
            ys_in = [torch.cat([sos, y], dim=0) for y in ys]
        ys_out = [torch.cat([y, eos], dim=0) for y in ys]

        # padding for ys with -1
        # pys: utt x olen
        ys_in_pad = pad_list(ys_in, self.eos)
        ys_out_pad = pad_list(ys_out, self.ignore_id)

        # get dim, length info
        batch = ys_out_pad.size(0)
        olength = ys_out_pad.size(1)
        for idx in range(self.num_encs):
            logging.info(self.__class__.__name__ +
                         'Number of Encoder:{}; enc{}: input lengths: {}.'.
                         format(self.num_encs, idx + 1, hlens[idx]))
        logging.info(self.__class__.__name__ + ' output lengths: ' +
                     str([y.size(0) for y in ys_out]))

        # initialization
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in six.moves.range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        z_all = []
        if self.num_encs == 1:
            att_w = None
            self.att[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * (self.num_encs)  # atts
            for idx in range(self.num_encs + 1):
                self.att[idx].reset(
                )  # reset pre-computation of h in atts and han

        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim

        # loop for an output sequence
        for i in six.moves.range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att[att_idx](
                    hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w)
            else:
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att[idx](
                        hs_pad[idx], hlens[idx],
                        self.dropout_dec[0](z_list[0]), att_w_list[idx])
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlens_han = [self.num_encs] * len(ys_in)
                att_c, att_w_list[self.num_encs] = self.att[self.num_encs](
                    hs_pad_han, hlens_han, self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs])
            if i > 0 and random.random() < self.sampling_probability:
                logging.info(' scheduled sampling ')
                z_out = self.output(z_all[-1])
                z_out = np.argmax(z_out.detach().cpu(), axis=1)
                z_out = self.dropout_emb(self.embed(to_device(self, z_out)))
                ey = torch.cat((z_out, att_c), dim=1)  # utt x (zdim + hdim)
            else:
                ey = torch.cat((eys[:, i, :], att_c),
                               dim=1)  # utt x (zdim + hdim)
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list,
                                              c_list)
            if self.context_residual:
                z_all.append(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c),
                              dim=-1))  # utt x (zdim + hdim)
            else:
                z_all.append(self.dropout_dec[-1](z_list[-1]))  # utt x (zdim)

        z_all = torch.stack(z_all, dim=1).view(batch * olength, -1)
        # compute loss
        y_all = self.output(z_all)
        if LooseVersion(torch.__version__) < LooseVersion('1.0'):
            reduction_str = 'elementwise_mean'
        else:
            reduction_str = 'mean'
        self.loss = F.cross_entropy(y_all,
                                    ys_out_pad.view(-1),
                                    ignore_index=self.ignore_id,
                                    reduction=reduction_str)
        # compute perplexity
        ppl = math.exp(self.loss.item())
        # -1: eos, which is removed in the loss computation
        self.loss *= (np.mean([len(x) for x in ys_in]) - 1)
        acc = th_accuracy(y_all, ys_out_pad, ignore_label=self.ignore_id)
        logging.info('att loss:' + ''.join(str(self.loss.item()).split('\n')))

        # show predicted character sequence for debug
        if self.verbose > 0 and self.char_list is not None:
            ys_hat = y_all.view(batch, olength, -1)
            ys_true = ys_out_pad
            for (i, y_hat), y_true in zip(
                    enumerate(ys_hat.detach().cpu().numpy()),
                    ys_true.detach().cpu().numpy()):
                if i == MAX_DECODER_OUTPUT:
                    break
                idx_hat = np.argmax(y_hat[y_true != self.ignore_id], axis=1)
                idx_true = y_true[y_true != self.ignore_id]
                seq_hat = [self.char_list[int(idx)] for idx in idx_hat]
                seq_true = [self.char_list[int(idx)] for idx in idx_true]
                seq_hat = "".join(seq_hat)
                seq_true = "".join(seq_true)
                logging.info("groundtruth[%d]: " % i + seq_true)
                logging.info("prediction [%d]: " % i + seq_hat)

        if self.labeldist is not None:
            if self.vlabeldist is None:
                self.vlabeldist = to_device(self,
                                            torch.from_numpy(self.labeldist))
            loss_reg = -torch.sum(
                (F.log_softmax(y_all, dim=1) * self.vlabeldist).view(-1),
                dim=0) / len(ys_in)
            self.loss = (
                1. - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg

        return self.loss, acc, ppl
Example #33
        "python-dateutil >= 2.6.1",
        "pytz >= 2017.2",
        f"numpy >= {min_numpy_ver}",
    ],
    "setup_requires": [f"numpy >= {min_numpy_ver}"],
    "zip_safe":
    False,
}

try:
    import Cython

    _CYTHON_VERSION = Cython.__version__
    from Cython.Build import cythonize

    _CYTHON_INSTALLED = _CYTHON_VERSION >= LooseVersion(min_cython_ver)
except ImportError:
    _CYTHON_VERSION = None
    _CYTHON_INSTALLED = False
    cythonize = lambda x, *args, **kwargs: x  # dummy func

# The import of Extension must be after the import of Cython, otherwise
# we do not get the appropriately patched class.
# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
from distutils.extension import Extension  # noqa: E402 isort:skip
from distutils.command.build import build  # noqa: E402 isort:skip

if _CYTHON_INSTALLED:
    from Cython.Distutils.old_build_ext import old_build_ext as _build_ext

    cython = True
Example #34
File: conf.py Project: bashtage/arch
examples = glob.glob(os.path.join(example_path, '*.ipynb'))
for example in examples:
    _, filename = os.path.split(example)
    mod = filename.split('_')[0]
    target = os.path.join(root, mod, filename)
    shutil.copyfile(example, target)

# -- Project information -----------------------------------------------------

project = 'arch'
copyright = '2018, Kevin Sheppard'
author = 'Kevin Sheppard'

# The short X.Y version
version = arch.__version__
ver = LooseVersion(arch.__version__).version
if '+' in ver:
    loc = ver.index('+')
    version = '.'.join(map(str, ver[:loc]))
    version += ' (+{0})'.format(ver[loc+1])
# The full version, including alpha/beta/rc tags.
release = arch.__version__


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
Example #35
import numpy as np
import pandas as pd
import matplotlib as mpl
import nose.tools as nt
from distutils.version import LooseVersion

import numpy.testing as npt
from numpy.testing.decorators import skipif
import pandas.util.testing as tm

from . import PlotTestCase
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot
from ..categorical import pointplot
from ..linearmodels import pairplot
from ..utils import categorical_order

rs = np.random.RandomState(0)

old_matplotlib = LooseVersion(mpl.__version__) < "1.4"


class TestFacetGrid(PlotTestCase):

    df = pd.DataFrame(dict(x=rs.normal(size=60),
                           y=rs.gamma(4, size=60),
                           a=np.repeat(list("abc"), 20),
                           b=np.tile(list("mn"), 30),
                           c=np.tile(list("tuv"), 20),
                           d=np.tile(list("abcdefghij"), 6)))

    def test_self_data(self):

        g = ag.FacetGrid(self.df)
        nt.assert_is(g.data, self.df)
Example #36
 def __str__(self):
     if self.is_trunk:
         return "trunk"
     else:
         return LooseVersion.__str__(self)
Example #37
import sys

import pytest

from distutils.version import LooseVersion

from pandas._libs.internals import BlockPlacement
from pandas.compat import lrange, u

import pandas as pd
from pandas import (
    Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
    SparseArray)
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.internals import BlockManager, SingleBlockManager, make_block
import pandas.util.testing as tm
from pandas.util.testing import (
    assert_almost_equal, assert_frame_equal, assert_series_equal, randn)

# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')


@pytest.fixture
def mgr():
    return create_mgr(
        'a: f8; b: object; c: f8; d: object; e: f8;'
        'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
        'k: M8[ns, US/Eastern]; l: M8[ns, CET];')


def assert_block_equal(left, right):
    tm.assert_numpy_array_equal(left.values, right.values)
    assert left.dtype == right.dtype
    assert isinstance(left.mgr_locs, BlockPlacement)
    assert isinstance(right.mgr_locs, BlockPlacement)
Example #38
def am_i_updated(version):

    version = LooseVersion(version.replace('v', ''))
    last_version = get_last()

    return last_version <= version
Example #39
def monkeypatch_property(cls):
    """
    A decorator to add a single method as a property to an existing class::

        @monkeypatch_property(<someclass>)
        def <newmethod>(self, [...]):
            pass
    """

    def decorator(func):
        setattr(cls, func.__name__, property(func))
        return func
    return decorator


if LooseVersion(get_version()) < LooseVersion('1.10'):
    def ct_render_to_string(template, ctx, **kwargs):
        from django.template import RequestContext

        context_instance = kwargs.get('context')
        if context_instance is None and kwargs.get('request'):
            context_instance = RequestContext(kwargs['request'])

        return render_to_string(
            template,
            ctx,
            context_instance=context_instance)
else:
    def ct_render_to_string(template, ctx, **kwargs):
        return render_to_string(
            template,
Example #40
    return sys.platform == 'win32' or sys.platform == 'cygwin'


def is_platform_linux():
    return sys.platform == 'linux2'


def is_platform_mac():
    return sys.platform == 'darwin'


min_cython_ver = '0.24'
try:
    import Cython
    ver = Cython.__version__
    _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
    _CYTHON_INSTALLED = False

min_numpy_ver = '1.9.0'
setuptools_kwargs = {
    'install_requires': [
        'python-dateutil >= 2.5.0',
        'pytz >= 2011k',
        'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver),
    ],
    'setup_requires': ['numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)],
    'zip_safe':
    False,
}
Example #41
def ensure(module, client):
    subca_name = module.params['subca_name']
    subca_subject_dn = module.params['subca_subject']
    subca_desc = module.params['subca_desc']

    state = module.params['state']

    ipa_subca = client.subca_find(subca_name)
    module_subca = dict(description=subca_desc, subca_subject=subca_subject_dn)

    changed = False
    if state == 'present':
        if not ipa_subca:
            changed = True
            if not module.check_mode:
                client.subca_add(subca_name=subca_name,
                                 subject_dn=subca_subject_dn,
                                 details=module_subca)
        else:
            diff = get_subca_diff(client, ipa_subca, module_subca)
            # IPA does not allow to modify Sub CA's subject DN
            # So skip it for now.
            if 'ipacasubjectdn' in diff:
                diff.remove('ipacasubjectdn')
                del module_subca['subca_subject']

            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    client.subca_mod(subca_name=subca_name,
                                     diff=diff,
                                     details=module_subca)
    elif state == 'absent':
        if ipa_subca:
            changed = True
            if not module.check_mode:
                client.subca_del(subca_name=subca_name)
    elif state == 'disable':
        ipa_version = client.get_ipa_version()
        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
            module.fail_json(
                msg="Current version of IPA server [%s] does not support 'CA disable' option. "
                    "Please upgrade to version 4.4.2 or later" % ipa_version)
        if ipa_subca:
            changed = True
            if not module.check_mode:
                client.subca_disable(subca_name=subca_name)
    elif state == 'enable':
        ipa_version = client.get_ipa_version()
        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
            module.fail_json(
                msg="Current version of IPA server [%s] does not support 'CA enable' option. "
                    "Please upgrade to version 4.4.2 or later" % ipa_version)
        if ipa_subca:
            changed = True
            if not module.check_mode:
                client.subca_enable(subca_name=subca_name)

    return changed, client.subca_find(subca_name)
Example #42
File: cli.py Project: nexw/linode-cli
    def do_request(self, operation, args):
        """
        Makes a request to an operation's URL and returns the resulting JSON, or
        prints an error if a non-200 comes back
        """
        method = getattr(requests, operation.method)
        headers = {
            'Authorization': "Bearer {}".format(self.token),
            'Content-Type': 'application/json',
            'User-Agent': "linode-cli:{}".format(self.version),
        }

        parsed_args = operation.parse_args(args)

        url = operation.url.format(**vars(parsed_args))

        if operation.method == 'get':
            url += '?page={}'.format(self.page)

        body = None
        if operation.method == 'get':
            filters = vars(parsed_args)
            # remove URL parameters
            for p in operation.params:
                if p.name in filters:
                    del filters[p.name]
            # remove empty filters
            filters = {k: v for k, v in filters.items() if v is not None}
            # apply filter, if any
            if filters:
                headers["X-Filter"] = json.dumps(filters)
        else:
            if self.defaults:
                parsed_args = self.config.update(parsed_args)

            to_json = {
                k: v
                for k, v in vars(parsed_args).items() if v is not None
            }

            expanded_json = {}
            # expand paths
            for k, v in to_json.items():
                cur = expanded_json
                for part in k.split('.')[:-1]:
                    if part not in cur:
                        cur[part] = {}
                    cur = cur[part]
                cur[k.split('.')[-1]] = v

            body = json.dumps(expanded_json)

        result = method(self.base_url + url, headers=headers, data=body)

        # if the API indicated it's newer than the client, print a warning
        if 'X-Spec-Version' in result.headers:
            spec_version = result.headers.get('X-Spec-Version')
            try:
                if LooseVersion(spec_version) > LooseVersion(
                        self.spec_version):
                    print(
                        "The API responded with version {}, which is newer than "
                        "the CLI's version of {}.  Please update the CLI to get "
                        "access to the newest features.  You can update with a "
                        "simple `pip install --upgrade linode-cli`".format(
                            spec_version, self.spec_version),
                        file=stderr)
            except Exception:
                # if this comparison or parsing failed, still process output
                print(
                    "Parsing failed when comparing local version {} with server "
                    "version {}.  If this problem persists, please open a ticket "
                    "with `linode-cli support ticket-create`".format(
                        self.spec_version, spec_version),
                    file=stderr)

        if not 199 < result.status_code < 399:
            self._handle_error(result)

        return result
Example #43
    def check(self, instance):
        host = instance.get('host', 'localhost')
        port = int(instance.get('port', 2181))
        timeout = float(instance.get('timeout', 3.0))
        expected_mode = (instance.get('expected_mode') or '').strip()
        tags = instance.get('tags', [])
        cx_args = (host, port, timeout)
        sc_tags = ["host:{0}".format(host), "port:{0}".format(port)] + list(set(tags))
        hostname = self.hostname
        report_instance_mode = instance.get("report_instance_mode", True)

        zk_version = None  # parse_stat will parse and set version string

        # Send a service check based on the `ruok` response.
        # Set instance status to down if not ok.
        try:
            ruok_out = self._send_command('ruok', *cx_args)
        except ZKConnectionFailure:
            # The server should not respond at all if it's not OK.
            status = AgentCheck.CRITICAL
            message = 'No response from `ruok` command'
            self.increment('zookeeper.timeouts')

            if report_instance_mode:
                self.report_instance_mode(hostname, 'down', tags)
            raise
        else:
            ruok_out.seek(0)
            ruok = ruok_out.readline()
            if ruok == 'imok':
                status = AgentCheck.OK
            else:
                status = AgentCheck.WARNING
            message = u'Response from the server: %s' % ruok
        finally:
            self.service_check(
                'zookeeper.ruok', status, message=message, tags=sc_tags
            )

        # Read metrics from the `stat` output.
        try:
            stat_out = self._send_command('stat', *cx_args)
        except ZKConnectionFailure:
            self.increment('zookeeper.timeouts')
            if report_instance_mode:
                self.report_instance_mode(hostname, 'down', tags)
            raise
        except Exception as e:
            self.warning(e)
            self.increment('zookeeper.datadog_client_exception')
            if report_instance_mode:
                self.report_instance_mode(hostname, 'unknown', tags)
            raise
        else:
            # Parse the response
            metrics, new_tags, mode, zk_version = self.parse_stat(stat_out)

            # Write the data
            if mode != 'inactive':
                for metric, value, m_type in metrics:
                    submit_metric = getattr(self, m_type)
                    submit_metric(metric, value, tags=tags + new_tags)

            if report_instance_mode:
                self.report_instance_mode(hostname, mode, tags)

            if expected_mode:
                if mode == expected_mode:
                    status = AgentCheck.OK
                    message = u"Server is in %s mode" % mode
                else:
                    status = AgentCheck.CRITICAL
                    message = u"Server is in %s mode but check expects %s mode"\
                              % (mode, expected_mode)
                self.service_check('zookeeper.mode', status, message=message,
                                   tags=sc_tags)

        # Read metrics from the `mntr` output
        if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
            try:
                mntr_out = self._send_command('mntr', *cx_args)
            except ZKConnectionFailure:
                self.increment('zookeeper.timeouts')
                if report_instance_mode:
                    self.report_instance_mode(hostname, 'down', tags)
                raise
            except Exception as e:
                self.warning(e)
                self.increment('zookeeper.datadog_client_exception')
                if report_instance_mode:
                    self.report_instance_mode(hostname, 'unknown', tags)
                raise
            else:
                metrics, mode = self.parse_mntr(mntr_out)
                mode_tag = "mode:%s" % mode
                if mode != 'inactive':
                    for metric, value, m_type in metrics:
                        submit_metric = getattr(self, m_type)
                        submit_metric(metric, value, tags=tags + [mode_tag])

                if report_instance_mode:
                    self.report_instance_mode(hostname, mode, tags)
Example #44
 def parse(self, vstring):
     LooseVersion.parse(self, vstring)
     self.int_version = list(filter(lambda x:isinstance(x, int),
                                    self.version))
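
A hedged sketch wrapping the parse override in a subclass (the name NumericVersion is hypothetical, not from the original source):

from distutils.version import LooseVersion

class NumericVersion(LooseVersion):
    def parse(self, vstring):
        LooseVersion.parse(self, vstring)
        self.int_version = [x for x in self.version if isinstance(x, int)]

print(NumericVersion('3.4.6-SNAPSHOT').int_version)  # [3, 4, 6]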
Example #45
    def parse_stat(self, buf):
        ''' `buf` is a readable file-like object
            returns a tuple: (metrics, tags, mode, version)
        '''
        metrics = []
        buf.seek(0)

        # Check the version line to make sure we parse the rest of the
        # body correctly. Particularly, the Connections val was added in
        # >= 3.4.4.
        start_line = buf.readline()
        match = self.version_pattern.search(start_line)
        if match is None:
            # Could not parse version from the stat command output; treat as inactive.
            return (None, None, "inactive", None)
        else:
            version = match.group()
        has_connections_val = LooseVersion(version) > LooseVersion("3.4.4")

        # Clients:
        buf.readline()  # skip the Clients: header
        connections = 0
        client_line = buf.readline().strip()
        while client_line:
            connections += 1
            client_line = buf.readline().strip()

        # Latency min/avg/max: -10/0/20007
        _, value = buf.readline().split(':')
        l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
        metrics.append(ZKMetric('zookeeper.latency.min', l_min))
        metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
        metrics.append(ZKMetric('zookeeper.latency.max', l_max))

        # Received: 101032173
        _, value = buf.readline().split(':')
        metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))

        # Sent: 1324
        _, value = buf.readline().split(':')
        metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))

        if has_connections_val:
            # Connections: 1
            _, value = buf.readline().split(':')
            metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
        else:
            # If the zk version doesn't explicitly give the Connections val,
            # use the value we computed from the client list.
            metrics.append(ZKMetric('zookeeper.connections', connections))

        # Outstanding: 0
        _, value = buf.readline().split(':')
        # Fixme: This metric name is wrong. It should be removed in a major version of the agent
        # See https://github.com/DataDog/dd-agent/issues/1383
        metrics.append(ZKMetric('zookeeper.bytes_outstanding', long(value.strip())))
        metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))

        # Zxid: 0x1034799c7
        _, value = buf.readline().split(':')
        # Parse as a 64 bit hex int
        zxid = long(value.strip(), 16)
        # convert to bytes
        zxid_bytes = struct.pack('>q', zxid)
        # the higher order 4 bytes is the epoch
        (zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
        # the lower order 4 bytes is the count
        (zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])

        metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
        metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))

        # Mode: leader
        _, value = buf.readline().split(':')
        mode = value.strip().lower()
        tags = [u'mode:' + mode]

        # Node count: 487
        _, value = buf.readline().split(':')
        metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))

        return metrics, tags, mode, version
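The zxid handling is the subtle part of parse_stat: a 64-bit transaction id whose high 32 bits are the epoch and low 32 bits are the per-epoch count. A standalone sketch using the example value from the comment above:

import struct

zxid = int('0x1034799c7', 16)           # 64-bit transaction id
zxid_bytes = struct.pack('>q', zxid)    # big-endian 8-byte encoding
(epoch,) = struct.unpack('>i', zxid_bytes[0:4])  # high-order 4 bytes
(count,) = struct.unpack('>i', zxid_bytes[4:8])  # low-order 4 bytes
print('epoch=%d count=%d' % (epoch, count))  # epoch=1 count=55024071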
예제 #46
0
파일: util.py 프로젝트: MekliCZ/positron
 def __cmp__(self, other):
     # LooseVersion checks isinstance(StringType), so work around it.
     if isinstance(other, unicode):
         other = other.encode('ascii')
     return LooseVersion.__cmp__(self, other)
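A Python 2 sketch of the gotcha this override works around (LooseVersion's __cmp__ only coerces byte strings, not unicode):

from distutils.version import LooseVersion

v = LooseVersion('10.0')
# v > u'9.5' raises AttributeError on Python 2, because unicode is not
# StringType. Encoding first, as the override above does, makes it work:
print(v > u'9.5'.encode('ascii'))  # True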
예제 #47
0
		def num(*k):
			if isinstance(k[0], int):
				return LooseVersion('.'.join([str(x) for x in k]))
			else:
				return LooseVersion(k[0])
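The helper above normalizes both calling styles; a runnable sketch (the def is repeated so the calls below stand alone):

from distutils.version import LooseVersion

def num(*k):
    if isinstance(k[0], int):
        return LooseVersion('.'.join([str(x) for x in k]))
    else:
        return LooseVersion(k[0])

print(num(1, 11, 0))                   # 1.11.0
print(num('1.11.0'))                   # 1.11.0
print(num(1, 11, 0) == num('1.11.0'))  # True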
예제 #48
0
    def __init__(self,
                 name: str,
                 address: str,
                 model: str = None,
                 timeout: float = 5.,
                 HD: bool = True,
                 terminator: str = '\n',
                 **kwargs) -> None:
        """
        Args:
            name: name of the instrument
            address: VISA resource address
            model: The instrument model. For newer firmware versions,
                this can be auto-detected
            timeout: The VISA query timeout
            HD: Does the unit have the High Definition Option (allowing
                16 bit vertical resolution)
            terminator: Command termination character to strip from VISA
                commands.
        """
        super().__init__(name=name,
                         address=address,
                         timeout=timeout,
                         terminator=terminator,
                         **kwargs)

        # With firmware versions earlier than 3.65, it seems that the
        # model number can NOT be queried from the instrument
        # (at least fails with RTO1024, fw 2.52.1.1), so in that case
        # the user must provide the model manually.
        firmware_version = self.get_idn()['firmware']

        if LooseVersion(firmware_version) < LooseVersion('3'):
            log.warning('Old firmware version detected. This driver may '
                        'not be compatible. Please upgrade your firmware.')

        if LooseVersion(firmware_version) >= LooseVersion('3.65'):
            # strip just in case there is a newline character at the end
            self.model = self.ask('DIAGnostic:SERVice:WFAModel?').strip()
            if model is not None and model != self.model:
                warnings.warn("The model number provided by the user "
                              "does not match the instrument's response."
                              " I am going to assume that this oscilloscope "
                              f"is a model {self.model}")
        else:
            if model is None:
                raise ValueError('No model number provided. Please provide '
                                 'a model number (eg. "RTO1024").')
            else:
                self.model = model

        self.HD = HD

        # Now assign model-specific values
        self.num_chans = int(self.model[-1])
        self.num_meas = 8

        self._horisontal_divs = int(self.ask('TIMebase:DIVisions?'))

        self.add_parameter('display',
                           label='Display state',
                           set_cmd='SYSTem:DISPlay:UPDate {}',
                           val_mapping={
                               'remote': 0,
                               'view': 1
                           })

        # Triggering

        self.add_parameter('trigger_display',
                           label='Trigger display state',
                           set_cmd='DISPlay:TRIGger:LINes {}',
                           get_cmd='DISPlay:TRIGger:LINes?',
                           val_mapping={
                               'ON': 1,
                               'OFF': 0
                           })

        # TODO: (WilliamHPNielsen) There are more available trigger
        # settings than implemented here. See p. 1261 of the manual
        # here we just use trigger1, which is the A-trigger

        self.add_parameter('trigger_source',
                           label='Trigger source',
                           set_cmd='TRIGger1:SOURce {}',
                           get_cmd='TRIGger1:SOURce?',
                           val_mapping={
                               'CH1': 'CHAN1',
                               'CH2': 'CHAN2',
                               'CH3': 'CHAN3',
                               'CH4': 'CHAN4',
                               'EXT': 'EXT'
                           })

        self.add_parameter('trigger_mode',
                           label='Trigger mode',
                           set_cmd='TRIGger:MODE {}',
                           get_cmd='TRIGger:MODE?',
                           vals=vals.Enum('AUTO', 'NORMAL', 'FREERUN'),
                           docstring='Sets the trigger mode which determines'
                           ' the behaviour of the instrument if no'
                           ' trigger occurs.\n'
                           'Options: AUTO, NORMAL, FREERUN.',
                           unit='none')

        self.add_parameter('trigger_type',
                           label='Trigger type',
                           set_cmd='TRIGger1:TYPE {}',
                           get_cmd='TRIGger1:TYPE?',
                           val_mapping={
                               'EDGE': 'EDGE',
                               'GLITCH': 'GLIT',
                               'WIDTH': 'WIDT',
                               'RUNT': 'RUNT',
                               'WINDOW': 'WIND',
                               'TIMEOUT': 'TIM',
                               'INTERVAL': 'INT',
                               'SLEWRATE': 'SLEW',
                               'DATATOCLOCK': 'DAT',
                               'STATE': 'STAT',
                               'PATTERN': 'PATT',
                               'ANEDGE': 'ANED',
                               'SERPATTERN': 'SERP',
                               'NFC': 'NFC',
                               'TV': 'TV',
                               'CDR': 'CDR'
                           })
        # See manual p. 1262 for an explanation of trigger types

        self.add_parameter('trigger_level',
                           label='Trigger level',
                           set_cmd=self._set_trigger_level,
                           get_cmd=self._get_trigger_level)

        self.add_parameter('trigger_edge_slope',
                           label='Edge trigger slope',
                           set_cmd='TRIGger1:EDGE:SLOPe {}',
                           get_cmd='TRIGger1:EDGE:SLOPe?',
                           vals=vals.Enum('POS', 'NEG', 'EITH'))

        # Horizontal settings

        self.add_parameter('timebase_scale',
                           label='Timebase scale',
                           set_cmd=self._set_timebase_scale,
                           get_cmd='TIMebase:SCALe?',
                           unit='s/div',
                           get_parser=float,
                           vals=vals.Numbers(25e-12, 10000))

        self.add_parameter('timebase_range',
                           label='Timebase range',
                           set_cmd=self._set_timebase_range,
                           get_cmd='TIMebase:RANGe?',
                           unit='s',
                           get_parser=float,
                           vals=vals.Numbers(250e-12, 100e3))

        self.add_parameter('timebase_position',
                           label='Horizontal position',
                           set_cmd=self._set_timebase_position,
                           get_cmd='TIMEbase:HORizontal:POSition?',
                           get_parser=float,
                           unit='s',
                           vals=vals.Numbers(-100e24, 100e24))

        # Acquisition

        # I couldn't find a way to query the run mode, so we manually keep
        # track of it. It is very important when getting the trace to make
        # sense of completed_acquisitions.
        self.add_parameter('run_mode',
                           label='Run/acquisition mode of the scope',
                           get_cmd=None,
                           set_cmd=None)

        self.run_mode('RUN CONT')

        self.add_parameter('num_acquisitions',
                           label='Number of single acquisitions to perform',
                           get_cmd='ACQuire:COUNt?',
                           set_cmd='ACQuire:COUNt {}',
                           vals=vals.Ints(1, 16777215),
                           get_parser=int)

        self.add_parameter('completed_acquisitions',
                           label='Number of completed acquisitions',
                           get_cmd='ACQuire:CURRent?',
                           get_parser=int)

        self.add_parameter('sampling_rate',
                           label='Sample rate',
                           docstring='Number of analog samples the ADC '
                           'acquires per second.',
                           unit='Sa/s',
                           get_cmd='ACQuire:POINts:ARATe?',
                           get_parser=int)

        self.add_parameter('acquisition_sample_rate',
                           label='Acquisition sample rate',
                           unit='Sa/s',
                           docstring='Recorded waveform samples per second.',
                           get_cmd='ACQuire:SRATe?',
                           set_cmd='ACQuire:SRATe {:.2f}',
                           vals=vals.Numbers(2, 20e12),
                           get_parser=float)

        # Data

        self.add_parameter('dataformat',
                           label='Export data format',
                           set_cmd='FORMat:DATA {}',
                           get_cmd='FORMat:DATA?',
                           vals=vals.Enum('ASC,0', 'REAL,32', 'INT,8',
                                          'INT,16'))

        # High definition mode (might not be available on all instruments)

        if HD:
            self.add_parameter('high_definition_state',
                               label='High definition (16 bit) state',
                               set_cmd=self._set_hd_mode,
                               get_cmd='HDEFinition:STAte?',
                               val_mapping=create_on_off_val_mapping(
                                   on_val=1, off_val=0),
                               docstring='Sets the filter bandwidth for the'
                               ' high definition mode.\n'
                               'ON: high definition mode, up to 16'
                               ' bit digital resolution\n'
                               'Options: ON, OFF\n\n'
                               'Warning/Bug: By opening the HD '
                               'acquisition menu on the scope, '
                               'this value will be set to "ON".')

            self.add_parameter('high_definition_bandwidth',
                               label='High definition mode bandwidth',
                               set_cmd='HDEFinition:BWIDth {}',
                               get_cmd='HDEFinition:BWIDth?',
                               unit='Hz',
                               get_parser=float,
                               vals=vals.Numbers(1e4, 1e9))

        self.add_parameter('error_count',
                           label='Number of errors in the error stack',
                           get_cmd='SYSTem:ERRor:COUNt?',
                           unit='#',
                           get_parser=int)

        self.add_parameter('error_next',
                           label='Next error from the error stack',
                           get_cmd='SYSTem:ERRor:NEXT?',
                           get_parser=str)

        # Add the channels to the instrument
        for ch in range(1, self.num_chans + 1):
            chan = ScopeChannel(self, f'channel{ch}', ch)
            self.add_submodule(f'ch{ch}', chan)

        for measId in range(1, self.num_meas + 1):
            measCh = ScopeMeasurement(self, f'measurement{measId}', measId)
            self.add_submodule(f'meas{measId}', measCh)

        self.add_function('stop', call_cmd='STOP')
        self.add_function('reset', call_cmd='*RST')
        self.add_parameter('opc', get_cmd='*OPC?')
        self.add_parameter('stop_opc', get_cmd='STOP;*OPC?')
        self.add_parameter('status_operation',
                           get_cmd='STATus:OPERation:CONDition?',
                           get_parser=int)
        self.add_function('run_continues', call_cmd='RUNContinous')
        # starts the shutdown of the system
        self.add_function('system_shutdown', call_cmd='SYSTem:EXIT')

        self.connect_message()
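A hypothetical instantiation of the driver above (the import path, class name and VISA address are assumptions for illustration, not taken from the snippet):

# Sketch only: address and names below are placeholders.
from qcodes.instrument_drivers.rohde_schwarz.RTO1000 import RTO1000

scope = RTO1000('scope', 'TCPIP0::192.168.1.2::inst0::INSTR', model='RTO1024')
scope.trigger_source('CH1')
scope.timebase_scale(1e-6)  # 1 us/div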
예제 #49
0
 def version_is_less_than_12(self):
     version = tmos_version(self.client)
     return LooseVersion(version) < LooseVersion('12.0.0')
예제 #50
0
def compareVersions(newVersion, oldVersion):
    return LooseVersion(newVersion) > LooseVersion(oldVersion)
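This is the main reason to reach for LooseVersion instead of comparing strings directly; a sketch with made-up version numbers (the def is repeated so the calls stand alone):

from distutils.version import LooseVersion

def compareVersions(newVersion, oldVersion):
    return LooseVersion(newVersion) > LooseVersion(oldVersion)

print(compareVersions('1.10', '1.9'))  # True: compares [1, 10] > [1, 9]
print('1.10' > '1.9')                  # False: plain lexicographic comparison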
예제 #51
0
def main(scripts, name=None, onefile=None,
         console=True, debug=None, strip=False, noupx=False, upx_exclude=None,
         runtime_tmpdir=None, pathex=None, version_file=None, specpath=None,
         bootloader_ignore_signals=False,
         datas=None, binaries=None, icon_file=None, manifest=None, resources=None, bundle_identifier=None,
         hiddenimports=None, hookspath=None, key=None, runtime_hooks=None,
         excludes=None, uac_admin=False, uac_uiaccess=False,
         win_no_prefer_redirects=False, win_private_assemblies=False,
         **kwargs):
    # If appname is not specified - use the basename of the main script as name.
    if name is None:
        name = os.path.splitext(os.path.basename(scripts[0]))[0]

    # If specpath not specified - use default value - current working directory.
    if specpath is None:
        specpath = DEFAULT_SPECPATH
    else:
        # Expand tilde to user's home directory.
        specpath = expand_path(specpath)
    # If cwd is the root directory of PyInstaller then generate .spec file
    # subdirectory ./appname/.
    if specpath == HOMEPATH:
        specpath = os.path.join(HOMEPATH, name)
    # Create directory tree if missing.
    if not os.path.exists(specpath):
        os.makedirs(specpath)

    # Append specpath to PYTHONPATH - where to look for additional Python modules.
    pathex = pathex or []
    pathex = pathex[:]
    pathex.append(specpath)

    # Handle additional EXE options.
    exe_options = ''
    if version_file:
        exe_options = "%s, version='%s'" % (exe_options, quote_win_filepath(version_file))
    if uac_admin:
        exe_options = "%s, uac_admin=%s" % (exe_options, 'True')
    if uac_uiaccess:
        exe_options = "%s, uac_uiaccess=%s" % (exe_options, 'True')
    if icon_file:
        # Icon file for Windows.
        # On Windows default icon is embedded in the bootloader executable.
        exe_options = "%s, icon='%s'" % (exe_options, quote_win_filepath(icon_file))
        # Icon file for OSX.
        # We need to encapsulate it in apostrophes.
        icon_file = "'%s'" % icon_file
    else:
        # On OSX the default icon has to be copied into the .app bundle.
        # The text value 'None' means: use the default icon.
        icon_file = 'None'

    if bundle_identifier:
        # We need to encapsulate it in apostrophes.
        bundle_identifier = "'%s'" % bundle_identifier

    if manifest:
        if "<" in manifest:
            # Assume XML string
            exe_options = "%s, manifest='%s'" % (exe_options, manifest.replace("'", "\\'"))
        else:
            # Assume filename
            exe_options = "%s, manifest='%s'" % (exe_options, quote_win_filepath(manifest))
    if resources:
        resources = list(map(quote_win_filepath, resources))
        exe_options = "%s, resources=%s" % (exe_options, repr(resources))

    hiddenimports = hiddenimports or []
    upx_exclude = upx_exclude or []

    # If file extension of the first script is '.pyw', force --windowed option.
    if is_win and os.path.splitext(scripts[0])[-1] == '.pyw':
        console = False

    # If script paths are relative, make them relative to the directory containing .spec file.
    scripts = [make_path_spec_relative(x, specpath) for x in scripts]
    # With absolute paths replace prefix with variable HOMEPATH.
    scripts = list(map(Path, scripts))

    if key:
        # Try to import PyCrypto, which we need for bytecode obfuscation,
        # and make sure its version is >= 2.4.
        try:
            import Crypto
            is_version_acceptable = LooseVersion(Crypto.__version__) >= LooseVersion('2.4')
            if not is_version_acceptable:
                logger.error('PyCrypto version must be >= 2.4, older versions are not supported.')
                sys.exit(1)
        except ImportError:
            logger.error('We need PyCrypto >= 2.4 to use byte-code obfuscation but we could not')
            logger.error('find it. You can install it with pip by running:')
            logger.error('  pip install PyCrypto')
            sys.exit(1)
        cipher_init = cipher_init_template % {'key': key}
    else:
        cipher_init = cipher_absent_template

    # Translate the default of ``debug=None`` to an empty list.
    if debug is None:
        debug = []
    # Translate the ``all`` option.
    if DEBUG_ALL_CHOICE[0] in debug:
        debug = DEBUG_ARGUMENT_CHOICES

    d = {
        'scripts': scripts,
        'pathex': pathex,
        'binaries': binaries,
        'datas': datas,
        'hiddenimports': hiddenimports,
        'name': name,
        'noarchive': 'noarchive' in debug,
        'options': [('v', None, 'OPTION')] if 'imports' in debug else [],
        'debug_bootloader': 'bootloader' in debug,
        'bootloader_ignore_signals': bootloader_ignore_signals,
        'strip': strip,
        'upx': not noupx,
        'upx_exclude': upx_exclude,
        'runtime_tmpdir': runtime_tmpdir,
        'exe_options': exe_options,
        'cipher_init': cipher_init,
        # Directory with additional custom import hooks.
        'hookspath': hookspath,
        # List with custom runtime hook files.
        'runtime_hooks': runtime_hooks or [],
        # List of modules/packages to ignore.
        'excludes': excludes or [],
        # Only Windows and Mac OS X distinguish windowed and console apps.
        'console': console,
        # Icon filename. Only OSX uses this item.
        'icon': icon_file,
        # .app bundle identifier. Only OSX uses this item.
        'bundle_identifier': bundle_identifier,
        # Windows assembly searching options
        'win_no_prefer_redirects': win_no_prefer_redirects,
        'win_private_assemblies': win_private_assemblies,
    }

    # Write down .spec file to filesystem.
    specfnm = os.path.join(specpath, name + '.spec')
    with open_file(specfnm, 'w', encoding='utf-8') as specfile:
        if onefile:
            specfile.write(text_type(onefiletmplt % d))
            # For OSX create .app bundle.
            if is_darwin and not console:
                specfile.write(text_type(bundleexetmplt % d))
        else:
            specfile.write(text_type(onedirtmplt % d))
            # For OSX create .app bundle.
            if is_darwin and not console:
                specfile.write(text_type(bundletmplt % d))

    return specfnm
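The PyCrypto gate above, distilled into a standalone check (the function name and sample versions are invented for illustration):

from distutils.version import LooseVersion

def version_acceptable(installed, minimum='2.4'):
    # Mirrors the gate above: reject anything older than the minimum.
    return LooseVersion(installed) >= LooseVersion(minimum)

print(version_acceptable('2.6.1'))  # True
print(version_acceptable('2.0.1'))  # False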
예제 #52
0
파일: version.py 프로젝트: harshach/kafka
 def __str__(self):
     if self.is_dev:
         return "dev"
     else:
         return LooseVersion.__str__(self)
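A quick sketch of the dev-aware string form (the import path and class name KafkaVersion are assumed from the file this comes from):

from kafkatest.version import KafkaVersion  # assumed import path

print(str(KafkaVersion('0.10.2.1')))  # 0.10.2.1
print(str(KafkaVersion('dev')))       # dev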
예제 #53
0
    def configure_step(self):
        """
        Configure PETSc by setting configure options and running configure script.

        Configure procedure is much more concise for older versions (< v3).
        """
        if LooseVersion(self.version) >= LooseVersion("3"):

            # compilers
            self.cfg.update('configopts', '--with-cc="%s"' % os.getenv('CC'))
            self.cfg.update('configopts', '--with-cxx="%s" --with-c++-support' % os.getenv('CXX'))
            self.cfg.update('configopts', '--with-fc="%s"' % os.getenv('F90'))

            # compiler flags
            if LooseVersion(self.version) >= LooseVersion("3.5"):
                self.cfg.update('configopts', '--CFLAGS="%s"' % os.getenv('CFLAGS'))
                self.cfg.update('configopts', '--CXXFLAGS="%s"' % os.getenv('CXXFLAGS'))
                self.cfg.update('configopts', '--FFLAGS="%s"' % os.getenv('F90FLAGS'))
            else:
                self.cfg.update('configopts', '--with-cflags="%s"' % os.getenv('CFLAGS'))
                self.cfg.update('configopts', '--with-cxxflags="%s"' % os.getenv('CXXFLAGS'))
                self.cfg.update('configopts', '--with-fcflags="%s"' % os.getenv('F90FLAGS'))

            if not self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
                self.cfg.update('configopts', '--with-gnu-compilers=0')

            # MPI
            if self.toolchain.options.get('usempi', None):
                self.cfg.update('configopts', '--with-mpi=1')

            # build options
            self.cfg.update('configopts', '--with-build-step-np=%s' % self.cfg['parallel'])
            self.cfg.update('configopts', '--with-shared-libraries=%d' % self.cfg['shared_libs'])
            self.cfg.update('configopts', '--with-debugging=%d' % self.toolchain.options['debug'])
            self.cfg.update('configopts', '--with-pic=%d' % self.toolchain.options['pic'])
            self.cfg.update('configopts', '--with-x=0 --with-windows-graphics=0')

            # PAPI support
            if self.cfg['with_papi']:
                papi_inc = self.cfg['papi_inc']
                papi_inc_file = os.path.join(papi_inc, "papi.h")
                papi_lib = self.cfg['papi_lib']
                if os.path.isfile(papi_inc_file) and os.path.isfile(papi_lib):
                    self.cfg.update('configopts', '--with-papi=1')
                    self.cfg.update('configopts', '--with-papi-include=%s' % papi_inc)
                    self.cfg.update('configopts', '--with-papi-lib=%s' % papi_lib)
                else:
                    self.log.error("PAPI header (%s) and/or lib (%s) not found, " % (papi_inc_file,
                                                                                     papi_lib) + \
                                   "can not enable PAPI support?")

            # Python extensions_step
            if get_software_root('Python'):
                self.cfg.update('configopts', '--with-numpy=1')
                if self.cfg['shared_libs']:
                    self.cfg.update('configopts', '--with-mpi4py=1')

            # FFTW, ScaLAPACK (and BLACS for older PETSc versions)
            deps = ["FFTW", "ScaLAPACK"]
            if LooseVersion(self.version) < LooseVersion("3.5"):
                deps.append("BLACS")
            for dep in deps:
                inc = os.getenv('%s_INC_DIR' % dep.upper())
                libdir = os.getenv('%s_LIB_DIR' % dep.upper())
                libs = os.getenv('%s_STATIC_LIBS' % dep.upper())
                if inc and libdir and libs:
                    with_arg = "--with-%s" % dep.lower()
                    self.cfg.update('configopts', '%s=1' % with_arg)
                    self.cfg.update('configopts', '%s-include=%s' % (with_arg, inc))
                    self.cfg.update('configopts', '%s-lib=[%s/%s]' % (with_arg, libdir, libs))
                else:
                    self.log.info("Missing inc/lib info, so not enabling %s support." % dep)

            # BLAS, LAPACK libraries
            bl_libdir = os.getenv('BLAS_LAPACK_LIB_DIR')
            bl_libs = os.getenv('BLAS_LAPACK_STATIC_LIBS')
            if bl_libdir and bl_libs:
                self.cfg.update('configopts', '--with-blas-lapack-lib=[%s/%s]' % (bl_libdir, bl_libs))
            else:
                self.log.error("One or more environment variables for BLAS/LAPACK not defined?")

            # additional dependencies
            # filter out deps handled separately
            depfilter = self.cfg.builddependencies() + ["BLACS", "BLAS", "CMake", "FFTW", "LAPACK", "numpy",
                                                        "mpi4py", "papi", "ScaLAPACK", "SuiteSparse"]

            deps = [dep['name'] for dep in self.cfg.dependencies() if not dep['name'] in depfilter]
            for dep in deps:
                if type(dep) == str:
                    dep = (dep, dep)
                deproot = get_software_root(dep[0])
                if deproot:
                    if (LooseVersion(self.version) >= LooseVersion("3.5")) and (dep[1] == "SCOTCH"):
                        withdep = "--with-pt%s" % dep[1].lower()  # --with-ptscotch is the configopt PETSc >= 3.5
                    else:
                        withdep = "--with-%s" % dep[1].lower()
                    self.cfg.update('configopts', '%s=1 %s-dir=%s' % (withdep, withdep, deproot))

            # SuiteSparse options changed in PETSc 3.5.
            suitesparse = get_software_root('SuiteSparse')
            if suitesparse:
                if LooseVersion(self.version) >= LooseVersion("3.5"):
                    withdep = "--with-suitesparse"
                    # specified order of libs matters!
                    ss_libs = ["UMFPACK", "KLU", "CHOLMOD", "BTF", "CCOLAMD", "COLAMD", "CAMD", "AMD"]

                    suitesparse_inc = [os.path.join(suitesparse, l, "Include")
                                    for l in ss_libs]
                    suitesparse_inc.append(os.path.join(suitesparse, "SuiteSparse_config"))
                    inc_spec = "-include=[%s]" % ','.join(suitesparse_inc)

                    suitesparse_libs = [os.path.join(suitesparse, l, "Lib", "lib%s.a" % l.lower())
                                    for l in ss_libs]
                    suitesparse_libs.append(os.path.join(suitesparse, "SuiteSparse_config", "libsuitesparseconfig.a"))
                    lib_spec = "-lib=[%s]" % ','.join(suitesparse_libs)
                else:
                    # CHOLMOD and UMFPACK are part of SuiteSparse (PETSc < 3.5)
                    withdep = "--with-umfpack"
                    inc_spec = "-include=%s" % os.path.join(suitesparse, "UMFPACK", "Include")
                    # specified order of libs matters!
                    umfpack_libs = [os.path.join(suitesparse, l, "Lib", "lib%s.a" % l.lower())
                                    for l in ["UMFPACK", "CHOLMOD", "COLAMD", "AMD"]]
                    lib_spec = "-lib=[%s]" % ','.join(umfpack_libs)

                self.cfg.update('configopts', ' '.join([withdep + spec for spec in ['=1', inc_spec, lib_spec]]))

            # set PETSC_DIR for configure (env) and build_step
            env.setvar('PETSC_DIR', self.cfg['start_dir'])
            self.cfg.update('buildopts', 'PETSC_DIR=%s' % self.cfg['start_dir'])

            if self.cfg['sourceinstall']:
                # run configure without --prefix (required)
                cmd = "%s ./configure %s" % (self.cfg['preconfigopts'], self.cfg['configopts'])
                (out, _) = run_cmd(cmd, log_all=True, simple=False)
            else:
                out = super(EB_PETSc, self).configure_step()

            # check for errors in configure
            error_regexp = re.compile("ERROR")
            if error_regexp.search(out):
                self.log.error("Error(s) detected in configure output!")

            if self.cfg['sourceinstall']:
                # figure out PETSC_ARCH setting
                petsc_arch_regex = re.compile(r"^\s*PETSC_ARCH:\s*(\S+)$", re.M)
                res = petsc_arch_regex.search(out)
                if res:
                    self.petsc_arch = res.group(1)
                    self.cfg.update('buildopts', 'PETSC_ARCH=%s' % self.petsc_arch)
                else:
                    self.log.error("Failed to determine PETSC_ARCH setting.")

            self.petsc_subdir = '%s-%s' % (self.name.lower(), self.version)

        else:  # old versions (< 3.x)

            self.cfg.update('configopts', '--prefix=%s' % self.installdir)
            self.cfg.update('configopts', '--with-shared=1')

            # additional dependencies
            for dep in ["SCOTCH"]:
                deproot = get_software_root(dep)
                if deproot:
                    withdep = "--with-%s" % dep.lower()
                    self.cfg.update('configopts', '%s=1 %s-dir=%s' % (withdep, withdep, deproot))

            cmd = "./config/configure.py %s" % self.get_cfg('configopts')
            run_cmd(cmd, log_all=True, simple=True)
        # For PETSc >= 3.5, make does not accept -j.
        if LooseVersion(self.version) >= LooseVersion("3.5"):
            self.cfg['parallel'] = None
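The recurring pattern in this easyblock, distilled into a standalone sketch (the function name is invented; the flag switch mirrors the compiler-flags branch above):

from distutils.version import LooseVersion

def cflags_opt(petsc_version):
    # PETSc >= 3.5 switched from --with-cflags to --CFLAGS (see above).
    if LooseVersion(petsc_version) >= LooseVersion('3.5'):
        return '--CFLAGS'
    return '--with-cflags'

print(cflags_opt('3.4.5'))  # --with-cflags
print(cflags_opt('3.7.2'))  # --CFLAGS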