Example #1
    def _save_signal_data(self, db, dbhist, analysis, dbdet, iso, m, kind):
        if not (len(m.xs) and len(m.ys)):
            self.debug('no data for {} {}'.format(iso.name, kind))
            return

        self.debug('saving data {} {} xs={}'.format(iso.name, kind, len(m.xs)))
        dbiso = db.add_isotope(analysis, iso.name, dbdet, kind=kind)
        data = ''.join([struct.pack('>ff', x, y) for x, y in zip(m.xs, m.ys)])
        db.add_signal(dbiso, data)

        add_result = kind in ('baseline', 'signal')

        if add_result:
            fod = self._get_filter_outlier_dict(iso, kind)
            m.set_filtering(fod)
            if m.fit:
                # add fit
                db.add_fit(dbhist, dbiso,
                           fit=m.fit,
                           filter_outliers=fod.get('filter_outliers', False),
                           filter_outlier_iterations=fod.get('iterations', 1),
                           filter_outlier_std_devs=fod.get('std_devs', 2))

            # add isotope result
            # print 'a',m.value, m.error, type(m.error), type(nan)
            v, e = float(m.value), float(m.error)
            v = 0 if math.isnan(v) or math.isinf(v) else v
            e = 0 if math.isnan(e) or math.isinf(e) else e

            db.add_isotope_result(dbiso,
                                  dbhist,
                                  signal_=v, signal_err=e)
Example #2
def adapt_canopsis_data_to_ember(data):
    """
    Transform canopsis data to ember data (by changing ``id`` to ``cid``).

    :param data: data to transform
    """

    if isinstance(data, dict):
        for key, item in data.iteritems():
            if isinstance(item, float) and (isnan(item) or isinf(item)):
                data[key] = None

            else:
                if isinstance(item, (tuple, frozenset)):
                    item = list(item)
                    data[key] = item

                adapt_canopsis_data_to_ember(item)

    elif isiterable(data, is_str=False):
        for i in range(len(data)):
            item = data[i]

            if isinstance(item, float) and (isnan(item) or isinf(item)):
                data[i] = None

            else:
                if isinstance(item, (tuple, frozenset)):
                    item = list(item)
                    data[i] = item

                adapt_canopsis_data_to_ember(item)
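
A minimal usage sketch for the adapter above (the payload is hypothetical; the adapter mutates it in place): NaN and inf floats become None, and tuples become lists.

payload = {"id": 1, "score": float("nan"), "tags": ("a", "b")}
adapt_canopsis_data_to_ember(payload)
assert payload["score"] is None        # nan replaced by None
assert payload["tags"] == ["a", "b"]   # tuple converted to a list
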
Example #3
 def strictly_simpler(self, x, y):
     if math.isnan(x):
         return False
     if math.isnan(y):
         return True
     if math.isinf(y) and not math.isinf(x):
         return True
     if math.isinf(x) and not math.isinf(y):
         return False
     if x < 0 and y >= 0:
         return False
     if y < 0 and x >= 0:
         return True
     if is_integral(x):
         if not is_integral(y):
             return True
         return self.int_strategy.strictly_simpler(int(x), int(y))
     if is_integral(y):
         return False
     if y > 0:
         return 0 <= x < y
     else:
         # The y == 0 case is handled by is_integral(y)
         assert y < 0
         return x > y
Example #4
def PureStrategyDominance(game, conditional=True, weak=False):
	"""
	pure-strategy dominance criterion for IEDS

	conditional==0==False --> unconditional dominance
	conditional==1==True ---> conditional dominance
	conditional==2 ---------> extra-conservative conditional dominance
	"""
	undominated = {r:set(game.strategies[r]) for r in game.roles}
	for r in game.roles:
		for dominant, dominated in product(game.strategies[r], repeat=2):
			if dominant == dominated or dominated not in undominated[r]:
				continue
			dominance_proved = False
			for profile in game:
				if dominated in profile[r]:
					reg = regret(game, profile, r, dominated, dominant)
					if reg > 0 and not isinf(reg):
						dominance_proved = True
					elif (reg < 0) or (reg == 0 and not weak) or \
							(isinf(reg) and conditional):
						dominance_proved = False
						break
				elif dominant in profile[r] and conditional > 1:
					if profile.deviate(r, dominant, dominated) not in game:
						dominance_proved = False
						break
			if dominance_proved:
				undominated[r].remove(dominated)
	return Subgame(game, undominated)
Example #5
    def test_pow(self):
        import math

        def pw(x, y):
            return x ** y
        def espeq(x, y):
            return not abs(x-y) > 1e-05
        raises(ZeroDivisionError, pw, 0.0, -1)
        assert pw(0, 0.5) == 0.0
        assert espeq(pw(4.0, 0.5), 2.0)
        assert pw(4.0, 0) == 1.0
        assert pw(-4.0, 0) == 1.0
        assert type(pw(-1.0, 0.5)) == complex
        assert pw(-1.0, 2.0) == 1.0
        assert pw(-1.0, 3.0) == -1.0
        assert pw(-1.0, 1e200) == 1.0
        if self.py26:
            assert pw(0.0, float("-inf")) == float("inf")
            assert math.isnan(pw(-3, float("nan")))
            assert math.isnan(pw(-3., float("nan")))
            assert pw(-1.0, -float('inf')) == 1.0
            assert pw(-1.0, float('inf')) == 1.0
            assert pw(float('inf'), 0) == 1.0
            assert pw(float('nan'), 0) == 1.0

            assert math.isinf(pw(-0.5, float('-inf')))
            assert math.isinf(pw(+0.5, float('-inf')))
            assert pw(-1.5, float('-inf')) == 0.0
            assert pw(+1.5, float('-inf')) == 0.0

            assert str(pw(float('-inf'), -0.5)) == '0.0'
            assert str(pw(float('-inf'), -2.0)) == '0.0'
            assert str(pw(float('-inf'), -1.0)) == '-0.0'
            assert str(pw(float('-inf'), 1.0)) == '-inf'
            assert str(pw(float('-inf'), 2.0)) == 'inf'
Example #6
def py_quadInterpolate(C,X1,X2,X3,Y1,Y2,Y3):
	resL = quadInterpolate(-1*C,X1,X2,X3,Y1,Y2,Y3)
	resH = quadInterpolate(C,X1,X2,X3,Y1,Y2,Y3)
	if math.isnan(resL) or math.isinf(resL) or math.isnan(resH) or math.isinf(resH): return " - "
	if abs(resL - 1) < 0.00001 or abs(resL - 1) > 1: return " - "
	if abs(resH - 1) < 0.00001 or abs(resH - 1) > 1: return " - "
	return " %.3f/%.3f "%(resL,resH) 
Example #7
def xsString(xc, p, source):
    if isinstance(source,bool):
        return 'true' if source else 'false'
    elif isinstance(source,float):
        if isnan(source):
            return "NaN"
        elif isinf(source):
            return "INF"
        '''
        numMagnitude = fabs(source)
        if numMagnitude < 1000000 and numMagnitude > .000001:
            # don't want floating notation which python does for more than 4 decimal places
            s = 
        '''
        s = str(source)
        if s.endswith(".0"):
            s = s[:-2]
        return s
    elif isinstance(source,Decimal):
        if isnan(source):
            return "NaN"
        elif isinf(source):
            return "INF"
        return str(source)
    elif isinstance(source,ModelValue.DateTime):
        return ('{0:%Y-%m-%d}' if source.dateOnly else '{0:%Y-%m-%dT%H:%M:%S}').format(source)
    return str(source)
Example #8
def isclose(a, b, rel_tol = 1e-09, abs_tol = 0.0):
    """
    returns True if a is close in value to b. False otherwise
    :param a: one of the values to be tested
    :param b: the other value to be tested
    :param rel_tol=1e-9: The relative tolerance -- the amount of error
                         allowed, relative to the absolute value of the
                         larger input values.
    :param abs_tol=0.0: The minimum absolute tolerance level -- useful
                        for comparisons to zero.
    NOTES:
    -inf, inf and NaN behave similarly to the IEEE 754 Standard. That
    is, NaN is not close to anything, even itself. inf and -inf are
    only close to themselves.
    The function can be used with any type that supports comparison,
    subtraction and multiplication, including Decimal, Fraction, and
    Complex
    Complex values are compared based on their absolute value.
    See PEP-0485 for a detailed description
    """
    if a == b:
        return True
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError('error tolerances must be non-negative')
    if math.isinf(abs(a)) or math.isinf(abs(b)):
        return False
    diff = abs(b - a)
    return diff <= abs(rel_tol * b) or diff <= abs(rel_tol * a) or diff <= abs_tol
Example #9
def combine_rshift(range1, range2):
    """
    Combiner for Right shift operation.

    >>> import ast
    >>> combine(Range(10, 100), Range(3, 8), ast.RShift())
    Range(low=0, high=12)
    >>> combine(Range(10, float("inf")), Range(3, 8),
    ...                       ast.RShift())
    Range(low=0, high=inf)
    >>> combine(Range(-float("inf"), 0), Range(3, 8),
    ...                       ast.RShift())
    Range(low=-inf, high=0)
    >>> combine(Range(-30, 10), Range(3, float('inf')),
    ...                       ast.RShift())
    Range(low=-4, high=1)
    """
    if range1.low <= 0:
        if isinf(range1.low):
            min_ = range1.low
        else:
            min_ = range1.low >> range2.low
    elif isinf(range2.high):
        min_ = 0
    else:
        min_ = range1.low >> range2.high
    if isinf(range1.high):
        max_ = range1.high
    elif isinf(range2.low):
        max_ = 0
    else:
        max_ = range1.high >> range2.low
    return Range(min_, max_)
Example #10
    def _encode_numbers(self, obj):
        """Returns a JSON representation of a Python number (int, float or Decimal)"""

        # strict checks first - for speed
        if obj.__class__ is int:
            if abs(obj) > JAVASCRIPT_MAXINT:
                raise ValueError("Number out of range: {!r}".format(obj))
            return str(obj)

        if obj.__class__ is float:
            if isnan(obj):
                raise ValueError("NaN is not supported")
            if isinf(obj):
                raise ValueError("Infinity is not supported")
            return repr(obj)

        # more in-depth class analysis last
        if isinstance(obj, int):
            obj = int(obj)
            if abs(obj) > JAVASCRIPT_MAXINT:
                raise ValueError("Number out of range: {!r}".format(obj))
            return str(obj)

        if isinstance(obj, float):
            if isnan(obj):
                raise ValueError("NaN is not supported")
            if isinf(obj):
                raise ValueError("Infinity is not supported")
            return repr(obj)

        if isinstance(obj, Decimal):
            return '"' + str(obj) + '"'

        # for complex and other Numbers
        return self._encode(self.default(obj))
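
For contrast, the stdlib json module is lenient by default and emits the non-standard literals this encoder rejects; passing allow_nan=False opts into the strict behavior shown above.

import json

print(json.dumps(float("nan")))   # 'NaN' -- not valid JSON
try:
    json.dumps(float("inf"), allow_nan=False)
except ValueError as exc:
    print(exc)   # Out of range float values are not JSON compliant: inf
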
Example #11
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        """
        Determine whether two floating point numbers are close in value.

        rel_tol
           maximum difference for being considered "close", relative to the
           magnitude of the input values
        abs_tol
           maximum difference for being considered "close", regardless of the
           magnitude of the input values

        Return True if a is close in value to b, and False otherwise.

        For the values to be considered close, the difference between them
        must be smaller than at least one of the tolerances.

        -inf, inf and NaN behave similarly to the IEEE 754 Standard.  That
        is, NaN is not close to anything, even itself.  inf and -inf are
        only close to themselves.
        """

        if rel_tol < 0.0 or abs_tol < 0.0:
            raise ValueError('error tolerances must be non-negative')

        if a == b:  # short-circuit exact equality
            return True
        if math.isinf(a) or math.isinf(b):
            # This includes the case of two infinities of opposite sign, or
            # one infinity and one finite number. Two infinities of opposite sign
            # would otherwise have an infinite relative tolerance.
            return False
        diff = abs(b - a)
        return (((diff <= abs(rel_tol * b)) and
                 (diff <= abs(rel_tol * a))) or
                (diff <= abs_tol))
Example #12
    def __rshift__(range1, range2):
        """
        Combiner for Right shift operation.

        >>> Interval(10, 100) >> Interval(3, 8)
        Interval(low=0, high=12)
        >>> Interval(10, float("inf")) >> Interval(3, 8)
        Interval(low=0, high=inf)
        >>> Interval(-float("inf"), 0) >> Interval(3, 8)
        Interval(low=-inf, high=0)
        >>> Interval(-30, 10) >> Interval(3, float('inf'))
        Interval(low=-4, high=1)
        """
        if range1.low <= 0:
            if isinf(range1.low):
                min_ = range1.low
            else:
                min_ = range1.low >> range2.low
        elif isinf(range2.high):
            min_ = 0
        else:
            min_ = range1.low >> range2.high
        if isinf(range1.high):
            max_ = range1.high
        elif isinf(range2.low):
            max_ = 0
        else:
            max_ = range1.high >> range2.low
        return Interval(min_, max_)
Example #13
    def toVector(longitude, latitude):
        """Converts a set of spherical coordinates to a 3-vector.

        The conversion shall not be performed by any library, to ensure
        that the test case does not duplicate the code being tested.

        Parameters
        ----------
        longitude : `Angle`
            The longitude (right ascension, azimuth, etc.) of the
            position.
        latitude : `Angle`
            The latitude (declination, elevation, etc.) of the
            position.

        Returns
        -------
        x, y, z : `number`
            Components of the unit vector representation of
            `(longitude, latitude)`
        """
        alpha = longitude.asRadians()
        delta = latitude.asRadians()
        if math.isnan(alpha) or math.isinf(alpha) or math.isnan(delta) or math.isinf(delta):
            return (nan, nan, nan)

        x = math.cos(alpha)*math.cos(delta)
        y = math.sin(alpha)*math.cos(delta)
        z = math.sin(delta)
        return (x, y, z)
Example #14
def test_floats_in_tiny_interval_within_bounds(data, center):
    assume(not (math.isinf(next_down(center)) or math.isinf(next_up(center))))
    lo = Decimal.from_float(next_down(center)).next_plus()
    hi = Decimal.from_float(next_up(center)).next_minus()
    assert float(lo) < lo < center < hi < float(hi)
    val = data.draw(st.floats(lo, hi))
    assert lo < val < hi
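
next_down and next_up here are Hypothesis internals; since Python 3.9 the stdlib expresses the same neighboring-float idea with math.nextafter, as in this minimal sketch:

import math

x = 1.0
down = math.nextafter(x, -math.inf)   # largest float strictly below x
up = math.nextafter(x, math.inf)      # smallest float strictly above x
assert down < x < up
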
Example #15
    def retreive_WMS_metadata(typename):
        workspace, layername = decodeTypeName(typename)

        # workspace is hard-coded in the importer
        url = settings.OGC_SERVER['default']['LOCATION'] + workspace+"/"
        url += layername + "/wms?request=GetCapabilities&version=1.1.1"

        get_cap_data = CreateStoryLayerThumbnailTask.request_geoserver_with_credentials(
            url)
        wms = WebMapService(url, xml=get_cap_data)

        # I found that some datasets advertise illegal bounds - fix them up
        xmin = wms[layername].boundingBoxWGS84[0]
        if math.isnan(xmin) or math.isinf(xmin) or xmin < -180:
            xmin = -180

        ymin = wms[layername].boundingBoxWGS84[1]
        if math.isnan(ymin) or math.isinf(ymin) or ymin < -90:
            ymin = -90

        xmax = wms[layername].boundingBoxWGS84[2]
        if math.isnan(xmax) or math.isinf(xmax) or xmax > 180:
            xmax = 180

        ymax = wms[layername].boundingBoxWGS84[3]
        if math.isnan(ymax) or math.isinf(ymax) or ymax > 90:
            ymax = 90

        return [xmin, ymin, xmax, ymax], wms[layername].timepositions
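
The four guard blocks above repeat one pattern; here is a sketch of it as a reusable helper (the name clamp_bound is illustrative, not part of the original code):

import math

def clamp_bound(value, limit, lower=True):
    # Replace nan/inf or out-of-range coordinates with the legal limit.
    if math.isnan(value) or math.isinf(value):
        return limit
    return max(value, limit) if lower else min(value, limit)

assert clamp_bound(float("nan"), -180) == -180   # like the xmin guard
assert clamp_bound(95.2, 90, lower=False) == 90  # like the ymax guard
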
Example #16
    def compute_logp(self, mu, r, u, c, v):
        # This function computes the log posterior probability (with the weight
        # parameter marginalized out).
        if math.isinf(self.prior_param['obs_df']):
            loglik = - np.sum((self.y_coo.data - mu) ** 2 * self.prior_param['weight'])/ 2
        else:
            loglik = - (self.prior_param['obs_df'] + 1) / 2 * np.sum(
                np.log( 1 + (self.y_coo.data - mu) ** 2 * self.prior_param['weight'] / self.prior_param['obs_df'])
            )

        r_scaled = r / self.prior_param['row_bias_scale']
        c_scaled = c / self.prior_param['col_bias_scale']
        u_scaled = u / np.tile(self.prior_param['factor_scale'], (u.shape[0], 1))
        v_scaled = v / np.tile(self.prior_param['factor_scale'], (v.shape[0], 1))
        if math.isinf(self.prior_param['param_df']):
            logp_prior = \
                - np.sum(r_scaled ** 2) / 2 + \
                - np.sum(u_scaled ** 2, (0, 1)) / 2 + \
                - np.sum(c_scaled ** 2) / 2 + \
                - np.sum(v_scaled ** 2, (0, 1)) / 2
        else:
            logp_prior = \
                - (self.prior_param['param_df'] + 1) / 2 * \
                    np.sum(np.log(1 + r_scaled ** 2 / self.prior_param['param_df'])) + \
                - (self.prior_param['param_df'] + 1) / 2 * \
                    np.sum(np.log(1 + u_scaled ** 2 / self.prior_param['param_df']), (0, 1)) + \
                - (self.prior_param['param_df'] + 1) / 2 * \
                    np.sum(np.log(1 + c_scaled ** 2 / self.prior_param['param_df'])) + \
                - (self.prior_param['param_df'] + 1) / 2 * \
                    np.sum(np.log(1 + v_scaled ** 2 / self.prior_param['param_df']), (0, 1))

        return loglik + logp_prior
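
The isinf checks on obs_df and param_df select between Student-t and Gaussian log-density terms; as the degrees of freedom grow the t term converges to the Gaussian one, which a quick numeric check (with arbitrary values) illustrates:

import math

x = 1.7
gaussian_term = x ** 2 / 2
for df in (10, 1e3, 1e6):
    t_term = (df + 1) / 2 * math.log(1 + x ** 2 / df)
    print(df, t_term, gaussian_term)   # t_term -> x**2/2 as df grows
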
Example #17
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    '''
    Python 2 implementation of Python 3.5 math.isclose()
    https://hg.python.org/cpython/file/tip/Modules/mathmodule.c#l1993
    '''
    # sanity check on the inputs
    if rel_tol < 0 or abs_tol < 0:
        raise ValueError("tolerances must be non-negative")

    # short circuit exact equality -- needed to catch two infinities of
    # the same sign. And perhaps speeds things up a bit sometimes.
    if a == b:
        return True

    # This catches the case of two infinities of opposite sign, or
    # one infinity and one finite number. Two infinities of opposite
    # sign would otherwise have an infinite relative tolerance.
    # Two infinities of the same sign are caught by the equality check
    # above.
    if math.isinf(a) or math.isinf(b):
        return False

    # equality check above would return false for nan, but we want
    # to return true
    if math.isnan(a) and math.isnan(b):
        return True

    # now do the regular computation
    # this is essentially the "weak" test from the Boost library
    diff = math.fabs(b - a)
    result = (((diff <= math.fabs(rel_tol * b)) or
               (diff <= math.fabs(rel_tol * a))) or
              (diff <= abs_tol))
    return result
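
One deliberate deviation from the Python 3.5 function this ports: math.isclose treats NaN as never close to anything, while this backport reports two NaNs as close, per the comment above.

import math

nan = float("nan")
print(math.isclose(nan, nan))   # False in the stdlib
print(isclose(nan, nan))        # True with the port above
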
Example #18
def exp(x):
    z = _make_complex(x)

    exp_special = [
        [0+0j, None, complex(0, -0.0), 0+0j, None, 0+0j, 0+0j],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nan+nanj, None, 1-0j, 1+0j, None, nan+nanj, nan+nanj],
        [nan+nanj, None, 1-0j, 1+0j, None, nan+nanj, nan+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, complex(float("nan"), -0.0), nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    if not isfinite(z):
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                ret = complex(math.copysign(inf, math.cos(z.imag)),
                              math.copysign(inf, math.sin(z.imag)))
            else:
                ret = complex(math.copysign(0, math.cos(z.imag)),
                              math.copysign(0, math.sin(z.imag)))
        else:
            ret = exp_special[_special_type(z.real)][_special_type(z.imag)]
        if math.isinf(z.imag) and (math.isfinite(z.real) or
                                   (math.isinf(z.real) and z.real > 0)):
            raise ValueError
        return ret

    if z.real > _LOG_LARGE_DOUBLE:
        ret = e * rect(math.exp(z.real - 1), z.imag)
    else:
        ret = rect(math.exp(z.real), z.imag)
    if math.isinf(ret.real) or math.isinf(ret.imag):
        raise OverflowError
    return ret
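
The stdlib cmath.exp exhibits the behavior this pure-Python port reproduces: special values follow the C99 special-value tables, and finite inputs whose result would overflow raise OverflowError.

import cmath

print(cmath.exp(complex(float("-inf"), 0.0)))   # 0j
try:
    cmath.exp(1000)
except OverflowError as exc:
    print(exc)   # math range error
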
Example #19
 def test_unusual_numbers(self):
     j = '{ "aListOfDouble": ["inf", "-inf", "nan"]}'
     stuff_read = readStuffFromJSON(j)
     self.assertEqual(len(stuff_read.aListOfDouble), 3)
     self.assertTrue(math.isinf(stuff_read.aListOfDouble[0]))
     self.assertTrue(math.isinf(stuff_read.aListOfDouble[1]))
     self.assertTrue(math.isnan(stuff_read.aListOfDouble[2]))
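
readStuffFromJSON is the project's own helper; note that Python's built-in json module likewise accepts non-finite values, though as bare literals rather than strings:

import json
import math

values = json.loads('[Infinity, -Infinity, NaN]')
assert math.isinf(values[0]) and values[0] > 0
assert math.isinf(values[1]) and values[1] < 0
assert math.isnan(values[2])
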
Example #20
def cosh(x):
    _cosh_special = [
        [inf+nanj, None, inf, complex(float("inf"), -0.0), None, inf+nanj, inf+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nan, None, 1, complex(1, -0.0), None, nan, nan],
        [nan, None, complex(1, -0.0), 1, None, nan, nan],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, nan, nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        if math.isinf(z.imag) and not math.isnan(z.real):
            raise ValueError
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                return complex(math.copysign(inf, math.cos(z.imag)),
                               math.copysign(inf, math.sin(z.imag)))
            return complex(math.copysign(inf, math.cos(z.imag)),
                           -math.copysign(inf, math.sin(z.imag)))
        return _cosh_special[_special_type(z.real)][_special_type(z.imag)]

    if abs(z.real) > _LOG_LARGE_DOUBLE:
        x_minus_one = z.real - math.copysign(1, z.real)
        ret = complex(e * math.cos(z.imag) * math.cosh(x_minus_one),
                      e * math.sin(z.imag) * math.sinh(x_minus_one))
    else:
        ret = complex(math.cos(z.imag) * math.cosh(z.real),
                      math.sin(z.imag) * math.sinh(z.real))
    if math.isinf(ret.real) or math.isinf(ret.imag):
        raise OverflowError

    return ret
Example #21
def sinh(x):

    _sinh_special = [
        [inf+nanj, None, complex(-float("inf"), -0.0), -inf, None, inf+nanj, inf+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nanj, None, complex(-0.0, -0.0), complex(-0.0, 0.0), None, nanj, nanj],
        [nanj, None, complex(0.0, -0.0), complex(0.0, 0.0), None, nanj, nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, complex(float("nan"), -0.0), nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        if math.isinf(z.imag) and not math.isnan(z.real):
            raise ValueError
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                return complex(math.copysign(inf, math.cos(z.imag)),
                               math.copysign(inf, math.sin(z.imag)))
            return complex(-math.copysign(inf, math.cos(z.imag)),
                           math.copysign(inf, math.sin(z.imag)))
        return _sinh_special[_special_type(z.real)][_special_type(z.imag)]

    if abs(z.real) > _LOG_LARGE_DOUBLE:
        x_minus_one = z.real - math.copysign(1, z.real)
        return complex(math.cos(z.imag) * math.sinh(x_minus_one) * e,
                       math.sin(z.imag) * math.cosh(x_minus_one) * e)
    return complex(math.cos(z.imag) * math.sinh(z.real),
                   math.sin(z.imag) * math.cosh(z.real))
Example #22
    def weigh_objects(self, weighed_obj_list, weight_properties):
        """Override the weigh objects.


        This override calls the parent to weigh the objects and then
        replaces any infinite weights with a value that is a multiple of the
        delta between the min and max values.

        NOTE(jecarey): the infinite weight value is only used when the
        smallest value is being favored (negative multiplier).  When the
        largest weight value is being used a weight of -1 is used instead.
        See _weigh_object method.
        """
        tmp_weights = super(CapacityWeigher, self).weigh_objects(
            weighed_obj_list, weight_properties)

        if math.isinf(self.maxval):
            # NOTE(jecarey): if all weights were infinite then parent
            # method returns 0 for all of the weights.  Thus self.minval
            # cannot be infinite at this point
            copy_weights = [w for w in tmp_weights if not math.isinf(w)]
            self.maxval = max(copy_weights)
            offset = (self.maxval - self.minval) * OFFSET_MULT
            self.maxval += OFFSET_MIN if offset == 0.0 else offset
            tmp_weights = [self.maxval if math.isinf(w) else w
                           for w in tmp_weights]

        return tmp_weights
Example #23
def parallel_line2d(m, b, d):
    b_ = None
    if math.isinf(m):  # vertical line; isinf is sign-agnostic
        b_ = d
    else:
        b_ = b + d * math.sqrt(m * m + 1)
    return m, b_
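
A quick numeric check of the formula: for y = m*x + b, the distance between the original line and the returned one is |b_ - b| / sqrt(m*m + 1), which recovers d.

import math

m, b, d = 2.0, 1.0, 3.0
m2, b2 = parallel_line2d(m, b, d)
assert m2 == m
assert math.isclose(abs(b2 - b) / math.sqrt(m * m + 1), d)
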
Example #24
	def getBoundingBox2D(self):
		"""
		 Returns the bounding box of all active process elements in their 2D coordinate system.

		@return  :
		@author
		"""
		#Helper variables
		maxX = float("-inf")
		maxY = float("-inf")
		minX = float("inf")
		minY = float("inf")
		#Now iterate over all active process components
		for active in (self.activeProcessComponents + self.messageExchanges):
			if(hasattr(active, "hasAbstractVisualRepresentation")):
				point = active.hasAbstractVisualRepresentation.getPoint2D()
				#Max tests
				if(maxX < point[0]):
					maxX = point[0]
				if(maxY < point[1]):
					maxY = point[1]
				#Min tests
				if(minX > point[0]):
					minX = point[0]
				if(minY > point[1]):
					minY = point[1]
		#inf tests
		if(math.isinf(maxX)):
			maxX = 0
			minX = 0
		if(math.isinf(maxY)):
			maxY = 0
			minY = 0
		return [[minX, minY], [maxX, maxY]]
Example #25
def lgamma(x):
    """Compute the natural logarithm of the gamma function for x."""
    if isnan(x):
        return x
    if isinf(x):
        return INFINITY
    if x == math.floor(x) and x <= 2.0:
        if x <= 0.0:
            raise ValueError("math range error")
        return 0.0
    absx = abs(x)
    if absx < 1e-20:
        return -math.log(absx)
    if x > 0.0:
        r = math.log(_lanczos_sum(x)) - _lanczos_g + (x - 0.5) * (math.log(x + _lanczos_g - 0.5) - 1)
    else:
        r = (
            math.log(math.pi)
            - math.log(abs(_sinpi(absx)))
            - math.log(absx)
            - (math.log(_lanczos_sum(absx)) - _lanczos_g + (absx - 0.5) * (math.log(absx + _lanczos_g - 0.5) - 1))
        )
    if isinf(r):
        raise OverflowError("math domain error")
    return r
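
A sanity check of the expected behavior against the stdlib math.lgamma (the version above depends on private helpers such as _lanczos_sum, so the stdlib is used here), via the identity lgamma(n) = log((n-1)!):

import math

assert math.isclose(math.lgamma(5.0), math.log(24.0))   # 4! = 24
assert math.lgamma(1.0) == 0.0 and math.lgamma(2.0) == 0.0
assert math.isinf(math.lgamma(float("inf")))
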
Example #26
 def dataToScreen(self, values):
     results = []
     if not isinstance(values,list):
         values = [values]
     for value in values:
         try:
             value = float(value)
             if math.isnan(value):
                 value = 'Missing'
             elif math.isinf(value):
                 value = 'Allele Masked'
         except (ValueError,TypeError):
             if value == variant.MISSING or value == None:
                 value = 'Missing'
             elif value == variant.ALLELE_MASKED:
                 value = 'Allele Masked'
         
         if isinstance(value,str):
             index = self.cats.get(value,len(self.cats)-1)
             if index < 0:
                 results.append(self.labelTop)
             elif index > self.latticeLength:
                 results.append(self.labelBottom)
             else:
                 results.append(self.labelTop + (index+0.5)*self.cellSize)
         else:
             results.append(self.numericPixelLow + (value-self.numericDataLow)*self.dataToPixelRatio)
     return results
Example #27
    def normalize(self):
        "It recovers the values got bad during the GML write-read cycle."
        if "normalized" in dir(self) and self.normalized:
            return
        if "type" in self.vs.attributes():
            virtual = self.vs.select(type=1)
            for vertex in virtual:
                for attr in ("priority", "filesize",
                             "section", "summary", "version"):
                    vertex[attr] = None
                if "architecture" in self.vs.attributes():
                    vertex["architecture"] = None
        if "revision" in self.attributes():
            revision = self["revision"]
            if isinstance(revision, float) and not math.isnan(revision):
                self["revision"] = int(revision)

        del self.vs["id"]
        integer_attributes = (
            ("type", self.vs),
            ("filesize", self.vs),
            ("type", self.es),
        )
        for attr, object_ in integer_attributes:
            if attr in object_.attributes():
                for item in object_:
                    value = item[attr]
                    if isinstance(value, float) and\
                            not math.isinf(value) and not math.isnan(value):
                        item[attr] = int(value)
                    elif isinstance(value, float) and (math.isinf(value) or math.isnan(value)):
                        print("The value of the {0} attribute is {1} ({2})."
                              .format(attr, value, item["name"]))
        self.normalized = True
Example #28
    def add_point(self, label, pos, setpos, dmin, dmax):
        point = dict()
        point['pos'] = int(pos)
        label = label.upper()
        # Calculate point calibration if required
        if self.has_calibration():
            # Set to current physical position if no value supplied as argument
            if math.isinf(setpos):
                point['set'] = self.motor.position
            else:
                point['set'] = float(setpos)
            # If point exists, we use current min, max values
            if label in self.keys() and math.isinf(dmin) and math.isinf(dmax):
                p = self[label]
                min_pos = point['set'] + p['set'] - p['min']
                max_pos = point['set'] + p['set'] - p['max']
            # else, new point has new calibration,
            elif math.isinf(dmin) and math.isinf(dmax):
                min_pos = point['set']
                max_pos = point['set']
            else:
                min_pos = point['set'] + dmin
                max_pos = point['set'] + dmax

            point['min'] = min_pos
            point['max'] = max_pos

        self[label] = point
        self._update()
Example #29
    def SetNuisanceBkgValue(self, nuisname, value, bkg_name, year=None):
        if self.split_bkg_by_year and year is None:
            raise Exception("ERROR: backgrounds are split by year, you must specify which year you're doing!")        
        if year not in self.years:
            raise Exception("ERROR: year {0} not in list of years!".format(year))
        if bkg_name not in self.bkg_names:
            raise Exception("ERROR: bkg {0} not in list of backgrounds!".format(bkg_name))

        fullname = self.GetFullNuisName(nuisname, year)
        fullbkg = bkg_name + (str(year) if year is not None else "")
        fullidx = self.split_bkg_names.index(fullbkg)

        if type(value)==tuple:
            if self.nuisances[fullname].type not in  ["lnN","lnU"]:
                raise Exception("ERROR: only lnN/lnU nuisances support 2-sided values!")
            if value[0]>2.05 or value[1]>2.05 or value[0]<0.3 or value[1]<0.3:
                print "WARNING: nuisance {0} has a large value {1} (year {2}). Card: {3}".format(fullname, value, year, self.name)
            if isnan(value[0]) or isinf(value[0]) or isnan(value[1]) or isinf(value[1]):
                raise Exception("ERROR: nuisance value is nan or inf for nuis {0}, background {1}, year {2}".format(nuisname, bkg_name, year))
        elif type(value)==float:
            if self.nuisances[fullname].type in ["lnN","lnU"] and (value > 2.05 or value < 0.3):
                print "WARNING: nuisance {0} has a large value {1} (year {2}). Card: {3}".format(fullname, value, year, self.name)
            if isnan(value) or isinf(value):
                raise Exception("ERROR: nuisance value is nan or inf for nuis {0}, background {1}, year {2}".format(nuisname, bkg_name, year))
        else:
            raise Exception("ERROR: value must be a float or tuple of 2 float (upper,lower) (lnN/lnU only)")

        self.nuisances[fullname].bkg_values[fullidx] = value
Example #30
def sqrt(x):
    sqrt_special = [
        [inf-infj, 0-infj, 0-infj, infj, infj, inf+infj, nan+infj],
        [inf-infj, None, None, None, None, inf+infj, nan+nanj],
        [inf-infj, None, 0-0j, 0+0j, None, inf+infj, nan+nanj],
        [inf-infj, None, 0-0j, 0+0j, None, inf+infj, nan+nanj],
        [inf-infj, None, None, None, None, inf+infj, nan+nanj],
        [inf-infj, complex(float("inf"), -0.0), complex(float("inf"), -0.0), inf, inf, inf+infj, inf+nanj],
        [inf-infj, nan+nanj, nan+nanj, nan+nanj, nan+nanj, inf+infj, nan+nanj]
    ]

    z = _make_complex(x)

    if math.isinf(z.real) or math.isinf(z.imag):
        return sqrt_special[_special_type(z.real)][_special_type(z.imag)]

    abs_x, abs_y = abs(z.real), abs(z.imag)
    if abs_x < _DBL_MIN and abs_y < _DBL_MIN:
        if abs_x > 0 or abs_y > 0:
            abs_x = math.ldexp(abs_x, _CM_SCALE_UP)
            s = math.ldexp(math.sqrt(abs_x +
                                     math.hypot(abs_x,
                                                math.ldexp(abs_y,
                                                           _CM_SCALE_UP))),
                           _CM_SCALE_DOWN)
        else:
            return complex(0, z.imag)
    else:
        abs_x /= 8
        s = 2 * math.sqrt(abs_x + math.hypot(abs_x, abs_y/8))

    if z.real >= 0:
        return complex(s, math.copysign(abs_y/(2*s), z.imag))
    return complex(abs_y/(2*s), math.copysign(s, z.imag))
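
The copysign calls on the last two lines implement the branch cut along the negative real axis; the stdlib cmath.sqrt shows the same signed-zero sensitivity:

import cmath

print(cmath.sqrt(complex(-1.0, 0.0)))    # 1j  (approached from above the cut)
print(cmath.sqrt(complex(-1.0, -0.0)))   # -1j (approached from below the cut)
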
Example #31
    def is_valid_number(x):
        return not (math.isnan(x) or math.isinf(x) or x > 1e4)

    centrality = np.array(
        [kshell, clustering, degree, strength, betweenness, closeness])

    start_node = np.random.randint(0, num_of_nodes, 50)

    infected_list = []
    for idx, node in enumerate(start_node):
        infected = SI(network, sorted_flights, node, 0.5)
        infected_list.append(infected)

    infected_list_np = np.array(infected_list)
    median_infection = []
    for i in range(num_of_nodes):
        node_simulation = infected_list_np[:, i]
        infected_count = 0
        for time in node_simulation:
            if not isinf(time):
                infected_count += 1
        print(infected_count)
        if infected_count >= 25:
            median_infection.append(np.median(node_simulation))
        else:
            # note: np.delete returns a new array, so this result is discarded
            np.delete(centrality, i, axis=1)

    y_label = 'Median infection times'
    scatter_base_path = 'scatter'
    titles = [
        'k-shell', 'Clustering', 'Degree', 'Strength', 'Betweenness',
        'Closeness'
    ]

    for i in range(6):
Example #33
def assert_stuff(m):

    # This makes sure that automatically imported builtins go after docstrings.
    assert m.__doc__ == u'This is a module docstring.'

    assert m.mystring == "foofoofoo"

    assert m.long_string == u"This is a very long string literal, which would surely exceed any limitations on how long a line or a string literal can be. The string literal alone exceeds 256 characters. It also has a character outside the Basic Multilingual Plane: 😂. Here's a double quote: \". Here are some escaped newlines:\n\n\nHere is a literal newline:\nCall me Ishmael. Some years ago—never mind how long precisely—having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world. It is a way I have of driving off the spleen and regulating the circulation. Whenever I find myself growing grim about the mouth; whenever it is a damp, drizzly November in my soul; whenever I find myself involuntarily pausing before coffin warehouses, and bringing up the rear of every funeral I meet; and especially whenever my hypos get such an upper hand of me, that it requires a strong moral principle to prevent me from deliberately stepping into the street, and methodically knocking people’s hats off—then, I account it high time to get to sea as soon as I can. This is my substitute for pistol and ball. With a philosophical flourish Cato throws himself upon his sword; I quietly take to the ship. There is nothing surprising in this. If they but knew it, almost all men in their degree, some time or other, cherish very nearly the same feelings towards the ocean with me."

    assert getattr(
        m, mangle(u"identifier-that-has☝️💯☝️-to-be-mangled")) == "ponies"

    assert m.mynumber == 3
    assert m.myhex == 0x123
    assert m.mylong - 1234567890987654321234567890987654320 == 1
    assert m.myfloat == 3.34e15
    assert math.isnan(m.mynan)
    assert math.isinf(m.pinf)
    assert m.pinf > 0
    assert math.isinf(m.ninf)
    assert m.ninf < 0
    assert math.isinf(m.mycomplex.real)
    assert m.mycomplex.real < 0
    assert m.mycomplex.imag == 5
    assert math.isnan(m.mycomplex2.real)
    assert math.isinf(m.mycomplex2.imag)
    assert m.mycomplex2.imag < 0

    assert m.num_expr == 9

    assert m.mylist == [1, 2, 3]
    assert m.mytuple == ("a", "b", "c")
    assert m.myset == {4, 5, 6}
    assert m.mydict == {7: 8, 9: 900, 10: 15}

    assert m.emptylist == []
    assert m.emptytuple == ()
    assert m.emptyset == set()
    assert m.emptydict == {}

    assert m.mylistcomp == [1, 3, 5, 7, 9]
    assert m.mysetcomp == {0, 2, 4}
    assert m.mydictcomp == dict(a="A", b="B", d="D", e="E")
    assert type(m.mygenexpr) is type((x for x in [1, 2, 3]))
    assert list(itertools.islice(m.mygenexpr, 5)) == [1, 3, 1, 3, 1]

    assert m.attr_ref is str.upper
    assert m.subscript == "l"
    assert m.myslice == "el"
    assert m.call == 5
    assert m.comparison is True
    assert m.boolexpr is True
    assert m.condexpr == "y"
    assert type(m.mylambda) is type(lambda x: x + "z")
    assert m.mylambda("a") == "az"

    assert m.augassign == 25

    assert m.delstatement == ["a", "c", "d", "e"]

    assert m.math is math
    assert m.sqrt is math.sqrt
    assert m.sine is math.sin
    import datetime
    assert m.timedelta is datetime.timedelta

    assert m.if_block == "cd"
    assert m.while_block == "xxxxe"
    assert m.cont_and_break == "xyzxyzxxyzxy"
    assert m.for_block == "fufifo"
    assert m.caught_assertion is True
    assert m.ran_finally is True
    assert m.myraise == "payload"
    assert m.ran_try_else is True

    assert type(m.fun) is type(lambda x: x)
    assert m.fun.__doc__ == "function docstring"
    assert m.funcall1 == [
        1, 2, 3, 4, ("a", "b", "c"), [("k1", "v1"), ("k2", "v2")]
    ]
    assert m.funcall2 == [7, 8, 9, 10, (11, ), [("x1", "y1"), ("x2", "y2")]]

    assert m.myret == 1
    assert m.myyield == ["a", "b", "c"]
    assert m.mydecorated.newattr == "hello"
    assert m.myglobal == 103

    class C:
        pass

    assert type(m.C1) is type(C)

    assert m.C2.__doc__ == "class docstring"
    assert issubclass(m.C2, m.C1)
    assert (m.C2.attr1, m.C2.attr2, m.C2.attr3) == (5, 6, 7)

    assert m.closed == ["v2", "v1"]
Example #34
def pack_half_1x16(f32, func_opts):
    """Component-wise function of packHalf2x16."""
    assert (isinstance(f32, float32))

    # The bit layout of a float16 is:
    #
    #   sign:     15
    #   exponent: 10:14
    #   mantissa: 0:9
    #
    # The sign, exponent, and mantissa determine its value by:
    #
    # if e = 0 and m = 0, then zero:       (-1)^s * 0
    # if e = 0 and m != 0, then subnormal: (-1)^s * 2^(e - 14) * m / 2^10
    # if 0 < e < 31, then normal:          (-1)^s * 2^(e - 15) * (1 + m / 2^10)
    # if e = 31 and m = 0, then inf:       (-1)^s * inf
    # if e = 31 and m != 0, then nan
    #
    # where 0 <= m < 2^10.
    #
    # Some key boundary values of float16 are:
    #
    #   min_normal16  = 2^(1 - 15) * (1 + 0 / 2^10)
    #   max_normal16  = 2^(30 - 15) * (1 + 1023 / 2^10)
    #
    # The maximum float16 step value is:
    #
    #   max_step16 = 2^5
    #
    # Observe that each of the above boundary values lies in the range of
    # normal float32 values. If we represent each of the above boundary values
    # in the form returned by frexpf() for normal float32 values, 2^E
    # * F where 0.5 <= F < 1, then:
    #
    #   min_normal16 = 2^(-13) * 0.5
    #   max_normal16 = 2^16 * 0.99951171875

    # The resultant float16's sign, exponent, and mantissa bits.
    s = 0
    e = 0
    m = 0

    # Calculate sign bit.
    # Use copysign() to handle the case where x is -0.0.
    if copysign(1.0, f32) < 0.0:
        s = 1

    # To reduce the number of cases in the if-tree below, decompose `abs(f32)`
    # rather than `f32`.
    (F, E) = frexp(fabs(f32))

    # The output of frexp falls into three classes:
    #   - If f32 is NaN, then F is NaN .
    #   - If f32 is ±inf, then F is ±inf .
    #   - If f32 is ±0.0, then F is ±0.0 .
    #   - Otherwise, f32 = 2^E * F where 0.5 <= F < 1.0 .
    #
    # Since we decomposed `abs(f32)`, we only need be concerned with the
    # positive cases.
    if isnan(F):
        # The resultant float16 is NaN.
        e = 31
        m = 1
    elif isinf(F):
        # The resultant float16 is infinite.
        e = 31
        m = 0
    elif F == 0:
        # f32 is zero, therefore the resultant float16 is zero.
        e = 0
        m = 0
    elif E < -13:
        # f32 lies in the range (0.0, min_normal16). Round f32 to a nearby
        # float16 value. The resultant float16 will be either zero, subnormal,
        # or normal.
        e = 0
        m = int(func_opts.round(2**(E + 24) * F))
    elif E <= 16:
        # f32 lies in the range [min_normal16, max_normal16 + max_step16).
        # Round f32 to a nearby float16 value. The resultant float16 will be
        # either normal or infinite.
        e = int(E + 14)
        m = int(func_opts.round(2**11 * F - 2**10))
    else:
        # f32 lies in the range [max_normal16 + max_step16, inf), which is
        # outside the range of finite float16 values. The resultant float16 is
        # infinite.
        e = 31
        m = 0

    if (m == 1024):
        # f32 was rounded upwards into the range of the next exponent.  This
        # correctly handles the case where f32 should be rounded up to float16
        # infinity.
        e += 1
        m = 0

    assert (s == 0 or s == 1)
    assert (0 <= e and e <= 31)
    assert (0 <= m and m <= 1023)

    return uint16((s << 15) | (e << 10) | m)
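
A spot-check of the float16 layout described in the comments, using the stdlib half-precision struct format ('e', available since Python 3.6): 1.0 encodes as s=0, e=15, m=0.

import struct

(bits,) = struct.unpack("<H", struct.pack("<e", 1.0))
assert bits == 0x3C00
assert bits >> 15 == 0             # sign
assert (bits >> 10) & 0x1F == 15   # biased exponent
assert bits & 0x3FF == 0           # mantissa
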
Example #35
    def paintEvent(self, event):
        unused(event)
        if self.isVisible():

            # Read out most important values to limit hash table lookups
            # Low-pass roll, pitch and yaw
            self.rollLP = self.roll#rollLP * 0.2f + 0.8f * roll
            self.pitchLP = self.pitch#pitchLP * 0.2f + 0.8f * pitch
            self.yawLP = self.yaw if isinf(self.yaw) == False and isnan(self.yaw) == False else self.yawLP#yawLP * 0.2f + 0.8f * yaw

            # Translate for yaw
            maxYawTrans = 60.0

            newYawDiff = self.yawDiff
            if isinf(newYawDiff):
                newYawDiff = self.yawDiff
            if newYawDiff > M_PI:
                newYawDiff = newYawDiff - M_PI

            if newYawDiff < -M_PI:
                newYawDiff = newYawDiff + M_PI

            newYawDiff = self.yawDiff * 0.8 + newYawDiff * 0.2

            self.yawDiff = newYawDiff

            self.yawInt += newYawDiff

            if self.yawInt > M_PI:
                self.yawInt = M_PI
            if self.yawInt < -M_PI:
                self.yawInt = -M_PI

            yawTrans = self.yawInt * maxYawTrans
            self.yawInt *= 0.6

            if (yawTrans < 5.0) and (yawTrans > -5.0):
                yawTrans = 0

            # Negate to correct direction
            yawTrans = -yawTrans
            yawTrans = 0
            #qDebug() << "yaw translation" << yawTrans << "integral" << yawInt << "difference" << yawDiff << "yaw" << yaw

            # And if either video or the data stream is enabled, draw the next frame.
            if self.videoEnabled:
                self.xImageFactor = self.width() / float(self.glImage.width())
                self.yImageFactor = self.height() / float(self.glImage.height())

            painter = QPainter()
            painter.begin(self)
            painter.setRenderHint(QPainter.Antialiasing, True)
            painter.setRenderHint(QPainter.HighQualityAntialiasing, True)
            pmap = QPixmap.fromImage(self.glImage).scaledToWidth(self.width())
            painter.drawPixmap(0, (self.height() - pmap.height()) / 2, pmap)

            # END OF OPENGL PAINTING

            if self.HUDInstrumentsEnabled:
                #glEnable(GL_MULTISAMPLE)
                # QT PAINTING
                #makeCurrent()

                painter.translate((self.vwidth/2.0+self.xCenterOffset)*self.scalingFactor, (self.vheight/2.0+self.yCenterOffset)*self.scalingFactor)
                # COORDINATE FRAME IS NOW (0,0) at CENTER OF WIDGET
                # Draw all fixed indicators
                # BATTERY
                self.paintText(self.fuelStatus, self.fuelColor, 6.0, (-self.vwidth/2.0) + 10, -self.vheight/2.0 + 6, painter)
                # Waypoint
                self.paintText(self.waypointName, self.defaultColor, 6.0, (-self.vwidth/3.0) + 10, +self.vheight/3.0 + 15, painter)

                linePen = QPen(Qt.SolidLine)
                linePen.setWidth(self.refLineWidthToPen(1.0))
                linePen.setColor(self.defaultColor)
                painter.setBrush(Qt.NoBrush)
                painter.setPen(linePen)

                # YAW INDICATOR
                #
                #      .
                #    .   .
                #   .......
                #
                _yawIndicatorWidth = 12.0
                _yawIndicatorY = self.vheight/2.0 - 15.0
                yawIndicator = QPolygon(4)
                yawIndicator.setPoint(0, QPoint(self.refToScreenX(0.0), self.refToScreenY(_yawIndicatorY)))
                yawIndicator.setPoint(1, QPoint(self.refToScreenX(_yawIndicatorWidth/2.0), self.refToScreenY(_yawIndicatorY+_yawIndicatorWidth)))
                yawIndicator.setPoint(2, QPoint(self.refToScreenX(-_yawIndicatorWidth/2.0), self.refToScreenY(_yawIndicatorY+_yawIndicatorWidth)))
                yawIndicator.setPoint(3, QPoint(self.refToScreenX(0.0), self.refToScreenY(_yawIndicatorY)))
                painter.drawPolyline(yawIndicator)
                painter.setPen(linePen)
                # CENTER

                # HEADING INDICATOR
                #
                #    __      __
                #       \/\/
                #
                _hIndicatorWidth = 20.0
                _hIndicatorY = -25.0
                _hIndicatorYLow = _hIndicatorY + _hIndicatorWidth / 6.0
                _hIndicatorSegmentWidth = _hIndicatorWidth / 7.0
                hIndicator = QPolygon(7)
                hIndicator.setPoint(0, QPoint(self.refToScreenX(0.0-_hIndicatorWidth/2.0), self.refToScreenY(_hIndicatorY)))
                hIndicator.setPoint(1, QPoint(self.refToScreenX(0.0-_hIndicatorWidth/2.0+_hIndicatorSegmentWidth*1.75), self.refToScreenY(_hIndicatorY)))
                hIndicator.setPoint(2, QPoint(self.refToScreenX(0.0-_hIndicatorSegmentWidth*1.0), self.refToScreenY(_hIndicatorYLow)))
                hIndicator.setPoint(3, QPoint(self.refToScreenX(0.0), self.refToScreenY(_hIndicatorY)))
                hIndicator.setPoint(4, QPoint(self.refToScreenX(0.0+_hIndicatorSegmentWidth*1.0), self.refToScreenY(_hIndicatorYLow)))
                hIndicator.setPoint(5, QPoint(self.refToScreenX(0.0+_hIndicatorWidth/2.0-_hIndicatorSegmentWidth*1.75), self.refToScreenY(_hIndicatorY)))
                hIndicator.setPoint(6, QPoint(self.refToScreenX(0.0+_hIndicatorWidth/2.0), self.refToScreenY(_hIndicatorY)))
                painter.drawPolyline(hIndicator)

                # SETPOINT
                _centerWidth = 8.0
                painter.drawEllipse(
                    QPointF(self.refToScreenX(min(10.0, self.desiredRoll * 10.0)),
                            self.refToScreenY(min(10.0, self.desiredPitch * 10.0))),
                    self.refToScreenX(_centerWidth/2.0), self.refToScreenX(_centerWidth/2.0))

                _centerCrossWidth = 20.0
                # left
                painter.drawLine(QPointF(self.refToScreenX(-_centerWidth / 2.0), self.refToScreenY(0.0)),
                                 QPointF(self.refToScreenX(-_centerCrossWidth / 2.0), self.refToScreenY(0.0)))
                # right
                painter.drawLine(QPointF(self.refToScreenX(_centerWidth / 2.0), self.refToScreenY(0.0)),
                                 QPointF(self.refToScreenX(_centerCrossWidth / 2.0), self.refToScreenY(0.0)))
                # top
                painter.drawLine(QPointF(self.refToScreenX(0.0), self.refToScreenY(-_centerWidth / 2.0)),
                                 QPointF(self.refToScreenX(0.0), self.refToScreenY(-_centerCrossWidth / 2.0)))

                # COMPASS
                _compassY = -self.vheight/2.0 + 6.0
                compassRect = QRectF(QPointF(self.refToScreenX(-12.0), self.refToScreenY(_compassY)),
                                     QSizeF(self.refToScreenX(24.0), self.refToScreenY(12.0)))
                painter.setBrush(Qt.NoBrush)
                painter.setPen(linePen)
                painter.drawRoundedRect(compassRect, 3, 3)

                # YAW is in compass-human readable format, so 0 .. 360 deg.
                _yawDeg = (self.yawLP / M_PI) * 180.0
                if _yawDeg < 0:
                    _yawDeg += 360
                if _yawDeg > 360:
                    _yawDeg -= 360
                # final safeguard for really stupid systems
                _yawAngle = '{:3d}'.format(int(_yawDeg) % 360)
                self.paintText(_yawAngle, self.defaultColor, 8.5, -9.8, _compassY + 1.7, painter)

                painter.setBrush(Qt.NoBrush)
                painter.setPen(linePen)

                # CHANGE RATE STRIPS
                self.drawChangeRateStrip(-95.0, -60.0, 40.0, -10.0, 10.0, self.zSpeed, painter)

                # CHANGE RATE STRIPS
                self.drawChangeRateStrip(95.0, -60.0, 40.0, -10.0, 10.0, self.totalAcc, painter,True)

                # GAUGES

                # Left altitude gauge
                _gaugeAltitude = self.alt if self.alt != 0 else -self.zPos

                painter.setBrush(Qt.NoBrush)
                painter.setPen(linePen)

                self.drawChangeIndicatorGauge(-self.vGaugeSpacing, 35.0, 15.0, 10.0, _gaugeAltitude, self.defaultColor, painter, False)
                self.paintText('alt m', self.defaultColor, 5.5, -73.0, 50, painter)

                # Right speed gauge
                self.drawChangeIndicatorGauge(self.vGaugeSpacing, 35.0, 15.0, 10.0, self.totalSpeed, self.defaultColor, painter, False)
                self.paintText('v m/s', self.defaultColor, 5.5, 55.0, 50, painter)

                # Waypoint name
                if self.waypointName != '':
                    self.paintText(self.waypointName, self.defaultColor, 2.0, (-self.vwidth/3.0) + 10, +self.vheight/3.0 + 15, painter)

                # MOVING PARTS
                painter.translate(self.refToScreenX(yawTrans), 0)
                attColor = painter.pen().color()

                # Draw multi-component attitude
                for key in self.attitudes:
                    att = self.attitudes[key]
                    attColor = attColor.darker(200)
                    painter.setPen(attColor)
                    # Rotate view and draw all roll-dependent indicators
                    painter.rotate((att.x()/M_PI)* -180.0)
                    painter.translate(0, (-att.y()/M_PI)* -180.0 * self.refToScreenY(1.8))
                    #qDebug() << "ROLL" << roll << "PITCH" << pitch << "YAW DIFF" << valuesDot.value("roll", 0.0)
                    # PITCH
                    self.paintPitchLines(att.y(), painter)
                    painter.translate(0, -(-att.y()/M_PI)* -180.0 * self.refToScreenY(1.8))
                    painter.rotate(-(att.x()/M_PI)* -180.0)
            painter.end()
Example #36
 def updateComponentAttitude(self, uas, timestamp, component, roll, pitch, yaw):
     unused(uas, timestamp)
     if isnan(roll) == False and isinf(roll) == False \
     and isnan(pitch) == False and isinf(pitch)== False \
     and isnan(yaw) == False and isinf(yaw) == False:
         self.attitudes[component] = QVector3D(roll, pitch*3.35, yaw) # Constant here is the 'focal length' of the projection onto the plane
Example #37
    def create_optical_system(self,
                              matdict=None,
                              options=None,
                              elementname="zmxelem"):
        """
        Creates optical system from ZEMAX file with material
        data and options.
        """
        # It is intended that matdict and options should not
        # be changed at a higher level from within this function.
        if matdict is None:
            matdict = {}
        if options is None:
            options = {}

        self.info("Creating optical system from ZMX")

        (name, _) = self.read_name_and_notes()

        optical_system = OpticalSystem.p(name=name)

        # extract surface blockstrings

        self.info("Extract surface blockstrings")
        surface_blockstrings = self.filter_block_strings("SURF")

        # construct basis coordinate system
        self.info("Construct basis coordinate system")
        lc0 = optical_system.addLocalCoordinateSystem(
            LocalCoordinates.p(name="object", decz=0.0),
            refname=optical_system.rootcoordinatesystem.name)
        elem = OpticalElement.p(lc0, name=elementname)

        # construct materials
        self.info("Construct materials")
        if matdict != {}:
            for (key, mat) in list(matdict.items()):
                mat.lc = lc0
                # different material coordinate systems are not supported
                elem.addMaterial(key, mat)
        else:
            self.info("checking for external material" +
                      "objects in dict with the following identifiers")
            found_necessary_glasses = False
            for blk in surface_blockstrings:
                surfres = self.read_surf_block(blk)
                glass_dict = surfres.get("GLAS", None)
                if glass_dict is not None:
                    self.debug(str(glass_dict))
                    material_name = glass_dict.get("name", None)
                    material_code = glass_dict.get("code", None)
                    self.debug("mat name \"%s\" mat code %s" %
                               (material_name, material_code))
                    if material_name != "MIRROR" and material_code != 1:
                        found_necessary_glasses = True
                    self.info(material_name)
            self.info("Are there necessary glasses? " +
                      str(found_necessary_glasses))
            if found_necessary_glasses:
                self.error("found material names: exiting")
                return (None, [("zmxelem", [])])
            else:
                self.info("found only mirrors or no material: continuing")

        self.info("Reading field")

        self.debug(self.read_field())

        self.info("Reading surface blocks")

        refname = lc0.name
        # lastlc = lc0
        lastmatname = None
        lastsurfname = None
        surfname = None

        surflist_for_sequence = []
        lastthickness = 0
        thickness = 0

        for blk in surface_blockstrings:
            lastthickness = thickness
            self.debug("----")
            surfres = self.read_surf_block(blk)
            self.debug("Found surface with contents (except GARR):")
            self.debug([(k, v) for (k, v) in list(surfres.items())
                        if k != "GARR"])

            surf_options_dict = {}

            # comment = surfres.get("COMM", "")
            lastsurfname = surfname
            surfname = "surf" + str(surfres["SURF"])
            thickness = surfres["DISZ"]
            curv = surfres["CURV"]
            conic = surfres.get("CONI", 0.0)
            stop = surfres["STOP"]
            surftype = surfres["TYPE"]
            parms = surfres["PARM"]
            xdat = surfres["XDAT"]

            sqap = surfres.get("SQAP", None)
            clap = surfres.get("CLAP", None)
            obdc = surfres.get("OBDC", None)

            if math.isinf(thickness):
                self.info("infinite object distance!")
                thickness = 0
            if stop:
                surf_options_dict["is_stop"] = True

            localcoordinates = optical_system.addLocalCoordinateSystem(
                LocalCoordinates.p(name=surfname, decz=lastthickness),
                refname=refname)

            read_glass = surfres.get("GLAS", None)
            self.debug("MATERIAL: %s" % (str(read_glass), ))
            matname = None
            mat = None

            if read_glass is not None:
                if read_glass["name"] == "MIRROR":
                    matname = lastmatname
                    surf_options_dict["is_mirror"] = True
                if matdict.get(read_glass["name"], None) is not None:
                    matname = read_glass["name"]
                if read_glass["code"] == 1:
                    matname = "modelglass" + str(uuid.uuid4())
                    # TODO: use global known uuid function
                    nd_value = read_glass["nd"]
                    vd_value = read_glass["vd"]
                    if abs(vd_value) < numerical_tolerance:
                        mat = ConstantIndexGlass.p(localcoordinates, nd_value)
                    else:
                        mat = ModelGlass.p(localcoordinates)
                        mat.calcCoefficientsFrom_nd_vd(nd=nd_value,
                                                       vd=vd_value)
                    elem.addMaterial(matname, mat)
            else:
                if surftype == "COORDBRK":
                    matname = lastmatname

            if obdc is None:
                lcapdec = optical_system.addLocalCoordinateSystem(
                    LocalCoordinates.p(name=surfname + "_ap"),
                    refname=surfname)
            else:
                self.info("Aperture decenter %f %f" % tuple(obdc))
                lcapdec = optical_system.addLocalCoordinateSystem(
                    LocalCoordinates.p(name=surfname + "_ap",
                                       decx=obdc[0],
                                       decy=obdc[1]),
                    refname=surfname)

            if sqap is None and clap is None:
                aper = None
            elif sqap is not None and clap is None:
                self.debug("Rectangular aperture %f x %f" % tuple(sqap))
                aper = RectangularAperture.p(lcapdec,
                                             width=sqap[0] * 2,
                                             height=sqap[1] * 2)
            elif clap is not None and sqap is None:
                self.debug("Circular aperture min %f max %f" % tuple(clap))
                aper = CircularAperture.p(lcapdec,
                                          minradius=clap[0],
                                          maxradius=clap[1])
            else:  # both are not None
                aper = None

            if surftype == "STANDARD":
                self.debug("SURFACE: Standard surface found")
                actsurf = Surface.p(localcoordinates,
                                    name=surfname,
                                    shape=Conic.p(localcoordinates,
                                                  curv=curv,
                                                  cc=conic),
                                    aperture=aper)
            elif surftype == "EVENASPH":
                self.debug("SURFACE: Polynomial asphere surface found")
                acoeffs = [parms.get(1 + i, 0.0) for i in range(8)]
                self.debug(acoeffs)
                actsurf = Surface.p(localcoordinates,
                                    name=surfname,
                                    shape=Asphere.p(localcoordinates,
                                                    curv=curv,
                                                    cc=conic,
                                                    coefficients=acoeffs),
                                    aperture=aper)
            elif surftype == "BICONICX":
                self.debug("SURFACE: biconic surface found")
                Rx = parms.get(1, 0.0)
                if abs(Rx) < 1e-16:
                    curvx = 0.0
                else:
                    curvx = 1 / Rx
                ccx = parms.get(2, 0.0)
                actsurf = Surface.p(localcoordinates,
                                    name=surfname,
                                    shape=Biconic.p(localcoordinates,
                                                    curvy=curv,
                                                    ccy=conic,
                                                    curvx=curvx,
                                                    ccx=ccx),
                                    aperture=aper)
            elif surftype == "FZERNSAG":  # Zernike Fringe Sag
                self.debug("SURFACE: Zernike standard surface found")
                # ignore extrapolate flag
                # curv, cc, parms, xdat
                acoeffs = [parms.get(1 + i, 0.0) for i in range(8)]
                extrapolate = parms.get(0, 1)
                zdecx = parms.get(9, 0.)
                zdecy = parms.get(10, 0.)
                self.debug("extrapolate: %d zdecx: %f zdecy: %f" %
                           (extrapolate, zdecx, zdecy))
                numterms = int(xdat[1]["value"])
                normradius = xdat[2]["value"]
                self.debug("numterms: %d normradius: %f" %
                           (numterms, normradius))

                zcoeffs = [
                    xdat[i + 3].get("value", 0.) for i in range(numterms)
                ]

                lcz = localcoordinates.addChild(
                    LocalCoordinates.p(name=surfname + "_zerndec",
                                       decx=zdecx,
                                       decy=zdecy))
                actsurf =\
                    Surface.p(localcoordinates,
                              name=surfname,
                              shape=LinearCombination.p(
                                  localcoordinates,
                                  list_of_coefficients_and_shapes=[
                                      (1.0,
                                       Asphere.p(
                                           localcoordinates,
                                           curv=curv,
                                           cc=conic,
                                           name=surfname +
                                           "_zernasph")),
                                      (1.0,
                                       ZernikeFringe.p(
                                           lcz,
                                           normradius=normradius,
                                           coefficients=zcoeffs,
                                           name=surfname+"_zernike"))]))

            elif surftype == "GRID_SAG":
                # TODO: conic + aspheric polynomials + zernike standard sag
                self.debug("SURFACE: Grid Sag found")
                (num_pts_x, num_pts_y, delta_x, delta_y) = surfres["GDAT"]
                self.debug("nx %d ny %d dx %f dy %f" %
                           (num_pts_x, num_pts_y, delta_x, delta_y))
                sagarray = np.array([
                    surfres["GARR"][key]
                    for key in sorted(surfres["GARR"].keys())
                ])
                self.debug(sagarray)
                xvector = np.linspace(-num_pts_x * delta_x * 0.5,
                                      num_pts_x * delta_x * 0.5, num_pts_x)
                yvector = np.linspace(-num_pts_y * delta_y * 0.5,
                                      num_pts_y * delta_y * 0.5, num_pts_y)
                zgrid = np.flipud(sagarray[:, 0].reshape(num_pts_x,
                                                         num_pts_y)).T
                # first line

                actsurf = Surface.p(localcoordinates,
                                    name=surfname,
                                    shape=GridSag.p(localcoordinates,
                                                    (xvector, yvector, zgrid)))

            elif surftype == "COORDBRK":
                # COORDBRK
                # parm1: decx
                # parm2: decy
                # parm3: rx
                # parm4: ry
                # parm5: rz
                # parm6: order 0: means 1. decx, 2. decy, 3. rx, 4. ry, 5. rz
                #         order 1: means 1. rz, 2. ry, 3. rx, 4. decy, 5. decx
                # disz: thickness (where does this appear in the transform?
                #                  step 6)
                # all in all: 1st step: transform coordinate system by former
                # disz, dec, tilt (or tilt, dec)
                # 2nd step: update vertex

                self.debug("SURFACE: Coordinate break found")
                localcoordinates.decx.set_value(parms.get(1, 0.0))
                localcoordinates.decy.set_value(parms.get(2, 0.0))
                localcoordinates.tiltx.set_value(parms.get(3, 0.0) * degree)
                localcoordinates.tilty.set_value(parms.get(4, 0.0) * degree)
                localcoordinates.tiltz.set_value(parms.get(5, 0.0) * degree)
                localcoordinates.tiltThenDecenter = bool(parms.get(6, 0))
                localcoordinates.update()
                actsurf = Surface.p(localcoordinates, name=surfname)
            else:
                actsurf = Surface.p(localcoordinates, name=surfname)

            if lastsurfname is not None:
                elem.addSurface(surfname, actsurf, (lastmatname, matname))
                self.info("addsurf: %s at material boundary %s" %
                          (surfname, (lastmatname, matname)))
                surflist_for_sequence.append((surfname, surf_options_dict))

            lastmatname = matname
            refname = localcoordinates.name

            # lastlc = localcoordinates

        optical_system.addElement(elementname, elem)
        seq = [(elementname, surflist_for_sequence)]

        self.info(optical_system.rootcoordinatesystem.pprint())

        return (optical_system, seq)
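
# A hedged aside, not part of the pyrate API above: the surface loop chains
# each LocalCoordinates to the previous surface (refname=..., decz=lastthickness),
# so absolute z positions are just accumulated DISZ thicknesses. A minimal
# plain-Python sketch of that chaining, with hypothetical thickness values:
thicknesses = [0.0, 5.0, 2.0, 10.0]  # hypothetical per-surface DISZ values
z = 0.0
positions = []
for t in thicknesses:
    positions.append(z)  # this surface sits at the accumulated offset
    z += t               # the next surface is referenced to this one
print(positions)  # [0.0, 0.0, 5.0, 7.0]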
Example #38
0
def lgamma(x):
    sgngam = 1
    if math.isnan(x):
        return x
    if math.isinf(x) and x > 0:
        return x
    if math.isinf(x) and x < 0:
        return -x
    
    if x < -34.0:
        q = -x
        w = lgamma(q)
        p = math.floor(q)
        if p == q:
            return float('inf')
        i = int(p)
        if (i & 1) == 0:
            sgngam = -1
        else:
            sgngam = 1
        z = q - p
        if z > 0.5:
            p += 1.0
            z = p - q
        z = q * math.sin(math.pi * z)
        if z == 0.0:
            return float('inf')
        z = LOGPI - math.log(z) - w
        return z
    if x < 13.0:
        z = 1.0
        p = 0.0
        u = x
        while u >= 3.0:
            p -= 1.0
            u = x + p
            z *= u
        while u < 2.0:
            if u == 0.0:
                return float('inf')
            z /= u
            p += 1.0
            u = x + p
        if z < 0.0:
            sgngam = -1
            z = -z
        else:
            sgngam = 1
        if u == 2.0:
            return math.log(z)
        p -= 2.0
        x = x + p
        p = x * polevl(x, B, 5) / polevl(x, C, 6)
        return math.log(z) + p
    if x > MAXLGM:
        return sgngam * float('inf')
    q = (x - 0.5) * math.log(x) - x + LS2PI
    if x > 1.0e8:
        return q
    p = 1.0/(x*x)
    if x >= 1000.0:
        q += ((   7.9365079365079365079365e-4 * p \
                - 2.7777777777777777777778e-3) * p \
                + 0.0833333333333333333333) / x
    else:
        q += polevl(p, A, 4) / x
    return q
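
# A hedged check, not part of the port above: the x < -34.0 branch is a
# Cephes-style reflection formula. The underlying identity
# Gamma(x) * Gamma(1 - x) = pi / sin(pi * x) can be verified in its
# log-absolute-value form against the standard library:
import math

x = -34.7
lhs = math.lgamma(x)
rhs = math.log(math.pi) - math.log(abs(math.sin(math.pi * x))) - math.lgamma(1 - x)
assert math.isclose(lhs, rhs, rel_tol=1e-9)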
Example #39
0
                psnrs.append(psnr_bicubic_up_scipy)
                #psnrs.append(psnr_bicubic_up1)
                #psnrs.append(psnr_bicubic_up_matlab)

                xlabels = ['ORG', 'CNN_UP', 'BICUBIC_UP\n(pred_from_cnn_up)',
                           'BICUBIC_UP\n(pred_from_2d)',
                           'BICUBIC_UP\n(pred_from_label)',
                           'BICUBIC_UP\n(a=-0.5)', 'BICUBIC_UP\n(scipy)', 'BICUBIC_UP\n(matlab)']

                # separate dir depending on PSNR
                # psnr diff
                #psnr_diff = psnr_bicubic_up_pred - psnr_bicubic_up
                #psnr_diff = psnr_bicubic_up - psnr_bicubic_up_scipy
                psnr_diff = psnr_pred_a_from_label - psnr_cnn_up

                if math.isinf(psnr_diff):
                    psnr_diff = 99.0

                psnr_diff_floor = math.floor(psnr_diff)
                str_psnr_diff_floor = str(psnr_diff_floor)
                output_path_psnr = os.path.join(output_path, 'group_psnr_' + str_psnr_diff_floor)

                if not os.path.exists(output_path_psnr):
                    os.makedirs(output_path_psnr)

                plot_diff(imgs, psnrs, xlabels, idx, each_frame_index, x, y, save_dir=output_path_psnr)

                # to save the raw data
                list_raw = [[each_yuv, each_frame_index, x, y,
                             best_a_from_cnn_up, psnr_cnn_up, psnr_bicubic_up_pred,
                             #psnr_bicubic_up_pred_2d,
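
# A hedged aside on why psnr_diff can be infinite: PSNR itself is infinite
# when the compared images are identical, because the MSE in its denominator
# is zero -- hence the clamp to 99.0 above. A minimal sketch (NumPy arrays,
# 8-bit peak assumed; not this project's own PSNR helper):
import math
import numpy as np

def psnr(a, b, peak=255.0):
    # PSNR = 10 * log10(peak^2 / MSE); identical inputs give MSE == 0
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    return float("inf") if mse == 0 else 10.0 * math.log10(peak ** 2 / mse)

img = np.zeros((4, 4), dtype=np.uint8)
assert math.isinf(psnr(img, img))  # zero MSE -> infinite PSNR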
Example #40
0
    def _preprocess(self, lines=None, build_layers=False, layer_callback=None):
        """Checks for imperial/relativeness settings and tool changes"""
        if not lines:
            lines = self.lines
        imperial = self.imperial
        relative = self.relative
        relative_e = self.relative_e
        current_tool = self.current_tool
        current_x = self.current_x
        current_y = self.current_y
        current_z = self.current_z
        offset_x = self.offset_x
        offset_y = self.offset_y
        offset_z = self.offset_z

        # Extrusion computation
        current_e = self.current_e
        offset_e = self.offset_e
        total_e = self.total_e
        max_e = self.max_e

        current_e_multi = self.current_e_multi[current_tool]
        offset_e_multi = self.offset_e_multi[current_tool]
        total_e_multi = self.total_e_multi[current_tool]
        max_e_multi = self.max_e_multi[current_tool]

        # Store this one out of the build_layers scope for efficiency
        cur_layer_has_extrusion = False

        # Initialize layers and other global computations
        if build_layers:
            # Bounding box computation
            xmin = float("inf")
            ymin = float("inf")
            zmin = 0
            xmax = float("-inf")
            ymax = float("-inf")
            zmax = float("-inf")
            # Also compute extrusion-only values
            xmin_e = float("inf")
            ymin_e = float("inf")
            xmax_e = float("-inf")
            ymax_e = float("-inf")

            # Duration estimation
            # TODO:
            # get device caps from firmware: max speed, acceleration/axis
            # (including extruder)
            # calculate the maximum move duration accounting for above ;)
            lastx = lasty = lastz = laste = lastf = 0.0
            lastdx = 0
            lastdy = 0
            x = y = e = f = 0.0
            currenttravel = 0.0
            moveduration = 0.0
            totalduration = 0.0
            acceleration = 2000.0  # mm/s^2
            layerbeginduration = 0.0

            # Initialize layers
            all_layers = self.all_layers = []
            all_zs = self.all_zs = set()
            layer_idxs = self.layer_idxs = []
            line_idxs = self.line_idxs = []

            layer_id = 0
            layer_line = 0

            last_layer_z = None
            prev_z = None
            prev_base_z = (None, None)
            cur_z = None
            cur_lines = []

        if self.line_class != Line:
            get_line = lambda l: Line(l.raw)
        else:
            get_line = lambda l: l
        for true_line in lines:
            # # Parse line
            # Use a heavy copy of the light line to preprocess
            line = get_line(true_line)
            split_raw = split(line)
            if line.command:
                # Update properties
                if line.is_move:
                    line.relative = relative
                    line.relative_e = relative_e
                    line.current_tool = current_tool
                elif line.command == "G20":
                    imperial = True
                elif line.command == "G21":
                    imperial = False
                elif line.command == "G90":
                    relative = False
                    relative_e = False
                elif line.command == "G91":
                    relative = True
                    relative_e = True
                elif line.command == "M82":
                    relative_e = False
                elif line.command == "M83":
                    relative_e = True
                elif line.command[0] == "T":
                    current_tool = int(line.command[1:])
                    while (current_tool + 1 > len(self.current_e_multi)):
                        self.current_e_multi += [0]
                        self.offset_e_multi += [0]
                        self.total_e_multi += [0]
                        self.max_e_multi += [0]
                current_e_multi = self.current_e_multi[current_tool]
                offset_e_multi = self.offset_e_multi[current_tool]
                total_e_multi = self.total_e_multi[current_tool]
                max_e_multi = self.max_e_multi[current_tool]

                if line.command[0] == "G":
                    parse_coordinates(line, split_raw, imperial)

                # Compute current position
                if line.is_move:
                    x = line.x
                    y = line.y
                    z = line.z

                    if line.f is not None:
                        self.current_f = line.f

                    if line.relative:
                        x = current_x + (x or 0)
                        y = current_y + (y or 0)
                        z = current_z + (z or 0)
                    else:
                        if x is not None: x = x + offset_x
                        if y is not None: y = y + offset_y
                        if z is not None: z = z + offset_z

                    if x is not None: current_x = x
                    if y is not None: current_y = y
                    if z is not None: current_z = z

                elif line.command == "G28":
                    home_all = not any([line.x, line.y, line.z])
                    if home_all or line.x is not None:
                        offset_x = 0
                        current_x = self.home_x
                    if home_all or line.y is not None:
                        offset_y = 0
                        current_y = self.home_y
                    if home_all or line.z is not None:
                        offset_z = 0
                        current_z = self.home_z

                elif line.command == "G92":
                    if line.x is not None: offset_x = current_x - line.x
                    if line.y is not None: offset_y = current_y - line.y
                    if line.z is not None: offset_z = current_z - line.z

                line.current_x = current_x
                line.current_y = current_y
                line.current_z = current_z

                # # Process extrusion
                if line.e is not None:
                    if line.is_move:
                        if line.relative_e:
                            line.extruding = line.e > 0
                            total_e += line.e
                            current_e += line.e
                            total_e_multi += line.e
                            current_e_multi += line.e
                        else:
                            new_e = line.e + offset_e
                            line.extruding = new_e > current_e
                            total_e += new_e - current_e
                            current_e = new_e
                            new_e_multi = line.e + offset_e_multi
                            total_e_multi += new_e_multi - current_e_multi
                            current_e_multi = new_e_multi

                        max_e = max(max_e, total_e)
                        max_e_multi = max(max_e_multi, total_e_multi)
                        cur_layer_has_extrusion |= line.extruding
                    elif line.command == "G92":
                        offset_e = current_e - line.e
                        offset_e_multi = current_e_multi - line.e

                self.current_e_multi[current_tool] = current_e_multi
                self.offset_e_multi[current_tool] = offset_e_multi
                self.max_e_multi[current_tool] = max_e_multi
                self.total_e_multi[current_tool] = total_e_multi

                # # Create layers and perform global computations
                if build_layers:
                    # Update bounding box
                    if line.is_move:
                        if line.extruding:
                            if line.current_x is not None:
                                xmin_e = min(xmin_e, line.current_x)
                                xmax_e = max(xmax_e, line.current_x)
                            if line.current_y is not None:
                                ymin_e = min(ymin_e, line.current_y)
                                ymax_e = max(ymax_e, line.current_y)
                        if max_e <= 0:
                            if line.current_x is not None:
                                xmin = min(xmin, line.current_x)
                                xmax = max(xmax, line.current_x)
                            if line.current_y is not None:
                                ymin = min(ymin, line.current_y)
                                ymax = max(ymax, line.current_y)

                    # Compute duration
                    if line.command == "G0" or line.command == "G1":
                        x = line.x if line.x is not None else lastx
                        y = line.y if line.y is not None else lasty
                        z = line.z if line.z is not None else lastz
                        e = line.e if line.e is not None else laste
                        # mm/s vs mm/min => divide by 60
                        f = line.f / 60.0 if line.f is not None else lastf

                        # given last feedrate and current feedrate calculate the
                        # distance needed to achieve current feedrate.
                        # if travel is longer than req'd distance, then subtract
                        # distance to achieve full speed, and add the time it took
                        # to get there.
                        # then calculate the time taken to complete the remaining
                        # distance

                        # FIXME: this code has been proven to be super wrong when 2
                        # subsequent moves are in opposite directions, as requested
                        # speed is constant but printer has to fully decelerate
                        # and reaccelerate
                        # The following code tries to fix it by forcing a full
                        # reacceleration if this move is in the opposite direction
                        # of the previous one
                        dx = x - lastx
                        dy = y - lasty
                        if dx * lastdx + dy * lastdy <= 0:
                            lastf = 0

                        currenttravel = math.hypot(dx, dy)
                        if currenttravel == 0:
                            if line.z is not None:
                                currenttravel = abs(
                                    line.z) if line.relative else abs(line.z -
                                                                      lastz)
                            elif line.e is not None:
                                currenttravel = abs(
                                    line.e) if line.relative_e else abs(
                                        line.e - laste)
                        # Feedrate hasn't changed, no acceleration/deceleration planned
                        if f == lastf:
                            moveduration = currenttravel / f if f != 0 else 0.
                        else:
                            # FIXME: review this better
                            # this looks wrong : there's little chance that the feedrate we'll decelerate to is the previous feedrate
                            # shouldn't we instead look at three consecutive moves ?
                            distance = 2 * abs(
                                ((lastf + f) *
                                 (f - lastf) * 0.5) / acceleration
                            )  # multiply by 2 because we have to accelerate and decelerate
                            if distance <= currenttravel and lastf + f != 0 and f != 0:
                                moveduration = 2 * distance / (
                                    lastf + f
                                )  # This is distance / mean(lastf, f)
                                moveduration += (currenttravel - distance) / f
                            else:
                                moveduration = 2 * currenttravel / (
                                    lastf + f
                                )  # This is currenttravel / mean(lastf, f)
                                # FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
                                # moveduration = math.sqrt(2 * distance / acceleration) # probably buggy : not taking actual travel into account

                        lastdx = dx
                        lastdy = dy

                        totalduration += moveduration

                        lastx = x
                        lasty = y
                        lastz = z
                        laste = e
                        lastf = f
                    elif line.command == "G4":
                        moveduration = P(line)
                        if moveduration:
                            moveduration /= 1000.0
                            totalduration += moveduration

                    # FIXME : looks like this needs to be tested with "lift Z on move"
                    if line.z is not None:
                        if line.command == "G92":
                            cur_z = line.z
                        elif line.is_move:
                            if line.relative and cur_z is not None:
                                cur_z += line.z
                            else:
                                cur_z = line.z

                    # FIXME: the logic behind this code seems to work, but it might be
                    # broken
                    if cur_z != prev_z:
                        if prev_z is not None and last_layer_z is not None:
                            offset = self.est_layer_height if self.est_layer_height else 0.01
                            if abs(prev_z - last_layer_z) < offset:
                                if self.est_layer_height is None:
                                    zs = sorted([
                                        l.z for l in all_layers
                                        if l.z is not None
                                    ])
                                    heights = [
                                        round(zs[i + 1] - zs[i], 3)
                                        for i in range(len(zs) - 1)
                                    ]
                                    heights = [
                                        height for height in heights if height
                                    ]
                                    if len(heights) >= 2:
                                        self.est_layer_height = heights[1]
                                    elif heights:
                                        self.est_layer_height = heights[0]
                                    else:
                                        self.est_layer_height = 0.1
                                base_z = round(
                                    prev_z - (prev_z % self.est_layer_height),
                                    2)
                            else:
                                base_z = round(prev_z, 2)
                        else:
                            base_z = prev_z

                        if base_z != prev_base_z:
                            new_layer = Layer(cur_lines, base_z)
                            new_layer.duration = totalduration - layerbeginduration
                            layerbeginduration = totalduration
                            all_layers.append(new_layer)
                            if cur_layer_has_extrusion and prev_z not in all_zs:
                                all_zs.add(prev_z)
                            cur_lines = []
                            cur_layer_has_extrusion = False
                            layer_id += 1
                            layer_line = 0
                            last_layer_z = base_z
                            if layer_callback is not None:
                                layer_callback(self, len(all_layers) - 1)

                        prev_base_z = base_z

            if build_layers:
                cur_lines.append(true_line)
                layer_idxs.append(layer_id)
                line_idxs.append(layer_line)
                layer_line += 1
                prev_z = cur_z
            # ## Loop done

        # Store current status
        self.imperial = imperial
        self.relative = relative
        self.relative_e = relative_e
        self.current_tool = current_tool
        self.current_x = current_x
        self.current_y = current_y
        self.current_z = current_z
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.offset_z = offset_z
        self.current_e = current_e
        self.offset_e = offset_e
        self.max_e = max_e
        self.total_e = total_e
        self.current_e_multi[current_tool] = current_e_multi
        self.offset_e_multi[current_tool] = offset_e_multi
        self.max_e_multi[current_tool] = max_e_multi
        self.total_e_multi[current_tool] = total_e_multi

        # Finalize layers
        if build_layers:
            if cur_lines:
                new_layer = Layer(cur_lines, prev_z)
                new_layer.duration = totalduration - layerbeginduration
                layerbeginduration = totalduration
                all_layers.append(new_layer)
                if cur_layer_has_extrusion and prev_z not in all_zs:
                    all_zs.add(prev_z)

            self.append_layer_id = len(all_layers)
            self.append_layer = Layer([])
            self.append_layer.duration = 0
            all_layers.append(self.append_layer)
            self.layer_idxs = array('I', layer_idxs)
            self.line_idxs = array('I', line_idxs)

            # Compute bounding box
            all_zs = self.all_zs.union(set([zmin])).difference(set([None]))
            zmin = min(all_zs)
            zmax = max(all_zs)

            self.filament_length = self.max_e
            while len(self.filament_length_multi) < len(self.max_e_multi):
                self.filament_length_multi += [0]
            for i, e_multi in enumerate(self.max_e_multi):
                self.filament_length_multi[i] = e_multi

            if self.filament_length > 0:
                self.xmin = xmin_e if not math.isinf(xmin_e) else 0
                self.xmax = xmax_e if not math.isinf(xmax_e) else 0
                self.ymin = ymin_e if not math.isinf(ymin_e) else 0
                self.ymax = ymax_e if not math.isinf(ymax_e) else 0
            else:
                self.xmin = xmin if not math.isinf(xmin) else 0
                self.xmax = xmax if not math.isinf(xmax) else 0
                self.ymin = ymin if not math.isinf(ymin) else 0
                self.ymax = ymax if not math.isinf(ymax) else 0
            self.zmin = zmin if not math.isinf(zmin) else 0
            self.zmax = zmax if not math.isinf(zmax) else 0
            self.width = self.xmax - self.xmin
            self.depth = self.ymax - self.ymin
            self.height = self.zmax - self.zmin

            # Finalize duration
            totaltime = datetime.timedelta(seconds=int(totalduration))
            self.duration = totaltime
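
# A hedged standalone sketch of the duration model used above, not printrun's
# own API: the distance needed to change speed from v0 to v1 at acceleration a
# is (v1**2 - v0**2) / (2 * a), doubled for the matching deceleration; any
# remaining travel happens at the requested speed.
def estimate_move_duration(travel, lastf, f, acceleration=2000.0):
    if f == lastf:
        return travel / f if f else 0.0
    distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration)
    if distance <= travel and lastf + f != 0 and f != 0:
        # ramps run at the mean speed, the remainder at the requested speed
        return 2 * distance / (lastf + f) + (travel - distance) / f
    return 2 * travel / (lastf + f)  # whole move spent ramping

print(estimate_move_duration(100.0, 0.0, 50.0))  # hypothetical 100 mm move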
Example #41
0
def train_model(model):
    args = model.args
    unchanged, best_dev = 0, 200

    unroll_size = args.unroll_size
    batch_size = args.batch_size
    criterion = nn.CrossEntropyLoss(size_average=False)

    trainer = SGD(model.parameters(),
                  lr=args.lr,
                  weight_decay=args.weight_decay)

    map_to_ids = model.embedding_layer.map_to_ids
    train = read_corpus(args.train, shuffle=False)
    train = create_batches(train, map_to_ids, args.batch_size, cuda=args.gpu)
    dev = read_corpus(args.dev)
    dev = create_batches(dev, map_to_ids, 1, cuda=args.gpu)
    test = read_corpus(args.test)
    test = create_batches(test, map_to_ids, 1, cuda=args.gpu)
    for epoch in range(args.max_epoch):

        start_time = time.time()

        N = (len(train[0]) - 1) // unroll_size + 1
        hidden = model.init_hidden(batch_size)
        total_loss, cur_loss = 0.0, 0.0
        for i in range(N):
            model.train()
            x = train[0][i * unroll_size:(i + 1) * unroll_size]
            y = train[1][i * unroll_size:(i + 1) * unroll_size].view(-1)

            x, y = Variable(x), Variable(y)
            model.zero_grad()
            output, hidden = model(x, hidden)
            hidden = repackage_hidden(args, hidden)
            assert x.size(1) == batch_size
            loss = criterion(output, y) / x.size(1)
            loss.backward()
            if math.isnan(loss.data[0]) or math.isinf(loss.data[0]):
                print("nan/inf loss encoutered in training.")
                sys.exit(0)
                return
            total_loss += loss.data[0] / x.size(0)
            cur_loss += loss.data[0] / x.size(0)
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)
            for p in model.parameters():
                if not p.requires_grad:
                    continue
                if p.grad is not None:
                    if args.weight_decay > 0:
                        p.data.mul_(1.0 - args.weight_decay)
                    p.data.add_(-args.lr, p.grad.data)

            if (i + 1) % args.eval_ite == 0:
                dev_ppl = eval_model(model, dev)
                sys.stdout.write(
                    "| Epoch={} | ite={} | lr={:.4f} | train_ppl={:.2f} | dev_ppl={:.2f} |"
                    "\n".format(epoch, i + 1, trainer.defaults["lr"],
                                np.exp(cur_loss / args.eval_ite), dev_ppl))
                model.print_pnorm()
                sys.stdout.flush()
                cur_loss = 0.0

                if dev_ppl < best_dev:
                    unchanged = 0
                    best_dev = dev_ppl
                    test_ppl = eval_model(model, test)
                    sys.stdout.write(
                        "\t[eval]  test_ppl={:.2f}\n".format(test_ppl))
                    sys.stdout.flush()

        train_ppl = np.exp(total_loss / N)
        dev_ppl = eval_model(model, dev)

        sys.stdout.write("-" * 89 + "\n")
        sys.stdout.write(
            "| End of epoch {} | lr={:.4f} | train_ppl={:.2f} | dev_ppl={:.2f} |"
            "[{:.2f}m] |\n".format(epoch, trainer.defaults["lr"], train_ppl,
                                   dev_ppl, (time.time() - start_time) / 60.0))
        sys.stdout.write("-" * 89 + "\n")
        model.print_pnorm()
        sys.stdout.flush()

        if dev_ppl < best_dev:
            unchanged = 0
            best_dev = dev_ppl
            start_time = time.time()
            test_ppl = eval_model(model, test)
            sys.stdout.write("\t[eval]  test_ppl={:.2f}\t[{:.2f}m]\n".format(
                test_ppl, (time.time() - start_time) / 60.0))
            sys.stdout.flush()
        else:
            unchanged += 1
        if args.lr_decay_epoch > 0 and epoch >= args.lr_decay_epoch:
            args.lr *= args.lr_decay
        if unchanged >= args.patience:
            sys.stdout.write(
                "Reached " + str(args.patience) +
                " iterations without improving dev loss. Reducing learning rate."
            )
            args.lr /= 2
            unchanged = 0
        trainer.defaults["lr"] = args.lr
        sys.stdout.write("\n")
    return
Example #42
0
    def train_step(self, samples, dummy_batch=False, raise_oom=False):
        """Do forward, backward and parameter update."""
        if self._dummy_batch is None:
            self._dummy_batch = samples[0]

        self._set_seed()
        self.model.train()
        self.criterion.train()
        self.zero_grad()

        if not dummy_batch:
            self.meters['train_wall'].start()

        # forward and backward pass
        logging_outputs, sample_sizes, ooms = [], [], 0
        for i, sample in enumerate(samples):
            sample = self._prepare_sample(sample)
            if sample is None:
                # when sample is None, run forward/backward on a dummy batch
                # and ignore the resulting gradients
                sample = self._prepare_sample(self._dummy_batch)
                ignore_grad = True
            else:
                ignore_grad = False

            def maybe_no_sync():
                """
                Whenever *samples* contains more than one mini-batch, we
                want to accumulate gradients locally and only call
                all-reduce in the last backwards pass.
                """
                if (self.args.distributed_world_size > 1
                        and hasattr(self.model, 'no_sync')
                        and i < len(samples) - 1):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            try:
                with maybe_no_sync():
                    # forward and backward
                    loss, sample_size, logging_output = self.task.train_step(
                        sample, self.model, self.criterion, self.optimizer,
                        ignore_grad)

                if not ignore_grad:
                    logging_outputs.append(logging_output)
                    sample_sizes.append(sample_size)

                    if self.fast_stat_sync:
                        self._all_reduce_list[0] += sample_size
                        self._all_reduce_list[1] += logging_output.get(
                            'nsentences', 0.0)
                        self._all_reduce_list[2] += logging_output.get(
                            'loss', 0.0)
                        self._all_reduce_list[3] += logging_output.get(
                            'nll_loss', 0.0)
                        self._all_reduce_list[4] += logging_output.get(
                            'ntokens', 0.0)
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    msg = ('| WARNING: ran out of memory with exception: ' +
                           '{};'.format(e) + '\n Skipping batch')
                    # TODO: print should really go to logger, this print goes
                    # to stderr, which is buffered, which in many cases is not
                    # printed out if another exception happens.
                    # NB(jerry): added a flush to mitigate this
                    print(msg, file=sys.stderr)
                    if torch.cuda.is_available() and hasattr(
                            torch.cuda, "memory_summary"):
                        for device_idx in range(torch.cuda.device_count()):
                            print(torch.cuda.memory_summary(
                                device=torch.cuda.device(device_idx)),
                                  file=sys.stderr)
                    sys.stderr.flush()

                    if raise_oom:
                        raise ValueError(msg)
                    ooms += 1
                    self.zero_grad()
                else:
                    raise e

            if self.fast_stat_sync:
                self._all_reduce_list[5] += ooms

        if ooms > 0 and self._oom_batch is not None:
            self.handle_ooms(ooms)

        if dummy_batch:
            return None

        # gather logging outputs from all replicas
        if self.fast_stat_sync:
            # rework all_gather_list
            all_reduce_list_tensor = torch.cuda.DoubleTensor(
                self._all_reduce_list)
            if self._sync_stats():
                torch.distributed.all_reduce(all_reduce_list_tensor)
            # Normalize loss and nll_loss by "sample_size"
            # and convert to log base 2
            all_reduce_list_tensor[2:4].div_(
                (all_reduce_list_tensor[0:1] *
                 torch.log(torch.cuda.DoubleTensor([2]))))
            self._all_reduce_list = all_reduce_list_tensor.tolist()
            logging_output = {}
            [
                sample_size,
                logging_output['nsentences'],
                logging_output['loss'],
                logging_output['nll_loss'],
                logging_output['ntokens'],
                ooms,
            ] = self._all_reduce_list
        elif self._sync_stats():
            logging_outputs, sample_sizes, ooms, prev_norms = \
                zip(*distributed_utils.all_gather_list(
                    [logging_outputs, sample_sizes, ooms, self._prev_grad_norm],
                ))
            logging_outputs = list(chain.from_iterable(logging_outputs))
            sample_sizes = list(chain.from_iterable(sample_sizes))
            ooms = sum(ooms)

            if not self.args.use_bmuf:
                assert (
                    all(norm == prev_norms[0] for norm in prev_norms) or all(
                        math.isnan(norm) or math.isinf(norm)
                        for norm in prev_norms)
                ), 'Fatal error: gradients are inconsistent between workers'

        self.meters['oom'].update(ooms, len(samples))
        if ooms == self.args.distributed_world_size * len(samples):
            print('| WARNING: OOM in all workers, skipping update')
            self.zero_grad()
            return None

        if not self.fast_stat_sync:
            # aggregate logging outputs and sample sizes
            logging_output = self.task.aggregate_logging_outputs(
                logging_outputs, self.get_criterion())
            sample_size = self.task.grad_denom(sample_sizes,
                                               self.get_criterion())

        if not all(k in logging_output for k in ['ntokens', 'nsentences']):
            raise Exception(
                ('Please update the {}.aggregate_logging_outputs() method to '
                 'return ntokens and nsentences').format(
                     self.task.__class__.__name__))

        try:
            # normalize grads by sample size
            if sample_size > 0:
                self.optimizer.multiply_grads(
                    self.args.distributed_world_size / float(sample_size))

            # clip grads
            grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
            self._prev_grad_norm = grad_norm

            # take an optimization step
            self.optimizer.step()
            self.set_num_updates(self.get_num_updates() + 1)

            # task specific update per step
            self.task.update_step(self._num_updates)

            # update meters
            ntokens = logging_output.get('ntokens', 0)
            nsentences = logging_output.get('nsentences', 0)
            self.meters['wps'].update(ntokens)
            self.meters['ups'].update(1.)
            self.meters['wpb'].update(ntokens)
            self.meters['bsz'].update(nsentences)
            self.meters['gnorm'].update(grad_norm)
            self.meters['clip'].update(1. if grad_norm > self.args.clip_norm
                                       and self.args.clip_norm > 0 else 0.)
            self.meters['train_loss'].update(logging_output.get('loss', 0),
                                             sample_size)
            if 'train_acc' in self.meters:
                self.meters['train_acc'].update(logging_output.get('acc', 0),
                                                sample_size)

            if 'nll_loss' in logging_output:
                self.meters['train_nll_loss'].update(
                    logging_output.get('nll_loss', 0), ntokens)

            # clear CUDA cache to reduce memory fragmentation
            if (self.args.empty_cache_freq > 0 and
                ((self.get_num_updates() + self.args.empty_cache_freq - 1) %
                 self.args.empty_cache_freq) == 0
                    and torch.cuda.is_available() and not self.args.cpu):
                torch.cuda.empty_cache()
        except OverflowError as e:
            print('| WARNING: overflow detected, ' + str(e))
            self.zero_grad()
            logging_output = None

        if self.args.fp16:
            self.meters['loss_scale'].reset()
            self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)

        self.clear_buffered_stats()
        self.meters['train_wall'].stop()

        return logging_output
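
# A hedged toy sketch of the maybe_no_sync() pattern above, with a stand-in
# model rather than fairseq's Trainer or a real DistributedDataParallel:
# suppress gradient all-reduce on every micro-batch except the last one.
import contextlib

class FakeDDPModel:
    @contextlib.contextmanager
    def no_sync(self):
        print("all-reduce suppressed for this micro-batch")
        yield

def maybe_no_sync(model, i, num_batches, world_size):
    if world_size > 1 and hasattr(model, "no_sync") and i < num_batches - 1:
        return model.no_sync()
    return contextlib.ExitStack()  # dummy contextmanager, as above

model = FakeDDPModel()
for i in range(3):
    with maybe_no_sync(model, i, num_batches=3, world_size=2):
        pass  # forward/backward for micro-batch i would go here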
Example #43
0
print(1e100 == float('inf'))
# False

print(float('inf') == math.inf == np.inf)
# True

print(1e1000 == math.inf)
# True

print(1e100 == math.inf)
# False

print(float('inf') == float('inf') * 100)
# True

print(math.isinf(1e1000))
# True

print(math.isinf(1e100))
# False

print(math.isinf(-1e1000))
# True

a = np.array([1, np.inf, -np.inf])
print(a)
# [  1.  inf -inf]

print(np.isinf(a))
# [False  True  True]
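
# A related edge worth noting: arithmetic on infinities can yield NaN, which
# math.isinf() does not flag; math.isnan() or math.isfinite() is needed.
nan = float('inf') - float('inf')  # inf - inf is NaN, not inf

print(math.isinf(nan))
# False

print(math.isnan(nan))
# True

print(math.isfinite(nan))
# False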
Example #44
0
                        best_node_mean='NaN',
                        best_node_sd='NaN',
                        linear_discr_mean='NaN',
                        linear_discr_sd='NaN',
                        naive_bayes_mean='NaN',
                        naive_bayes_sd='NaN'
                    WHERE id={};
                    '''.format(id)
                conn.execute(update_statement)
                continue
            if 'nr_class' not in mf:
                print('Calculation failed.')
                continue

            for key, value in mf.items():
                if math.isinf(value):
                    if value > 0:
                        mf[key] = sys.maxsize
                    else:
                        mf[key] = -sys.maxsize
                if np.isnan(value):
                    store = False
                    break

            if not store:
                print('Skipping {} due to missing value in {}'.format(id, key))
                continue

            update_statement = '''
                UPDATE datasets SET 
                    nr_inst={nr_inst},
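
# A hedged restatement of the clamping policy above as a standalone helper
# (hypothetical field names): infinities become +/-sys.maxsize so they fit
# into the database, and any NaN causes the whole row to be skipped.
import math
import sys

def sanitize(mf):
    out = {}
    for key, value in mf.items():
        if math.isinf(value):
            value = sys.maxsize if value > 0 else -sys.maxsize
        if math.isnan(value):
            return None  # caller skips this row
        out[key] = value
    return out

print(sanitize({'nr_inst': 150.0, 'skewness': float('inf')}))
print(sanitize({'kurtosis': float('nan')}))  # None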
Example #45
0
    def _get_factors(self, output_desired, t_upper):
        """
        Apply the continued fractions to find r and the gcd to find the desired factors.
        """
        x_value = int(output_desired, 2)
        logger.info('In decimal, x_final value for this result is: %s.', x_value)

        if x_value <= 0:
            self._ret['results'][output_desired] = \
                'x_value is <= 0, there are no continued fractions.'
            return False

        logger.debug('Running continued fractions for this case.')

        # Calculate T and x/T
        T = pow(2, t_upper)
        x_over_T = x_value / T

        # Cycle in which each iteration corresponds to putting one more term in the
        # calculation of the Continued Fraction (CF) of x/T

        # Initialize the first values according to CF rule
        i = 0
        b = array.array('i')
        t = array.array('f')

        b.append(math.floor(x_over_T))
        t.append(x_over_T - b[i])

        while i >= 0:

            # From the 2nd iteration onwards, calculate the new terms of the CF based
            # on the previous terms as the rule suggests
            if i > 0:
                b.append(math.floor(1 / t[i - 1]))
                t.append((1 / t[i - 1]) - b[i])

            # Calculate the CF using the known terms
            aux = 0
            j = i
            while j > 0:
                aux = 1 / (b[j] + aux)
                j = j - 1

            aux = aux + b[0]

            # Get the denominator from the value obtained
            frac = fractions.Fraction(aux).limit_denominator()
            denominator = frac.denominator

            logger.debug('Approximation number %s of continued fractions:', i + 1)
            logger.debug("Numerator:%s \t\t Denominator: %s.", frac.numerator, frac.denominator)

            # Increment i for next iteration
            i = i + 1

            if denominator % 2 == 1:
                if i >= self._N:
                    self._ret['results'][output_desired] = \
                        'unable to find factors after too many attempts.'
                    return False
                logger.debug('Odd denominator, will try next iteration of continued fractions.')
                continue

            # If denominator even, try to get factors of N
            # Get the exponential a^(r/2)
            exponential = 0

            if denominator < 1000:
                exponential = pow(self._a, denominator / 2)

            # Check if the value is too big or not
            if math.isinf(exponential) or exponential > 1000000000:
                self._ret['results'][output_desired] = \
                    'denominator of continued fraction is too big.'
                return False

            # If the value is not to big (infinity),
            # then get the right values and do the proper gcd()
            putting_plus = int(exponential + 1)
            putting_minus = int(exponential - 1)
            one_factor = math.gcd(putting_plus, self._N)
            other_factor = math.gcd(putting_minus, self._N)

            # Check if the factors found are trivial factors or are the desired factors
            if one_factor == 1 or one_factor == self._N or \
                    other_factor == 1 or other_factor == self._N:
                logger.debug('Found just trivial factors, not good enough.')
                # Check if the number has already been found,
                # use i-1 because i was already incremented
                if t[i - 1] == 0:
                    self._ret['results'][output_desired] = \
                        'the continued fractions found exactly x_final/(2^(2n)).'
                    return False
                if i >= self._N:
                    self._ret['results'][output_desired] = \
                        'unable to find factors after too many attempts.'
                    return False
            else:
                logger.debug('The factors of %s are %s and %s.', self._N, one_factor, other_factor)
                logger.debug('Found the desired factors.')
                self._ret['results'][output_desired] = (one_factor, other_factor)
                factors = sorted((one_factor, other_factor))
                if factors not in self._ret['factors']:
                    self._ret['factors'].append(factors)
                return True
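
# A hedged sketch of the continued-fraction rule used above, separated from
# the Shor-specific bookkeeping: b[i] = floor(1 / t[i-1]),
# t[i] = 1 / t[i-1] - b[i], and each convergent's denominator is a candidate
# period r.
import fractions
import math

def convergents(value, max_terms=10):
    b, t, result = [math.floor(value)], [value - math.floor(value)], []
    for i in range(1, max_terms):
        if t[i - 1] == 0:
            break
        b.append(math.floor(1 / t[i - 1]))
        t.append(1 / t[i - 1] - b[i])
        aux = 0  # fold the terms back up into a single fraction
        for j in range(i, 0, -1):
            aux = 1 / (b[j] + aux)
        result.append(fractions.Fraction(aux + b[0]).limit_denominator())
    return result

print(convergents(0.421875))  # denominators are the candidate periods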
Example #46
0
    def gauss_laplace_momentum(self, potential, potential_disc, kinetic,
                               stpsze, x0, p0, logp, grad, aux, n_disc=0,
                               M=None):
        '''Performs one numerical integration step of the DHMC mixed
        integrator, Algorithm 2 from Nishimura et al. 2017.

        Parameters
        ----------

        stpsze -  A scalar representing step size.
        x0     -  A torch.Tensor \in mathbb{R}^{D x 1}. Represents the initial
                  starting value.
        p0     -  A torch.Tensor \in mathbb{R}^{D x 1}. Represents the initial,
                  sampled momentum.
        logp   -  A function representing the log of the posterior.
        grad   -  A torch.Tensor, representing the gradient of the potential
                  energy evaluated at the specified point.
                  ** May not need this if we include the functionality in the
                  potential function. **
        M      -  A torch.Tensor M \in mathbb{R}^{D x D}. Represents the
                  diagonal of the mass matrix.

        Outputs
        --------

        x        - A torch.Tensor \in mathbb{R}^{D x 1}. The proposed x
        p        - A torch.Tensor \in mathbb{R}^{D x 1}. The proposed p
        logp     - A torch.Tensor \in mathbb{R}^{1 x 1}. As previously defined
        gradcont - A torch.Tensor \in mathbb{R}^{D x 1}. A tensor of the
                   gradients of the continuous parameters
        aux      - Output from the potential. ** May not be needed. **
        '''
        if M is None:
            M = torch.ones(x0.size()[0])

        x = x0.clone()
        p = p0.clone()
        # performs first half step on continuous parameters
        p[:-n_disc] = p[:-n_disc] + 0.5 * stpsze * potential(p[:-n_disc],
                                                             grad=True)
        if n_disc == 0:
            x = x + stpsze * kinetic(p, M, mom='Gauss', grad=True)
        else:
            # Update continuous parameters if any
            if self.n_param != self.n_disc:
                x[:-n_disc] = x[:-n_disc] + stpsze * 0.5 * kinetic(
                    p[:-n_disc], M[:-n_disc], mom='Gauss', grad=True)
                logp, aux = potential(x, grad=False)
                gradcont = potential(x, grad=True)
            # Update discrete parameters
            if math.isinf(logp[0]):
                return x, p, grad, logp, aux  # reject early: logp is infinite
            # creates a permutation of the 'J' indices, i.e. J is the
            # discontinuous set of parameters. Line 3 of Algorithm 2
            coord_order = x.size()[0] - n_disc + np.random.permutation(n_disc)
            for index in coord_order:
                # calls on Algorithm 1 from Nishimura et al.
                x, p, logp, aux = self.coordwise_int(self, potential_disc,\
                                                     aux, index, x, p, M,\
                                                     stpsze, logp)
            x[:-n_disc] = x[:-n_disc] + stpsze * 0.5 * kinetic(
                p[:-n_disc], M[:-n_disc], mom='Gauss', grad=True)

        if self.n_param != self.n_disc:
            logp, aux = potential(x, grad=False)
            gradcont = potential(x, grad=True)

        return x, p, gradcont, logp, aux
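
# A hedged NumPy sketch of the continuous-parameter piece above: a plain
# leapfrog step with Gaussian momentum and a diagonal (here unit) inverse
# mass matrix, for a hypothetical standard-normal target.
import numpy as np

def leapfrog_step(x, p, grad_u, stepsize, minv):
    p = p - 0.5 * stepsize * grad_u(x)   # half momentum kick
    x = x + stepsize * minv * p          # full position drift
    p = p - 0.5 * stepsize * grad_u(x)   # half momentum kick
    return x, p

grad_u = lambda x: x  # gradient of U(x) = x**2 / 2
x, p = np.array([1.0]), np.array([0.5])
x, p = leapfrog_step(x, p, grad_u, 0.1, minv=np.array([1.0]))
print(x, p)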
Example #47
0
assert math.ceil(0.5) == 1
assert math.ceil(1.0) == 1
assert math.ceil(1.5) == 2
assert math.ceil(-0.5) == 0

assert math.ldexp(float("inf"), -10**20) == float("inf")

assert almost_equal(math.log1p(1 / math.e - 1), -1)
assert almost_equal(math.log1p(0), 0)
assert almost_equal(math.log1p(math.e - 1), 1)
assert almost_equal(math.log1p(1), math.log(2))

assert almost_equal(math.acosh(1), 0)
assert almost_equal(math.acosh(2), 1.3169578969248168)

assert math.isinf(math.asinh(float("inf")))

assert almost_equal(math.asinh(0), 0)
assert almost_equal(math.asinh(1), 0.88137358701954305)

assert almost_equal(math.asinh(-1), -0.88137358701954305)

assert almost_equal(math.atanh(0), 0)
assert almost_equal(math.atanh(0.5), 0.54930614433405489)

assert almost_equal(math.atanh(-0.5), -0.54930614433405489)

assert math.isnan(math.atanh(float("nan")))

assert math.trunc(1.9) == 1.0
Example #48
0
        dist_fun = euclidean_distances
    else:  # 1-D list of strings
        from nltk.metrics.distance import edit_distance
        # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']
        # y = ['class', 'too']
        x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']
        y = ['see', 'drown', 'himself']
        # x = 'we talked about the situation'.split()
        # y = 'we talked about the situation'.split()
        dist_fun = edit_distance
    dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s)

    # Visualize
    from matplotlib import pyplot as plt
    plt.imshow(cost.T,
               origin='lower',
               cmap=plt.cm.Reds,
               interpolation='nearest')
    plt.plot(path[0], path[1], '-o')  # relation
    plt.xticks(range(len(x)), x)
    plt.yticks(range(len(y)), y)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.axis('tight')
    if isinf(w):
        plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s))
    else:
        plt.title(
            'Minimum distance: {}, window width: {}, slope weight: {}'.format(
                dist, w, s))
    plt.show()
Example #49
0
    def create_features(self, token: Token, invoicePage: InvoicePage):

        ################################### HELPER FUNCTIONS FOR TOKEN GENERATION #################################################
        # calculates and returns min dist from token 1 to token 2 in pixels
        def calc_min_dist(t1, t2):
            # get bounding outer rectangle
            outer_rect_left = min(t1.coordinates["x"], t2.coordinates["x"])
            outer_rect_top = min(t1.coordinates["y"], t2.coordinates["y"])
            outer_rect_bottom = max(
                (t1.coordinates["y"] + t1.coordinates["height"]),
                (t2.coordinates["y"] + t2.coordinates["height"]),
            )
            outer_rect_right = max(
                (t1.coordinates["x"] + t1.coordinates["width"]),
                (t2.coordinates["x"] + t2.coordinates["width"]),
            )
            outer_rect_width = outer_rect_right - outer_rect_left
            outer_rect_height = outer_rect_bottom - outer_rect_top
            inner_rect_width = max(
                0,
                outer_rect_width -
                (t1.coordinates["width"] + t2.coordinates["width"]),
            )
            inner_rect_height = max(
                0,
                outer_rect_height -
                (t1.coordinates["height"] + t2.coordinates["height"]),
            )
            pixel_dist = math.sqrt(inner_rect_width**2 + inner_rect_height**2)
            return pixel_dist

        # checks if two tokens are aligned vertically within a margin of error (checks midpoint, left boundary, right boundary)
        def is_vert_aligned(t1, t2, moe):
            """Returns true if t2 is vertically aligned with t1 and is below t1"""
            t2_below_t1 = t1.coordinates["y"] - t2.coordinates["y"] < 0
            if not t2_below_t1:
                return False

            if abs(t1.coordinates["x"] - t2.coordinates["x"]) < moe:
                return True
            if (abs((t1.coordinates["x"] + t1.coordinates["width"]) -
                    (t2.coordinates["x"] + t2.coordinates["width"])) < moe):
                return True
            t1_midpt_x = t1.coordinates["x"] + (t1.coordinates["width"] / 2)
            t2_midpt_x = t2.coordinates["x"] + (t2.coordinates["width"] / 2)
            if abs(t1_midpt_x - t2_midpt_x) < moe:
                return True
            return False

        # checks if two tokens are aligned horizontally within a margin of error (checks midpoint, top boundary, bottom boundary)
        def is_hori_aligned(t1, t2, moe):
            """Returns true if t2 is horizontally aligned with t1 and is to the right of t1"""
            t2_to_right_of_t1 = t1.coordinates["x"] - t2.coordinates["x"] < 0
            if not t2_to_right_of_t1:
                return False

            if abs(t1.coordinates["y"] - t2.coordinates["y"]) < moe:
                return True
            if (abs((t1.coordinates["y"] + t1.coordinates["height"]) -
                    (t2.coordinates["y"] + t2.coordinates["height"])) < moe):
                return True
            t1_midpt_y = t1.coordinates["y"] + (t1.coordinates["height"] / 2)
            t2_midpt_y = t2.coordinates["y"] + (t2.coordinates["height"] / 2)
            if abs(t1_midpt_y - t2_midpt_y) < moe:
                return True
            return False

        #############################################################################################################################

        features = {}

        # number of characters and words in token text string
        features["char_count"] = len(token.text)
        features["word_count"] = len(token.text.split(" "))

        # height and width of token
        features["height"] = token.coordinates["height"]
        features["width"] = token.coordinates["width"]

        # distance to edges of page (to nearest point on the box)
        features[
            "rel_dist_top"] = token.coordinates["y"] / invoicePage.size["y"]
        features[
            "rel_dist_left"] = token.coordinates["x"] / invoicePage.size["x"]
        features["dist_bottom"] = invoicePage.size["y"] - (
            token.coordinates["y"] + token.coordinates["height"])
        features["dist_right"] = invoicePage.size["x"] - (
            token.coordinates["x"] + token.coordinates["width"])

        # distance to boundaries of outermost text on page (tokens nearest to edge of page)
        min_x = math.inf
        min_y = math.inf
        max_y = 0
        max_x = 0
        for t in invoicePage.grouped_tokens:
            if t.coordinates["x"] < min_x:
                min_x = t.coordinates["x"]
            if t.coordinates["y"] < min_y:
                min_y = t.coordinates["y"]
            if t.coordinates["x"] + t.coordinates["width"] > max_x:
                max_x = t.coordinates["x"] + t.coordinates["width"]
            if t.coordinates["y"] + t.coordinates["height"] > max_y:
                max_y = t.coordinates["y"] + t.coordinates["height"]
        features["dist_top_outer"] = token.coordinates["y"] - min_y
        features["dist_left_outer"] = token.coordinates["x"] - min_x
        features["dist_bottom_outer"] = max_y - (token.coordinates["y"] +
                                                 token.coordinates["height"])
        features["dist_right_outer"] = max_x - (token.coordinates["x"] +
                                                token.coordinates["width"])

        # relative size of token box compared to page
        features["rel_size_page_x"] = token.coordinates[
            "width"] / invoicePage.size["x"]
        features["rel_size_page_y"] = (token.coordinates["height"] /
                                       invoicePage.size["y"])

        # ave dist to neighbours (pixel and relative)
        min_dist_neighbours = [
            calc_min_dist(t, token) for t in invoicePage.grouped_tokens
        ]
        features["average_dist_neighbours_pixel"] = sum(
            min_dist_neighbours) / len(min_dist_neighbours)
        invoice_diag = math.sqrt(invoicePage.size["y"]**2 +
                                 invoicePage.size["x"]**2)
        features["average_dist_neighbours_rel"] = (
            features["average_dist_neighbours_pixel"] / invoice_diag)
        N = 5  # N is arbitrary
        min_dist_neighbours.sort()
        N_nearest_neighbours = min_dist_neighbours[:N]
        features["average_dist_N_nearest_neighbours_pixel"] = sum(
            N_nearest_neighbours) / len(N_nearest_neighbours)
        features["average_dist_N_nearest_neighbours_rel"] = (
            features["average_dist_N_nearest_neighbours_pixel"] / invoice_diag)

        # relative size compared to other tokens (percentile)
        perc_w = 0
        perc_h = 0
        for t in invoicePage.grouped_tokens:
            if t is not token:
                if t.coordinates["width"] < token.coordinates["width"]:
                    perc_w += 1
                if t.coordinates["height"] < token.coordinates["height"]:
                    perc_h += 1
        features["percentile_width"] = perc_w / len(invoicePage.grouped_tokens)
        features["percentile_height"] = perc_h / len(
            invoicePage.grouped_tokens)

        # boolean if token contains fields
        features["contains_date"] = 1 if token.date_values else 0
        features["contains_currency"] = 1 if token.currency else 0
        features[
            "contains_specific_currency"] = 1 if token.specific_currency else 0
        features["contains_date_range"] = 1 if token.date_range else 0
        features["contains_address"] = 1 if token.address else 0
        features["contains_num_label"] = 1 if token.num_label else 0
        features["contains_total_label"] = 1 if token.total_label else 0
        features["contains_amount_label"] = 1 if token.amount_label else 0
        features["contains_date_label"] = 1 if token.date_label else 0
        features["contains_date_of_invoice_label"] = 1 if features[
            "contains_date_label"] and len(
                token.date_label.split(" ")
            ) > 1 else 0  # This is a more specific feature than the one above
        features["contains_digit"] = 1 if token.contains_digit else 0
        features["contains_company"] = 1 if token.company else 0
        features["contains_tax_label"] = 1 if token.tax_label else 0

        # boolean if aligned with selected tokens
        moe = 10  # arbitrary 10-pixel margin of error
        features["vert_align_to_cell_w_date"] = 0
        features["vert_align_to_cell_w_currency"] = 0
        features["vert_align_to_cell_w_address"] = 0
        features["vert_align_to_cell_w_datelabel"] = 0
        features["vert_align_to_cell_w_dateofinvoicelabel"] = 0
        features["vert_align_to_cell_w_numlabel"] = 0
        features["vert_align_to_cell_w_totallabel"] = 0
        features["vert_align_to_cell_w_amountlabel"] = 0
        features["vert_align_to_cell_w_digit"] = 0
        features["vert_align_to_cell_w_invoicenum_label"] = 0
        features["vert_align_to_cell_w_accountnum_label"] = 0
        features["vert_align_to_cell_w_ponum_label"] = 0
        features["vert_align_to_cell_w_tax_label"] = 0

        features["hori_align_to_cell_w_date"] = 0
        features["hori_align_to_cell_w_currency"] = 0
        features["hori_align_to_cell_w_address"] = 0
        features["hori_align_to_cell_w_datelabel"] = 0
        features["hori_align_to_cell_w_dateofinvoicelabel"] = 0
        features["hori_align_to_cell_w_numlabel"] = 0
        features["hori_align_to_cell_w_totallabel"] = 0
        features["hori_align_to_cell_w_amountlabel"] = 0
        features["hori_align_to_cell_w_digit"] = 0
        features["hori_align_to_cell_w_invoicenum_label"] = 0
        features["hori_align_to_cell_w_accountnum_label"] = 0
        features["hori_align_to_cell_w_ponum_label"] = 0
        features["hori_align_to_cell_w_tax_label"] = 0

        for t in invoicePage.grouped_tokens:
            if t is not token:
                if is_vert_aligned(t, token, moe):
                    if t.date_values:
                        features["vert_align_to_cell_w_date"] = 1
                    if t.currency:
                        features["vert_align_to_cell_w_currency"] = 1
                    if t.address:
                        features["vert_align_to_cell_w_address"] = 1
                    if t.date_label:
                        features["vert_align_to_cell_w_datelabel"] = 1
                    if t.date_of_invoice_label:
                        features["vert_align_to_cell_w_dateofinvoicelabel"] = 1
                    if t.num_label:
                        features["vert_align_to_cell_w_numlabel"] = 1
                    if t.invoice_num_label:
                        features["vert_align_to_cell_w_invoicenum_label"] = 1
                    if t.acc_num_label:
                        features["vert_align_to_cell_w_accountnum_label"] = 1
                    if t.po_num_label:
                        features["vert_align_to_cell_w_ponum_label"] = 1
                    if t.total_label:
                        features["vert_align_to_cell_w_totallabel"] = 1
                    if t.amount_label:
                        features["vert_align_to_cell_w_amountlabel"] = 1
                    if t.contains_digit:
                        features["vert_align_to_cell_w_digit"] = 1
                    if t.tax_label:
                        features["vert_align_to_cell_w_tax_label"] = 1

                if is_hori_aligned(t, token, moe):
                    if t.date_values:
                        features["hori_align_to_cell_w_date"] = 1
                    if t.currency:
                        features["hori_align_to_cell_w_currency"] = 1
                    if t.address:
                        features["hori_align_to_cell_w_address"] = 1
                    if t.date_label:
                        features["hori_align_to_cell_w_datelabel"] = 1
                    if t.date_of_invoice_label:
                        features["hori_align_to_cell_w_dateofinvoicelabel"] = 1
                    if t.num_label:
                        features["hori_align_to_cell_w_numlabel"] = 1
                    if t.invoice_num_label:
                        features["hori_align_to_cell_w_invoicenum_label"] = 1
                    if t.acc_num_label:
                        features["hori_align_to_cell_w_accountnum_label"] = 1
                    if t.po_num_label:
                        features["hori_align_to_cell_w_ponum_label"] = 1
                    if t.total_label:
                        features["hori_align_to_cell_w_totallabel"] = 1
                    if t.amount_label:
                        features["hori_align_to_cell_w_amountlabel"] = 1
                    if t.contains_digit:
                        features["hori_align_to_cell_w_digit"] = 1
                    if t.tax_label:
                        features["hori_align_to_cell_w_tax_label"] = 1

        # dist to nearest cell with field (inf if no field in page)
        features["dist_nearest_cell_w_date"] = math.inf
        features["dist_nearest_cell_w_currency"] = math.inf
        features["dist_nearest_cell_w_address"] = math.inf
        features["dist_nearest_cell_w_datelabel"] = math.inf
        features["dist_nearest_cell_w_invoicedatelabel"] = math.inf
        features["dist_nearest_cell_w_numlabel"] = math.inf
        features["dist_nearest_cell_w_invoicenumlabel"] = math.inf
        features["dist_nearest_cell_w_accnumlabel"] = math.inf
        features["dist_nearest_cell_w_ponumlabel"] = math.inf
        features["dist_nearest_cell_w_totallabel"] = math.inf
        features["dist_nearest_cell_w_amountlabel"] = math.inf
        features["dist_nearest_cell_w_digit"] = math.inf
        features["dist_nearest_cell_w_tax_label"] = math.inf

        for t in invoicePage.grouped_tokens:
            if t is not token:
                dist = calc_min_dist(t, token)
                if t.date_values and dist < features[
                        "dist_nearest_cell_w_date"]:
                    features["dist_nearest_cell_w_date"] = dist
                if t.currency and dist < features[
                        "dist_nearest_cell_w_currency"]:
                    features["dist_nearest_cell_w_currency"] = dist
                if t.address and dist < features["dist_nearest_cell_w_address"]:
                    features["dist_nearest_cell_w_address"] = dist
                if t.date_label and dist < features[
                        "dist_nearest_cell_w_datelabel"]:
                    features["dist_nearest_cell_w_datelabel"] = dist
                if t.date_of_invoice_label and dist < features[
                        "dist_nearest_cell_w_invoicedatelabel"]:
                    features["dist_nearest_cell_w_invoicedatelabel"] = dist
                if t.num_label and dist < features[
                        "dist_nearest_cell_w_numlabel"]:
                    features["dist_nearest_cell_w_numlabel"] = dist
                if t.invoice_num_label and dist < features[
                        "dist_nearest_cell_w_invoicenumlabel"]:
                    features["dist_nearest_cell_w_invoicenumlabel"] = dist
                if t.acc_num_label and dist < features[
                        "dist_nearest_cell_w_accnumlabel"]:
                    features["dist_nearest_cell_w_accnumlabel"] = dist
                if t.po_num_label and dist < features[
                        "dist_nearest_cell_w_ponumlabel"]:
                    features["dist_nearest_cell_w_ponumlabel"] = dist
                if t.total_label and dist < features[
                        "dist_nearest_cell_w_totallabel"]:
                    features["dist_nearest_cell_w_totallabel"] = dist
                if t.amount_label and dist < features[
                        "dist_nearest_cell_w_amountlabel"]:
                    features["dist_nearest_cell_w_amountlabel"] = dist
                if t.contains_digit and dist < features[
                        "dist_nearest_cell_w_digit"]:
                    features["dist_nearest_cell_w_digit"] = dist
                if t.tax_label and dist < features[
                        "dist_nearest_cell_w_tax_label"]:
                    features["dist_nearest_cell_w_tax_label"] = dist

        DEFAULT_DISTANCE = 0.75  # This is arbitrary
        for feature in features:
            if "dist_nearest" in feature and math.isinf(features[feature]):
                features[feature] = invoice_diag * DEFAULT_DISTANCE

        features["rel_dist_nearest_cell_w_date"] = features[
            "dist_nearest_cell_w_date"] / invoice_diag
        features["rel_dist_nearest_cell_w_currency"] = features[
            "dist_nearest_cell_w_currency"] / invoice_diag
        features["rel_dist_nearest_cell_w_address"] = features[
            "dist_nearest_cell_w_address"] / invoice_diag
        features["rel_dist_nearest_cell_w_datelabel"] = features[
            "dist_nearest_cell_w_datelabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_invoicedatelabel"] = features[
            "dist_nearest_cell_w_invoicedatelabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_numlabel"] = features[
            "dist_nearest_cell_w_numlabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_invoicenumlabel"] = features[
            "dist_nearest_cell_w_invoicenumlabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_accnumlabel"] = features[
            "dist_nearest_cell_w_accnumlabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_ponumlabel"] = features[
            "dist_nearest_cell_w_ponumlabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_totallabel"] = features[
            "dist_nearest_cell_w_totallabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_amountlabel"] = features[
            "dist_nearest_cell_w_amountlabel"] / invoice_diag
        features["rel_dist_nearest_cell_w_digit"] = features[
            "dist_nearest_cell_w_digit"] / invoice_diag
        features["rel_dist_nearest_cell_w_tax_label"] = features[
            "dist_nearest_cell_w_tax_label"] / invoice_diag
        """
        features TODO:
    
        -text in bold (contains, aligns, dist)
        -text from an area that was shaded
        -token was from a grid (can be close to grid lines or q far away in a well spaced out grid box) / near a summation line (above total payable amounts)
        -percentage of black pixels in box?
        """

        return features
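
# A hypothetical driver for the method above: build the feature dict for
# every grouped token on a page (`extractor` and `page` are assumed to
# follow the interfaces used in create_features):
def featurize_page(extractor, page):
    return [extractor.create_features(token, page)
            for token in page.grouped_tokens]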
Example #50
0
    def take(self, n=None, constructor=list):
        """
    Returns a container with the n first elements from the Stream, or less if
    there aren't enough. Use this without args if you need only one element
    outside a list.

    Parameters
    ----------
    n :
      Number of elements to be taken. Defaults to None.
      Rounded when it's a float, and this can be ``inf`` for taking all.
    constructor :
      Container constructor function that can receive a generator as input.
      Defaults to ``list``.

    Returns
    -------
    The first ``n`` elements of the Stream sequence, created by the given
    constructor unless ``n == None``, which means returns the next element
    from the sequence outside any container.
    If ``n`` is None, this can raise StopIteration due to lack of data in
    the Stream. When ``n`` is a number, there's no such exception.

    Examples
    --------
    >>> Stream(5).take(3) # Three elements
    [5, 5, 5]
    >>> Stream(1.2, 2, 3).take() # One element, outside a container
    1.2
    >>> Stream(1.2, 2, 3).take(1) # With n = 1 argument, it'll be in a list
    [1.2]
    >>> Stream(1.2, 2, 3).take(1, constructor=tuple) # Why not a tuple?
    (1.2,)
    >>> Stream([1, 2]).take(3) # More than the Stream size, n is integer
    [1, 2]
    >>> Stream([]).take() # More than the Stream size, n is None
    Traceback (most recent call last):
      ...
    StopIteration

    Taking rounded float quantities and "up to infinity" elements
    (don't try using ``inf`` with endless Stream instances):

    >>> Stream([4, 3, 2, 3, 2]).take(3.4)
    [4, 3, 2]
    >>> Stream([4, 3, 2, 3, 2]).take(3.6)
    [4, 3, 2, 3]
    >>> Stream([4, 3, 2, 3, 2]).take(inf)
    [4, 3, 2, 3, 2]

    See Also
    --------
    Stream.peek :
      Returns the n first elements from the Stream, without removing them.

    Note
    ----
    You should avoid using take() as if this would be an iterator. Streams
    are iterables that can be easily part of a "for" loop, and their
    iterators (the ones automatically used in for loops) are slightly faster.
    Use iter() builtin if you need that, instead, or perhaps the blocks
    method.

    """
        if n is None:
            return next(self._data)
        if isinf(n) and n > 0:
            return constructor(self._data)
        if isinstance(n, float):
            n = rint(n) if n > 0 else 0  # So this works with -inf and nan
        return constructor(next(self._data) for _ in xrange(n))
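
# `rint` above isn't defined in this snippet; a minimal stand-in that rounds
# to the nearest integer (behavior at exact halves may differ from the
# library's own helper):
def rint(x):
    return int(round(x))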
Example #51
0
def json_clean(obj):
    """Clean an object to ensure it's safe to encode in JSON.

    Atomic, immutable objects are returned unmodified.  Sets and tuples are
    converted to lists, lists are copied and dicts are also copied.

    Note: dicts whose keys could cause collisions upon encoding (such as a dict
    with both the number 1 and the string '1' as keys) will cause a ValueError
    to be raised.

    Parameters
    ----------
    obj : any python object

    Returns
    -------
    out : object

      A version of the input which will not cause an encoding error when
      encoded as JSON.  Note that this function does not *encode* its inputs,
      it simply sanitizes it so that there will be no encoding errors later.

    """
    # types that are 'atomic' and ok in json as-is.
    atomic_ok = (unicode_type, type(None))

    # containers that we need to convert into lists
    container_to_list = (tuple, set, types.GeneratorType)

    # Since bools are a subtype of Integrals, which are a subtype of Reals,
    # we have to check them in that order.

    if isinstance(obj, bool):
        return obj

    if isinstance(obj, numbers.Integral):
        # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
        return int(obj)

    if isinstance(obj, numbers.Real):
        # cast out-of-range floats to their reprs
        if math.isnan(obj) or math.isinf(obj):
            return repr(obj)
        return float(obj)

    if isinstance(obj, atomic_ok):
        return obj

    if isinstance(obj, bytes):
        if py3compat.PY3:
            # unambiguous binary data is base64-encoded
            # (this probably should have happened upstream)
            return b2a_base64(obj).decode('ascii')
        else:
            # Python 2 bytestr is ambiguous,
            # needs special handling for possible binary bytestrings.
            # imperfect workaround: if ascii, assume text.
            # otherwise assume binary, base64-encode (py3 behavior).
            try:
                return obj.decode('ascii')
            except UnicodeDecodeError:
                return b2a_base64(obj).decode('ascii')

    if isinstance(obj,
                  container_to_list) or (hasattr(obj, '__iter__')
                                         and hasattr(obj, next_attr_name)):
        obj = list(obj)

    if isinstance(obj, list):
        return [json_clean(x) for x in obj]

    if isinstance(obj, dict):
        # First, validate that the dict won't lose data in conversion due to
        # key collisions after stringification.  This can happen with keys like
        # True and 'true' or 1 and '1', which collide in JSON.
        nkeys = len(obj)
        nkeys_collapsed = len(set(map(unicode_type, obj)))
        if nkeys != nkeys_collapsed:
            raise ValueError('dict cannot be safely converted to JSON: '
                             'key collision would lead to dropped values')
        # If all OK, proceed by making the new dict that will be json-safe
        out = {}
        for k, v in iteritems(obj):
            out[unicode_type(k)] = json_clean(v)
        return out
    if isinstance(obj, datetime):
        return obj.strftime(ISO8601)

    # we don't understand it, it's probably an unserializable object
    raise ValueError("Can't clean for JSON: %r" % obj)
Example #52
0
def _compute_and_update_PI_kernel(
    i,
    T_A,
    T_B,
    m,
    QT_even,
    QT_odd,
    QT_first,
    M_T,
    Σ_T,
    μ_Q,
    σ_Q,
    k,
    ignore_trivial,
    excl_zone,
    profile,
    indices,
    compute_QT,
):
    """
    A Numba CUDA kernel to update the matrix profile and matrix profile indices

    Parameters
    ----------
    i : int
        sliding window `i`

    T_A : ndarray
        The time series or sequence for which to compute the dot product

    T_B : ndarray
        The time series or sequence that will be used to annotate T_A. For every
        subsequence in T_A, its nearest neighbor in T_B will be recorded.

    m : int
        Window size

    QT_even : ndarray
        The input QT array (dot product between the query sequence,`Q`, and
        time series, `T`) to use when `i` is even

    QT_odd : ndarray
        The input QT array (dot product between the query sequence,`Q`, and
        time series, `T`) to use when `i` is odd

    QT_first : ndarray
        Dot product between the first query sequence,`Q`, and time series, `T`

    M_T : ndarray
        Sliding mean of time series, `T`

    Σ_T : ndarray
        Sliding standard deviation of time series, `T`

    μ_Q : ndarray
        Mean of the query sequence, `Q`

    σ_Q : ndarray
        Standard deviation of the query sequence, `Q`

    k : int
        The total number of sliding windows to iterate over

    ignore_trivial : bool
        Set to `True` if this is a self-join. Otherwise, for AB-join, set this to
        `False`.

    excl_zone : int
        The half width for the exclusion zone relative to the current
        sliding window

    profile : ndarray
        Matrix profile. The first column consists of the global matrix profile,
        the second column consists of the left matrix profile, and the third
        column consists of the right matrix profile.

    indices : ndarray
        The first column consists of the matrix profile indices, the second
        column consists of the left matrix profile indices, and the third
        column consists of the right matrix profile indices.

    compute_QT : bool
        A boolean flag for whether or not to compute QT

    Returns
    -------
    None

    Notes
    -----
    `DOI: 10.1109/ICDM.2016.0085 \
    <https://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf>`__

    See Table II, Figure 5, and Figure 6
    """
    start = cuda.grid(1)
    stride = cuda.gridsize(1)

    if i % 2 == 0:
        QT_out = QT_even
        QT_in = QT_odd
    else:
        QT_out = QT_odd
        QT_in = QT_even

    for j in range(start, QT_out.shape[0], stride):
        zone_start = max(0, j - excl_zone)
        zone_stop = min(k, j + excl_zone)

        if compute_QT:
            QT_out[j] = (QT_in[j - 1] - T_B[i - 1] * T_A[j - 1] +
                         T_B[i + m - 1] * T_A[j + m - 1])

            QT_out[0] = QT_first[i]
        if math.isinf(M_T[j]) or math.isinf(μ_Q[i]):
            D = np.inf
        else:
            if (σ_Q[i] < config.STUMPY_STDDEV_THRESHOLD
                    or Σ_T[j] < config.STUMPY_STDDEV_THRESHOLD):
                D = m
            else:
                denom = m * σ_Q[i] * Σ_T[j]
                if math.fabs(
                        denom
                ) < config.STUMPY_DENOM_THRESHOLD:  # pragma nocover
                    denom = config.STUMPY_DENOM_THRESHOLD
                D = abs(2 * m * (1.0 -
                                 (QT_out[j] - m * μ_Q[i] * M_T[j]) / denom))

            if (σ_Q[i] < config.STUMPY_STDDEV_THRESHOLD
                    and Σ_T[j] < config.STUMPY_STDDEV_THRESHOLD
                ) or D < config.STUMPY_D_SQUARED_THRESHOLD:
                D = 0

        if ignore_trivial:
            if i <= zone_stop and i >= zone_start:
                D = np.inf
            if D < profile[j, 1] and i < j:
                profile[j, 1] = D
                indices[j, 1] = i
            if D < profile[j, 2] and i > j:
                profile[j, 2] = D
                indices[j, 2] = i

        if D < profile[j, 0]:
            profile[j, 0] = D
            indices[j, 0] = i
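
# Schematic host-side launch for the kernel above, one sliding window `i`
# per call. Illustrative only: the array arguments are assumed to already
# be device arrays prepared by the caller, so this is left as comments
# rather than runnable code (this is not the library's actual driver):
#
#     threads_per_block = 512
#     blocks_per_grid = math.ceil(profile.shape[0] / threads_per_block)
#     for i in range(1, k):
#         _compute_and_update_PI_kernel[blocks_per_grid, threads_per_block](
#             i, T_A, T_B, m, QT_even, QT_odd, QT_first, M_T, Σ_T, μ_Q, σ_Q,
#             k, ignore_trivial, excl_zone, profile, indices, True)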
Example #53
0
def main(conf):
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(conf['main_args']['cuda'])
    model_dir = conf['main_args']['model_dir']
    exp_dir = conf['main_args']['exp_dir']
    # Define Dataloader
    assert len(conf['main_args']['train_metadata']) == len(
        conf['main_args']['train_n_src'])
    train_gens = []
    for i in range(len(conf['main_args']['train_metadata'])):
        train_set = LibriMix(csv_path=conf['main_args']['train_metadata'][i],
                             sample_rate=conf['data']['sample_rate'],
                             n_src=conf['main_args']['train_n_src'][i],
                             segment=conf['data']['segment'])
        train_gen = DataLoader(
            train_set,
            shuffle=True,
            batch_size=int(conf['training']['batch_size'] * 2 /
                           conf['main_args']['train_n_src'][i]),
            num_workers=conf['training']['num_workers'],
            drop_last=True)
        train_gens.append(train_gen)

    assert len(conf['main_args']['val_metadata']) == len(
        conf['main_args']['val_n_src'])
    val_gens = []
    for i in range(len(conf['main_args']['val_metadata'])):
        val_set = LibriMix(csv_path=conf['main_args']['val_metadata'][i],
                           sample_rate=conf['data']['sample_rate'],
                           n_src=conf['main_args']['val_n_src'][i],
                           segment=conf['data']['segment'])
        val_gen = DataLoader(val_set,
                             shuffle=True,
                             batch_size=conf['training']['batch_size'],
                             num_workers=conf['training']['num_workers'],
                             drop_last=True)
        print(val_gen)
        val_gens.append(val_gen)
    SPKID = LIBRISPEECH_SPKID[conf['data']['subset']]

    # Loss functions
    loss_fn = dict()
    loss_fn['sisdr'] = PermInvariantSISDR(return_individual_results=True)
    loss_fn['spk_circle'] = CircleLoss(m=0.25, gamma=15)

    # Define model, optimizer + scheduler
    if conf['main_args']['stage'] == 1:
        model = CAE(conf['cae'])
        model_path = os.path.join(exp_dir, 'cae', config_cae_path(conf['cae']))
    elif conf['main_args']['stage'] == 2:
        conf['tcn'].update({
            'cae_path':
            os.path.join(exp_dir, 'cae', config_cae_path(conf['cae']))
        })
        model = CAE_DANet(conf['tcn'])
        model_path = os.path.join(exp_dir, model_dir)
        if conf['loss_fn']['spk_ce'] > 0:
            model.danet.add_softmax(output_size=len(SPKID), normalize=False)
            loss_fn['spk_ce'] = model.danet.spk_softmax
    else:
        raise ValueError('Training stage should be either 1 or 2!')
    model = torch.nn.DataParallel(model).cuda()
    opt = torch.optim.Adam(model.module.parameters(), lr=conf['optim']['lr'])

    # Validation metric
    metric_name = 'SISDRi'
    if metric_name == 'SISDRi':
        SISDRi = PermInvariantSISDR(backward_loss=False,
                                    improvement=True,
                                    return_individual_results=True)

    # Save config
    # os.makedirs(exp_dir, exist_ok=True)
    os.makedirs(model_path, exist_ok=True)
    conf_path = os.path.join(model_path, 'conf.yml')
    with open(conf_path, 'w') as outfile:
        yaml.safe_dump(conf, outfile)

    # Train model
    tr_step = 0
    val_step = 0
    new_lr = conf['optim']['lr']
    halving = False
    best_val_loss = float("-inf")
    val_no_impv = 0
    for i in range(conf['training']['epochs']):
        metric_dic = {
            'train_{}'.format(metric_name): 0.,
            'val_{}'.format(metric_name): 0.
        }
        print("Training stage {} || Epoch: {}/{}".format(
            conf['main_args']['stage'], i + 1, conf['training']['epochs']))
        model.train()
        train_metric_mean = []
        for data_set in zip(tqdm(train_gens[0], desc='Training'), train_gens[1]) if len(train_gens) == 2 \
                else tqdm(train_gens[0], desc='Training'):  # mini-batch
            if not isinstance(data_set, tuple):
                data_set = (data_set, )
            for data in data_set:
                opt.zero_grad()
                m1wavs = data[0].unsqueeze(1).cuda()
                clean_wavs = data[-1].cuda()
                speaker_id = data[1]

                if conf['main_args']['stage'] == 1:
                    recon_sources, enc_masks, enc_mixture = model.module(
                        m1wavs, clean_wavs)
                if conf['main_args']['stage'] == 2:
                    estimated_masks, enc_masks, enc_mixture, Wx, phase = model(
                        m1wavs,
                        clean_wavs,
                        train=True,
                        n_sources=clean_wavs.shape[1])
                    V = estimated_masks[
                        1]  # V (B, K, F*T),  enc_masks (B, C, F*T)
                    A = estimated_masks[2]  # (B, nspk, K)
                    estimated_masks = estimated_masks[
                        0]  # estimated_masks (B, nspk, F*T)
                    recon_sources = model.module.get_rec_sources(
                        estimated_masks.view(m1wavs.shape[0],
                                             estimated_masks.shape[1],
                                             model.module.input_dim, -1),
                        enc_mixture,
                        phase=phase)  # recovered waveform

                l_dict = dict()
                if conf['loss_fn']['sisdr'] > 0:
                    l_sisdr = loss_fn['sisdr'](recon_sources,
                                               clean_wavs).mean()
                    l_dict.update(
                        {'sisdr': conf['loss_fn']['sisdr'] * l_sisdr})
                if conf['loss_fn']['compact'] > 0:
                    enc_mixture = enc_mixture.view(enc_mixture.shape[0],
                                                   -1).unsqueeze(1)
                    w = -enc_mixture / torch.sum(
                        enc_mixture, dim=[1, 2], keepdim=True)
                    enc_masks[enc_masks <= 0.5] = 0
                    An = F.normalize(A.detach(), dim=2)
                    l_va = w * enc_masks * (torch.bmm(An, F.normalize(V,
                                                                      dim=1)))
                    l_va = l_va.sum(dim=[1, 2]).mean()
                    l_dict.update(
                        {'compact': conf['loss_fn']['compact'] * l_va})
                if conf['loss_fn']['spk_circle'] > 0:
                    L = torch.zeros(A.shape[0], A.shape[1]).cuda()
                    for j in range(A.shape[0]):
                        for k in range(A.shape[1]):
                            L[j][k] = SPKID.index(speaker_id[k][j])
                    inp_sp, inp_sn = convert_label_to_similarity(
                        A.view(-1, A.shape[2]), L.view(-1))
                    l_c = loss_fn['spk_circle'](inp_sp, inp_sn)
                    l_dict.update(
                        {'circle': conf['loss_fn']['spk_circle'] * l_c})
                if conf['loss_fn']['spk_ce'] > 0:
                    label = torch.zeros(A.shape[0],
                                        A.shape[1],
                                        dtype=torch.int64).cuda()
                    for j in range(A.shape[0]):
                        for k in range(A.shape[1]):
                            label[j][k] = SPKID.index(speaker_id[k][j])
                    if conf['tcn']['sim'] == 'cos':
                        l_softmax = loss_fn['spk_ce'](F.normalize(A.view(
                            -1, A.shape[2]),
                                                                  p=2,
                                                                  dim=1),
                                                      label.view(-1))
                    else:
                        l_softmax = loss_fn['spk_ce'](A.view(-1, A.shape[2]),
                                                      label.view(-1))
                    l_dict.update(
                        {'spksoftmax': conf['loss_fn']['spk_ce'] * l_softmax})

                # Loss back-propagation
                l = torch.tensor(0.0).cuda()
                for loss in l_dict.values():
                    if not math.isinf(loss) and not math.isnan(loss):
                        l = l + loss

                if not conf['cae']['stft'] or conf['main_args']['stage'] == 2:
                    l.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
                    opt.step()

                train_metric = SISDRi(recon_sources,
                                      clean_wavs,
                                      initial_mixtures=m1wavs)
                train_metric_mean += train_metric.tolist()

        train_metric_mean = np.mean(train_metric_mean)
        metric_dic['train_{}'.format(metric_name)] = train_metric_mean
        tr_step += 1

        if val_gens is not None:
            model.eval()
            with torch.no_grad():
                val_metric_mean = []
                for data_set in zip(tqdm(val_gens[0], desc='Validation'), val_gens[1]) if len(val_gens) == 2 \
                        else tqdm(val_gens[0], desc='Validation'):  # mini-batch
                    if not isinstance(data_set, tuple):
                        data_set = (data_set, )
                    for data in data_set:
                        m1wavs = data[0].unsqueeze(1).cuda()
                        clean_wavs = data[-1].cuda()

                        if conf['main_args']['stage'] == 1:
                            recon_sources, _, _ = model.module(
                                m1wavs, clean_wavs)
                        if conf['main_args']['stage'] == 2:
                            estimated_masks, _, enc_mixture, _, phase = model(
                                m1wavs,
                                clean_wavs,
                                train=True,
                                n_sources=clean_wavs.shape[1])
                            V = estimated_masks[
                                1]  # V (B, K, F*T),  enc_masks (B, C, F*T)
                            A = estimated_masks[2]  # (B, nspk, K)
                            estimated_masks = estimated_masks[
                                0]  # estimated_masks (B, nspk, F*T)
                            recon_sources = model.module.get_rec_sources(
                                estimated_masks.view(m1wavs.shape[0],
                                                     estimated_masks.shape[1],
                                                     model.module.input_dim,
                                                     -1),
                                enc_mixture,
                                phase=phase)  # recovered waveform

                        val_metric = SISDRi(recon_sources,
                                            clean_wavs,
                                            initial_mixtures=m1wavs)
                        val_metric_mean += val_metric.tolist()
            val_metric_mean = np.mean(val_metric_mean)
            metric_dic['val_{}'.format(metric_name)] = val_metric_mean
            val_step += 1

        # Adjust learning rate (halving)
        if conf['training']['half_lr']:
            val_loss = round(val_metric_mean, 2)  # keep two decimal places
            if val_loss <= best_val_loss:
                val_no_impv += 1
                if val_no_impv % 6 == 0:
                    halving = True
                if val_no_impv >= 20 and conf['training']['early_stop']:
                    print("No imporvement for 20 epochs, early stopping.")
                    break
            else:
                best_val_loss = val_loss
                val_no_impv = 0
            if halving:
                optim_state = opt.state_dict()
                optim_state['param_groups'][0]['lr'] = \
                    optim_state['param_groups'][0]['lr'] / 2.0
                opt.load_state_dict(optim_state)
                print('Learning rate adjusted to: {lr:.6f}'.format(
                    lr=optim_state['param_groups'][0]['lr']))
                halving = False
                # val_no_impv = 0

        CAE.save_if_best(save_dir=model_path,
                         model=model.module,
                         optimizer=opt,
                         epoch=tr_step,
                         tr_loss=train_metric_mean,
                         cv_loss=val_metric_mean,
                         cv_loss_name='SISDRi',
                         save_every=50)
        pprint(metric_dic)
Example #54
0
def on_frame(frame):
    global points_agg, frame_count, stop, prev_angle
    cv_img = bridge.imgmsg_to_cv2(frame, 'bgr8')

    frame_count += 1
    if frame_count > 5 or stop:
        print(frame_count)
        print('stopped')
        publish_drive(0, 0)
        # cv2.imshow('cv_img', cv_img)
        # cv2.waitKey(1)
        return

    # publish_drive(15, -80)

    #msg_speed = drive_speed()
    #msg_speed.speed = 10
    #pub_speed.publish(msg_speed)
    
    #USE THIS
    #publish_drive(15,0)

    # msg_angle = drive_angle()
    # msg_angle.angle = 0
    # msg_angle.publish(msg_angle)

    

    hsv_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
    gray_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)

    #ORIGINALLY 25-30

    lower_yellow = np.array([30, 100, 100], dtype = 'uint8')
    upper_yellow = np.array([35, 255, 255], dtype='uint8')

    mask_yellow = cv2.inRange(hsv_image, lower_yellow, upper_yellow)
    mask_white = cv2.inRange(gray_image, 200, 255)
    mask_yw = cv2.bitwise_or(mask_white, mask_yellow)
    mask_yw_image = cv2.bitwise_and(gray_image, mask_yellow)

    #cv2.imshow('cv_img', mask_yw_image)
    #cv2.waitKey(1)
    #mask_yw_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)


    ros_image = bridge.cv2_to_imgmsg(mask_yw_image, 'mono8')
    #camera1_pub.publish(ros_image)
    # frame_count = 0
    # return

    kernel_size = (5,5)
    gauss_gray = cv2.GaussianBlur(mask_yw_image,kernel_size, 0)

    low_threshold = 50
    high_threshold = 150
    canny_edges = cv2.Canny(gauss_gray,low_threshold,high_threshold)

    #cv2.imshow('cv_img', canny_edges)
    #cv2.waitKey(1)

    imshape = canny_edges.shape
    lower_left = [0,imshape[0]]
    lower_right = [imshape[1],imshape[0]]
    top_left = [0,imshape[0]/2]
    top_right = [imshape[1],imshape[0]/2]
    vertices = [np.array([lower_left,top_left,top_right,lower_right],dtype=np.int32)]
    roi_image = region_of_interest(canny_edges, vertices)

    #rho and theta are the distance and angular resolution of the grid in Hough space
    #same values as quiz
    rho = 2
    theta = np.pi/180
    #threshold is minimum number of intersections in a grid for candidate line to go to output
    threshold = 20
    min_line_len = 50
    max_line_gap = 200

    lines = hough_lines(roi_image, rho, theta, threshold, min_line_len, max_line_gap)
    line_image = lines_on_image(lines, roi_image)

    #cv2.imshow('cv_img', line_image)
    #cv2.waitKey(1)

    points = list()
    if lines is None:
        print('no lines; skipping')
        return
    for line in lines:
        for x1, y1, x2, y2 in line:
            points.append([x1, y1])
            points.append([x2, y2])

    points_agg += points

    if frame_count > 3:
        line_image = points_on_image(points_agg, roi_image)
        kmeans = KMeans(n_clusters=min(8, len(points_agg))).fit(points_agg)
        line_image = points_on_image(kmeans.cluster_centers_.astype(int), line_image, size=15)
        # print(kmeans.cluster_centers_.astype(int)[:,0])

        result = weighted_img(line_image, cv_img, alpha=0.8, beta=1., l=0.)

        bounding_path = nn_walk(kmeans.cluster_centers_.astype(int))
        line_segments = [[[int(bounding_path[i][0]), int(bounding_path[i][1]), int(bounding_path[i+1][0]), int(bounding_path[i+1][1])]] for i in range(len(bounding_path)-1)]
        
        draw_lines(result, line_segments)

        # cv2.imshow('cv_img', result)
        # cv2.waitKey(1)

        # Pathfinder algorithm
        LINE_Y = result.shape[0] - 150.0

        # find candidate line segments that hit that y
        candidates = []
        for i in range(len(bounding_path) - 1):
            if (bounding_path[i][1] <= LINE_Y and bounding_path[i + 1][1] >= LINE_Y) or (bounding_path[i][1] >= LINE_Y and bounding_path[i + 1][1] <= LINE_Y):
                candidates.append([bounding_path[i], bounding_path[i + 1]])
                cv2.line(result, (int(bounding_path[i][0]), int(bounding_path[i][1])), (int(bounding_path[i+1][0]), int(bounding_path[i+1][1])), [0, 0, 255], 5)

        slopes = [get_slope(p[0][0], p[0][1], p[1][0], p[1][1]) for p in candidates]

        candidates = [candidates[i] for i in range(len(candidates)) if (not math.isnan(slopes[i])) and slopes[i] != 0 and (not math.isinf(slopes[i]))]
        slopes = [slopes[i] for i in range(len(slopes)) if (not math.isnan(slopes[i])) and slopes[i] != 0 and (not math.isinf(slopes[i]))]
        
        x = list()
        for i in range(len(candidates)):
            x.append(int((LINE_Y - candidates[i][0][1] + slopes[i] * candidates[i][0][0]) / slopes[i]))
        # for a in x:
        #     cv2.line(result, (a, 0), (a, 480), [0, 0, 255], 2)

        #original values
        #left_cap = -100
        #right_cap = result.shape[1] + 100

        left_cap = -1000
        right_cap = result.shape[1] + 1000



        for a in x:
            if a < result.shape[1] / 2:
                if a > left_cap:
                    left_cap = a
            else:
                if a < right_cap:
                    right_cap = a

        #print("left" + str(left_cap))
        #print("right" + str(right_cap))

        # cv2.line(result, (left_cap, 0), (left_cap, 480), [0, 0, 255], 2)
        # cv2.line(result, (right_cap, 0), (right_cap, 480), [0, 0, 255], 2)

        midpoint = (left_cap + right_cap) / 2
        # print (midpoint, int(LINE_Y))
        tri_base = midpoint - 320

        angle = 0
        speed = 15

        #how to make it slowdown.. 

        if len(x) == 0:
            angle = prev_angle
            speed = 17
            print("no lines")
        elif len(x) == 1:
            single_lane = -1 * np.polyfit(kmeans.cluster_centers_.astype(int)[:,0], kmeans.cluster_centers_.astype(int)[:,1], 1)
            angle = np.degrees(np.arctan(single_lane[0]))
            if angle > 0:
                angle = 90 - angle
            elif angle < 0:
                angle = -90 - angle
            speed = 17
            print("one line - angle: " + str(angle))
        elif tri_base != 0:
            angle = np.degrees(np.arctan((480 - LINE_Y) / tri_base))
            if angle > 0:
                angle = 90 - angle
            elif angle < 0:
                angle = -90 - angle
            print("two lines?")
        
        if len(x) != 0:
            prev_angle = angle

        cv2.circle(result, (midpoint, int(LINE_Y)), 10, (0, 0, 255), -1)

        #print(midpoint, int(LINE_Y))

        #print(angle)
        publish_drive(speed, angle)
        #publish_drive(0,0)

        #cv2.imshow('cv_img', result)
        #cv2.waitKey(20)


        ros_image = bridge.cv2_to_imgmsg(result, 'bgr8')
        camera1_pub.publish(ros_image)

        frame_count = 0
        points_agg = list()
    
    return

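    # NOTE: everything below this point is unreachable (the function
    # returns above); it looks like leftover experimentation with slope
    # histograms.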
    slopes = list()
    for line in lines:
        for x1, y1, x2, y2 in line:
            slope = get_slope(x1, y1, x2, y2)
            if not math.isinf(slope) and not math.isnan(slope):
                slopes.append(abs(get_slope(x1, y1, x2, y2)))

    # plt.hist(slopes)
    # plt.show()

    result = weighted_img(line_image, cv_img, alpha=0.8, beta=1., l=0.)

    # cv2.arrowedLine(result, (0, 0), (100, 100), (0,0,255), 5)

    ros_image = bridge.cv2_to_imgmsg(result, 'bgr8')
    camera1_pub.publish(ros_image)
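
# A minimal stand-in for the `get_slope` helper used above, consistent with
# the isinf/isnan filtering in on_frame():
def get_slope(x1, y1, x2, y2):
    dx, dy = float(x2 - x1), float(y2 - y1)
    if dx == 0.0:
        # Degenerate or vertical segments yield nan/inf so they get filtered.
        return float('nan') if dy == 0.0 else float('inf')
    return dy / dx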
Example #55
0
def main_compute(code_only=False):
    def compile_callback(code):
        return bytearray()

    tvm.register_func('tvm_callback_cuda_compile',
                      compile_callback,
                      override=True)

    default_tune_op = importlib.import_module('lang.generic')

    import logging
    import warnings
    from tvm import autotvm

    warnings.simplefilter("ignore")
    logging.getLogger('autotvm').setLevel(logging.ERROR)
    logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
    task = autotvm.task.create("template_op", args=(), target=tvm_target)

    AntaresGlobal.default_tune_op = default_tune_op
    AntaresGlobal.default_task = task

    if verbose:
        print('  >> Backend = %s, Python PID = %s, Task = %s;' %
              (backend, os.getpid(), default_tune_op.__name__))

    num_trials = int(os.environ['STEP']) if 'STEP' in os.environ else 0

    config = os.environ.get('CONFIG', '').strip()
    if config != '':
        best_config = config
    elif num_trials > 0:
        dev_num = backend_config.get_execution_parallism()
        if dev_num <= 0:
            raise Exception("No valid device found for backend: %s." % backend)
        batch_size = os.environ.get('BATCH', '')
        batch_size = 16 if not batch_size else int(batch_size)

        from concurrent.futures import ThreadPoolExecutor
        worker_size = batch_size if batch_size < dev_num else dev_num
        thread_pool = ThreadPoolExecutor(max_workers=worker_size)

        tuner_type = os.environ.get('TUNER')
        if not tuner_type:
            explicit_ops = AntaresGlobal.attrs.explicit_ops
            tuner_type = 'OpEvo'
        print('  >> MAKE_PARA = %d/%d, EXEC_PARA = %d, TUNER = %s\n' %
              (worker_size, batch_size, dev_num, tuner_type))

        auto_commit = os.environ.get('COMMIT', '')
        if auto_commit:
            saved_code = codehub_db(os.environ['COMPUTE_V1'])
            if saved_code is not None and auto_commit != 'force':
                raise Exception(
                    "Saved code already exists in codehub. Please try COMMIT=force to override it."
                )
            os.environ.pop('COMMIT')

        try:
            if getattr(AntaresGlobal, 'mode', None) == 'antares':
                task.search_space_v2 = backend_config.search_space(
                    AntaresGlobal.compute_graph)
            else:
                task.search_space_v2 = AntaresGlobal.attrs.auto_config.get_config_space(
                )
            task.n_parallel = batch_size
            tuner = importlib.import_module('tuner.%s.main' % tuner_type)
            tuner = tuner.MainTuner(task)
        except:
            raise Exception('>> Cannot import Antares Tuner: %s' % tuner_type)

        if hasattr(tuner, 'cleanup'):
            AntaresGlobal.cleanup_funcs.append(tuner.cleanup)

        if tuner is not None:
            AntaresGlobal.current_step = 0
            AntaresGlobal.completed_trials = 0
            AntaresGlobal.num_trials = num_trials

            eval_client.init(backend_root=backend_root)

            def measure_batch(inputs):
                results, futures = [], []
                target_sources, config_strs = [], []
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    config_str = inputs[i].config if type(
                        inputs[i].config).__name__ == 'str' else 'null'
                    config_strs.append(config_str)
                    try:
                        target_source = get_target_source(
                            config_strs[i], dir_sid)
                    except:
                        # traceback.print_exc()
                        target_source = None
                    target_sources.append(target_source)

                expected_timecost = tuner.task.best.timecost if not math.isinf(
                    tuner.task.best.timecost) else min(
                        30, float(os.environ.get('EXPECTED_TIMEOUT', 'inf')))
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    futures.append(
                        thread_pool.submit(run_config_entity,
                                           target_sources[i], config_strs[i],
                                           dir_sid, expected_timecost,
                                           i % dev_num))

                best_slot = -1
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    t = futures[i].result()
                    if t < tuner.task.best.timecost:
                        best_slot = dir_sid
                        tuner.task.best.timecost = t
                        tuner.task.best.config = inputs[i].config
                        tuner.task.best.occur = best_slot
                    results.append(
                        autotvm.measure.MeasureResult(costs=(t, ),
                                                      error_no=0,
                                                      all_cost=i,
                                                      timestamp=time.time()))
                AntaresGlobal.current_step += len(results)

                stage_logs = 'STEP[%d / %d] Current Best Config = %s, Perf = %g sec / op (%g Gflops), MemRatio = %g %%, Occur Step = %d;' % (
                    AntaresGlobal.current_step, num_trials,
                    tuner.task.best.config, tuner.task.best.timecost,
                    compute_gflops(tuner.task.flop, tuner.task.best.timecost),
                    compute_mem_ratio(
                        tuner.task.best.timecost), tuner.task.best.occur)

                print('\n\033[93m%s\033[0m' %
                      ('=' * min(120, len(stage_logs))))
                print(stage_logs)
                print('\033[93m%s\033[0m\n' %
                      ('=' * min(120, len(stage_logs))))

                if auto_commit and best_slot >= 0:
                    with open(local_get_dir_file('my_kernel.cc', best_slot),
                              'r') as fp:
                        device_source = fp.read()
                    with open(local_get_dir_file('result.txt', best_slot),
                              'r') as fp:
                        t = float(fp.read().split()[0])
                    kernel_path = codehub_db(
                        os.environ['COMPUTE_V1'],
                        source_code=device_source + code_suffix(
                            tpr=t, step_prod=best_slot, step_plan=num_trials))
                    print('  >> Update current code to codehub: %s' %
                          kernel_path)
                return results

            tuner.task.best = Mock()
            tuner.task.best.timecost = float('inf')
            tuner.task.best.config = None
            tuner.task.best.occur = -1

            tuner.measure_batch = measure_batch
            tuner.measure_batch.n_parallel = batch_size
            callbacks = []

            history_log_for_transfer_learning = os.environ.get('RECORD', '')

            if history_log_for_transfer_learning:
                callbacks.append(
                    autotvm.callback.log_to_file(
                        history_log_for_transfer_learning))
                # Enable Transfer Learning for Incremental Task
                if os.path.exists(history_log_for_transfer_learning):
                    print(
                        '  >>  Loading incremental history from log file: %s ..'
                        % history_log_for_transfer_learning)
                    tuner.load_history(
                        autotvm.record.load_from_file(
                            history_log_for_transfer_learning))

            tuner.tune(n_trial=num_trials,
                       callbacks=callbacks,
                       measure_option=None)

            if math.isinf(tuner.task.best.timecost):
                print(
                    f'[Error] No valid config found in the whole tuning. (Try other tuner types other than `TUNER={tuner_type}`?)'
                )
                cleanup_on_exit(0, None)

            best_config = tuner.task.best.config

            if auto_commit:
                device_source = codehub_db(os.environ['COMPUTE_V1'])
                codehub_db(os.environ['COMPUTE_V1'],
                           source_code=device_source +
                           '\n// Antares Tuning Completed in %d steps.' %
                           AntaresGlobal.current_step)

            print(
                "\n[Best Config] CONFIG='%s'  ==>  Performance is up to %f Gflops, occurred at step %d / %d; time per run = %g sec."
                %
                (best_config,
                 compute_gflops(tuner.task.flop, tuner.task.best.timecost),
                 tuner.task.best.occur, num_trials, tuner.task.best.timecost))

            cleanup_on_exit(-1, None)
        else:
            raise Exception('Unrecognized tuner type: `%s`' % tuner_type)
        exit(0)
    else:
        saved_code = codehub_db(os.environ['COMPUTE_V1'])
        if saved_code is not None:
            print("  >> Using Saved Code from Codehub:")
            print(
                "// ---------------------------------------------------------------------------"
            )
            print(saved_code)
            print(
                "// ---------------------------------------------------------------------------"
            )
            exit(0)
        best_config = ''

    assert isinstance(best_config, str)

    best_config = best_config if best_config else 'null'
    device_source, kernel_path = get_target_source(best_config)

    if code_only:
        return device_source

    if verbose:
        print()
        print(
            "// ---------------------------------------------------------------------------"
        )
        print(device_source)
        print(
            "// ---------------------------------------------------------------------------"
        )

    eval_client.init(backend_root=backend_root)
    dev_id = int(os.environ.get('DEV_ID', '0'))
    result = evaluate_perf(kernel_path, dev_id, device_source)
    exit(0 if result is not None and len(result) > 1 else 1)


def margin_intersect_offset(gradient_left, gradient_right, base_width, margin):
    r"""
             mL       <- gradient left
        mR  /         <- gradient right
         \ /\/        <- margin
          x-/----     <- regular intersect
         / \    |     <- intersect offset
        / x-\----     <- margin intersect
       / / \ \
      / /   \ \
     /_/_____\_\_____
    o_/_______\_o___| <- margin
    |<--------->|     <- base width

    OR

                         mL  <- gradient left
                    mR  /    <- gradient right
                    | /
                    x----    <- regular intersect
                  / |   |    <- intersect offset
                / x-|----    <- margin intersect
              / / | |
            / /   | |
          /_/_____|_|___
        o_/_______|_o___|    <- margin
        |<--------->|        <- base width
    """

    logging.debug(f"Gradient Left / Right: "
                  f"{gradient_left: 7.3f} / {gradient_right: 7.3f}")

    if isinf(gradient_left) and isinf(gradient_right):
        return None

    if np.isclose(gradient_left, gradient_right, atol=ATOL):
        return None

    regular_axis_intercept_left = 0
    regular_axis_intercept_right = base_width if isinf(gradient_right) else \
        - base_width * gradient_right

    logging.debug(
        f"Regular Axis Intercept Left / Right: "
        f"{regular_axis_intercept_left: 7.3f} / {regular_axis_intercept_right:7.3f}"
    )

    regular_intersect_x, regular_intersect_y = intersect_lines(
        gradient_left, regular_axis_intercept_left, gradient_right,
        regular_axis_intercept_right)

    if regular_intersect_x is None or regular_intersect_y is None:
        return None

    logging.debug(f"Regular Intersect X / Y: "
                  f"{regular_intersect_x: 7.3f} / {regular_intersect_y:7.3f}")

    margin_axis_intercept_left = margin if isinf(gradient_left) else \
        - abs(margin / gradient_cos(gradient_left))
    margin_axis_intercept_right = base_width - margin if isinf(gradient_right) else \
        regular_axis_intercept_right - abs(margin / gradient_cos(gradient_right))

    logging.debug(
        f"Margin Axis Intercept Left / Right: "
        f"{margin_axis_intercept_left: 7.3f} / {margin_axis_intercept_right:7.3f}"
    )

    margin_intersect_x, margin_intersect_y = intersect_lines(
        gradient_left, margin_axis_intercept_left, gradient_right,
        margin_axis_intercept_right)

    logging.debug(f"Margin Intersect X / Y: "
                  f"{margin_intersect_x: 7.3f} / {margin_intersect_y:7.3f}")

    return regular_intersect_y - margin_intersect_y
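
# Usage sketch (not part of the original source): `intersect_lines` and
# `gradient_cos` are helpers the function above assumes; the stand-ins below
# are plausible implementations written purely for illustration.
import math


def intersect_lines(m1, c1, m2, c2):
    # Intersection of y = m1*x + c1 and y = m2*x + c2 (None, None if parallel).
    if m1 == m2:
        return None, None
    x = (c2 - c1) / (m1 - m2)
    return x, m1 * x + c1


def gradient_cos(gradient):
    # Cosine of the angle a line of slope `gradient` makes with the x axis.
    return 1.0 / math.sqrt(1.0 + gradient * gradient)


# For a symmetric roof with slopes +1 / -1, both margin lines shift down by
# margin / cos(45 deg), so the offset is sqrt(2) * margin:
#   margin_intersect_offset(1.0, -1.0, base_width=10.0, margin=1.0)  # ~1.414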
Example #57
0
def stringify_number(number):
    # Keep the float representation for small non-zero values; print large
    # values (and exactly 0.0) as integers.
    bound_for_float = 7.0
    if -bound_for_float < number < bound_for_float and number != 0.0:
        return str(number)
    return str(int(number))
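
# Worked examples (illustrative): stringify_number(3.5) -> '3.5',
# stringify_number(12.0) -> '12', stringify_number(0.0) -> '0'
# (exactly-zero values take the integer branch).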


pl.xlabel(x_axis)
pl.ylabel(y_axis)
if y_scale_log:
    pl.yscale('log')
pl.legend(loc='best')
constraint_text_horizontal_position = 0.94 - len(plot_data) * 0.07
for key in constraints:
    constraint_text = get_symbol[key]
    if math.isinf(constraints[key][1]):
        if math.isinf(constraints[key][0]):
            continue
        else:
            constraint_text += r"$\geq$" + stringify_number(
                constraints[key][0]) + get_unit[key]
    else:
        if not math.isinf(constraints[key][0]):
            constraint_text = stringify_number(
                constraints[key][0]) + get_unit[key] + r"$\leq$" + constraint_text
        constraint_text += r"$\leq$" + stringify_number(
            constraints[key][1]) + get_unit[key]
    pl.text(0.97,
            constraint_text_horizontal_position,
            constraint_text,
Example #58
0
            def measure_batch(inputs):
                results, futures = [], []
                target_sources, config_strs = [], []
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    config_str = inputs[i].config if isinstance(
                        inputs[i].config, str) else 'null'
                    config_strs.append(config_str)
                    try:
                        target_source = get_target_source(
                            config_strs[i], dir_sid)
                    except Exception:
                        # traceback.print_exc()
                        target_source = None
                    target_sources.append(target_source)

                # Budget each run against the best time seen so far; before
                # any config has succeeded, fall back to EXPECTED_TIMEOUT
                # capped at 30 seconds.
                expected_timecost = tuner.task.best.timecost if not math.isinf(
                    tuner.task.best.timecost) else min(
                        30, float(os.environ.get('EXPECTED_TIMEOUT', 'inf')))
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    futures.append(
                        thread_pool.submit(run_config_entity,
                                           target_sources[i], config_strs[i],
                                           dir_sid, expected_timecost,
                                           i % dev_num))

                # Harvest measurements; any run that beats the current best
                # time becomes the new best slot.
                best_slot = -1
                for i in range(len(inputs)):
                    dir_sid = AntaresGlobal.current_step + i + 1
                    t = futures[i].result()
                    if t < tuner.task.best.timecost:
                        best_slot = dir_sid
                        tuner.task.best.timecost = t
                        tuner.task.best.config = inputs[i].config
                        tuner.task.best.occur = best_slot
                    results.append(
                        autotvm.measure.MeasureResult(costs=(t, ),
                                                      error_no=0,
                                                      all_cost=i,
                                                      timestamp=time.time()))
                AntaresGlobal.current_step += len(results)

                stage_logs = 'STEP[%d / %d] Current Best Config = %s, Perf = %g sec / op (%g Gflops), MemRatio = %g %%, Occur Step = %d;' % (
                    AntaresGlobal.current_step, num_trials,
                    tuner.task.best.config, tuner.task.best.timecost,
                    compute_gflops(tuner.task.flop, tuner.task.best.timecost),
                    compute_mem_ratio(
                        tuner.task.best.timecost), tuner.task.best.occur)

                print('\n\033[93m%s\033[0m' %
                      ('=' * min(120, len(stage_logs))))
                print(stage_logs)
                print('\033[93m%s\033[0m\n' %
                      ('=' * min(120, len(stage_logs))))

                if auto_commit and best_slot >= 0:
                    with open(local_get_dir_file('my_kernel.cc', best_slot),
                              'r') as fp:
                        device_source = fp.read()
                    with open(local_get_dir_file('result.txt', best_slot),
                              'r') as fp:
                        t = float(fp.read().split()[0])
                    kernel_path = codehub_db(
                        os.environ['COMPUTE_V1'],
                        source_code=device_source + code_suffix(
                            tpr=t, step_prod=best_slot, step_plan=num_trials))
                    print('  >> Update current code to codehub: %s' %
                          kernel_path)
                return results
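
# Wiring sketch (mirrors the longer example above, not a new API): the tuner
# consumes this batch measurer directly, e.g.
#   tuner.measure_batch = measure_batch
#   tuner.measure_batch.n_parallel = batch_size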
Example #59
0
	def assertNearlyEqual(self, x, y, sigfigs=3):
		magnitude = (abs(x) + abs(y)) / 2
		if math.isinf(magnitude): magnitude = 1
		self.assertAlmostEqual(x, y, delta=magnitude*(10**(-sigfigs)))
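
# Illustrative usage (the surrounding unittest.TestCase is assumed): the
# allowed delta scales with the operands' magnitude, so both of these pass
# at the default 3 significant figures:
#   self.assertNearlyEqual(math.pi, 3.1416)
#   self.assertNearlyEqual(1e9, 1e9 + 1e5)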
Example #60
0
def default(obj, json_options=DEFAULT_JSON_OPTIONS):
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc(), json_options=json_options)
    if isinstance(obj, datetime.datetime):
        if (json_options.datetime_representation ==
                DatetimeRepresentation.ISO8601):
            if not obj.tzinfo:
                obj = obj.replace(tzinfo=utc)
            if obj >= EPOCH_AWARE:
                off = obj.tzinfo.utcoffset(obj)
                if (off.days, off.seconds, off.microseconds) == (0, 0, 0):
                    tz_string = 'Z'
                else:
                    tz_string = obj.strftime('%z')
                millis = int(obj.microsecond / 1000)
                fracsecs = ".%03d" % (millis, ) if millis else ""
                return {
                    "$date":
                    "%s%s%s" %
                    (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)
                }

        millis = bson._datetime_to_millis(obj)
        if (json_options.datetime_representation ==
                DatetimeRepresentation.LEGACY):
            return {"$date": millis}
        return {"$date": {"$numberLong": str(millis)}}
    if json_options.strict_number_long and isinstance(obj, Int64):
        return {"$numberLong": str(obj)}
    if isinstance(obj, (RE_TYPE, Regex)):
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, text_type):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        if json_options.json_mode == JSONMode.LEGACY:
            return SON([("$regex", pattern), ("$options", flags)])
        return {
            '$regularExpression': SON([("pattern", pattern),
                                       ("options", flags)])
        }
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
    if isinstance(obj, Code):
        if obj.scope is None:
            return {'$code': str(obj)}
        return SON([('$code', str(obj)),
                    ('$scope', _json_convert(obj.scope, json_options))])
    if isinstance(obj, Binary):
        return _encode_binary(obj, obj.subtype, json_options)
    if PY3 and isinstance(obj, bytes):
        return _encode_binary(obj, 0, json_options)
    if isinstance(obj, uuid.UUID):
        if json_options.strict_uuid:
            data = obj.bytes
            subtype = OLD_UUID_SUBTYPE
            if json_options.uuid_representation == CSHARP_LEGACY:
                data = obj.bytes_le
            elif json_options.uuid_representation == JAVA_LEGACY:
                data = data[7::-1] + data[:7:-1]
            elif json_options.uuid_representation == UUID_SUBTYPE:
                subtype = UUID_SUBTYPE
            return _encode_binary(data, subtype, json_options)
        else:
            return {"$uuid": obj.hex}
    if isinstance(obj, Decimal128):
        return {"$numberDecimal": str(obj)}
    if isinstance(obj, bool):
        return obj
    if (json_options.json_mode == JSONMode.CANONICAL
            and isinstance(obj, integer_types)):
        if -2**31 <= obj < 2**31:
            return {'$numberInt': text_type(obj)}
        return {'$numberLong': text_type(obj)}
    if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float):
        if math.isnan(obj):
            return {'$numberDouble': 'NaN'}
        elif math.isinf(obj):
            representation = 'Infinity' if obj > 0 else '-Infinity'
            return {'$numberDouble': representation}
        elif json_options.json_mode == JSONMode.CANONICAL:
            # repr() will return the shortest string guaranteed to produce the
            # original value, when float() is called on it. str produces a
            # shorter string in Python 2.
            return {'$numberDouble': text_type(repr(obj))}
    raise TypeError("%r is not JSON serializable" % obj)
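
# Illustrative usage (via PyMongo's real bson.json_util API): the isnan/isinf
# branches above map non-finite floats to "$numberDouble" strings instead of
# emitting invalid bare Infinity/NaN tokens.
#   from bson.json_util import dumps, CANONICAL_JSON_OPTIONS
#   dumps({'x': float('inf')}, json_options=CANONICAL_JSON_OPTIONS)
#   # -> '{"x": {"$numberDouble": "Infinity"}}'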