Example #1
def exp(x):
    z = _make_complex(x)

    exp_special = [
        [0+0j, None, complex(0, -0.0), 0+0j, None, 0+0j, 0+0j],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nan+nanj, None, 1-0j, 1+0j, None, nan+nanj, nan+nanj],
        [nan+nanj, None, 1-0j, 1+0j, None, nan+nanj, nan+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, complex(float("nan"), -0.0), nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    if not isfinite(z):
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                ret = complex(math.copysign(inf, math.cos(z.imag)),
                              math.copysign(inf, math.sin(z.imag)))
            else:
                ret = complex(math.copysign(0, math.cos(z.imag)),
                              math.copysign(0, math.sin(z.imag)))
        else:
            ret = exp_special[_special_type(z.real)][_special_type(z.imag)]
        if math.isinf(z.imag) and (math.isfinite(z.real) or
                                   (math.isinf(z.real) and z.real > 0)):
            raise ValueError
        return ret

    if z.real > _LOG_LARGE_DOUBLE:
        ret = e * rect(math.exp(z.real - 1), z.imag)
    else:
        ret = rect(math.exp(z.real), z.imag)
    if math.isinf(ret.real) or math.isinf(ret.imag):
        raise OverflowError
    return ret
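
A quick sanity check, added as a sketch: it exercises the standard library's cmath.exp, whose edge-case behavior the pure-Python port above mirrors (the port itself also needs the module's _make_complex, _special_type and _LOG_LARGE_DOUBLE helpers to run).

import cmath
import math

print(cmath.exp(0j))                     # (1+0j)
print(cmath.exp(complex(-math.inf, 1)))  # ~0j: e**-inf is 0, with signs from cos/sin
try:
    cmath.exp(complex(1000, 0))          # exp(1000) overflows a double
except OverflowError as exc:
    print("OverflowError:", exc)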
Example #2
 def test_isfinite(self):
     real_vals = [float('-inf'), -2.3, -0.0,
                  0.0, 2.3, float('inf'), float('nan')]
     for x in real_vals:
         for y in real_vals:
             z = complex(x, y)
             self.assertEqual(cmath.isfinite(z),
                               math.isfinite(x) and math.isfinite(y))
Example #3
    def test_isfinite(self):
        import cmath
        import math

        real_vals = [
            float('-inf'), -2.3, -0.0, 0.0, 2.3, float('inf'), float('nan')
        ]
        for x in real_vals:
            for y in real_vals:
                z = complex(x, y)
                assert cmath.isfinite(z) == (math.isfinite(x) and math.isfinite(y))
Example #4
def test_my_sum_floats(x, y):
    print(x, y)

    # math.isfinite was introduced in Python 3.2
    if sys.version_info[:2] >= (3, 2):
        assume(math.isfinite(x))
        assume(math.isfinite(y))
    else:
        assume(not math.isnan(x) and not math.isinf(x))
        assume(not math.isnan(y) and not math.isinf(y))
    assert x + y == mysum(x, y)
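
The test above relies on Hypothesis to supply x and y; a self-contained wiring sketch (mysum and the strategies are assumptions, not from the original project) might look like:

import math
from hypothesis import assume, given, strategies as st

def mysum(a, b):  # hypothetical stand-in for the function under test
    return a + b

@given(st.floats(), st.floats())
def test_my_sum_floats(x, y):
    assume(math.isfinite(x) and math.isfinite(y))
    assert x + y == mysum(x, y)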
Example #5
def read_error(data, attribute_name):
    if hasattr(data, attribute_name):
        attr = getattr(data, attribute_name)
        if not isnan(attr) and isfinite(attr):
            return attr

    return SENSOR_ERROR_MAX
Example #6
    def __init__(self, data):

        # for debugging purposes
        self.rawfix = data.fix

        self.utc = data.utc
        self.time = data.fix.time

        self.num_satellites = len(data.satellites)

        self.latitude = data.fix.latitude
        self.longitude = data.fix.longitude

        # Longitude error estimate in meters, 95% confidence.
        # Present if mode is 2 or 3 and DOPs can be calculated from the satellite view.
        self.longitude_error = read_error(data.fix, 'epx')

        # Latitude error estimate in meters, 95% confidence.
        # Present if mode is 2 or 3 and DOPs can be calculated from the satellite view.
        self.latitude_error = read_error(data.fix, 'epy')

        # altitude in meters
        self.altitude = data.fix.altitude
        if isnan(self.altitude) or not isfinite(self.altitude):
            # just some value so that the calculation can continue
            # this will have next to no influence because the expected error is so high in this situation
            self.altitude = 50

        # accuracy depends on number of satellites
        # Estimated vertical error in meters, 95% confidence.
        # Present if mode is 3 and DOPs can be calculated from the satellite view.
        self.altitude_error = read_error(data.fix, 'epv')

        # Speed over ground, meters per second.
        self.speed = data.fix.speed

        # Speed error estimate in meters/sec, 95% confidence.
        self.speed_error = read_error(data.fix, 'eps')

        # Climb (positive) or sink (negative) rate, meters per second.
        self.climb = data.fix.climb

        # Climb/sink error estimate in meters/sec, 95% confidence.
        self.climb_error = read_error(data.fix, 'epc')

        # Course over ground, degrees from true north.
        self.track = data.fix.track

        # Direction error estimate in degrees, 95% confidence.
        self.track_error = read_error(data.fix, 'epd')

        # NMEA mode: %d, 0=no mode value yet seen, 1=no fix, 2=2D, 3=3D.
        self.rawmode = data.fix.mode

        # flags for different modes
        self.has_no_fix = data.fix.mode <= 1
        self.has_2d_fix = data.fix.mode >= 2
        self.has_3d_fix = data.fix.mode == 3
Example #7
File: irccloud.py  Project: kcr/snipe
 def backfill(self, mfilter, target=None):
     self.log.debug('backfill([filter], %s)', util.timestr(target))
     live = [
         b for b in self.buffers.values()
         if (not b.get('deferred', False)
             and (('min_eid' not in b or 'have_eid' not in b)
             or b['have_eid'] > b['min_eid']))]
     if target is None:
         target = min(b.get('have_eid', 0) for b in live) - 1
     elif math.isfinite(target):
         target = int(target * 1000000)
     live = [b for b in live if b.get('have_eid', 0) > target]
     t0 = time.time()
     nb = []
     for task, when in self.backfillers:
         if (t0 - when) > 30.0:
             task.cancel()
         if not task.done():
             nb.append((task, when))
     self.backfillers = nb
     self.reap_tasks()
     self.log.debug('%d backfillers active', len(self.backfillers))
     if not self.backfillers:
         for b in live:
             t = asyncio.Task(self.backfill_buffer(b, target))
             self.backfillers.append((t, t0))
             self.tasks.append(t)
Example #8
File: irccloud.py  Project: kcr/snipe
    def backfill_buffer(self, buf, target):
        self.log.debug("backfill_buffer([%s %s], %s)", buf["bid"], buf.get("have_eid"), target)
        try:
            target = max(target, buf["have_eid"] - self.backfill_length * 1000000)

            oob_data = yield from self._request(
                "GET",
                urllib.parse.urljoin(IRCCLOUD_API, "/chat/backlog"),
                params={"cid", buf["cid"], "bid", buf["bid"], "num", 256, "beforeid", buf["have_eid"] - 1},
                headers={"Cookie": "session=%s" % self.session},
                compress="gzip",
            )
            included = []

            if isinstance(oob_data, dict):
                raise Exception(str(oob_data))

            oldest = buf["have_eid"]
            self.log.debug("t = %f", oldest / 1000000)

            for m in oob_data:
                if m["bid"] == -1:
                    self.log.error("? %s", repr(m))
                    continue
                yield from self.process_message(included, m)

            included.sort()
            self.log.debug("processed %d messages", len(included))

            clip = None
            included.reverse()
            for i, m in enumerate(included):
                if m.data["eid"] >= oldest:
                    clip = i
                    self.log.debug("BETRAYAL %d %f %s", i, m.data["eid"] / 1000000, repr(m.data))
            if clip is not None:
                included = included[clip + 1 :]
            included.reverse()

            if included:
                self.log.debug("merging %d messages", len(included))
                l = len(self.messages)
                self.messages = list(messages.merge([self.messages, included]))
                self.log.debug("len(self.messages): %d -> %d", l, len(self.messages))
                self.drop_cache()
                self.redisplay(included[0], included[-1])

        except asyncio.CancelledError:
            return
        except:
            self.log.exception("backfilling %s", buf)
            return

        if (
            math.isfinite(target)
            and target < buf["have_eid"]
            and ("min_eid" not in buf or buf["have_eid"] > buf["min_eid"])
        ):
            yield from asyncio.sleep(0.1)
            yield from self.backfill_buffer(buf, target)
Example #9
def sinh(x):

    _sinh_special = [
        [inf+nanj, None, complex(-float("inf"), -0.0), -inf, None, inf+nanj, inf+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nanj, None, complex(-0.0, -0.0), complex(-0.0, 0.0), None, nanj, nanj],
        [nanj, None, complex(0.0, -0.0), complex(0.0, 0.0), None, nanj, nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, complex(float("nan"), -0.0), nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        if math.isinf(z.imag) and not math.isnan(z.real):
            raise ValueError
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                return complex(math.copysign(inf, math.cos(z.imag)),
                               math.copysign(inf, math.sin(z.imag)))
            return complex(-math.copysign(inf, math.cos(z.imag)),
                           math.copysign(inf, math.sin(z.imag)))
        return _sinh_special[_special_type(z.real)][_special_type(z.imag)]

    if abs(z.real) > _LOG_LARGE_DOUBLE:
        x_minus_one = z.real - math.copysign(1, z.real)
        return complex(math.cos(z.imag) * math.sinh(x_minus_one) * e,
                       math.sin(z.imag) * math.cosh(x_minus_one) * e)
    return complex(math.cos(z.imag) * math.sinh(z.real),
                   math.sin(z.imag) * math.cosh(z.real))
Example #10
    def __init__(self, n_cluster=2, max_iter=1000, n_init=10,
                 distance_func=lambda x, y: np.linalg.norm(x-y)):
        # set number of clusters
        if isinstance(n_cluster, int) and n_cluster > 0:
            self.n_cluster = n_cluster
        else:
            raise ValueError("number of clusters should be positive integer")

        # set number of iterations
        if (isinstance(max_iter, int) and max_iter > 0) or isfinite(max_iter):
            self.max_iter = max_iter
        else:
            raise ValueError("max iteration should be positive integer")

        # set number of init
        if isinstance(n_init, int) and n_init > 0:
            self.n_init = n_init
        else:
            raise ValueError("n_init should be positive integers")

        # set distance function
        if callable(distance_func):
            self.distance_func = distance_func
        else:
            raise ValueError("distance function should be callable")
Example #11
def _exact_ratio(x):
    """Return Real number x to exact (numerator, denominator) pair.

    >>> _exact_ratio(0.25)
    (1, 4)

    x is expected to be an int, Fraction, Decimal or float.
    """
    try:
        # Optimise the common case of floats. We expect that the most often
        # used numeric type will be builtin floats, so try to make this as
        # fast as possible.
        if type(x) is float:
            return x.as_integer_ratio()
        try:
            # x may be an int, Fraction, or Integral ABC.
            return (x.numerator, x.denominator)
        except AttributeError:
            try:
                # x may be a float subclass.
                return x.as_integer_ratio()
            except AttributeError:
                try:
                    # x may be a Decimal.
                    return _decimal_to_ratio(x)
                except AttributeError:
                    # Just give up?
                    pass
    except (OverflowError, ValueError):
        # float NAN or INF.
        assert not math.isfinite(x)
        return (x, None)
    msg = "can't convert type '{}' to numerator/denominator"
    raise TypeError(msg.format(type(x).__name__))
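
Assuming the _exact_ratio above is in scope (with math imported as in its module), its three paths can be exercised like this; the (x, None) pair flags non-finite values for the caller:

from fractions import Fraction

print(_exact_ratio(0.25))            # (1, 4)      -- float fast path
print(_exact_ratio(Fraction(2, 3)))  # (2, 3)      -- numerator/denominator path
print(_exact_ratio(float('inf')))    # (inf, None) -- as_integer_ratio raises OverflowError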
Example #12
    def ztest_maximum_win_deviation(self):
        """Return the zTest maximum win deviation if config setting exists and
        is valid, otherwise return the default.

        The zTest maximum win deviation specifies the maximum allowed
        deviation from the expected win frequency for a particular validator
        before the zTest will fail and the claimed block will be rejected.
        The deviation corresponds to a confidence interval (i.e., how
        confident we are that we have truly detected a validator winning at
        a frequency we consider too frequent):

        3.075 ==> 99.9%
        2.575 ==> 99.5%
        2.321 ==> 99%
        1.645 ==> 95%
        """
        if self._ztest_maximum_win_deviation is None:
            self._ztest_maximum_win_deviation = \
                self._get_config_setting(
                    name='sawtooth.poet.ztest_maximum_win_deviation',
                    value_type=float,
                    default_value=PoetSettingsView.
                    _ZTEST_MAXIMUM_WIN_DEVIATION_,
                    validate_function=lambda value:
                        math.isfinite(value) and value > 0)

        return self._ztest_maximum_win_deviation
Example #13
File: statistics.py  Project: cerbo/scidb
def _exact_ratio(x):
    """Convert Real number x exactly to (numerator, denominator) pair.

    >>> _exact_ratio(0.25)
    (1, 4)

    x is expected to be an int, Fraction, Decimal or float.
    """
    try:
        try:
            # int, Fraction
            return (x.numerator, x.denominator)
        except AttributeError:
            # float
            try:
                return x.as_integer_ratio()
            except AttributeError:
                # Decimal
                try:
                    return _decimal_to_ratio(x)
                except AttributeError:
                    msg = "can't convert type '{}' to numerator/denominator"
                    #Modified by SciDB, Inc., to remove "from None" that did not work on Python 2.
                    raise TypeError(msg.format(type(x).__name__))
    except (OverflowError, ValueError):
        # INF or NAN
        if __debug__:
            # Decimal signalling NANs cannot be converted to float :-(
            if isinstance(x, Decimal):
                assert not x.is_finite()
            else:
                assert not math.isfinite(x)
        return (x, None)
Example #14
def cubic_reparameterize(
        cubic: Cubic,
        points_offset: Sequence[Vector],
        points_offset_len: int,
        u: Vector,
) -> List[float]:
    """
    Recalculate the values of u[] based on the Newton Raphson method
    """

    # double *u_prime = new double[points_offset_len]
    u_prime = [None] * points_offset_len

    for i in range(points_offset_len):
        u_prime[i] = cubic_find_root(cubic, points_offset[i], u[i])
        if not math.isfinite(u_prime[i]):
            del u_prime  # free
            return None

    u_prime.sort()

    if u_prime[0] < 0.0 or u_prime[points_offset_len - 1] > 1.0:
        del u_prime
        return None

    assert(u_prime[0] >= 0.0)
    assert(u_prime[points_offset_len - 1] <= 1.0)

    return u_prime
Example #15
 def pop_n_todo(self, n): # Should the two pop* methods be write-only?
      '''A lazy iterator yielding the n highest priority sequences'''
      while n > 0:
           seq = self._heap.pop()[2] # HEAPENTRY
           if isfinite(seq): # heap entries are sabotaged by setting seq=_Inf
                n -= 1
                yield seq
Example #16
     def write(self):
          '''Finalize self to file. Totally overwrites old data with current data.'''
          if not self._have_lock:
               raise LockError("Can't use SequencesManager.write() without lock!")
               # TODO: should these errors be (programmatically) distinguishable from
               # unable-to-acquire-lock errors?
          # ignore dropped seqs (HEAPENTRY)
          out = [item[2] for item in self._heap if (isfinite(item[2]) and item[2] in self._data)]
          # Find seqs that have been dropped from heap, they're just appended
          # at the end, no heapifying
          missing = set(self._data.keys()).difference(out)
          out = [self._data[seq] for seq in out]
          out.extend(self._data[seq] for seq in missing)
          # TODO: This is effectively cleaning the heap. Should we actually save the cleaned heap?
          # self._heap = _Heap(out) #(copies entire list)

          outdict = {"aaData": out}
          try:
               outdict['resdatetime'] = self.resdatetime
          except Exception:
               pass
          json_string = json.dumps(outdict, ensure_ascii=False, sort_keys=True).replace('],', '],\n') + '\n'
          # sort_keys to get reproducible output for testing, ensure_ascii=False to allow fancy names
          with open(self._jsonfile, 'w') as f:
               f.write(json_string)
          del json_string # Both outstrings generated here can be multiple megabytes each

          if self._txtfile:
               txt_string = ''.join(str(ali)+'\n' for ali in sorted(out, key=lambda ali: ali.seq) if ali.is_minimally_valid())
               # we want to go easy on newly added seqs with invalid data
               with open(self._txtfile, 'w') as f:
                    f.write(txt_string)
               del txt_string

          del out
Example #17
def cosh(x):
    _cosh_special = [
        [inf+nanj, None, inf, complex(float("inf"), -0.0), None, inf+nanj, inf+nanj],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [nan, None, 1, complex(1, -0.0), None, nan, nan],
        [nan, None, complex(1, -0.0), 1, None, nan, nan],
        [nan+nanj, None, None, None, None, nan+nanj, nan+nanj],
        [inf+nanj, None, complex(float("inf"), -0.0), inf, None, inf+nanj, inf+nanj],
        [nan+nanj, nan+nanj, nan, nan, nan+nanj, nan+nanj, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        if math.isinf(z.imag) and not math.isnan(z.real):
            raise ValueError
        if math.isinf(z.real) and math.isfinite(z.imag) and z.imag != 0:
            if z.real > 0:
                return complex(math.copysign(inf, math.cos(z.imag)),
                               math.copysign(inf, math.sin(z.imag)))
            return complex(math.copysign(inf, math.cos(z.imag)),
                           -math.copysign(inf, math.sin(z.imag)))
        return _cosh_special[_special_type(z.real)][_special_type(z.imag)]

    if abs(z.real) > _LOG_LARGE_DOUBLE:
        x_minus_one = z.real - math.copysign(1, z.real)
        ret = complex(e * math.cos(z.imag) * math.cosh(x_minus_one),
                      e * math.sin(z.imag) * math.sinh(x_minus_one))
    else:
        ret = complex(math.cos(z.imag) * math.cosh(z.real),
                      math.sin(z.imag) * math.sinh(z.real))
    if math.isinf(ret.real) or math.isinf(ret.imag):
        raise OverflowError

    return ret
Example #18
def main():

    p = ArgumentParser()
    p.add_argument('-N', type=int, default=[10001, 100001, 1000001], nargs='+')
    p.add_argument('-Nrun', type=int, default=10)
    p = p.parse_args()

    times = {}
    for N in p.N:
        print('\nN=', N)
        print('----------------')
        t = benchmark_pisum(N, p.Nrun)
        t = {k: v for k, v in t.items() if math.isfinite(v)}
        times[N] = dict(sorted(t.items(), key=lambda x: x[1]))  # Python >= 3.5

        for k, v in t.items():
            print(k, v)

    if figure is not None and len(t) > 0:
        ax = figure().gca()
        for k, v in times.items():
            ax.scatter(v.keys(), v.values(), label=str(k))

        ax.set_title('PiSum, N={}'.format(p.N))
        ax.set_ylabel('run time [sec.]')
        ax.set_yscale('log')
        ax.grid(True)
        # ax.autoscale(True)  # bug?
        # leave nanmin/nanmax for where some iterations fail
        ax.set_ylim((0.1*np.nanmin(list(times[min(p.N)].values())),
                     10*np.nanmax(list(times[max(p.N)].values()))))
        ax.legend(loc='best')
        show()
Example #19
File: An.py  Project: vdhan/RSA
def is_finite(n):
    """Check if finite number"""
    try:
        a = float(n)
        return math.isfinite(a)
    except (TypeError, ValueError):
        return False
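
is_finite() above accepts anything float() accepts; a few illustrative calls:

print(is_finite('2.5'))   # True
print(is_finite('inf'))   # False -- float('inf') is not finite
print(is_finite('spam'))  # False -- float() raises ValueError, caught above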
Example #20
def min_str(s, t):
    if s == '' and t == '':
        return ''

    counter = Counter(t)
    missing = set(counter.keys())
    min_start = 0
    min_end = float('inf')
    cursor_end = 0
    s_len = len(s)

    for cursor_start, char in enumerate(s):
        if missing:
            try:
                cursor_end = advance_window_end(counter, cursor_end, missing, s, s_len)
            except StopIteration:
                break

        if not missing:
            if (min_end - min_start) > (cursor_end - cursor_start):
                min_start = cursor_start
                min_end = cursor_end
            if char in counter:
                counter[char] += 1
                if counter[char] == 1:
                    missing.add(char)

    if math.isfinite(min_end):
        return s[min_start:min_end]
    raise Exception()
Example #21
def autoscale(settings, data):
  subset = []
  for _group, files in settings['files'].items():
    for f in files:
      for a in settings['algos']:
        if math.isfinite(data[f][a]):
          subset.append(data[f][a])
  return min(subset), max(subset)
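
A hypothetical settings/data shape for autoscale() above (the keys are illustrative, not from the original project):

import math

settings = {'files': {'groupA': ['a.txt']}, 'algos': ['algo1', 'algo2']}
data = {'a.txt': {'algo1': 3.0, 'algo2': math.inf}}
print(autoscale(settings, data))  # (3.0, 3.0) -- the non-finite entry is skipped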
Example #22
def time_since_J2000_epoch(JDTT):
    '''(float) -> float
    Calculates the time offset since the J2000 epoch, known as ΔtJ2000 in the Mars24 algorithm. 
    Returns None if given bad input. Provided to students.'''


    if type(JDTT) == float and math.isfinite(JDTT):
        return JDTT - 2451545.0 
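
For reference, the J2000 epoch is JD 2451545.0 TT; a quick check of the helper above (assuming math is imported as in its module):

print(time_since_J2000_epoch(2451546.0))  # 1.0 -- one day after the epoch
print(time_since_J2000_epoch('bad'))      # None -- non-float input falls through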
Example #23
File: config.py  Project: chripo/Radicale
def positive_float(value):
    value = float(value)
    if not math.isfinite(value):
        raise ValueError("value is infinite")
    if math.isnan(value):
        raise ValueError("value is not a number")
    if value < 0:
        raise ValueError("value is negative: %f" % value)
    return value
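
Illustrative calls for positive_float(); note that because math.isfinite() also rejects NaN, the isnan branch can never be reached as written (NaN reports "value is infinite"):

print(positive_float('1.5'))  # 1.5
for bad in ('inf', 'nan', '-2'):
    try:
        positive_float(bad)
    except ValueError as exc:
        print(bad, '->', exc)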
Example #24
def test_encode(diagnostic, encoded):
    if isinstance(diagnostic, float):
        if not math.isfinite(diagnostic) and len(encoded) > 3:
            return
    if (isinstance(diagnostic, list) or isinstance(diagnostic, dict)) and 0xff in encoded:
        return
    if (isinstance(diagnostic, tuple)):
        return
    assert encoded == pycbor.encode(diagnostic)
Example #25
def _isfinite(x):

    try:

        return x.is_finite()  # Likely a Decimal.

    except AttributeError:

        return math.isfinite(x)  # Coerces to float first.
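
The AttributeError fallback above matters because Decimal signaling NaNs cannot be coerced to float; a small check (assuming the _isfinite above with math imported):

from decimal import Decimal

print(_isfinite(Decimal('sNaN')))  # False -- float(Decimal('sNaN')) would raise ValueError
print(_isfinite(Decimal('1.5')))   # True
print(_isfinite(2.5))              # True  -- plain floats take the math.isfinite path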
Example #26
def _sum(data, start=0):
    """_sum(data [, start]) -> value

    Return a high-precision sum of the given numeric data. If optional
    argument ``start`` is given, it is added to the total. If ``data`` is
    empty, ``start`` (defaulting to 0) is returned.


    Examples
    --------

    >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
    11.0

    Some sources of round-off error will be avoided:

    >>> _sum([1e50, 1, -1e50] * 1000)  # Built-in sum returns zero.
    1000.0

    Fractions and Decimals are also supported:

    >>> from fractions import Fraction as F
    >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
    Fraction(63, 20)

    >>> from decimal import Decimal as D
    >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
    >>> _sum(data)
    Decimal('0.6963')

    """
    n, d = _exact_ratio(start)
    T = type(start)
    partials = {d: n}  # map {denominator: sum of numerators}
    # Micro-optimizations.
    coerce_types = _coerce_types
    exact_ratio = _exact_ratio
    partials_get = partials.get
    # Add numerators for each denominator, and track the "current" type.
    for x in data:
        T = _coerce_types(T, type(x))
        n, d = exact_ratio(x)
        partials[d] = partials_get(d, 0) + n
    if None in partials:
        assert issubclass(T, (float, Decimal))
        assert not math.isfinite(partials[None])
        return T(partials[None])
    total = Fraction()
    for d, n in sorted(partials.items()):
        total += Fraction(n, d)
    if issubclass(T, int):
        assert total.denominator == 1
        return T(total.numerator)
    if issubclass(T, Decimal):
        return T(total.numerator)/total.denominator
    return T(total)
Example #27
 def testIsfinite(self):
     self.assertTrue(math.isfinite(0.0))
     self.assertTrue(math.isfinite(-0.0))
     self.assertTrue(math.isfinite(1.0))
     self.assertTrue(math.isfinite(-1.0))
     self.assertFalse(math.isfinite(float("nan")))
     self.assertFalse(math.isfinite(float("inf")))
     self.assertFalse(math.isfinite(float("-inf")))
Example #28
def parseDMS(dmsStr):
    """
    Parse string representing degrees/minutes/seconds (aka DMS) into numeric degrees.
    Return a float representing decimal degrees.
    
    Arguments :
    dmsStr {string | float} -- Decimal degrees or deg/min/sec in variety of formats.
    
    Allow signed decimal degrees (eg: -0.13) or deg-min-sec optionally suffixed by compass direction (NSEW).
    A variety of separators are accepted (eg 3° 37' 09"W).
    Seconds and minutes may be omitted.  
    
    Example :
        > dms.parseDMS("48°51'25.2'N")         
        > 48.857000
    """
    
    # Check for signed decimal degrees without a NSEW compass direction; if so, return directly
    if isinstance(dmsStr, float) and isfinite(dmsStr):
        return float(dmsStr)

    tmpStr = str(dmsStr).strip()
    # Strip off any sign at the beginning of the string
    tmpStr = re.sub('^-', '', tmpStr)
    # Strip off any compass direction at the end of the string
    tmpStr = re.sub('[NSEW]$', '', tmpStr, flags=re.IGNORECASE)
    # Split out separate d/m/s (degree / minute / second) components
    dms = re.split('[^0-9.]+', tmpStr)
    
    # Remove first list element if it contains empty string (positive or negative sign)
    if dms[0] == '':
        dms = dms[1:]
        
    # Remove last list element if it contains empty string (compass direction)
    if dms[len(dms)-1] == '':
        dms = dms[:len(dms)-1]
    
    if not dms:
        return None
    
    # Convert to decimal degrees
    dms_length = len(dms)
    if dms_length == 3: # 3-part result d/m/s
        deg = float(dms[0])/1 + float(dms[1])/60 + float(dms[2])/3600
    elif dms_length == 2: # 2-part result d/m
        deg = float(dms[0])/1 + float(dms[1])/60
    elif dms_length == 1: # Only d (degrees)
        deg = float(dms[0])
    else:
        return None
    
    m = re.match('^-.*|.*[WS]$', str(dmsStr).strip(), re.IGNORECASE)
    if m is not None:
        deg = -deg
        
    return float(deg)
Example #29
 def __init__(self, iterable):
     super().__init__()
     self.head = LinkedList.Node(-inf, None)
     self.tail = LinkedList.Node(inf, None)
     last = self.head
     for k, v in iterable:
         assert isfinite(k)
         last.next = LinkedList.Node(k, v)
         last = last.next
     last.next = self.tail
Example #30
File: An.py  Project: vdhan/RSA
def inverse(a, z=26):
    """Return the modular multiplicative inverse of given number"""
    if not math.isfinite(a) or not math.isfinite(z):
        raise TypeError('Parameter Error!')

    a = int(a)
    z = int(z)
    if a < 0:
        a %= z

    if gcd(a, z) == 1:
        n, y, y2 = z, 1, 0
        while a != 0:
            q = z // a
            z, a = a, z % a
            y, y2 = y2 - q * y, y
        return y2 % n

    return 0
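
A quick check of inverse() above (the module is assumed to import gcd from math): 7 * 15 = 105 ≡ 1 (mod 26):

inv = inverse(7, 26)
print(inv, (7 * inv) % 26)  # 15 1
print(inverse(13, 26))      # 0 -- gcd(13, 26) != 1, so no inverse exists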
Example #31
def is_finite(value: Any) -> bool:
    """Return true if a value is a finite number."""
    return (isinstance(value, int)
            and not isinstance(value, bool)) or (isinstance(value, float)
                                                 and isfinite(value))
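
The explicit bool exclusion matters because bool is a subclass of int in Python (assuming the module's own imports of Any and isfinite):

print(is_finite(1))             # True
print(is_finite(True))          # False -- bools are rejected explicitly
print(is_finite(float('nan')))  # False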
Example #32
File: messages.py  Project: aa1830/snipe
    def walk(
            self, start, forward=True, mfilter=None, backfill_to=None,
            search=False):
        """Iterate through a list of messages associated with a backend.

        :param start: Where to start iterating from.
        :type start: integer, SnipeMessage, float or None
        :param bool forward: Whether to go forwards or backwards.
        :param mfilter: Any applicable message filter
        :type mfilter: Filter or None
        :param backfill_to: How far the backend should dig
        :type backfill_to: float or None
        :param bool search: Whether this is being called from a search

        If ``start`` is ``None``, begin at the end ``forward`` would have us
        moving away from.

        ``backfill_to`` potentially triggers the backend to pull in more
        messages, but it doesn't guarantee that they'll be visible in this
        iteration.

        ``search`` lets backends behave differently when not called from the
        redisplay, for date headers and such that want to bypass filters on
        display.
        """
        self.log.debug(
            'walk(%s, %s, [filter], %s, %s)',
            repr(start), forward, util.timestr(backfill_to), search)
        # I have some concerns that this depends on the
        # self.messages list being stable over the life of the
        # iterator.  This doesn't seem to be a problem as of when I
        # write this comment, but defensive coding should address this
        # at some point.  (If you are finding this comment because of
        # weird message list behavior, this might be why...)

        if mfilter is not None:
            mfilter = mfilter.simplify({
                'backend': self.name,
                'context': self.context,
                })
            if mfilter is False:
                return
            if mfilter is True:
                mfilter = None

        if mfilter is None:
            def mfilter(m):
                return True

        cachekey = (start, forward, mfilter)
        point = self.startcache.get(cachekey, None)

        if backfill_to is not None and math.isfinite(backfill_to):
            self.backfill(mfilter, backfill_to)

        needcache = False
        if point is None:
            needcache = True
            if start is not None:
                left = bisect.bisect_left(self.messages, start)
                right = bisect.bisect_right(self.messages, start)
                try:
                    point = self.messages.index(start, left, right)
                except ValueError:
                    point = None
            else:
                if forward:
                    point = 0
                else:
                    point = len(self.messages) - 1

            if forward:
                point = point if point is not None else left
            else:
                point = point if point is not None else right - 1

        if forward:
            def getnext(x):
                return x + 1
        else:
            def getnext(x):
                return x - 1

        # self.log.debug(
        #     'len(self.messages)=%d, point=%d', len(self.messages), point)

        adjkey = None
        while self.messages:
            # self.log.debug(', point=%d', point)
            if not 0 <= point < len(self.messages):
                break
            m = self.messages[point]
            if mfilter(m):
                if needcache:
                    self.startcache[cachekey] = point
                    needcache = False
                yield m
                if adjkey is not None:
                    self.adjcache[adjkey] = point
                adjkey = (m, forward, mfilter)
            point = self.adjcache.get(adjkey, getnext(point))

        if adjkey is not None:
            self.adjcache[adjkey] = point

        # specifically catch the situation where we're trying to go off the top
        if point < 0 and backfill_to is not None:
            self.backfill(mfilter, backfill_to)
Example #33
def solve(problem_nr, level, start, render: bool):

    name = "problem_{:02d}".format(problem_nr)

    EMPTY = ord('.')

    vertices = set()
    edges = {}

    to_be_visited = [start]

    while to_be_visited:
        s = to_be_visited.pop()
        if s in vertices:
            continue

        vertices.add(s)

        for y in range(5):
            for x in range(5):
                mover = s[y * 5 + x]
                if mover != EMPTY:
                    for (direction, dx, dy) in [('R', +1, 0), ('U', 0, -1),
                                                ('L', -1, 0), ('D', 0, +1)]:
                        q = 1
                        while True:
                            xx = x + q * dx
                            yy = y + q * dy
                            if xx < 0 or xx > 4 or yy < 0 or yy > 4:
                                break
                            if s[yy * 5 + xx] != EMPTY:
                                if q > 1:
                                    # valid move found!
                                    xx = x + (q - 1) * dx
                                    yy = y + (q - 1) * dy
                                    ss = bytearray(s)
                                    ss[y * 5 + x] = EMPTY
                                    ss[yy * 5 + xx] = mover
                                    ss = bytes(ss)
                                    edges[(s, ss)] = chr(mover) + direction
                                    to_be_visited.append(ss)
                                break  # from while loop
                            q += 1

    distance_to_solution = {}
    for v in vertices:
        if v[12] == ord('x'):
            distance_to_solution[v] = 0

    dts = 0
    while True:
        changed = False
        for (v1, v2) in edges:
            if v1 not in distance_to_solution:
                if v2 in distance_to_solution:
                    if distance_to_solution[v2] == dts:
                        distance_to_solution[v1] = dts + 1
                        changed = True
        if not changed:
            break
        dts += 1

    for v in vertices:
        if v not in distance_to_solution:
            distance_to_solution[v] = math.inf

    on_optimal_path = set()
    on_optimal_path.add(start)
    while True:
        changed = False
        for (v1, v2) in edges:
            if v1 in on_optimal_path and v2 not in on_optimal_path:
                if distance_to_solution[v2] < distance_to_solution[v1]:
                    on_optimal_path.add(v2)
                    changed = True
        if not changed:
            break

    print("name: {} vertices: {} edges: {}".format(name, len(vertices),
                                                   len(edges)))

    filename_dot = "{}.dot".format(name)
    with open(filename_dot, "w") as fo:
        print("digraph {} {{".format(name), file=fo)

        attributes = {}
        attributes["shape"] = "circle"
        print("    node{};".format(attributes_string(attributes)), file=fo)

        attributes = {}
        attributes["overlap"] = "no"
        attributes["label"] = "problem {} ({})".format(problem_nr, level)
        print("    graph{};".format(attributes_string(attributes)), file=fo)

        dot_nv = 0
        dot_ne = 0

        for v in vertices:
            vname = v.decode().replace(".", "_")
            distance = distance_to_solution[v]

            if not math.isfinite(distance):
                continue

            label = "\\n".join(
                name[5 * i:5 * (i + 1)]
                for i in range(5)) + "\\n\\n({})".format(
                    distance if math.isfinite(distance) else "∞")

            attributes = {}
            attributes["label"] = label
            if v == start:
                attributes["fillcolor"] = "dodgerblue"
                attributes["style"] = "filled"
            elif v[12] == ord('x') and v in on_optimal_path:
                attributes["fillcolor"] = "green"
                attributes["style"] = "filled"
            elif v[12] == ord('x'):
                attributes["fillcolor"] = "darkseagreen1"
                attributes["style"] = "filled"
            elif v in on_optimal_path:
                attributes["fillcolor"] = "cyan"
                attributes["style"] = "filled"
            elif not math.isfinite(distance):
                attributes["fillcolor"] = "red"
                attributes["style"] = "filled"
            else:
                attributes["fillcolor"] = "beige"
                attributes["style"] = "filled"

            print("    {}{};".format(vname, attributes_string(attributes)),
                  file=fo)
            dot_nv += 1

        for ((v1, v2), move_description) in edges.items():

            if not (math.isfinite(distance_to_solution[v1])
                    and math.isfinite(distance_to_solution[v2])):
                continue

            v1name = v1.decode().replace(".", "_")
            v2name = v2.decode().replace(".", "_")

            attributes = {}
            attributes["label"] = move_description

            if distance_to_solution[v2] < distance_to_solution[v1]:
                attributes["color"] = "green"

            print("    {} -> {}{};".format(v1name, v2name,
                                           attributes_string(attributes)),
                  file=fo)
            dot_ne += 1
        print("}", file=fo)

    print("name: {} graphviz file vertices: {} edges: {}".format(
        name, dot_nv, dot_ne))

    solutions = enumerate_solutions([], start, distance_to_solution, edges)
    for (i, solution) in enumerate(solutions, 1):
        sol = []
        for step in solution:
            step = step.upper()
            if len(sol) == 0 or not sol[-1].startswith(step[0]):
                sol.append(step[0] + "-")
            sol[-1] = sol[-1] + step[1]
        sol = ", ".join(sol)
        print("problem {}, solution {}/{}: {}".format(problem_nr, i,
                                                      len(solutions), sol))

    if render:
        filename_pdf = "{}.pdf".format(name)
        args = ["dot", "-Tpdf", filename_dot, "-o", filename_pdf]
        subprocess.run(args)

    return distance_to_solution[start]
Example #34
# math.fsum(iterable)
# Return an accurate floating-point sum of the values in the iterable
print(math.fsum([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                 0.1]))  # 10 * 0.1  # >>> 1.0

# math.gcd(a, b)
# Return the greatest common divisor of the integers a and b; gcd(0, 0) returns 0
print(math.gcd(12, 16))  # >>> 4

# math.isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0)
# Return True if the values a and b are close to each other, False otherwise
# rel_tol is the relative tolerance – it is the maximum allowed difference between a and b
# To set a tolerance of 5%, pass rel_tol=0.05
# abs_tol is the minimum absolute tolerance – useful for comparisons near zero. abs_tol must be at least zero
print(math.isclose(100, 90, rel_tol=0.05))  # >>> False  # tolerance given as a percentage

# math.isfinite(x)
# Return True if x is neither an infinity nor a NaN, False otherwise. Note that 0.0 is considered finite
print(math.isfinite(5))  # >>> True
print(math.isfinite(1 / 3))  # >>> True  # a repeating decimal is still a finite float
print(math.isfinite(math.pi))  # True  # an irrational constant is a finite float too
# print(math.isfinite(1/0))    # raises ZeroDivisionError

# math.isinf(x)
# Return True if x is positive or negative infinity, False otherwise
print(9e2)  # >>> 900.0
print(math.isinf(9e999))  # >>> True  # the literal 9e999 overflows to inf

# math.isnan(x)
# Return True if x is NaN (not a number), False otherwise
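# A quick demo (added; the tour stated the rule for isnan without one):
print(math.isnan(float('nan')))  # >>> True
print(math.isnan(3.14))          # >>> False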

# math.modf(x)
# Return the fractional and integer parts of x. Both results carry the sign of x and are floats
print(math.modf(5.25))  # >>> (0.25, 5.0)  # in a tuple
Example #35
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann.  All rights reserved.
#
"""Checking for overflow or infinite values.
"""
#end_pymotw_header

import math

for f in [0.0, 1.0, math.pi, math.e, math.inf, math.nan]:
    print('{:5.2f} {!s}'.format(f, math.isfinite(f)))
Example #36
File: utils.py  Project: shromonag/py-stl
def _interval_discretizable(itvl, dt):
    l, u = itvl.lower / dt, itvl.upper / dt
    if not (isfinite(l) and isfinite(u)):
        return False
    return np.isclose(l, round(l)) and np.isclose(u, round(u))
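
A hypothetical call (the Interval stand-in is an assumption; the original project has its own interval type, and the snippet relies on its module importing numpy as np and isfinite from math):

from collections import namedtuple

Interval = namedtuple('Interval', ['lower', 'upper'])
print(_interval_discretizable(Interval(0.0, 1.0), 0.25))  # True  -- both endpoints land on the grid
print(_interval_discretizable(Interval(0.0, 1.0), 0.3))   # False -- 1/0.3 is not close to an integer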
Example #37
 def __repr__(self) -> str:
     minv = self._min_value if math.isfinite(self._min_value) else None
     maxv = self._max_value if math.isfinite(self._max_value) else None
     return '<Numbers{}>'.format(range_str(minv, maxv, 'v'))
Example #38
def _isfinite(x):
    try:
        return x.is_finite()  # Likely a Decimal.
    except AttributeError:
        return math.isfinite(x)  # Coerces to float first.
Example #39
 def partially_bounded(self) -> bool:
     """
     Returns:
         Whether or not the metric has a mutually exclusive bound
     """
     return math.isfinite(self.__minimum) ^ math.isfinite(self.__maximum)
Example #40
 def lower_bounded(self) -> bool:
     """
     Returns:
         Whether or not the metric has a lower bound
     """
     return math.isfinite(self.__minimum)
Example #41
    def event_to_json(event: Dict) -> str:
        """Convert event into json in format Influx expects."""
        state = event.data.get(EVENT_NEW_STATE)
        if (
            state is None
            or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
            or not entity_filter(state.entity_id)
        ):
            return

        try:
            _include_state = _include_value = False

            _state_as_value = float(state.state)
            _include_value = True
        except ValueError:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                _include_state = _include_value = True
            except ValueError:
                _include_state = True

        include_uom = True
        entity_config = component_config.get(state.entity_id)
        measurement = entity_config.get(CONF_OVERRIDE_MEASUREMENT)
        if measurement in (None, ""):
            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get(CONF_UNIT_OF_MEASUREMENT)
                if measurement in (None, ""):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id
                else:
                    include_uom = False

        json = {
            INFLUX_CONF_MEASUREMENT: measurement,
            INFLUX_CONF_TAGS: {
                CONF_DOMAIN: state.domain,
                CONF_ENTITY_ID: state.object_id,
            },
            INFLUX_CONF_TIME: event.time_fired,
            INFLUX_CONF_FIELDS: {},
        }
        if _include_state:
            json[INFLUX_CONF_FIELDS][INFLUX_CONF_STATE] = state.state
        if _include_value:
            json[INFLUX_CONF_FIELDS][INFLUX_CONF_VALUE] = _state_as_value

        ignore_attributes = set(entity_config.get(CONF_IGNORE_ATTRIBUTES, []))
        ignore_attributes.update(global_ignore_attributes)
        for key, value in state.attributes.items():
            if key in tags_attributes:
                json[INFLUX_CONF_TAGS][key] = value
            elif (
                key != CONF_UNIT_OF_MEASUREMENT or include_uom
            ) and key not in ignore_attributes:
                # If the key is already in fields
                if key in json[INFLUX_CONF_FIELDS]:
                    key = f"{key}_"
                # Prevent column data errors in influxDB.
                # For each value we try to cast it as float
                # But if we can not do it we store the value
                # as string add "_str" postfix to the field key
                try:
                    json[INFLUX_CONF_FIELDS][key] = float(value)
                except (ValueError, TypeError):
                    new_key = f"{key}_str"
                    new_value = str(value)
                    json[INFLUX_CONF_FIELDS][new_key] = new_value

                    if RE_DIGIT_TAIL.match(new_value):
                        json[INFLUX_CONF_FIELDS][key] = float(
                            RE_DECIMAL.sub("", new_value)
                        )

                # Infinity and NaN are not valid floats in InfluxDB
                try:
                    if not math.isfinite(json[INFLUX_CONF_FIELDS][key]):
                        del json[INFLUX_CONF_FIELDS][key]
                except (KeyError, TypeError):
                    pass

        json[INFLUX_CONF_TAGS].update(tags)

        return json
Example #42
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter(
        'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)

    lr_scheduler = None
    if epoch == 0:
        warmup_factor = 1. / 1000
        warmup_iters = min(1000, len(data_loader) - 1)

        lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters,
                                                 warmup_factor)

    for images, targets in metric_logger.log_every(data_loader, print_freq,
                                                   header):
        images = list(image.to(device)
                      for image in images)  #.to(device) for both
        targets = [{k: v.to(device)
                    for k, v in t.items()}
                   for t in targets]  #.to(device) for both

        loss_dict = model(images, targets)
        '''    
    During training, the model expects both the input tensors, as well as a targets (list of dictionary),
    containing:
        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the class label for each ground-truth box
        - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance
    The model returns a Dict[Tensor] during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the mask loss.
    
    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    follows:
        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores or each prediction
        - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
          obtain the final segmentation masks, the soft masks can be thresholded, generally
          with a value of 0.5 (mask >= 0.5)
          '''

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())

        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    return metric_logger
Example #43
  def getLines(self, frame):

    # Run the feature extraction only once every two frames.
    self.frameIndex += 1
    l,r,ls,rs = self.previousReturnValue
    if self.frameIndex % 2 == 0:
      l, r, ls, rs = self._getProbs(frame)
      self.previousReturnValue = l, r, ls, rs

    laneLines = []
    for prob, score, _id in zip([l,r], [ls,rs], [1,2]):
      if score < .5:
        laneLines.append([])
        continue

      prob = prob[VOFF:-VOFF][::-1]
      maxs = np.max(prob, axis=1)
      measurement = np.argmax(prob, axis=1).astype('float32')
      if self.state[_id] is None:
        self.state[_id] = measurement.copy()

      # Count 'size' of discontinuities.
      count = 0
      px = measurement[0]
      for y,x in enumerate(measurement):
        if abs(x-px) <= 60 - y/8:
          px = x
        else:
          count += 1
      noise = count/len(measurement)

      # The measurement is too noisy.
      if noise > .1:
        newState = self.state[_id].copy()
        self.framesSinceDiscontinuous = 0
      else:
        # Try to remove some discontinuities
        newMeasurement = measurement
        for k in range(2):
          mg = max(abs(np.gradient(newMeasurement)))
          newMeasurement = measurement.copy()
          grad = np.gradient(newMeasurement)
          # use previous state estimate
          for y,gx in enumerate(grad):
            if abs(gx) > 20:
              low = max(0,y-20)
              high = min(len(grad)-1,y+20)
              newMeasurement[low:high] = self.state[_id][low:high]
          px = newMeasurement[0]
          grad = np.gradient(newMeasurement)
          for y,x in enumerate(newMeasurement):
            if abs(x-px) >= 40 - y/8:
              px += grad[:y-1].mean()
              if math.isfinite(px):
                newMeasurement[y] = px
            else:
              px = newMeasurement[y]
          nmg = max(abs(np.gradient(newMeasurement)))
        # If we made the curve more discontinuous then just use the original curve
        if nmg > mg:
          newMeasurement = measurement

        # Check how different the measurement is from the previous state est
        diff = abs(self.state[_id] - newMeasurement).mean()
        if diff > 20: # Take the measurement to be the new state if the measurement is nearly perfect
          self.state[_id] = newMeasurement.copy()
          if self.framesSinceDiscontinuous < 7:
            self.output[_id] = newMeasurement.copy()
        newState = mix(self.state[_id], newMeasurement, 1-TIMESMOOTH)
        self.framesSinceDiscontinuous = min(self.framesSinceDiscontinuous+1,8)

      smoothx = newState[0]
      trendx = newState[1] - newState[0]

      # Output only smoothed versions of the internal state.
      smoothedState = newState.copy()
      for y,x in enumerate(newState):
        prevsmoothx = smoothx
        smoothx = mix(smoothx + trendx, x, 1-XSMOOTH)
        trendx = mix(trendx, smoothx-prevsmoothx, 1-XTRENDSMOOTH)
        smoothedState[y] = smoothx

      # Feedback loop
      # newState = mix(newState, smoothedState, .3)

      self.state[_id] = newState.copy()

      # Smooth our output in time
      if self.output[_id] is None:
        self.output[_id] = smoothedState
      self.output[_id] = mix(self.output[_id], smoothedState, 1-OUTPUTTIMESMOOTH)
      # Smooth the output a bit more
      smoothx = self.output[_id][0]
      for y,x in enumerate(self.output[_id]):
        prevsmoothx = smoothx
        smoothx = mix(smoothx + trendx, x, 1-OUTPUTXSMOOTH)
        trendx = mix(trendx, smoothx-prevsmoothx, 1-OUTPUTXSMOOTH)
        self.output[_id][y] = smoothx

      # Finally convert to the output format
      xs, ys = [], []
      for y,x in enumerate(self.output[_id]):
        try:
          xs.append(int(x*720/976))
          y = len(smoothedState) - y - 1
          ys.append(int((VOFF+y)*720/976+207))
        except:
          continue

      # Filter anything we are not confident about
      ox,oy=[],[]
      for y in range(len(xs)):
        if maxs[y] > OUTPUT_SEGMENT_THRESHOLD:
          ox.append(xs[y])
          oy.append(ys[y])
      xs,ys=ox,oy

      # If we filtered a lot then just ignore this lane
      if len(xs) < 85:
        xs,ys = [],[]

      laneLines.append(list(zip(xs, ys, xs[1:], ys[1:])))
    return laneLines
Example #44
def _coordinateRepr(self):
    return "{}({})".format(
        type(self).__name__,
        ", ".join(f"{v:0.17g}" if math.isfinite(v) else f"float('{v}')"
                  for v in self))
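
A sketch of how the helper produces eval-able reprs even for non-finite coordinates (the Point class is illustrative, assuming _coordinateRepr above and an imported math):

import math

class Point(tuple):
    __repr__ = _coordinateRepr

print(Point((1.0, math.inf)))  # Point(1, float('inf'))
print(Point((0.5, math.nan)))  # Point(0.5, float('nan'))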
Example #45
 def bounded(self) -> bool:
     """
     Returns:
         Whether or not there is at least a single bound
     """
     return math.isfinite(self.__minimum) or math.isfinite(self.__maximum)
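
Examples #39, #40 and #45 are properties of one metric class; a self-contained sketch (class and attribute names assumed) of how the isfinite tests combine:

import math

class Metric:
    def __init__(self, minimum: float, maximum: float) -> None:
        self.__minimum = minimum
        self.__maximum = maximum

    @property
    def lower_bounded(self) -> bool:
        return math.isfinite(self.__minimum)

    @property
    def partially_bounded(self) -> bool:
        # XOR: exactly one of the two bounds is finite
        return math.isfinite(self.__minimum) ^ math.isfinite(self.__maximum)

    @property
    def bounded(self) -> bool:
        return math.isfinite(self.__minimum) or math.isfinite(self.__maximum)

m = Metric(0.0, math.inf)
print(m.lower_bounded, m.partially_bounded, m.bounded)  # True True True
print(Metric(-math.inf, math.inf).bounded)              # False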
Example #46
 def is_finite(self) -> bool:
     return isfinite(self.x) and isfinite(self.y)
Example #47
def publish(
    purser: yakut.Purser,
    message: typing.Sequence[typing.Tuple[str, str]],
    period: float,
    count: int,
    priority: pyuavcan.transport.Priority,
) -> None:
    """
    Publish messages on the specified subjects.
    The local node will also publish heartbeat and respond to GetInfo, unless it is configured to be anonymous.

    The command accepts a list of space-separated pairs like:

    \b
        [SUBJECT_ID.]TYPE_NAME.MAJOR.MINOR  YAML_FIELDS

    The first element is a name like `uavcan.node.Heartbeat.1.0` prepended by the subject-ID.
    The subject-ID may be omitted if a fixed one is defined for the data type.

    The second element specifies the values of the message fields in YAML format (or JSON, which is a subset of YAML).
    Missing fields will be left at their default values;
    therefore, to publish a default-initialized message, the field specifier should be an empty dict: `{}`.
    For more info about the format see PyUAVCAN documentation on builtin-based representations.

    The number of such pairs can be arbitrary; all defined messages will be published synchronously.
    If no such pairs are specified, only the heartbeat will be published, unless the local node is anonymous.

    Forward or backward slashes can be used instead of ".";
    version numbers can be also separated using underscores.
    This is done to allow the user to rely on filesystem autocompletion when typing the command.

    Examples:

    \b
        yakut pub uavcan.diagnostic.Record.1.1 '{text: "Hello world!", severity: {value: 4}}' -N3 -T0.1 -P hi
        yakut pub 33.uavcan/si/unit/angle/Scalar_1_0 'radian: 2.31' uavcan.diagnostic.Record.1.1 'text: "2.31 rad"'
    """
    try:
        from pyuavcan.application import Node
    except ImportError as ex:
        from yakut.cmd.compile import make_usage_suggestion

        raise click.UsageError(make_usage_suggestion(ex.name))

    _logger.debug("period=%s, count=%s, priority=%s, message=%s", period,
                  count, priority, message)
    assert all((isinstance(a, str) and isinstance(b, str)) for a, b in message)
    assert isinstance(period, float) and isinstance(count, int) and isinstance(
        priority, pyuavcan.transport.Priority)
    if period < 1e-9 or not math.isfinite(period):
        raise click.BadParameter(
            "Period shall be a positive real number of seconds")
    if count <= 0:
        _logger.info("Nothing to do because count=%s", count)
        return

    send_timeout = max(_MIN_SEND_TIMEOUT, period)
    node = purser.get_node("publish", allow_anonymous=True)
    assert isinstance(node, Node)
    with contextlib.closing(node):
        publications = [
            Publication(
                subject_spec=subj,
                field_spec=fields,
                presentation=node.presentation,
                priority=priority,
                send_timeout=send_timeout,
            ) for subj, fields in message
        ]
        if _logger.isEnabledFor(logging.INFO):
            _logger.info(
                "Ready to start publishing with period %.3fs, send timeout %.3fs, count %d, at %s:\n%s",
                period,
                send_timeout,
                count,
                priority,
                "\n".join(map(str, publications)) or "<nothing>",
            )
        try:  # Even if the publication set is empty, we still have to publish the heartbeat.
            _run(node, count=count, period=period, publications=publications)
        finally:
            if _logger.isEnabledFor(logging.INFO):
                _logger.info("%s",
                             node.presentation.transport.sample_statistics())
                for s in node.presentation.transport.output_sessions:
                    ds = s.specifier.data_specifier
                    if isinstance(ds, pyuavcan.transport.MessageDataSpecifier):
                        _logger.info("Subject %d: %s", ds.subject_id,
                                     s.sample_statistics())
Example #48
 def validate(self, value):
     super().validate(value)
     if value in self.empty_values:
         return
     if not math.isfinite(value):
         raise ValidationError(self.error_messages['invalid'], code='invalid')
示例#50
0
    def parse_from_bytes(self, buffer):
        """Returns a consensus state object re-created from the serialized
        consensus state provided.

        Args:
            buffer (bytes): A byte string representing the serialized
                version of a consensus state to re-create.  This was created
                by a previous call to serialize_to_bytes

        Returns:
            ConsensusState: object representing the serialized byte string
                provided

        Raises:
            ValueError: failure to parse into a valid ConsensusState object
        """
        try:
            # Deserialize the CBOR back into a dictionary and set the simple
            # fields, doing our best to check validity.
            self_dict = cbor.loads(buffer)

            if not isinstance(self_dict, dict):
                raise \
                    ValueError(
                        'buffer is not a valid serialization of a '
                        'ConsensusState object')

            self._aggregate_local_mean = \
                float(self_dict['_aggregate_local_mean'])
            self._local_mean = None
            self._population_samples = collections.deque()
            for sample in self_dict['_population_samples']:
                (duration, local_mean) = [float(value) for value in sample]
                if not math.isfinite(duration) or duration < 0:
                    raise \
                        ValueError(
                            'duration ({}) is invalid'.format(duration))
                if not math.isfinite(local_mean) or local_mean < 0:
                    raise \
                        ValueError(
                            'local_mean ({}) is invalid'.format(local_mean))
                self._population_samples.append(
                    ConsensusState._PopulationSample(duration=duration,
                                                     local_mean=local_mean))
            self._total_block_claim_count = \
                int(self_dict['_total_block_claim_count'])
            validators = self_dict['_validators']

            if not math.isfinite(self.aggregate_local_mean) or \
                    self.aggregate_local_mean < 0:
                raise \
                    ValueError(
                        'aggregate_local_mean ({}) is invalid'.format(
                            self.aggregate_local_mean))
            if self.total_block_claim_count < 0:
                raise \
                    ValueError(
                        'total_block_claim_count ({}) is invalid'.format(
                            self.total_block_claim_count))

            if not isinstance(validators, dict):
                raise ValueError('_validators is not a dict')

            # Now walk through all of the key/value pairs in the validators
            # dictionary and reconstitute the validator state from them,
            # again trying to validate the data the best we can.  The only
            # catch is that because the validator state objects are named
            # tuples, cbor.dumps() treated them as tuples and so we lost the
            # named part.  When re-creating the validator state, we are going
            # to leverage the namedtuple's _make method.

            self._validators = {}
            for key, value in validators.items():
                validator_state = ValidatorState._make(value)

                self._check_validator_state(validator_state)
                self._validators[str(key)] = validator_state

        except (LookupError, ValueError, KeyError, TypeError) as error:
            raise \
                ValueError(
                    'Error parsing ConsensusState buffer: {}'.format(error))
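The defensive pattern used throughout — convert each field explicitly, then reject non-finite or out-of-range values before accepting the object — applies to any serialization format, not just CBOR. A minimal sketch of the same discipline using json in place of cbor (the field name is illustrative):

import json
import math


def parse_duration(buffer):
    payload = json.loads(buffer)
    duration = float(payload['duration'])
    if not math.isfinite(duration) or duration < 0:
        raise ValueError('duration ({}) is invalid'.format(duration))
    return duration


print(parse_duration('{"duration": 1.5}'))    # 1.5
try:
    # Python's json module happily parses the Infinity token by default,
    # which is exactly why the isfinite check is needed after loading.
    parse_duration('{"duration": Infinity}')
except ValueError as error:
    print(error)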
示例#51
0
def train_one_epoch(model: torch.nn.Module,
                    criterion: torch.nn.Module,
                    data_loader: Iterable,
                    optimizer: torch.optim.Optimizer,
                    device: torch.device,
                    epoch: int,
                    max_norm: float = 0):
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter(
        'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter(
        'class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    for samples, targets in metric_logger.log_every(data_loader, print_freq,
                                                    header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys()
                     if k in weight_dict)

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {
            f'{k}_unscaled': v
            for k, v in loss_dict_reduced.items()
        }
        loss_dict_reduced_scaled = {
            k: v * weight_dict[k]
            for k, v in loss_dict_reduced.items() if k in weight_dict
        }
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())

        loss_value = losses_reduced_scaled.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()

        metric_logger.update(loss=loss_value,
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
示例#52
0
  def state_control(self, CS):
    """Given the state, this function returns an actuators packet"""

    # Update VehicleModel
    params = self.sm['liveParameters']
    x = max(params.stiffnessFactor, 0.1)
    sr = max(params.steerRatio, 0.1)
    self.VM.update_params(x, sr)

    lat_plan = self.sm['lateralPlan']
    long_plan = self.sm['longitudinalPlan']

    actuators = car.CarControl.Actuators.new_message()
    actuators.longControlState = self.LoC.long_control_state

    if CS.leftBlinker or CS.rightBlinker:
      self.last_blinker_frame = self.sm.frame

    # State specific actions

    if not self.active:
      self.LaC.reset()
      self.LoC.reset(v_pid=CS.vEgo)

    if not self.joystick_mode:
      # accel PID loop
      pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
      actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits)

      # Steering PID loop and lateral MPC
      desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
                                                                             lat_plan.psis,
                                                                             lat_plan.curvatures,
                                                                             lat_plan.curvatureRates)
      actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params,
                                                                             desired_curvature, desired_curvature_rate)
    else:
      lac_log = log.ControlsState.LateralDebugState.new_message()
      if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
        actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)

        steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
        # max angle is 45 for angle-based cars
        actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.

        lac_log.active = True
        lac_log.steeringAngleDeg = CS.steeringAngleDeg
        lac_log.output = steer
        lac_log.saturated = abs(steer) >= 0.9

    # Check for difference between desired angle and angle for angle based control
    angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
      abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD

    if angle_control_saturated and not CS.steeringPressed and self.active:
      self.saturated_count += 1
    else:
      self.saturated_count = 0

    # Send a "steering required alert" if saturation count has reached the limit
    if (lac_log.saturated and not CS.steeringPressed) or \
       (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):

      if len(lat_plan.dPathPoints):
        # Check if we deviated from the path
        left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
        right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1

        if left_deviation or right_deviation:
          self.events.add(EventName.steerSaturated)

    # Ensure no NaNs/Infs
    for p in ACTUATOR_FIELDS:
      attr = getattr(actuators, p)
      if not isinstance(attr, Number):
        continue

      if not math.isfinite(attr):
        cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
        setattr(actuators, p, 0.0)

    return actuators, lac_log
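The closing loop is a generic NaN/Inf scrub: every numeric field is tested with math.isfinite and reset to 0.0 on failure, so a single bad intermediate value cannot reach the actuators. A self-contained sketch of that scrub (the class and field list are stand-ins for the real actuators message and ACTUATOR_FIELDS):

import math
from numbers import Number


class Actuators:
    def __init__(self):
        self.accel = float('nan')  # pretend an upstream computation went bad
        self.steer = 0.3
        self.name = 'demo'         # non-numeric fields are skipped


ACTUATOR_FIELDS = ['accel', 'steer', 'name']

actuators = Actuators()
for p in ACTUATOR_FIELDS:
    attr = getattr(actuators, p)
    if not isinstance(attr, Number):
        continue
    if not math.isfinite(attr):
        print('actuators.%s not finite, zeroing' % p)
        setattr(actuators, p, 0.0)

print(actuators.accel, actuators.steer)  # 0.0 0.3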
示例#53
0
def finite_or_none(val):
    if not math.isfinite(val):
        return None
    return val
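A typical motivation for a helper like this is strict JSON output: json.dumps emits the non-standard tokens Infinity and NaN for non-finite floats, whereas mapping them to None yields portable null values. For example:

import json
import math


def finite_or_none(val):
    if not math.isfinite(val):
        return None
    return val


vals = [1.5, float('inf'), float('nan')]
print(json.dumps(vals))                               # [1.5, Infinity, NaN]
print(json.dumps([finite_or_none(v) for v in vals]))  # [1.5, null, null]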
示例#54
0
def _exact_ratio(x):
    """Return Real number x to exact (numerator, denominator) pair.

    >>> _exact_ratio(0.25)
    (1, 4)

    x is expected to be an int, Fraction, Decimal or float.
    """
    try:
        # Optimise the common case of floats. We expect that the most often
        # used numeric type will be builtin floats, so try to make this as
        # fast as possible.
        if type(x) is float:
            return x.as_integer_ratio()
        try:
            # x may be an int, Fraction, or Integral ABC.
            return (x.numerator, x.denominator)
        except AttributeError:
            try:
                # x may be a float subclass.
                return x.as_integer_ratio()
            except AttributeError:
                try:
                    # x may be a Decimal.
                    return _decimal_to_ratio(x)
                except AttributeError:
                    # Just give up?
                    pass
    except (OverflowError, ValueError):
        # float NAN or INF.
        assert not math.isfinite(x)
        return (x, None)
    msg = "can't convert type '{}' to numerator/denominator"
    raise TypeError(msg.format(type(x).__name__))
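Note the contract of the except branch: instead of raising, non-finite floats come back as (x, None), because as_integer_ratio itself signals them with OverflowError or ValueError. A quick demonstration of the behaviour this relies on:

import math

print((0.25).as_integer_ratio())  # (1, 4)
for bad in (float('inf'), float('nan')):
    try:
        bad.as_integer_ratio()
    except (OverflowError, ValueError) as error:
        assert not math.isfinite(bad)
        print(bad, '->', error)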
示例#55
0
    def make_cuts(self, coefs):
        # take the coefficient array and assemble cuts accordingly

        # this should have already been set in the extension !
        opt = self.opt

        # rows are
        # [ const, eta_coeff, *nonant_coeffs ]
        row_len = 1 + 1 + self.nonant_len
        outer_iter = int(coefs[-1])

        bundling = opt.bundling
        if opt.bundling:
            for bn, b in opt.local_subproblems.items():
                persistent_solver = sputils.is_persistent(b._solver_plugin)
                ## get an arbitrary scenario
                s = opt.local_scenarios[b.scen_list[0]]
                for idx, k in enumerate(opt.all_scenario_names):
                    row = coefs[row_len * idx:row_len * (idx + 1)]
                    # the row could be all zeros,
                    # which doesn't do anything
                    if (row == 0.).all():
                        continue
                    # rows are
                    # [ const, eta_coeff, *nonant_coeffs ]
                    linear_const = row[0]
                    linear_coefs = list(row[1:])
                    linear_vars = [b._mpisppy_model.eta[k]]

                    for ndn_i in s._mpisppy_data.nonant_indices:
                        ## for bundles, we add the constraints only
                        ## to the reference first-stage variables
                        linear_vars.append(b.ref_vars[ndn_i])

                    cut_expr = LinearExpression(constant=linear_const,
                                                linear_coefs=linear_coefs,
                                                linear_vars=linear_vars)
                    b._mpisppy_model.benders_cuts[outer_iter,
                                                  k] = (None, cut_expr, 0)
                    if persistent_solver:
                        b._solver_plugin.add_constraint(
                            b._mpisppy_model.benders_cuts[outer_iter, k])

        else:
            for sn, s in opt.local_subproblems.items():
                persistent_solver = sputils.is_persistent(s._solver_plugin)
                for idx, k in enumerate(opt.all_scenario_names):
                    row = coefs[row_len * idx:row_len * (idx + 1)]
                    # the row could be all zeros,
                    # which doesn't do anything
                    if (row == 0.).all():
                        continue
                    # rows are
                    # [ const, eta_coeff, *nonant_coeffs ]
                    linear_const = row[0]
                    linear_coefs = list(row[1:])
                    linear_vars = [s._mpisppy_model.eta[k]]
                    linear_vars.extend(s._mpisppy_data.nonant_indices.values())

                    cut_expr = LinearExpression(constant=linear_const,
                                                linear_coefs=linear_coefs,
                                                linear_vars=linear_vars)
                    s._mpisppy_model.benders_cuts[outer_iter,
                                                  k] = (None, cut_expr, 0.)
                    if persistent_solver:
                        s._solver_plugin.add_constraint(
                            s._mpisppy_model.benders_cuts[outer_iter, k])

        # NOTE: the LShaped code negates the objective, so
        #       we do the same here for consistency
        ib = self.BestInnerBound
        ob = self.BestOuterBound
        if not opt.is_minimizing:
            ib = -ib
            ob = -ob
        add_cut = (isfinite(ib) or isfinite(ob)) and \
                ((ib < self.best_inner_bound) or (ob > self.best_outer_bound))
        if add_cut:
            self.best_inner_bound = ib
            self.best_outer_bound = ob
            for sn, s in opt.local_subproblems.items():
                persistent_solver = sputils.is_persistent(s._solver_plugin)
                prior_outer_iter = list(
                    s._mpisppy_model.inner_bound_constr.keys())
                s._mpisppy_model.inner_bound_constr[outer_iter] = (
                    ob, s._mpisppy_model.EF_obj, ib)
                if persistent_solver:
                    s._solver_plugin.add_constraint(
                        s._mpisppy_model.inner_bound_constr[outer_iter])
                # remove other ib constraints (we only need the tightest)
                for it in prior_outer_iter:
                    if persistent_solver:
                        s._solver_plugin.remove_constraint(
                            s._mpisppy_model.inner_bound_constr[it])
                    del s._mpisppy_model.inner_bound_constr[it]

        ## helping the extension track cuts
        self.new_cuts = True
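The isfinite test before updating the bounds matters because both bounds start at infinity until the solver produces a first incumbent; the inner-bound constraint is only recorded once at least one bound is a real number. A condensed sketch of that update rule (initial values and names are assumptions, following the minimization convention):

import math

best_inner_bound = math.inf    # best upper bound seen so far
best_outer_bound = -math.inf   # best lower bound seen so far


def maybe_record(ib, ob):
    global best_inner_bound, best_outer_bound
    add_cut = (math.isfinite(ib) or math.isfinite(ob)) and \
        ((ib < best_inner_bound) or (ob > best_outer_bound))
    if add_cut:
        best_inner_bound = ib
        best_outer_bound = ob
    return add_cut


print(maybe_record(math.inf, -math.inf))  # False: nothing finite yet
print(maybe_record(10.0, 2.0))            # True: first finite bounds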
示例#56
0
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)

    lr_scheduler = None
    if epoch == 0:
        warmup_factor = 1. / 1000
        warmup_iters = min(1000, len(data_loader) - 1)

        lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = list(image.to(device) for image in images)

        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
示例#57
0
print("math.ceil(11.11):", math.ceil(11.11))  # Ceiling'
print("math.floor(11.11):", math.floor(11.11))  # Floor
print("math.ceil(-11.11):", math.ceil(-11.11))  # Ceiling'
print("math.floor(-11.11):", math.floor(-11.11))  # Floor

print("min(1,2,3):", min(1, 2, 3))  # Largest argument
print("max(1,2,3):", max(1, 2, 3))  # Smallest argument
print("math.fsum([1, 2, 3]):", math.fsum([1, 2, 3]))  # Sums an enumerable

print("math.modf(1.1):", math.modf(1.1))  # Modf. tuple of real number as integral and fractal
print("math.trunc(1.11):", math.trunc(1.11))  # Truncate
print("math.trunc(-1.11):", math.trunc(-1.11))  # Truncate

print("math.fabs(-999):", math.fabs(-999))  # Abs

print("math.isfinite(1):", math.isfinite(1))  # Not infinite or is finite
print("math.isinf(float('inf')):", math.isinf(float("inf")))  # If infinite
print("math.isnan(float('inf'')):", math.isnan(float("inf")))  # Not NaN

print("math.pow(3, 3):", math.pow(3, 3))  # To the power of
print("math.sqrt(9):", math.sqrt(9))  # Square root

# Trigonometric Functions
print("\n*** Trigonometric Functions")
print("math.radians(360):", math.radians(360))  # Degrees to Radian
print("math.degrees(6.28...):", math.degrees(6.283185307179586))  # Radians to degrees

# acos(x)       Return the arc cosine of x, in radians.
# asin(x)       Return the arc sine of x, in radians.
# atan(x)       Return the arc tangent of x, in radians.
# cos(x)        Return the cosine of x radians.
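Continuing the snippet, the inverse functions listed in the comments behave as follows:

import math

print("math.acos(1):", math.acos(1))  # 0.0 (radians)
print("math.asin(1):", math.asin(1))  # pi/2 ~= 1.5707963267948966
print("math.atan(1):", math.atan(1))  # pi/4 ~= 0.7853981633974483
print("math.cos(0):", math.cos(0))    # 1.0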
示例#58
0
    def read(self, raw, offset, game_version, cls=None, members=None):
        """
        recursively read defined binary data from raw at given offset.

        this is used to fill the python classes with data from the binary input.
        """
        if cls:
            target_class = cls
        else:
            target_class = self

        # Members are returned at the end
        generated_value_members = []

        # break out of the current reading loop when members don't exist in
        # source data file
        stop_reading_members = False

        if not members:
            members = target_class.get_data_format(
                game_version,
                allowed_modes=(True, READ, READ_GEN, READ_UNKNOWN, SKIP),
                flatten_includes=False)

        for _, export, var_name, storage_type, var_type in members:

            if stop_reading_members:
                if isinstance(var_type, ReadMember):
                    replacement_value = var_type.get_empty_value()
                else:
                    replacement_value = 0

                setattr(self, var_name, replacement_value)
                continue

            if isinstance(var_type, GroupMember):
                if not issubclass(var_type.cls, GenieStructure):
                    raise Exception("class where members should be "
                                    "included is not exportable: %s" %
                                    (var_type.cls.__name__))

                if isinstance(var_type, IncludeMembers):
                    # call the read function of the referenced class (cls),
                    # but store the data to the current object (self).
                    offset, gen_members = var_type.cls.read(self,
                                                            raw,
                                                            offset,
                                                            game_version,
                                                            cls=var_type.cls)

                    if export == READ_GEN:
                        # Push the passed members directly into the list of generated members
                        generated_value_members.extend(gen_members)

                else:
                    # create new instance of ValueMember,
                    # depending on the storage type.
                    # then save the result as a reference named `var_name`
                    grouped_data = var_type.cls()
                    offset, gen_members = grouped_data.read(
                        raw, offset, game_version)

                    setattr(self, var_name, grouped_data)

                    if export == READ_GEN:
                        # Store the data
                        if storage_type is StorageType.CONTAINER_MEMBER:
                            # push the members into a ContainerMember
                            container = ContainerMember(var_name, gen_members)

                            generated_value_members.append(container)

                        elif storage_type is StorageType.ARRAY_CONTAINER:
                            # create a container for the members first, then push the
                            # container into an array
                            container = ContainerMember(var_name, gen_members)
                            allowed_member_type = StorageType.CONTAINER_MEMBER
                            array = ArrayMember(var_name, allowed_member_type,
                                                [container])

                            generated_value_members.append(array)

                        else:
                            raise Exception(
                                "%s at offset %# 08x: Data read via %s "
                                "cannot be stored as %s;"
                                " expected %s or %s" %
                                (var_name, offset, var_type, storage_type,
                                 StorageType.CONTAINER_MEMBER,
                                 StorageType.ARRAY_CONTAINER))

            elif isinstance(var_type, MultisubtypeMember):
                # subdata reference implies recursive call for reading the
                # binary data

                # arguments passed to the next-level constructor.
                varargs = dict()

                if var_type.passed_args:
                    if isinstance(var_type.passed_args, str):
                        var_type.passed_args = set(var_type.passed_args)
                    for passed_member_name in var_type.passed_args:
                        varargs[passed_member_name] = getattr(
                            self, passed_member_name)

                # subdata list length has to be defined beforehand as an
                # object member or a number; its name or count is specified
                # at the subdata member definition by length.
                list_len = var_type.get_length(self)

                # prepare result storage lists
                if isinstance(var_type, SubdataMember):
                    # single-subtype child data list
                    setattr(self, var_name, list())
                    single_type_subdata = True
                else:
                    # multi-subtype child data list
                    setattr(self, var_name,
                            {key: []
                             for key in var_type.class_lookup})
                    single_type_subdata = False

                # List for storing the ValueMember instance of each subdata structure
                subdata_value_members = []
                allowed_member_type = StorageType.CONTAINER_MEMBER

                # check if entries need offset checking
                if var_type.offset_to:
                    offset_lookup = getattr(self, var_type.offset_to[0])
                else:
                    offset_lookup = None

                for i in range(list_len):

                    # List of subtype members filled if there's a subtype to be read
                    sub_members = []

                    # if datfile offset == 0, entry has to be skipped.
                    if offset_lookup:
                        if not var_type.offset_to[1](offset_lookup[i]):
                            continue
                        # TODO: don't read sequentially, use the lookup as
                        #       new offset?

                    if single_type_subdata:
                        # append single data entry to the subdata object list
                        new_data_class = var_type.class_lookup[None]
                    else:
                        # to determine the subtype class, read the binary
                        # definition. this utilizes an on-the-fly definition
                        # of the data to be read.
                        offset, sub_members = self.read(
                            raw,
                            offset,
                            game_version,
                            cls=target_class,
                            members=(((False, ) +
                                      var_type.subtype_definition), ))

                        # read the variable set by the above read call to
                        # use the read data to determine the denomination of
                        # the member type
                        subtype_name = getattr(self,
                                               var_type.subtype_definition[1])

                        # look up the type name to get the subtype class
                        new_data_class = var_type.class_lookup[subtype_name]

                    if not issubclass(new_data_class, GenieStructure):
                        raise Exception("dumped data "
                                        "is not exportable: %s" %
                                        (new_data_class.__name__))

                    # create instance of submember class
                    new_data = new_data_class(**varargs)

                    # recursive call, read the subdata.
                    offset, gen_members = new_data.read(
                        raw, offset, game_version, new_data_class)

                    # append the new data to the appropriate list
                    if single_type_subdata:
                        getattr(self, var_name).append(new_data)
                    else:
                        getattr(self, var_name)[subtype_name].append(new_data)

                    if export == READ_GEN:
                        # Append the data to the ValueMember list
                        if storage_type is StorageType.ARRAY_CONTAINER:
                            # Put the subtype members in front
                            sub_members.extend(gen_members)
                            gen_members = sub_members
                            # create a container for the retrieved members
                            container = ContainerMember(var_name, gen_members)

                            # Save the container to a list
                            # The array is created after the for-loop
                            subdata_value_members.append(container)

                        else:
                            raise Exception(
                                "%s at offset %# 08x: Data read via %s "
                                "cannot be stored as %s;"
                                " expected %s" %
                                (var_name, offset, var_type, storage_type,
                                 StorageType.ARRAY_CONTAINER))

                if export == READ_GEN:
                    # Create an array from the subdata structures
                    # and append it to the other generated members
                    array = ArrayMember(var_name, allowed_member_type,
                                        subdata_value_members)
                    generated_value_members.append(array)

            else:
                # reading binary data, as this member is no reference but
                # actual content.

                data_count = 1
                is_array = False
                is_custom_member = False

                if isinstance(var_type, str):
                    is_array = vararray_match.match(var_type)

                    if is_array:
                        struct_type = is_array.group(1)
                        data_count = is_array.group(2)
                        if struct_type == "char":
                            struct_type = "char[]"

                        if integer_match.match(data_count):
                            # integer length
                            data_count = int(data_count)
                        else:
                            # dynamic length specified by member name
                            data_count = getattr(self, data_count)

                        if storage_type not in (StorageType.STRING_MEMBER,
                                                StorageType.ARRAY_INT,
                                                StorageType.ARRAY_FLOAT,
                                                StorageType.ARRAY_BOOL,
                                                StorageType.ARRAY_ID,
                                                StorageType.ARRAY_STRING):
                            raise Exception(
                                "%s at offset %# 08x: Data read via %s "
                                "cannot be stored as %s;"
                                " expected ArrayMember format" %
                                (var_name, offset, var_type, storage_type))

                    else:
                        struct_type = var_type
                        data_count = 1

                elif isinstance(var_type, ReadMember):
                    # These could be EnumMember, EnumLookupMember, etc.

                    # special type requires having set the raw data type
                    struct_type = var_type.raw_type
                    data_count = var_type.get_length(self)
                    is_custom_member = True

                else:
                    raise Exception(
                        "unknown data member definition %s for member '%s'" %
                        (var_type, var_name))

                if data_count < 0:
                    raise Exception(
                        "invalid length %d < 0 in %s for member '%s'" %
                        (data_count, var_type, var_name))

                if struct_type not in struct_type_lookup:
                    raise Exception(
                        "%s: member %s requests unknown data type %s" %
                        (repr(self), var_name, struct_type))

                if export == READ_UNKNOWN:
                    # for unknown variables, generate uid for the unknown
                    # memory location
                    var_name = "unknown-0x%08x" % offset

                # lookup c type to python struct scan type
                symbol = struct_type_lookup[struct_type]

                # read the raw data according to the computed struct format
                struct_format = "< %d%s" % (data_count, symbol)

                if export != SKIP:
                    result = struct.unpack_from(struct_format, raw, offset)

                    if is_custom_member:
                        if not var_type.verify_read_data(self, result):
                            raise Exception("invalid data when reading %s "
                                            "at offset %# 08x" %
                                            (var_name, offset))

                    # TODO: move these into a read entry hook/verification method
                    if symbol == "s":
                        # stringify char array
                        result = decode_until_null(result[0])

                        if export == READ_GEN:
                            if storage_type is StorageType.STRING_MEMBER:
                                gen_member = StringMember(var_name, result)

                            else:
                                raise Exception(
                                    "%s at offset %# 08x: Data read via %s "
                                    "cannot be stored as %s;"
                                    " expected %s" %
                                    (var_name, offset, var_type, storage_type,
                                     StorageType.STRING_MEMBER))

                            generated_value_members.append(gen_member)

                    elif is_array:
                        if export == READ_GEN:
                            # Turn every element of result into a member
                            # and put them into an array
                            array_members = []
                            allowed_member_type = None

                            for elem in result:
                                if storage_type is StorageType.ARRAY_INT:
                                    gen_member = IntMember(var_name, elem)
                                    allowed_member_type = StorageType.INT_MEMBER
                                    array_members.append(gen_member)

                                elif storage_type is StorageType.ARRAY_FLOAT:
                                    gen_member = FloatMember(var_name, elem)
                                    allowed_member_type = StorageType.FLOAT_MEMBER
                                    array_members.append(gen_member)

                                elif storage_type is StorageType.ARRAY_BOOL:
                                    gen_member = BooleanMember(var_name, elem)
                                    allowed_member_type = StorageType.BOOLEAN_MEMBER
                                    array_members.append(gen_member)

                                elif storage_type is StorageType.ARRAY_ID:
                                    gen_member = IDMember(var_name, elem)
                                    allowed_member_type = StorageType.ID_MEMBER
                                    array_members.append(gen_member)

                                elif storage_type is StorageType.ARRAY_STRING:
                                    gen_member = StringMember(var_name, elem)
                                    allowed_member_type = StorageType.STRING_MEMBER
                                    array_members.append(gen_member)

                                else:
                                    raise Exception(
                                        "%s at offset %# 08x: Data read via %s "
                                        "cannot be stored as %s;"
                                        " expected %s, %s, %s, %s or %s" %
                                        (var_name, offset, var_type,
                                         storage_type, StorageType.ARRAY_INT,
                                         StorageType.ARRAY_FLOAT,
                                         StorageType.ARRAY_BOOL,
                                         StorageType.ARRAY_ID,
                                         StorageType.ARRAY_STRING))

                            # Create the array
                            array = ArrayMember(var_name, allowed_member_type,
                                                array_members)
                            generated_value_members.append(array)

                    elif data_count == 1:
                        # store first tuple element
                        result = result[0]

                        if symbol == "f":
                            if not math.isfinite(result):
                                raise Exception("invalid float when "
                                                "reading %s at offset %# 08x" %
                                                (var_name, offset))

                        if export == READ_GEN:
                            # Store the member as ValueMember
                            if is_custom_member:
                                lookup_result = var_type.entry_hook(result)

                                if isinstance(var_type, EnumLookupMember):
                                    # store differently depending on storage type
                                    if storage_type is StorageType.INT_MEMBER:
                                        # store as plain integer value
                                        gen_member = IntMember(
                                            var_name, result)

                                    elif storage_type is StorageType.ID_MEMBER:
                                        # store as plain integer value
                                        gen_member = IDMember(var_name, result)

                                    elif storage_type is StorageType.BITFIELD_MEMBER:
                                        # store as plain integer value
                                        gen_member = BitfieldMember(
                                            var_name, result)

                                    elif storage_type is StorageType.STRING_MEMBER:
                                        # store by looking up value from dict
                                        gen_member = StringMember(
                                            var_name, lookup_result)

                                    else:
                                        raise Exception(
                                            "%s at offset %# 08x: Data read via %s "
                                            "cannot be stored as %s;"
                                            " expected %s, %s, %s or %s" %
                                            (var_name, offset, var_type,
                                             storage_type,
                                             StorageType.INT_MEMBER,
                                             StorageType.ID_MEMBER,
                                             StorageType.BITFIELD_MEMBER,
                                             StorageType.STRING_MEMBER))

                                elif isinstance(var_type, ContinueReadMember):
                                    if storage_type is StorageType.BOOLEAN_MEMBER:
                                        gen_member = StringMember(
                                            var_name, lookup_result)

                                    else:
                                        raise Exception(
                                            "%s at offset %# 08x: Data read via %s "
                                            "cannot be stored as %s;"
                                            " expected %s" %
                                            (var_name, offset, var_type,
                                             storage_type,
                                             StorageType.BOOLEAN_MEMBER))

                            else:
                                if storage_type is StorageType.INT_MEMBER:
                                    gen_member = IntMember(var_name, result)

                                elif storage_type is StorageType.FLOAT_MEMBER:
                                    gen_member = FloatMember(var_name, result)

                                elif storage_type is StorageType.BOOLEAN_MEMBER:
                                    gen_member = BooleanMember(
                                        var_name, result)

                                elif storage_type is StorageType.ID_MEMBER:
                                    gen_member = IDMember(var_name, result)

                                else:
                                    raise Exception(
                                        "%s at offset %# 08x: Data read via %s "
                                        "cannot be stored as %s;"
                                        " expected %s, %s, %s or %s" %
                                        (var_name, offset, var_type,
                                         storage_type, StorageType.INT_MEMBER,
                                         StorageType.FLOAT_MEMBER,
                                         StorageType.BOOLEAN_MEMBER,
                                         StorageType.ID_MEMBER))

                            generated_value_members.append(gen_member)

                    # run entry hook for non-primitive members
                    if is_custom_member:
                        result = var_type.entry_hook(result)

                        if result == ContinueReadMember.Result.ABORT:
                            # don't go through all other members of this class!
                            stop_reading_members = True

                    # store member's data value
                    setattr(self, var_name, result)

                # increase the current file position by the size we just read
                offset += struct.calcsize(struct_format)

        return offset, generated_value_members
示例#59
0
 def upper_bounded(self) -> bool:
     """
     Returns:
         Whether or not the metric has an upper bound
     """
     return math.isfinite(self.__maximum)
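Here isfinite doubles as an "is this bound real?" predicate: a maximum of math.inf means the metric is uncapped. A minimal stand-in for the surrounding class:

import math


class Metric:
    def __init__(self, maximum: float):
        self.__maximum = maximum

    def upper_bounded(self) -> bool:
        """Whether or not the metric has an upper bound."""
        return math.isfinite(self.__maximum)


print(Metric(100.0).upper_bounded())     # True
print(Metric(math.inf).upper_bounded())  # False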
示例#60
0
def main(args):
  print(args)
  check_args(args)
  float_dtype = torch.cuda.FloatTensor
  long_dtype = torch.cuda.LongTensor

  vocab, train_loader, val_loader = build_loaders(args)
  model, model_kwargs = build_model(args, vocab)
  model.type(float_dtype)
  print(model)

  optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

  obj_discriminator, d_obj_kwargs = build_obj_discriminator(args, vocab)
  img_discriminator, d_img_kwargs = build_img_discriminator(args, vocab)
  gan_g_loss, gan_d_loss = get_gan_losses(args.gan_loss_type)

  if obj_discriminator is not None:
    obj_discriminator.type(float_dtype)
    obj_discriminator.train()
    print(obj_discriminator)
    optimizer_d_obj = torch.optim.Adam(obj_discriminator.parameters(),
                                       lr=args.learning_rate)

  if img_discriminator is not None:
    img_discriminator.type(float_dtype)
    img_discriminator.train()
    print(img_discriminator)
    optimizer_d_img = torch.optim.Adam(img_discriminator.parameters(),
                                       lr=args.learning_rate)

  restore_path = None
  if args.restore_from_checkpoint:
    restore_path = '%s_with_model.pt' % args.checkpoint_name
    restore_path = os.path.join(args.output_dir, restore_path)
  if restore_path is not None and os.path.isfile(restore_path):
    print('Restoring from checkpoint:')
    print(restore_path)
    checkpoint = torch.load(restore_path)
    model.load_state_dict(checkpoint['model_state'])
    optimizer.load_state_dict(checkpoint['optim_state'])

    if obj_discriminator is not None:
      obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
      optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])

    if img_discriminator is not None:
      img_discriminator.load_state_dict(checkpoint['d_img_state'])
      optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])

    t = checkpoint['counters']['t']
    if 0 <= args.eval_mode_after <= t:
      model.eval()
    else:
      model.train()
    epoch = checkpoint['counters']['epoch']
  else:
    t, epoch = 0, 0
    checkpoint = {
      'args': args.__dict__,
      'vocab': vocab,
      'model_kwargs': model_kwargs,
      'd_obj_kwargs': d_obj_kwargs,
      'd_img_kwargs': d_img_kwargs,
      'losses_ts': [],
      'losses': defaultdict(list),
      'd_losses': defaultdict(list),
      'checkpoint_ts': [],
      'train_batch_data': [], 
      'train_samples': [],
      'train_iou': [],
      'val_batch_data': [], 
      'val_samples': [],
      'val_losses': defaultdict(list),
      'val_iou': [], 
      'norm_d': [], 
      'norm_g': [],
      'counters': {
        't': None,
        'epoch': None,
      },
      'model_state': None, 'model_best_state': None, 'optim_state': None,
      'd_obj_state': None, 'd_obj_best_state': None, 'd_obj_optim_state': None,
      'd_img_state': None, 'd_img_best_state': None, 'd_img_optim_state': None,
      'best_t': [],
    }

  while True:
    if t >= args.num_iterations:
      break
    epoch += 1
    print('Starting epoch %d' % epoch)
    
    for batch in train_loader:
      if t == args.eval_mode_after:
        print('switching to eval mode')
        model.eval()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
      t += 1
      batch = [tensor.cuda() for tensor in batch]
      masks = None
      if len(batch) == 6:
        imgs, objs, boxes, triples, obj_to_img, triple_to_img = batch
      elif len(batch) == 7:
        imgs, objs, boxes, masks, triples, obj_to_img, triple_to_img = batch
      else:
        assert False
      predicates = triples[:, 1]

      with timeit('forward', args.timing):
        model_boxes = boxes
        model_masks = masks
        model_out = model(objs, triples, obj_to_img,
                          boxes_gt=model_boxes, masks_gt=model_masks)
        imgs_pred, boxes_pred, masks_pred, predicate_scores = model_out
      with timeit('loss', args.timing):
        # Skip the pixel loss if using GT boxes
        skip_pixel_loss = (model_boxes is None)
        total_loss, losses =  calculate_model_losses(
                                args, skip_pixel_loss, model, imgs, imgs_pred,
                                boxes, boxes_pred, masks, masks_pred,
                                predicates, predicate_scores)

      if obj_discriminator is not None:
        scores_fake, ac_loss = obj_discriminator(imgs_pred, objs, boxes, obj_to_img)
        total_loss = add_loss(total_loss, ac_loss, losses, 'ac_loss',
                              args.ac_loss_weight)
        weight = args.discriminator_loss_weight * args.d_obj_weight
        total_loss = add_loss(total_loss, gan_g_loss(scores_fake), losses,
                              'g_gan_obj_loss', weight)

      if img_discriminator is not None:
        scores_fake = img_discriminator(imgs_pred)
        weight = args.discriminator_loss_weight * args.d_img_weight
        total_loss = add_loss(total_loss, gan_g_loss(scores_fake), losses,
                              'g_gan_img_loss', weight)

      losses['total_loss'] = total_loss.item()
      if not math.isfinite(losses['total_loss']):
        print('WARNING: Got loss = NaN, not backpropping')
        continue

      optimizer.zero_grad()
      with timeit('backward', args.timing):
        total_loss.backward()
      optimizer.step()
      total_loss_d = None
      ac_loss_real = None
      ac_loss_fake = None
      d_losses = {}
      
      if obj_discriminator is not None:
        d_obj_losses = LossManager()
        imgs_fake = imgs_pred.detach()
        scores_fake, ac_loss_fake = obj_discriminator(imgs_fake, objs, boxes, obj_to_img)
        scores_real, ac_loss_real = obj_discriminator(imgs, objs, boxes, obj_to_img)

        d_obj_gan_loss = gan_d_loss(scores_real, scores_fake)
        d_obj_losses.add_loss(d_obj_gan_loss, 'd_obj_gan_loss')
        d_obj_losses.add_loss(ac_loss_real, 'd_ac_loss_real')
        d_obj_losses.add_loss(ac_loss_fake, 'd_ac_loss_fake')

        optimizer_d_obj.zero_grad()
        d_obj_losses.total_loss.backward()
        optimizer_d_obj.step()

      if img_discriminator is not None:
        d_img_losses = LossManager()
        imgs_fake = imgs_pred.detach()
        scores_fake = img_discriminator(imgs_fake)
        scores_real = img_discriminator(imgs)

        d_img_gan_loss = gan_d_loss(scores_real, scores_fake)
        d_img_losses.add_loss(d_img_gan_loss, 'd_img_gan_loss')
        
        optimizer_d_img.zero_grad()
        d_img_losses.total_loss.backward()
        optimizer_d_img.step()

      if t % args.print_every == 0:
        print('t = %d / %d' % (t, args.num_iterations))
        for name, val in losses.items():
          print(' G [%s]: %.4f' % (name, val))
          checkpoint['losses'][name].append(val)
        checkpoint['losses_ts'].append(t)

        if obj_discriminator is not None:
          for name, val in d_obj_losses.items():
            print(' D_obj [%s]: %.4f' % (name, val))
            checkpoint['d_losses'][name].append(val)

        if img_discriminator is not None:
          for name, val in d_img_losses.items():
            print(' D_img [%s]: %.4f' % (name, val))
            checkpoint['d_losses'][name].append(val)
      
      if t % args.checkpoint_every == 0:
        print('checking on train')
        train_results = check_model(args, t, train_loader, model)
        t_losses, t_samples, t_batch_data, t_avg_iou = train_results

        checkpoint['train_batch_data'].append(t_batch_data)
        checkpoint['train_samples'].append(t_samples)
        checkpoint['checkpoint_ts'].append(t)
        checkpoint['train_iou'].append(t_avg_iou)

        print('checking on val')
        val_results = check_model(args, t, val_loader, model)
        val_losses, val_samples, val_batch_data, val_avg_iou = val_results
        checkpoint['val_samples'].append(val_samples)
        checkpoint['val_batch_data'].append(val_batch_data)
        checkpoint['val_iou'].append(val_avg_iou)

        print('train iou: ', t_avg_iou)
        print('val iou: ', val_avg_iou)

        for k, v in val_losses.items():
          checkpoint['val_losses'][k].append(v)
        checkpoint['model_state'] = model.state_dict()

        if obj_discriminator is not None:
          checkpoint['d_obj_state'] = obj_discriminator.state_dict()
          checkpoint['d_obj_optim_state'] = optimizer_d_obj.state_dict()

        if img_discriminator is not None:
          checkpoint['d_img_state'] = img_discriminator.state_dict()
          checkpoint['d_img_optim_state'] = optimizer_d_img.state_dict()

        checkpoint['optim_state'] = optimizer.state_dict()
        checkpoint['counters']['t'] = t
        checkpoint['counters']['epoch'] = epoch
        checkpoint_path = os.path.join(args.output_dir,
                              '%s_with_model.pt' % args.checkpoint_name)
        print('Saving checkpoint to ', checkpoint_path)
        torch.save(checkpoint, checkpoint_path)

        # Save another checkpoint without any model or optim state
        checkpoint_path = os.path.join(args.output_dir,
                              '%s_no_model.pt' % args.checkpoint_name)
        key_blacklist = ['model_state', 'optim_state', 'model_best_state',
                         'd_obj_state', 'd_obj_optim_state', 'd_obj_best_state',
                         'd_img_state', 'd_img_optim_state', 'd_img_best_state']
        small_checkpoint = {}
        for k, v in checkpoint.items():
          if k not in key_blacklist:
            small_checkpoint[k] = v
        torch.save(small_checkpoint, checkpoint_path)
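Unlike the detection loops earlier, which call sys.exit(1) on a non-finite loss, this GAN loop merely warns and skips the offending batch. Both policies reduce to the same one-line guard; a distilled comparison (the helper is hypothetical):

import math


def guard_loss(loss_value, strict=False):
    """Return True if it is safe to backpropagate loss_value."""
    if not math.isfinite(loss_value):
        if strict:
            raise SystemExit('Loss is {}, stopping training'.format(loss_value))
        print('WARNING: Got loss = {}, not backpropping'.format(loss_value))
        return False
    return True


print(guard_loss(0.42))          # True
print(guard_loss(float('nan')))  # warns, returns False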