Example #1
0
 def test_ctx_mgr_restores(self):
     # test that print options are actually restored
     opts = np.get_printoptions()
     with np.printoptions(precision=opts['precision'] - 1,
                          linewidth=opts['linewidth'] - 4):
         pass
     assert_equal(np.get_printoptions(), opts)
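For context, np.printoptions (added in NumPy 1.15) wraps get_printoptions/set_printoptions as a context manager; a minimal sketch of the behavior the test above verifies:

import numpy as np

a = np.array([1.123456789, 2.987654321])
with np.printoptions(precision=2, suppress=True):
    print(a)                                   # e.g. [1.12 2.99] while the context is active
print(np.get_printoptions()['precision'])      # restored afterwards (8 by default)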
Example #2
0
def test_precision():
    """test various values for float_precision."""
    f = PlainTextFormatter()
    nt.assert_equals(f(pi), repr(pi))
    f.float_precision = 0
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equals(po["precision"], 0)
    nt.assert_equals(f(pi), "3")
    f.float_precision = 2
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equals(po["precision"], 2)
    nt.assert_equals(f(pi), "3.14")
    f.float_precision = "%g"
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equals(po["precision"], 2)
    nt.assert_equals(f(pi), "3.14159")
    f.float_precision = "%e"
    nt.assert_equals(f(pi), "3.141593e+00")
    f.float_precision = ""
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equals(po["precision"], 8)
    nt.assert_equals(f(pi), repr(pi))
Example #3
0
def magic_push_print(self, arg):
    """ Set numpy array printing options by pushing onto a stack.

"""
    try:
        import numpy
    except ImportError:
        raise UsageError("could not import numpy.")
    args = parse_argstring(magic_push_print, arg)
    kwds = {}
    if args.precision is not None:
        kwds['precision'] = args.precision
    if args.threshold is not None:
        if args.threshold == 0:
            args.threshold = sys.maxint
        kwds['threshold'] = args.threshold
    if args.edgeitems is not None:
        kwds['edgeitems'] = args.edgeitems
    if args.linewidth is not None:
        kwds['linewidth'] = args.linewidth
    if args.suppress is not None:
        kwds['suppress'] = args.suppress
    if args.nanstr is not None:
        kwds['nanstr'] = args.nanstr
    if args.infstr is not None:
        kwds['infstr'] = args.infstr

    old_options = numpy.get_printoptions()
    numpy.set_printoptions(**kwds)
    stack = getattr(self, '_numpy_printoptions_stack', [])
    stack.append(old_options)
    self._numpy_printoptions_stack = stack
    if not args.quiet:
        print_numpy_printoptions(numpy.get_printoptions())
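The magic above only pushes onto the stack; a hypothetical pop counterpart (not part of this excerpt, shown only as an illustrative sketch) would restore the most recently saved options, roughly:

def magic_pop_print(self, arg):
    """ Restore the numpy print options most recently pushed onto the stack (illustrative sketch). """
    import numpy
    stack = getattr(self, '_numpy_printoptions_stack', [])
    if not stack:
        print("No numpy print options left on the stack.")
        return
    numpy.set_printoptions(**stack.pop())
    self._numpy_printoptions_stack = stack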
Example #4
0
def test_precision():
    """test various values for float_precision."""
    f = PlainTextFormatter()
    nt.assert_equal(f(pi), repr(pi))
    f.float_precision = 0
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 0)
    nt.assert_equal(f(pi), '3')
    f.float_precision = 2
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 2)
    nt.assert_equal(f(pi), '3.14')
    f.float_precision = '%g'
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 2)
    nt.assert_equal(f(pi), '3.14159')
    f.float_precision = '%e'
    nt.assert_equal(f(pi), '3.141593e+00')
    f.float_precision = ''
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 8)
    nt.assert_equal(f(pi), repr(pi))
Example #5
0
    def _get_suppress(self):
        """
        Gets the current suppression settings (from numpy).
        """

        suppress = np.get_printoptions()['suppress']
        suppress_thresh = 0.1 ** (np.get_printoptions()['precision'] + 0.5)
        return (suppress, suppress_thresh)
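For illustration, the two values above control whether (and below what magnitude) small floats are printed as plain zeros; a quick, hedged sketch:

import numpy as np

x = np.array([1e-9, 1.5])
np.set_printoptions(suppress=False, precision=8)
print(x)   # scientific notation, e.g. [1.0e-09 1.5e+00]
np.set_printoptions(suppress=True)
print(x)   # values below roughly 0.1 ** (precision + 0.5) print as 0., e.g. [0.  1.5]
np.set_printoptions(suppress=False)   # put the default back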
Example #6
0
 def test_ctx_mgr_exceptions(self):
     # test that print options are restored even if an exception is raised
     opts = np.get_printoptions()
     try:
         with np.printoptions(precision=2, linewidth=11):
             raise ValueError
     except ValueError:
         pass
     assert_equal(np.get_printoptions(), opts)
Example #7
0
  def testTensorStrReprObeyNumpyPrintOptions(self):
    orig_threshold = np.get_printoptions()["threshold"]
    orig_edgeitems = np.get_printoptions()["edgeitems"]
    np.set_printoptions(threshold=2, edgeitems=1)

    t = _create_tensor(np.arange(10, dtype=np.int32))
    self.assertIn("[0 ..., 9]", str(t))
    self.assertIn("[0, ..., 9]", repr(t))

    # Clean up: reset to previous printoptions.
    np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
Example #8
0
  def testTensorStrReprObeyNumpyPrintOptions(self):
    orig_threshold = np.get_printoptions()["threshold"]
    orig_edgeitems = np.get_printoptions()["edgeitems"]
    np.set_printoptions(threshold=2, edgeitems=1)

    t = _create_tensor(np.arange(10, dtype=np.int32))
    self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t)))
    self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t)))

    # Clean up: reset to previous printoptions.
    np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
Example #9
0
    def __repr__(self):
        if len(self) > np.get_printoptions()['threshold']:
            # Show only the first and last edgeitems.
            edgeitems = np.get_printoptions()['edgeitems']
            data = str(list(self[:edgeitems]))[:-1]
            data += ", ..., "
            data += str(list(self[-edgeitems:]))[1:]
        else:
            data = str(list(self))

        return "{name}({data})".format(name=self.__class__.__name__,
                                       data=data)
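A small self-contained sketch of the same truncation idea, using a throwaway helper (truncated_repr is illustrative, not from the source):

import numpy as np

def truncated_repr(seq):
    # Honor numpy's threshold/edgeitems, as the __repr__ above does.
    opts = np.get_printoptions()
    if len(seq) > opts['threshold']:
        edge = opts['edgeitems']
        return "[{} ... {}]".format(", ".join(map(str, seq[:edge])),
                                    ", ".join(map(str, seq[-edge:])))
    return str(list(seq))

np.set_printoptions(threshold=5, edgeitems=2)
print(truncated_repr(range(10)))   # [0, 1 ... 8, 9]
np.set_printoptions(threshold=1000, edgeitems=3)   # restore the defaults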
Example #10
0
def _test():
    import doctest

    start_suppress = np.get_printoptions()["suppress"]
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=start_suppress)
Example #11
0
def parse_numpy_printoption(kv_str):
  """Sets a single numpy printoption from a string of the form 'x=y'.

  See documentation on numpy.set_printoptions() for details about what values
  x and y can take. x can be any option listed there other than 'formatter'.

  Args:
    kv_str: A string of the form 'x=y', such as 'threshold=100000'

  Raises:
    argparse.ArgumentTypeError: If the string couldn't be used to set any
        numpy printoption.
  """
  k_v_str = kv_str.split("=", 1)
  if len(k_v_str) != 2 or not k_v_str[0]:
    raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
  k, v_str = k_v_str
  printoptions = np.get_printoptions()
  if k not in printoptions:
    raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
  v_type = type(printoptions[k])
  if v_type is type(None):
    raise argparse.ArgumentTypeError(
        "Setting '%s' from the command line is not supported." % k)
  try:
    v = (
        v_type(v_str)
        if v_type is not bool else flags.BooleanParser().parse(v_str))
  except ValueError as e:
    raise argparse.ArgumentTypeError(e.message)
  np.set_printoptions(**{k: v})
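A hedged sketch of how a validator like this is typically wired into argparse; the flag name --printoptions is illustrative, the sketch relies on the parse_numpy_printoption definition above (and its module's numpy import), and each k=v value is applied to numpy as a side effect of parsing:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--printoptions", nargs="*", type=parse_numpy_printoption,
                    default=[], help="numpy print options, e.g. threshold=1000 precision=3")
parser.parse_args(["--printoptions", "precision=3", "linewidth=120"])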
Example #12
0
    def __call__(self, report_folder, connection_holder, dsg_targets):
        """ Convert synaptic matrix for every application edge.
        """

        # Update the print options to display everything
        print_opts = numpy.get_printoptions()
        numpy.set_printoptions(threshold=numpy.nan)

        if dsg_targets is None:
            raise SynapticConfigurationException(
                "dsg_targets should not be none, used as a check for "
                "connection holder data to be generated")

        # generate folder for synaptic reports
        top_level_folder = os.path.join(report_folder, _DIRNAME)
        if not os.path.exists(top_level_folder):
            os.mkdir(top_level_folder)

        # create progress bar
        progress = ProgressBar(connection_holder.keys(),
                               "Generating synaptic matrix reports")

        # for each application edge, write matrix in new file
        for edge, _ in progress.over(connection_holder.keys()):
            # only write matrices for edges which have matrices
            if isinstance(edge, ProjectionApplicationEdge):
                # figure new file name
                file_name = os.path.join(
                    top_level_folder, _TMPL_FILENAME.format(edge.label))
                self._write_file(file_name, connection_holder, edge)

        # Reset the print options
        numpy.set_printoptions(**print_opts)
Example #13
0
    def compareSplitsAll(self, precision=3, linewidth=120):
        nM = len(self.mm)
        nItems = ((nM * nM) - nM)/2
        results = numpy.zeros((nM, nM), numpy.float)
        vect = numpy.zeros(nItems, numpy.float)
        vCounter = 0
        for mNum1 in range(1, nM):
            for mNum2 in range(mNum1):
                ret = self.compareSplits(mNum1, mNum2, verbose=False)
                #print "+++ ret = %s" % ret
                if ret == None:
                    ret = 0.0
                results[mNum1][mNum2] = ret
                results[mNum2][mNum1] = ret
                vect[vCounter] = ret
                vCounter += 1
                if 0:
                    print " %10i " % mNum1,
                    print " %10i " % mNum2,
                    print "%.3f" % ret

        # Save current numpy printoptions, and restore, below.
        curr = numpy.get_printoptions()
        numpy.set_printoptions(precision=precision, linewidth=linewidth)
        print results
        numpy.set_printoptions(precision=curr['precision'], linewidth=curr['linewidth'])
        
        print "For the %i values in one triangle," % nItems
        print "max =  ", vect.max()
        print "min =  ", vect.min()
        print "mean = ", vect.mean()
        print "var =  ", vect.var()
Example #14
0
def print_array(x, debug=False, **kwargs):
    opt = np.get_printoptions()
    ndigits = int(np.log10(np.nanmax(x))) + 2

    if 'precision' in kwargs:
        nprec = kwargs['precision']
    else:
        nprec = 3

    if 'formatter' not in kwargs:
        if issubclass(x.dtype.type, np.integer):  # np.int was removed in NumPy >= 1.24
            ff = '{:%dd}' % (ndigits)
            kwargs['formatter'] = {'int': ff.format}
        else:
            ff = '{:%d.%df}' % (ndigits + nprec, nprec)
            kwargs['formatter'] = {'float': ff.format}

    if debug:
        print(nprec, ndigits, kwargs)

    np.set_printoptions(**kwargs)
    if isinstance(x, np.ndarray):
        if len(x.shape) > 1:
            _print_helper(x)
        else:
            print(x)
    np.set_printoptions(**opt)
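A brief, hedged usage note for print_array above (a 1-D float input avoids the module-internal _print_helper used for 2-D arrays; exact output depends on the data and NumPy version):

import numpy as np

x = np.array([0.5, 1.25, 10.0])
print_array(x, precision=2)   # roughly [ 0.50  1.25 10.00], each entry padded to a fixed width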
Example #15
0
    def __repr__(self):
        prefixstr = "    "

        if self._values.shape == ():
            v = [tuple([self._values[nm] for nm in self._values.dtype.names])]
            v = np.array(v, dtype=self._values.dtype)
        else:
            v = self._values

        names = self._values.dtype.names
        precision = np.get_printoptions()["precision"]
        fstyle = functools.partial(_fstyle, precision)
        format_val = lambda val: np.array2string(val, style=fstyle)
        formatter = {"numpystr": lambda x: "({0})".format(", ".join(format_val(x[name]) for name in names))}

        if NUMPY_LT_1P7:
            arrstr = np.array2string(v, separator=", ", prefix=prefixstr)

        else:
            arrstr = np.array2string(v, formatter=formatter, separator=", ", prefix=prefixstr)

        if self._values.shape == ():
            arrstr = arrstr[1:-1]

        unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
        return "<{0} ({1}) {2:s}\n{3}{4}>".format(
            self.__class__.__name__, ", ".join(self.components), unitstr, prefixstr, arrstr
        )
Example #16
0
def _pprint(params, offset=0, printer=repr):
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset) * ' '
    for i, (k, v) in enumerate(params):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)

    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
Example #17
0
def printoptions(*args, **kwargs):
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        np.set_printoptions(**original)
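Generator-style helpers like this one are presumably decorated with contextlib.contextmanager in their original modules (the decorator does not appear in this excerpt); a complete, self-contained version for reference:

import contextlib
import numpy as np

@contextlib.contextmanager
def printoptions(*args, **kwargs):
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        np.set_printoptions(**original)   # always restore, even on error

with printoptions(precision=2, suppress=True):
    print(np.array([1.23456, 1e-9]))      # printed with the temporary options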
Example #18
0
def _array2string(values, prefix=''):
    # Mimic numpy >=1.12 array2string, in which structured arrays are
    # typeset taking into account all printoptions.
    # TODO: in final numpy 1.12, the scalar case should work as well;
    # see https://github.com/numpy/numpy/issues/8172
    if NUMPY_LT_1_12:
        # Mimic StructureFormat from numpy >=1.12 assuming float-only data.
        from numpy.core.arrayprint import FloatFormat
        opts = np.get_printoptions()
        format_functions = [FloatFormat(np.atleast_1d(values[component]).ravel(),
                                        precision=opts['precision'],
                                        suppress_small=opts['suppress'])
                            for component in values.dtype.names]

        def fmt(x):
            return '({})'.format(', '.join(format_function(field)
                                           for field, format_function in
                                           zip(x, format_functions)))
        # Before 1.12, structured arrays were set as "numpystr",
        # so that is the formatter we need to replace.
        formatter = {'numpystr': fmt}
    else:
        fmt = repr
        formatter = {}

    return np.array2string(values, formatter=formatter, style=fmt,
                           separator=', ', prefix=prefix)
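The 'formatter' hook used above can also be exercised directly; a short sketch with np.array2string and a 'float_kind' formatter (the values here are illustrative):

import numpy as np

v = np.array([1.0, 2.5, 3.25])
print(np.array2string(v, formatter={'float_kind': lambda x: '%.2f' % x},
                      separator=', ', prefix='    '))   # [1.00, 2.50, 3.25]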
Example #19
0
 def print_table(self):
     '''Print a table of probabilities at each SNP.'''
     options = np.get_printoptions()
     np.set_printoptions(precision=3, suppress=True, threshold=np.nan, linewidth=200)
     print 'lambda = %s, Delta = %s, eps = %.1e' % (self.lam, repr(self.Delta)[6:-1], self.e)
     print 'Viterbi path (frame SNPs): ' + ' -> '.join(map(lambda x: '%d (%d-%d)' % (x[0], x[1][0], x[1][1]),
                                               itemutil.groupby_with_range(self.Q_star + 1)))
     print 'Viterbi path (SNPs):       ' + ' -> '.join(map(lambda x: '%d (%d-%d)' % (x[0], self.snps[x[1][0]], self.snps[x[1][1]]),
                                               itemutil.groupby_with_range(self.Q_star + 1)))
     print '    %-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s%-10s' % \
     ('t', 'SNP#', 'Obs', 'G1', 'G2', 'lam*dx', 'p',
      'Gam1', 'Gam2', 'Gam3', 'Gam4', 'Gam5', 'Gam6', 'Gam7', 'Gam8', 'Gam9',
      'p(IBD)', 'Viterbi', 'IBD?')
     print np.concatenate((np.arange(len(self.x))[np.newaxis].transpose(),
                           self.snps[np.newaxis].transpose(),
                           self.Obs[np.newaxis].transpose(),
                           np.array([ProbIbdHmmCalculator._T_STATE_G[t][0] for t in self.Obs])[np.newaxis].transpose(),
                           np.array([ProbIbdHmmCalculator._T_STATE_G[t][1] for t in self.Obs])[np.newaxis].transpose(),
                           np.concatenate((self.lam_x, [0]))[np.newaxis].transpose(),
                           self.p[np.newaxis].transpose(),
                           self.Gamma.transpose(),
                           self.p_ibd_gamma[np.newaxis].transpose(),
                           (self.Q_star + 1)[np.newaxis].transpose(),
                           self.p_ibd_viterbi[np.newaxis].transpose()
                           ), axis=1)
     util.set_printoptions(options)
Example #20
0
def linprog_verbose_callback(xk, **kwargs):
    """
    This is a sample callback for use with linprog, demonstrating the callback interface.
    This callback produces detailed output to sys.stdout before each iteration and after
    the final iteration of the simplex algorithm.

    Parameters
    ----------
    xk : array_like
        The current solution vector.
    **kwargs : dict
        A dictionary containing the following parameters:

        tableau : array_like
            The current tableau of the simplex algorithm.  Its structure is defined in _solve_simplex.
        phase : int
            The current Phase of the simplex algorithm (1 or 2)
        iter : int
            The current iteration number.
        pivot : tuple(int, int)
            The index of the tableau selected as the next pivot, or nan if no pivot exists
        basis : array(int)
            A list of the current basic variables.  Each element contains the name of a basic variable and
            its value.
        complete : bool
            True if the simplex algorithm has completed (and this is the final call to callback), otherwise False.
    """
    tableau = kwargs["tableau"]
    iter = kwargs["iter"]
    pivrow, pivcol = kwargs["pivot"]
    phase = kwargs["phase"]
    basis = kwargs["basis"]
    complete = kwargs["complete"]

    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float':lambda x: "{: 12.4f}".format(x)})
    if complete:
        print("--------- Iteration Complete - Phase {:d} -------\n".format(phase))
        print("Tableau:")
    elif iter == 0:
        print("--------- Initial Tableau - Phase {:d} ----------\n".format(phase))

    else:
        print("--------- Iteration {:d}  - Phase {:d} --------\n".format(iter, phase))
        print("Tableau:")

    if iter >= 0:
        print("" + str(tableau) + "\n")
        if not complete:
            print("Pivot Element: T[{:.0f}, {:.0f}]\n".format(pivrow, pivcol))
        print("Basic Variables:", basis)
        print()
        print("Current Solution:")
        print("x = ", xk)
        print()
        print("Current Objective Value:")
        print("f = ", -tableau[-1, -1])
        print()
    np.set_printoptions(**saved_printoptions)
Example #21
0
    def __repr__(self):
        prefixstr = '    '

        if self._values.shape == ():
            v = [tuple([self._values[nm] for nm in self._values.dtype.names])]
            v = np.array(v, dtype=self._values.dtype)
        else:
            v = self._values

        names = self._values.dtype.names
        precision = np.get_printoptions()['precision']
        fstyle = functools.partial(_fstyle, precision)
        format_val = lambda val: np.array2string(val, style=fstyle)
        formatter = {
            'numpystr': lambda x: '({0})'.format(
                ', '.join(format_val(x[name]) for name in names))
        }

        if NUMPY_LT_1P7:
            arrstr = np.array2string(v, separator=', ',
                                     prefix=prefixstr)

        else:
            arrstr = np.array2string(v, formatter=formatter,
                                     separator=', ',
                                     prefix=prefixstr)

        if self._values.shape == ():
            arrstr = arrstr[1:-1]

        unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
        return '<{0} ({1}) {2:s}\n{3}{4}>'.format(
            self.__class__.__name__, ', '.join(self.components),
            unitstr, prefixstr, arrstr)
Example #22
0
 def __repr__(self):
     "Returns a string representation for debugging."
     old_printopt = numpy.get_printoptions()
     try:
         import sys
         numpy.set_printoptions(threshold=sys.maxint, suppress=True)
         import os
         rows, columns = map(int, os.popen('stty size', 'r').read().split())
         # We don't return the rates in self.unit_adapters_mat
         array_str = numpy.array_str(a=self.update_history,
                                     max_line_width=columns-26,
                                     precision=2) \
                                     if self.update_history != None \
                                     else None
         r = (
             "RectilinearOuputRateEncoder(\n"
             "  self.pynn_pop =       %r\n"
             "  self.shape =          %r\n"
             "  self.window_width =   %r\n"
             "  self.update_period =  %r\n"
             "  self.hist_len =       %r\n"
             "  self.idx =            %r\n"
             "  self.update_history = %s\n"
             ")" ) % \
             (self.pynn_population,
              (self._dim1, self._dim2),
              self.window_width,
              self.update_period,
              self.hist_len,
              self.idx,
              array_str)
     finally:
         numpy.set_printoptions(**old_printopt)
     return r
Example #23
0
    def _register_neighb_to_model(self, model_bundle, neighb_streamlines,
                                  metric=None, x0=None, bounds=None,
                                  select_model=400, select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20, num_threads=None):

        if self.verbose:
            print('# Local SLR of neighb_streamlines to model')
            t = time()

        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric(num_threads=num_threads)
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()

        if x0 is None:
            x0 = 'similarity'

        if bounds is None:
            bounds = [(-30, 30), (-30, 30), (-30, 30),
                      (-45, 45), (-45, 45), (-45, 45), (0.8, 1.2)]

        # TODO this can be sped up by using the centroids directly
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model, rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target, rng=self.rng)

        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        slr = StreamlineLinearRegistration(metric=metric, x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)

        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(
            slm.matrix, transf_streamlines._data)

        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations

        if self.verbose:
            print(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd),))
            if slr_iterations is not None:
                print(' Number of iterations %d' % (slr_iterations,))
            print(' Matrix size {}'.format(slm.matrix.shape))
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            print(transf_matrix)
            print(slm.xopt)
            np.set_printoptions(**original)

            print(' Duration %0.3f sec. \n' % (time() - t,))

        return transf_streamlines, slr_bmd
Example #24
0
def plot_keypoint_scales(hs, fnum=1):
    print('[dev] plot_keypoint_scales()')
    cx2_kpts = hs.feats.cx2_kpts
    if len(cx2_kpts) == 0:
        hs.refresh_features()
        cx2_kpts = hs.feats.cx2_kpts
    cx2_nFeats = map(len, cx2_kpts)
    kpts = np.vstack(cx2_kpts)
    print('[dev] --- LaTeX --- ')
    _printopts = np.get_printoptions()
    np.set_printoptions(precision=3)
    print(util_latex.latex_scalar(r'\# keypoints, ', len(kpts)))
    print(util_latex.latex_mystats(r'\# keypoints per image', cx2_nFeats))
    scales = ktool.get_scales(kpts)
    scales = np.array(sorted(scales))
    print(util_latex.latex_mystats(r'keypoint scale', scales))
    np.set_printoptions(**_printopts)
    print('[dev] ---/LaTeX --- ')
    #
    df2.figure(fnum=fnum, docla=True, title='sorted scales')
    df2.plot(scales)
    df2.adjust_subplots_safe()
    #ax = df2.gca()
    #ax.set_yscale('log')
    #ax.set_xscale('log')
    #
    fnum += 1
    df2.figure(fnum=fnum, docla=True, title='hist scales')
    df2.show_histogram(scales, bins=20)
    df2.adjust_subplots_safe()
    #ax = df2.gca()
    #ax.set_yscale('log')
    #ax.set_xscale('log')
    return fnum
Example #25
0
def test(doctests=False):
    """

    Run the nitime test suite using nose.

    """
    #Make sure that you only change the print options during the testing
    #of nitime and don't affect the user session after that:
    opt_dict = np.get_printoptions()
    np.set_printoptions(precision=4)
    # We construct our own argv manually, so we must set argv[0] ourselves
    argv = ['nosetests',
            # Name the package to actually test, in this case nitime
            'nitime',

            # extra info in tracebacks
            '--detailed-errors',

            # We add --exe because of setuptools' imbecility (it blindly does
            # chmod +x on ALL files).  Nose does the right thing and it tries
            # to avoid executables, setuptools unfortunately forces our hand
            # here.  This has been discussed on the distutils list and the
            # setuptools devs refuse to fix this problem!
            '--exe',
            ]

    if doctests:
        argv.append('--with-doctest')

    # Now nose can run
    try:
        TestProgram(argv=argv)#, exit=False)
    finally:
        np.set_printoptions(**opt_dict)
Example #26
0
def printvar(locals_, varname, attr='.shape', typepad=0):
    npprintopts = np.get_printoptions()
    np.set_printoptions(threshold=5)
    dotpos = varname.find('.')
    # Locate var
    if dotpos == -1:
        var = locals_[varname]
    else:
        varname_ = varname[:dotpos]
        dotname_ = varname[dotpos:]
        var_ = locals_[varname_]  # NOQA
        var = eval('var_' + dotname_)
    # Print in format
    typestr = str(util_type.get_type(var)).ljust(typepad)

    if isinstance(var, np.ndarray):
        varstr = eval('str(var' + attr + ')')
        print('[var] %s %s = %s' % (typestr, varname + attr, varstr))
    elif isinstance(var, list):
        if attr == '.shape':
            func = 'len'
        else:
            func = ''
        varstr = eval('str(' + func + '(var))')
        print('[var] %s len(%s) = %s' % (typestr, varname, varstr))
    else:
        print('[var] %s %s = %r' % (typestr, varname, var))
    np.set_printoptions(**npprintopts)
Example #27
0
 def test_OLS_fixed(self):
     start_suppress = np.get_printoptions()['suppress']
     np.set_printoptions(suppress=True)    
     ols = OLS_Regimes(self.y, self.x, self.regimes, w=self.w, cols2regi=[False,True], regime_err_sep=True, constant_regi='one', nonspat_diag=False, spat_diag=True, name_y=self.y_var, name_x=self.x_var, name_ds='columbus', name_regimes=self.r_var, name_w='columbus.gal')        
     np.testing.assert_allclose(ols.betas,np.array([[ -0.24385565], [ \
     -0.26335026], [ 68.89701137], [ -1.67389685]]), RTOL) 
     vm = np.array([ 0.02354271,  0.01246677,  0.00424658, -0.04921356])
     np.testing.assert_allclose(ols.vm[0], vm,RTOL)
     np.testing.assert_allclose(ols.lm_error, \
         (5.62668744,  0.01768903),RTOL)
     np.testing.assert_allclose(ols.lm_lag, \
         (9.43264957,  0.00213156),RTOL)
     np.testing.assert_allclose(ols.mean_y, \
         35.12882389795919,RTOL)
     np.testing.assert_equal(ols.kf, 2)
     np.testing.assert_equal(ols.kr, 1)
     np.testing.assert_equal(ols.n, 49)
     np.testing.assert_equal(ols.nr, 2)
     np.testing.assert_equal(ols.name_ds,  'columbus')
     np.testing.assert_equal(ols.name_gwk,  None)
     np.testing.assert_equal(ols.name_w,  'columbus.gal')
     np.testing.assert_equal(ols.name_x,  ['0_HOVAL', '1_HOVAL', '_Global_CONSTANT', '_Global_INC'])
     np.testing.assert_equal(ols.name_y,  'CRIME')
     np.testing.assert_allclose(ols.predy[3], np.array([
         52.65974636]),RTOL)
     np.testing.assert_allclose(ols.r2, \
             0.5525561183786056 ,RTOL)
     np.testing.assert_equal(ols.robust,  'unadjusted')
     np.testing.assert_allclose(ols.t_stat[2][0], \
             13.848705206463748,RTOL)
     np.testing.assert_allclose(ols.t_stat[2][1], \
             7.776650625274256e-18,RTOL)
     np.set_printoptions(suppress=start_suppress)
Example #28
0
 def numarr2str(self, val):
     threshold = numpy.get_printoptions()["threshold"]
     numpy.set_printoptions(threshold=val.size)
     valstr = "array(" + numpy.array2string(val, precision =3, separator =",") + ", '%s')"%val.dtype.char
     #valstr.replace("\n", "\n\t")
     numpy.set_printoptions(threshold=threshold)
     return valstr
Example #29
0
def fullprint(*args, **kwargs):
  from pprint import pprint
  import numpy
  opt = numpy.get_printoptions()
  numpy.set_printoptions(threshold='nan')
  pprint(*args, **kwargs)
  numpy.set_printoptions(**opt)
Example #30
0
    def test(self, patterns):
        error = 0.0
        bin_correct = 0

        precision = np.get_printoptions()['precision']
        np.set_printoptions(precision=7, suppress=True)

        print('')
        for values, targets in patterns:
            a = self.predict(values)

            # sum of squares error
            # noinspection PyUnresolvedReferences
            sse = ((np.array(targets)-a)**2).sum()
            error += sse

            # noinspection PyTypeChecker
            u_bin = a >= .5
            bin_targets = np.array(targets) >= .5
            bin_correct += np.all(u_bin == bin_targets)
            print('{} -> {} = {}'.format(values, targets, a))

        print('')
        print('correct: {}/{}'.format(bin_correct, len(patterns)))
        print('overall error: {}'.format(error))

        np.set_printoptions(precision=precision, suppress=False)
        return bin_correct, len(patterns), error
Example #31
0
def test_set_numpy_options() -> None:
    original_options = np.get_printoptions()
    with formatting.set_numpy_options(threshold=10):
        assert len(repr(np.arange(500))) < 200
    # original options are restored
    assert np.get_printoptions() == original_options
Example #32
0
def fullprint(*args, **kwargs):
    opt = np.get_printoptions()
    np.set_printoptions(threshold=np.inf)
    print(*args, **kwargs)
    np.set_printoptions(**opt)  # restore the saved options (the dict must be unpacked)
Example #33
0
def printoptions(*args, **kwargs):
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    yield
    np.set_printoptions(**original)
Example #34
0
                           yend=self.yd,
                           q=self.q,
                           regime_err_sep=False)
        tbetas = np.array([
            [80.23408166],
            [5.48218125],
            [82.98396737],
            [0.49775429],
            [-3.72663211],
            [-1.27451485],
        ])
        np.testing.assert_array_almost_equal(tbetas, reg.betas)
        vm = np.array([495.16048523, 78.89742341, 0., 0., -47.12971066, 0.])
        np.testing.assert_array_almost_equal(reg.vm[0], vm, 6)
        u_3 = np.array([[25.57676372], [-17.94922796], [-26.71588759]])
        np.testing.assert_array_almost_equal(reg.u[0:3], u_3, 7)
        predy_3 = np.array([[-9.85078372], [36.75098196], [57.34266859]])
        np.testing.assert_array_almost_equal(reg.predy[0:3], predy_3, 7)
        chow_regi = np.array([[0.00616179,
                               0.93743265], [0.3447218, 0.55711631],
                              [0.37093662, 0.54249417]])
        np.testing.assert_array_almost_equal(reg.chow.regi, chow_regi, 7)
        self.assertAlmostEqual(reg.chow.joint[0], 1.1353790779821029, 7)


if __name__ == '__main__':
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    unittest.main()
    np.set_printoptions(suppress=start_suppress)
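Note that unittest.main() normally raises SystemExit, so the final set_printoptions call above may never run; a hedged variant (same imports assumed) that guarantees the restore:

if __name__ == '__main__':
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    try:
        unittest.main(exit=False)   # exit=False returns control so the options can be restored
    finally:
        np.set_printoptions(suppress=start_suppress)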
Example #35
0
    def call(self, verbose=False, ret_all=None, itime=None, iiter=None):
        """
        Call the homogenization engine and compute the homogenized
        coefficients.

        Parameters
        ----------
        verbose : bool
            If True, print the computed coefficients.
        ret_all : bool or None
            If not None, it can be used to override the 'return_all' option.
            If True, also the dependencies are returned.
        time_tag: str
            The time tag used in file names.

        Returns
        -------
        coefs : Coefficients instance
            The homogenized coefficients.
        dependencies : dict
            The dependencies, if `ret_all` is True.
        """
        opts = self.app_options

        ret_all = get_default(ret_all, opts.return_all)

        if not hasattr(self, 'he'):
            volumes = {}
            if hasattr(opts, 'volumes') and (opts.volumes is not None):
                volumes.update(opts.volumes)
            elif hasattr(opts, 'volume') and (opts.volume is not None):
                volumes['total'] = opts.volume
            else:
                volumes['total'] = 1.0

            self.he = HomogenizationEngine(self.problem,
                                           self.options,
                                           volumes=volumes)

        if self.micro_states is not None:
            self.update_micro_states()
            self.he.set_micro_states(self.micro_states)

        multiproc_mode = None
        if opts.multiprocessing and multi.use_multiprocessing:
            multiproc, multiproc_mode = multi.get_multiproc(mpi=opts.use_mpi)

            if multiproc_mode is not None:
                upd_var = self.app_options.mesh_update_variable
                if upd_var is not None:
                    uvar = self.problem.create_variables([upd_var])[upd_var]
                    uvar.field.mappings0 = multiproc.get_dict('mappings0',
                                                              soft_set=True)
                per.periodic_cache = multiproc.get_dict('periodic_cache',
                                                        soft_set=True)

        time_tag = ('' if itime is None else '_t%03d' % itime)\
            + ('' if iiter is None else '_i%03d' % iiter)

        aux = self.he(ret_all=ret_all, time_tag=time_tag)
        if ret_all:
            coefs, dependencies = aux
            # store correctors for coors update
            self.updating_corrs = {}
            for v in six.itervalues(opts.micro_update):
                if v is not None and not hasattr(v, '__call__'):
                    for cr, _, _ in v:
                        if cr is not None:
                            self.updating_corrs[cr] = dependencies[cr]
        else:
            coefs = aux

        if coefs is not None:
            coefs = Coefficients(**coefs.to_dict())

            if verbose:
                prec = nm.get_printoptions()['precision']
                if hasattr(opts, 'print_digits'):
                    nm.set_printoptions(precision=opts.print_digits)
                print(coefs)
                nm.set_printoptions(precision=prec)

            ms_cache = self.micro_state_cache
            for ii in self.app_options.store_micro_idxs:
                for k in self.micro_states.keys():
                    key = self.get_micro_cache_key(k, ii, itime)
                    ms_cache[key] = self.micro_states[k][ii]

            coef_save_name = op.join(opts.output_dir, opts.coefs_filename)
            coefs.to_file_hdf5(coef_save_name + '%s.h5' % time_tag)
            coefs.to_file_txt(coef_save_name + '%s.txt' % time_tag,
                              opts.tex_names, opts.float_format)

        if ret_all:
            return coefs, dependencies
        else:
            return coefs
Example #36
0
a = np.array([0.123456789, 0.987654321])

a
# array([0.12345679, 0.98765432])

%precision 3
# '%.3f'

a
# array([0.123, 0.988])

print(a)
# [0.123 0.988]

print(np.get_printoptions()['precision'])
# 3

np.set_printoptions(precision=5)

a
# array([0.12346, 0.98765])

print(a)
# [0.12346 0.98765]

print(a[0])
# 0.123456789

%precision
# '%r'
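The %precision magic shown above is an IPython feature; outside a magic call, a hedged equivalent (assuming an active IPython session) is setting float_precision on the plain-text formatter, which, as the test_precision listings earlier on this page check, also updates numpy's precision:

from IPython import get_ipython

ip = get_ipython()   # returns None outside an IPython session
ip.display_formatter.formatters['text/plain'].float_precision = 3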
Example #37
0
File: nao.py Project: zzy2014/pyscf
    def init_mo_coeff_label(self, **kw):
        """ Constructor a mean-field class from the preceeding SIESTA calculation """
        from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
        from pyscf.nao.m_fermi_energy import fermi_energy

        self.mo_coeff = require(self.wfsx.x,
                                dtype=self.dtype,
                                requirements='CW')
        self.mo_energy = require(self.wfsx.ksn2e,
                                 dtype=self.dtype,
                                 requirements='CW')
        self.telec = kw['telec'] if 'telec' in kw else self.hsx.telec

        self.magnetization = kw[
            'magnetization'] if 'magnetization' in kw else None  # using this key for the number of unpaired electrons
        if self.nspin == 1:
            self.nelec = kw['nelec'] if 'nelec' in kw else np.array(
                [self.hsx.nelec])
        elif (self.nspin == 2 and self.magnetization == None):
            self.nelec = kw['nelec'] if 'nelec' in kw else np.array(
                [int(self.hsx.nelec /
                     2), int(self.hsx.nelec / 2)])
        elif (self.nspin == 2 and self.magnetization != None):
            if 'nelec' in kw: self.nelec = kw['nelec']
            else:
                ne = self.hsx.nelec
                nalpha = (ne + self.magnetization) // 2
                nbeta = nalpha - self.magnetization
                if nalpha + nbeta != ne:
                    raise RuntimeError(
                        'Electron number %d and spin %d are not consistent\n'
                        'Note mol.spin = 2S = Nalpha - Nbeta, not 2S+1' %
                        (ne, self.magnetization))
                self.nelec = np.array([nalpha, nbeta])

            if self.verbosity > 0:
                print(__name__, 'not sure here: self.nelec', self.nelec)
        else:
            raise RuntimeError('0>nspin>2?')

        if 'fermi_energy' in kw:
            self.fermi_energy = kw[
                'fermi_energy']  # possibility to redefine Fermi energy
        ksn2fd = fermi_dirac_occupations(self.telec, self.mo_energy,
                                         self.fermi_energy)
        self.mo_occ = (3 - self.nspin) * ksn2fd
        nelec_occ = np.einsum('ksn->s', self.mo_occ) / self.nkpoints
        if not np.allclose(self.nelec, nelec_occ, atol=1e-4):
            fermi_guess = fermi_energy(self.wfsx.ksn2e, self.hsx.nelec,
                                       self.hsx.telec)
            np.set_printoptions(precision=2, linewidth=1000)
            raise RuntimeWarning(
                '''occupations?\n mo_occ: \n{}\n telec: {}\n nelec expected: {}
 nelec(occ): {}\n Fermi guess: {}\n Fermi: {}\n E_n:\n{}'''.format(
                    self.mo_occ, self.telec, self.nelec, nelec_occ,
                    fermi_guess, self.fermi_energy, self.mo_energy))

        if 'fermi_energy' in kw and self.verbosity > 0:
            po = np.get_printoptions()
            np.set_printoptions(precision=2, linewidth=1000)
            print(__name__, "mo_occ:\n{}".format(self.mo_occ))
            np.set_printoptions(**po)
Example #38
0
def set_printoptions(*args, **kw):
    "Context manager for numpy's print options."
    was = np.get_printoptions()
    np.set_printoptions(*args, **kw)
    yield
    np.set_printoptions(**was)
Example #39
0
    def toString(self, bShowBaseline=True, bBaseline=False):
        """
        return a nicely indented test report
        if bShowBaseline, includes any attached report(s), with proper indentation
        if bBaseline: report for a baseline method
        """
        if bBaseline:
            sSpace = " " * 8
            sSepBeg = "\n" + sSpace + "~" * 30
            sTitle = " BASELINE "
            sSepEnd = "~" * len(sSepBeg)
        else:
            sSepBeg = "--- " + time.asctime(time.gmtime(
                self.t)) + "---------------------------------"
            sTitle = "TEST REPORT FOR"
            sSpace = ""
            sSepEnd = "-" * len(sSepBeg)

        aConfuMat = self.getConfusionMatrix()
        np_dFmt = np.get_printoptions()
        np.set_printoptions(threshold=100 * 100, linewidth=100 * 20)
        s1 = str(aConfuMat)
        np.set_printoptions(**np_dFmt)

        if self.lsClassName:
            iMaxClassNameLen = max([len(s) for s in self.lsClassName])
            lsLine = s1.split("\n")
            sFmt = "%%%ds  %%s" % iMaxClassNameLen  #producing something like "%20s  %s"
            assert len(lsLine) == len(
                self.lsClassName
            ), "Internal error: expected one line per class name"
            s1 = "\n".join([
                sFmt % (sLabel, sLine)
                for (sLabel, sLine) in zip(self.lsClassName, lsLine)
            ])

        fScore, s2 = self.getClassificationReport(aConfuMat)

        s3 = "(unweighted) Accuracy score = %.2f     trace=%d  sum=%d" % (
            fScore, aConfuMat.trace(), np.sum(aConfuMat))

        if bShowBaseline:
            if self.lBaselineTestReport:
                sBaselineReport = "".join([
                    o.toString(False, True) for o in self.lBaselineTestReport
                ])
            else:
                sBaselineReport = sSpace + "(No Baseline method to report)"
        else:
            sBaselineReport = ""

        sReport = """%(space)s%(sSepBeg)s 
%(space)s%(sTitle)s: %(name)s

%(space)s  Line=True class, column=Prediction
%(s1)s

%(space)s%(s3)s

%(s2)s
%(sBaselineReport)s
%(space)s%(sSepEnd)s
""" % {
            "space": sSpace,
            "sSepBeg": sSepBeg,
            "sTitle": sTitle,
            "name": self.name,
            "s1": s1,
            "s2": s2,
            "s3": s3,
            "sBaselineReport": sBaselineReport,
            "sSepEnd": sSepEnd
        }

        #indent the baseline
        if bBaseline:
            sReport = '\n'.join(['\t' + _s for _s in sReport.split('\n')])

        return sReport
Example #40
0
    def test_repr_latex(self):
        from ...units.quantity import conf

        q2scalar = u.Quantity(1.5e14, 'm/s')
        assert self.scalarintq._repr_latex_() == r'$$1 \; \mathrm{m}$$'
        assert self.scalarfloatq._repr_latex_() == r'$$1.3 \; \mathrm{m}$$'
        assert (q2scalar._repr_latex_() ==
                r'$$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$$')
        assert self.arrq._repr_latex_() == r'$$[1,~2.3,~8.9] \; \mathrm{m}$$'

        qmed = np.arange(100) * u.m
        qbig = np.arange(1000) * u.m
        qvbig = np.arange(10000) * 1e9 * u.m

        pops = np.get_printoptions()
        oldlat = conf.latex_array_threshold
        try:
            # check precision behavior
            q = u.Quantity(987654321.123456789, 'm/s')
            qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
            np.set_printoptions(precision=8)
            assert q._repr_latex_(
            ) == r'$$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$$'
            assert qa._repr_latex_(
            ) == r'$$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$$'
            np.set_printoptions(precision=2)
            assert q._repr_latex_(
            ) == r'$$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$$'
            assert qa._repr_latex_(
            ) == r'$$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$$'

            # check thresholding behavior
            conf.latex_array_threshold = 100  # should be default
            lsmed = qmed._repr_latex_()
            assert r'\dots' not in lsmed
            lsbig = qbig._repr_latex_()
            assert r'\dots' in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r'\dots' in lsvbig

            conf.latex_array_threshold = 1001
            lsmed = qmed._repr_latex_()
            assert r'\dots' not in lsmed
            lsbig = qbig._repr_latex_()
            assert r'\dots' not in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r'\dots' in lsvbig

            conf.latex_array_threshold = -1  # means use the numpy threshold
            np.set_printoptions(threshold=99)
            lsmed = qmed._repr_latex_()
            assert r'\dots' in lsmed
            lsbig = qbig._repr_latex_()
            assert r'\dots' in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r'\dots' in lsvbig
        finally:
            # prevent side-effects from influencing other tests
            np.set_printoptions(**pops)
            conf.latex_array_threshold = oldlat

        qinfnan = [np.inf, -np.inf, np.nan] * u.m
        assert qinfnan._repr_latex_(
        ) == r'$$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$$'
Example #41
0
File: MV2.py Project: forsyth2/cdms
def get_print_limit():
    return get_printoptions()['threshold']
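A plausible (hypothetical, not taken from MV2.py) setter counterpart would simply forward to numpy, assuming set_printoptions is imported alongside get_printoptions:

def set_print_limit(limit):
    # Hypothetical companion to get_print_limit(): arrays with more elements are summarized.
    set_printoptions(threshold=limit)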
Example #42
0
def printer(value):
    opt = get_printoptions()
    set_printoptions(precision=2)
    print 'new value:', value
    set_printoptions(**opt)
Example #43
0
try:
    from distutils.sysconfig import get_config_vars
except ImportError:
    from sysconfig import get_config_vars
print("Config: " + str(get_config_vars("CONFIG_ARGS")))
print("")

try:
    import numpy
except ImportError:
    print("Numpy not installed")
else:
    print("Numpy %s" % numpy.version.version)
    print("      include %s" % numpy.get_include())
    print("      options %s" % numpy.get_printoptions())
print("")
try:
    import pyopencl
except Exception as error:
    print("Unable to import pyopencl: %s" % error)
else:
    print("PyOpenCL platform:")
    for p in pyopencl.get_platforms():
        print("  %s" % p)
        for d in p.get_devices():
            print("    %s max_workgroup_size is %s" % (d, d.max_work_group_size))
try:
    from silx.opencl import ocl
except Exception:
    print("Unable to import silx")
Example #44
0
def printoptions(*args, **kwargs):
    """Switches printoptions temporarily via yield before switching back."""
    original = np.get_printoptions()
    np.set_printoptions(*args, **kwargs)
    yield
    np.set_printoptions(**original)
Example #45
0
def test(target=None, show=False, onlydoctests=False, coverage=False,
         htmlreport=False, abort=False, verbose=True):
    """Run docstring examples and additional tests.

    Examples
    --------
    >>> import pygimli as pg
    >>> # You can test everything with pg.test() or test a single function:
    >>> pg.test("pg.utils.boxprint", verbose=False)
    >>> # The target argument can also be the function directly
    >>> from pygimli.utils import boxprint
    >>> pg.test(boxprint, verbose=False)

    Parameters
    ----------
    target : function or string, optional
        Function or method to test. By default everything is tested.
    show : boolean, optional
        Show matplotlib windows during test run. They will be closed
        automatically.
    onlydoctests : boolean, optional
        Run test files in ../tests as well.
    coverage : boolean, optional
        Create a coverage report. Requires the pytest-cov plugin.
    htmlreport : str, optional
        Filename for HTML report such as www.pygimli.org/build_tests.html.
        Requires pytest-html plugin.
    abort : boolean, optional
        Return correct exit code, e.g. abort documentation build when a test
        fails.
    """
    # Remove figure warnings
    plt.rcParams["figure.max_open_warning"] = 1000
    warnings.filterwarnings("ignore", category=UserWarning,
                            message='Matplotlib is currently using agg, which is a'
                                    ' non-GUI backend, so cannot show the figure.')

    printopt = np.get_printoptions()

    if verbose:
        pg.boxprint("Testing pygimli %s" % pg.__version__, sym="+", width=90)

    # Numpy compatibility (array string representation has changed)
    if np.__version__[:4] == "1.14":
        pg.warn("Some doctests will fail due to old numpy version.",
                "Consider upgrading to numpy >= 1.15")

    if target:
        if isinstance(target, str):
            # If target is a string, such as "pg.solver.solve"
            # the code below will overwrite target with the corresponding
            # imported function, so that doctest works.
            target = target.replace("pg.", "pygimli.")
            import importlib
            mod_name, func_name = target.rsplit('.', 1)
            mod = importlib.import_module(mod_name)
            target = getattr(mod, func_name)

        if show:  # Keep figures open if a single function is tested
            plt.ioff()

        import doctest
        doctest.run_docstring_examples(target, globals(), verbose=verbose,
                                       optionflags=doctest.ELLIPSIS,
                                       name=target.__name__)
        return

    try:
        import pytest
    except ImportError:
        raise ImportError("pytest is required to run test suite. "
                          "Try 'sudo pip install pytest'.")

    old_backend = plt.get_backend()
    if not show:
        plt.switch_backend("Agg")
    else:
        plt.ion()

    cwd = join(realpath(__path__[0]), '..')

    excluded = [
        "gui", "physics/traveltime/example.py", "physics/em/fdemexample.py"
    ]

    if onlydoctests:
        excluded.append("testing")

    cmd = ([
        "-v", "-rsxX", "--color", "yes", "--doctest-modules", "--durations",
        "5", cwd
    ])
    for directory in excluded:
        cmd.extend(["--ignore", join(cwd, directory)])

    if coverage:
        pc = pg.optImport("pytest_cov", "create a code coverage report")
        if pc:
            cmd.extend(["--cov", "pygimli"])
            cmd.extend(["--cov-report", "term"])

    if htmlreport:
        ph = pg.optImport("pytest_html", "create a html report")
        if ph:
            cmd.extend(["--html", htmlreport])

    plt.close("all")
    exitcode = pytest.main(cmd)
    if abort:
        print("Exiting with exitcode", exitcode)
        sys.exit(exitcode)

    plt.switch_backend(old_backend)
    np.set_printoptions(**printopt)
Example #46
0
 def setUp(self):
     self.oldopts = np.get_printoptions()
Example #47
0
def np_show_complete_array():
    # See: https://stackoverflow.com/a/45831462/916672
    oldoptions = np.get_printoptions()
    np.set_printoptions(threshold=np.inf)
    yield
    np.set_printoptions(**oldoptions)
Example #48
0
def value_to_display(value, minmax=False, level=0):
    """Convert value for display purpose"""
    # To save current Numpy threshold
    np_threshold = FakeObject

    try:
        numeric_numpy_types = (int64, int32, int16, int8, uint64, uint32,
                               uint16, uint8, float64, float32, float16,
                               complex128, complex64, bool_)
        if ndarray is not FakeObject:
            # Save threshold
            np_threshold = get_printoptions().get('threshold')
            # Set max number of elements to show for Numpy arrays
            # in our display
            set_printoptions(threshold=10)
        if isinstance(value, recarray):
            if level == 0:
                fields = value.names
                display = 'Field names: ' + ', '.join(fields)
            else:
                display = 'Recarray'
        elif isinstance(value, MaskedArray):
            display = 'Masked array'
        elif isinstance(value, ndarray):
            if level == 0:
                if minmax:
                    try:
                        display = 'Min: %r\nMax: %r' % (value.min(),
                                                        value.max())
                    except (TypeError, ValueError):
                        if value.dtype.type in numeric_numpy_types:
                            display = str(value)
                        else:
                            display = default_display(value)
                elif value.dtype.type in numeric_numpy_types:
                    display = str(value)
                else:
                    display = default_display(value)
            else:
                display = 'Numpy array'
        elif any([type(value) == t for t in [list, set, tuple, dict]]):
            display = collections_display(value, level + 1)
        elif isinstance(value, Image):
            if level == 0:
                display = '%s  Mode: %s' % (address(value), value.mode)
            else:
                display = 'Image'
        elif isinstance(value, DataFrame):
            if level == 0:
                cols = value.columns
                if PY2 and len(cols) > 0:
                    # Get rid of possible BOM utf-8 data present at the
                    # beginning of a file, which gets attached to the first
                    # column header when headers are present in the first
                    # row.
                    # Fixes Issue 2514
                    try:
                        ini_col = to_text_string(cols[0], encoding='utf-8-sig')
                    except:
                        ini_col = to_text_string(cols[0])
                    cols = [ini_col] + [to_text_string(c) for c in cols[1:]]
                else:
                    cols = [to_text_string(c) for c in cols]
                display = 'Column names: ' + ', '.join(list(cols))
            else:
                display = 'Dataframe'
        elif isinstance(value, NavigableString):
            # Fixes Issue 2448
            display = to_text_string(value)
            if level > 0:
                display = u"'" + display + u"'"
        elif isinstance(value, Index):
            if level == 0:
                display = value.summary()
            else:
                display = 'Index'
        elif is_binary_string(value):
            # We don't apply this to classes that extend string types
            # See issue 5636
            if is_type_text_string(value):
                try:
                    display = to_text_string(value, 'utf8')
                    if level > 0:
                        display = u"'" + display + u"'"
                except:
                    display = value
                    if level > 0:
                        display = b"'" + display + b"'"
            else:
                display = default_display(value)
        elif is_text_string(value):
            # We don't apply this to classes that extend string types
            # See issue 5636
            if is_type_text_string(value):
                display = value
                if level > 0:
                    display = u"'" + display + u"'"
            else:
                display = default_display(value)
        elif (isinstance(value, datetime.date)
              or isinstance(value, datetime.timedelta)):
            display = str(value)
        elif (isinstance(value, NUMERIC_TYPES) or isinstance(value, bool)
              or isinstance(value, numeric_numpy_types)):
            display = repr(value)
        else:
            if level == 0:
                display = default_display(value)
            else:
                display = default_display(value, with_module=False)
    except:
        display = default_display(value)

    # Truncate display at 70 chars to avoid freezing Spyder
    # because of large displays
    if len(display) > 70:
        display = display[:70].rstrip() + ' ...'

    # Restore Numpy threshold
    if np_threshold is not FakeObject:
        set_printoptions(threshold=np_threshold)

    return display
Example #49
0
def fullprint(arr):
    opt = np.get_printoptions()
    np.set_printoptions(threshold=np.inf)
    print(arr)
    np.set_printoptions(**opt)
Example #50
0
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 17:53:22 2018
@author: leonc
"""
import numpy as np
import pandas as pd
import json
import os, sys

np.get_printoptions()
np.set_printoptions(precision=3, suppress=True)
np.set_printoptions(threshold=sys.maxsize)

raange = pd.date_range(start='2017-02-08 00:00:00',
                       end='2017-05-10 23:59:59',
                       freq='min')
frame_Index = pd.date_range(start='2017-02-08', end='2017-05-10 ', freq='D')
range_plt = pd.date_range(start='2017-03-01 00:00:00',
                          end='2017-03-28 23:59:59',
                          freq='min')


# dataFile: data file in JSON format
# frequency: time duration used for resampling eg. "5min"
# returns numpy Array with axis-0 representing days and axis-1 including chunks in a day
def seqFunct(dataFile, frequency):
    fileObject = open(dataFile)
    jsonObject = json.load(fileObject)
    dataFrame = pd.DataFrame(jsonObject['items'],
                             columns=['timestamp', 'value'])
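    # The original body is truncated here. The lines below are a hedged sketch
    # of the steps described in the comments above (resample 'value' at
    # `frequency`, then reshape so axis 0 is days and axis 1 is the chunks in
    # a day). The mean aggregation and the truncation to whole days are
    # assumptions, not taken from the original code.
    dataFrame['timestamp'] = pd.to_datetime(dataFrame['timestamp'])
    resampled = dataFrame.set_index('timestamp')['value'].resample(frequency).mean()
    chunks_per_day = int(pd.Timedelta('1D') / pd.Timedelta(frequency))
    num_days = len(resampled) // chunks_per_day
    return resampled.to_numpy()[:num_days * chunks_per_day].reshape(num_days, chunks_per_day)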
Example #51
0
def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints, that is,
            ``b - A_eq @ x``
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the tableau has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    x = res['x']
    fun = res['fun']
    phase = res['phase']
    status = res['status']
    nit = res['nit']
    message = res['message']
    complete = res['complete']

    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float': lambda x: "{0: 12.4f}".format(x)})
    if status:
        print('--------- Simplex Early Exit -------\n')
        print(
            'The simplex method exited early with status {0:d}'.format(status))
        print(message)
    elif complete:
        print('--------- Simplex Complete --------\n')
        print('Iterations required: {}'.format(nit))
    else:
        print('--------- Iteration {0:d}  ---------\n'.format(nit))

    if nit > 0:
        if phase == 1:
            print('Current Pseudo-Objective Value:')
        else:
            print('Current Objective Value:')
        print('f = ', fun)
        print()
        print('Current Solution Vector:')
        print('x = ', x)
        print()

    np.set_printoptions(**saved_printoptions)
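# A hedged usage sketch: pass this callback to scipy.optimize.linprog. The
# problem data below is illustrative, and this OptimizeResult-style callback
# assumes a SciPy release whose 'simplex' method is still available.
from scipy.optimize import linprog
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              callback=linprog_verbose_callback)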
Example #52
0
from contextlib import contextmanager

@contextmanager
def numpy_precision(precision):
    old = np.get_printoptions()['precision']
    np.set_printoptions(precision=precision)
    try:
        yield
    finally:
        np.set_printoptions(precision=old)
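# A hedged usage sketch (assumes numpy is imported as np; the values are illustrative):
with numpy_precision(2):
    print(np.array([3.14159, 2.71828]))   # printed with precision=2 inside the block
print(np.array([3.14159, 2.71828]))       # previous precision restored here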
Example #53
0
def extract_features(EEG_segs, channel_names, combined_channel_names, Fs, NW, total_freq_range, sub_window_time, sub_window_step, seg_start_ids, return_feature_names=False, n_jobs=-1, verbose=True):
    """Extract features from EEG segments.

    Arguments:
    EEG_segs -- EEG segments as a numpy.ndarray of shape (seg_num, channel_num, window_size)
    channel_names -- a list of channel names, one per channel of EEG_segs
    combined_channel_names -- a list of combined channel names, e.g. 'F3M2' and 'F4M1' combined into 'F'
    Fs -- sampling frequency in Hz

    Keyword arguments:
    n_jobs -- default -1, number of parallel joblib workers (-1 uses all CPUs)

    Outputs:
    features from each segment in numpy.ndarray type, size=(seg_num, feature_num)
    a list of names of each feature
    psd estimation, size=(window_num, freq_point_num, channel_num), or a list of them for each band
    frequencies, size=(freq_point_num,), or a list of them for each band
    """

    #if type(EEG_segs)!=list:
    #    raise TypeError('EEG segments should be list of numpy.ndarray, with size=(sample_point, channel_num).')

    seg_num, channel_num, window_size = EEG_segs.shape
    if seg_num <= 0:
        return []
    
    sub_window_size = int(round(sub_window_time*Fs))
    sub_step_size = int(round(sub_window_step*Fs))
    dpss, eigvals = tsa.dpss_windows(sub_window_size,NW,2*NW)
    nfft = max(1<<(sub_window_size-1).bit_length(), sub_window_size)
    freq = np.arange(0, Fs, Fs*1.0/nfft)[:nfft//2+1]
    total_freq_id = np.where(np.logical_and(freq>=total_freq_range[0], freq<total_freq_range[1]))[0]
    
    old_threshold = np.get_printoptions()['threshold']
    np.set_printoptions(threshold=sys.maxsize)
    
    features = Parallel(n_jobs=n_jobs, verbose=verbose, backend='multiprocessing')(
        delayed(compute_features_each_seg)(
            EEG_segs[segi], window_size, channel_num, band_num, NW, Fs, freq,
            band_freq, total_freq_range, total_freq_id, sub_window_size,
            sub_step_size, dpss=dpss, eigvals=eigvals)
        for segi in range(seg_num))
    
    np.set_printoptions(threshold=old_threshold)

    if return_feature_names:
        feature_names = ['mean_gradient_%s'%chn for chn in channel_names]
        feature_names += ['kurtosis_%s'%chn for chn in channel_names]
        feature_names += ['sample_entropy_%s'%chn for chn in channel_names]
        for ffn in ['max','min','mean','std','kurtosis']:#,'skewness'
            for bn in band_names:
                if ffn=='kurtosis' or bn!='sigma': # no need for sigma band
                    feature_names += ['%s_bandpower_%s_%s'%(bn,ffn,chn) for chn in combined_channel_names]

        power_ratios = ['delta/theta','delta/alpha','theta/alpha']
        for pr in power_ratios:
            feature_names += ['%s_max_%s'%(pr,chn) for chn in combined_channel_names]
            feature_names += ['%s_min_%s'%(pr,chn) for chn in combined_channel_names]
            feature_names += ['%s_mean_%s'%(pr,chn) for chn in combined_channel_names]
            feature_names += ['%s_std_%s'%(pr,chn) for chn in combined_channel_names]

    # features.shape = (#epoch, 102)
    
    if return_feature_names:
        return np.array(features), feature_names#, pxx_mts, freqs
    else:
        return np.array(features)#, pxx_mts, freqs
Example #54
0
from __future__ import absolute_import, division, print_function

import doctest

import numpy as np
import pkg_resources

# import all packages
from . import audio, evaluation, features, io, ml, models, processors, utils

# define a version variable
__version__ = pkg_resources.get_distribution("madmom").version

# set and restore numpy's print options for doctests
_NP_PRINT_OPTIONS = np.get_printoptions()


def setup():
    # pylint: disable=missing-docstring
    # sets up the environment for doctests (when run through nose)
    np.set_printoptions(precision=5, edgeitems=2, suppress=True)


def teardown():
    # pylint: disable=missing-docstring
    # restore the environment after doctests (when run through nose)
    np.set_printoptions(**_NP_PRINT_OPTIONS)


# Create a doctest output checker that optionally ignores the unicode string
Example #55
0
from contextlib import contextmanager

@contextmanager
def printoptions(*args, **kwargs):
    original = numpy.get_printoptions()
    numpy.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        numpy.set_printoptions(**original)
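# A hedged usage sketch (assumes numpy is already imported at module level,
# as the function body itself does):
with printoptions(precision=3, suppress=True):
    print(numpy.linspace(0, 1, 5))   # compact formatting applies only inside the block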
Example #56
0
    def _register_neighb_to_model(self,
                                  model_bundle,
                                  neighb_streamlines,
                                  metric=None,
                                  x0=None,
                                  bounds=None,
                                  select_model=400,
                                  select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20,
                                  num_threads=None):
        if self.verbose:
            logger.info('# Local SLR of neighb_streamlines to model')
            t = time()

        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric(num_threads=num_threads)
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()

        if x0 is None:
            x0 = 'similarity'

        if bounds is None:
            bounds = [(-30, 30), (-30, 30), (-30, 30), (-45, 45), (-45, 45),
                      (-45, 45), (0.8, 1.2)]

        # TODO: this can be sped up by using the centroids directly
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model,
                                                  rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target,
                                                  rng=self.rng)

        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)

        slr = StreamlineLinearRegistration(metric=metric,
                                           x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)

        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(slm.matrix,
                                                transf_streamlines._data)

        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations

        if self.verbose:
            logger.info(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd), ))
            if slr_iterations is not None:
                logger.info(' Number of iterations %d' % (slr_iterations, ))
            logger.info(' Matrix size {}'.format(slm.matrix.shape))
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            logger.info(transf_matrix)
            logger.info(slm.xopt)
            np.set_printoptions(**original)

            logger.info(' Duration %0.3f sec. \n' % (time() - t, ))

        return transf_streamlines, slr_bmd
Example #57
0
                 setUp=None,
                 tearDown=None,
                 checker=None,
                 obj=None,
                 result_var='_'):
        self._result_var = result_var
        self._nose_obj = obj
        doctest.DocTestCase.__init__(self,
                                     test,
                                     optionflags=optionflags,
                                     setUp=setUp,
                                     tearDown=tearDown,
                                     checker=checker)


print_state = numpy.get_printoptions()


class NumpyDoctest(npd.Doctest):
    name = 'numpydoctest'  # call nosetests with --with-numpydoctest
    score = 1000  # load late, after doctest builtin

    # always use whitespace and ellipsis options for doctests
    doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    # files that should be ignored for doctests
    doctest_ignore = ['generate_numpy_api.py', 'setup.py']

    # Custom classes; class variables to allow subclassing
    doctest_case_class = NumpyDocTestCase
    out_check_class = NumpyOutputChecker
Example #58
0
def linprog_verbose_callback(xk, **kwargs):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    xk : array_like
        The current solution vector.
    **kwargs : dict
        A dictionary containing the following parameters:

        tableau : array_like
            The current tableau of the simplex algorithm.
            Its structure is defined in _solve_simplex.
        phase : int
            The current Phase of the simplex algorithm (1 or 2)
        nit : int
            The current iteration number.
        pivot : tuple(int, int)
            The index of the tableau selected as the next pivot,
            or nan if no pivot exists
        basis : array(int)
            A list of the current basic variables.
            Each element contains the name of a basic variable and its value.
        complete : bool
            True if the simplex algorithm has completed
            (and this is the final call to callback), otherwise False.
    """
    tableau = kwargs["tableau"]
    nit = kwargs["nit"]
    pivrow, pivcol = kwargs["pivot"]
    phase = kwargs["phase"]
    basis = kwargs["basis"]
    complete = kwargs["complete"]

    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float':lambda x: "{: 12.4f}".format(x)})
    if complete:
        print("--------- Iteration Complete - Phase {:d} -------\n".format(phase))
        print("Tableau:")
    elif nit == 0:
        print("--------- Initial Tableau - Phase {:d} ----------\n".format(phase))
    else:
        print("--------- Iteration {:d}  - Phase {:d} --------\n".format(nit, phase))
        print("Tableau:")

    if nit >= 0:
        print("" + str(tableau) + "\n")
        if not complete:
            print("Pivot Element: T[{:.0f}, {:.0f}]\n".format(pivrow, pivcol))
        print("Basic Variables:", basis)
        print()
        print("Current Solution:")
        print("x = ", xk)
        print()
        print("Current Objective Value:")
        print("f = ", -tableau[-1, -1])
        print()
    np.set_printoptions(**saved_printoptions)
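# A hedged usage sketch of this older, kwargs-style callback interface. It
# assumes a legacy SciPy release whose 'simplex' method still calls back with
# (xk, **kwargs); the problem data is illustrative only.
from scipy.optimize import linprog
res = linprog([-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
              method='simplex', callback=linprog_verbose_callback)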
Example #59
0
def _test():
    import doctest
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=start_suppress)
Example #60
0
def get_printoptions():
    return np.get_printoptions()