Example #1
def get_mirrors(upper_triangle):
    """
    Given 3 or 6 rational numbers, return a 3x3 or 4x4 matrix whose rows are
    the normal vectors of the reflection planes.
    """

    # Error handler invoked when the input Coxeter matrix is invalid.
    def err_handler(err_type, flag):
        print("Invalid input Coxeter diagram.")
        sys.exit(1)

    np.seterrcall(err_handler)
    np.seterr(all='call')

    coxeter_matrix = np.array(fill_matrix(upper_triangle)).astype(float)
    C = -np.cos(np.pi / coxeter_matrix)
    M = np.zeros_like(C)

    M[0, 0] = 1
    M[1, 0] = C[0, 1]
    M[1, 1] = np.sqrt(1 - M[1, 0] * M[1, 0])
    M[2, 0] = C[0, 2]
    M[2, 1] = (C[1, 2] - M[1, 0] * M[2, 0]) / M[1, 1]
    M[2, 2] = np.sqrt(1 - M[2, 0] * M[2, 0] - M[2, 1] * M[2, 1])
    if len(coxeter_matrix) == 4:
        M[3, 0] = C[0, 3]
        M[3, 1] = (C[1, 3] - M[1, 0] * M[3, 0]) / M[1, 1]
        M[3, 2] = (C[2, 3] - M[2, 0] * M[3, 0] - M[2, 1] * M[3, 1]) / M[2, 2]
        M[3, 3] = np.sqrt(1 - M[3, 0] * M[3, 0] - M[3, 1] * M[3, 1] -
                          M[3, 2] * M[3, 2])
    return M
Example #2
    def solve(self, iteration, firms_index, prices, costs):
        """Solve the fixed point problem defined by the zeta-markup equation to compute prices and shares in this
        market. Also return a set of any exception classes encountered during computation along with contraction
        statistics.
        """

        # configure NumPy to identify floating point errors
        errors = set()
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.add(
                exceptions.SyntheticPricesFloatingPointError))

            # solve the fixed point problem
            prices, converged, iterations, evaluations = self.compute_bertrand_nash_prices(
                iteration, firms_index, prices, costs)

            # compute the associated shares
            delta = self.update_delta_with_variable('prices', prices)
            mu = self.update_mu_with_variable('prices', prices)
            shares = self.compute_probabilities(delta,
                                                mu) @ self.agents.weights

        # determine whether the fixed point converged
        if not converged:
            errors.add(exceptions.SyntheticPricesConvergenceError)
        return prices, shares, errors, iterations, evaluations
Example #3
    def compute_omega_by_theta_jacobian(
            self, tilde_costs: Array, xi_jacobian: Array,
            parameters: Parameters,
            costs_type: str) -> (Tuple[Array, List[Error]]):
        """Compute the Jacobian of omega (equivalently, of transformed marginal costs) with respect to theta."""
        errors: List[Error] = []

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.append(
                exceptions.OmegaByThetaJacobianFloatingPointError()))

            # compute the Jacobian
            eta_jacobian, eta_jacobian_errors = self.compute_eta_by_theta_jacobian(
                xi_jacobian, parameters)
            errors.extend(eta_jacobian_errors)
            if costs_type == 'linear':
                omega_jacobian = -eta_jacobian
            else:
                assert costs_type == 'log'
                omega_jacobian = -eta_jacobian / np.exp(tilde_costs)
            return omega_jacobian, errors
Example #4
    def __init__(self, name="Tool", parent_app=None):
        """**Constructor**"""
        super().__init__()

        self.name = name
        self.parent_application = parent_app
        self.parameters = (
            OrderedDict()
        )  # keep the dictionary key in order for the parameter table
        self.active = True  # defines if the Tool is plotted
        self.applytotheory = True  # Do we also apply the tool to the theory?

        # LOGGING STUFF
        self.logger = logging.getLogger(self.parent_application.logger.name +
                                        "." + self.name)
        self.logger.debug("New " + self.toolname + " Tool")
        # np.seterr(all="call")
        #np.seterr(all="ignore")
        np.seterrcall(self.write)

        self.do_cite("")

        if CmdBase.mode == CmdMode.GUI:
            self.print_signal.connect(
                self.print_qtextbox
            )  # Asynchronous print when using multithread
Example #5
def setup_system():
  logger=logging.getLogger()#logging.getLogger('quicknxs')
  logger.setLevel(min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL))
  if not sys.platform.startswith('win'):
    # no console logger for windows (py2exe)
    console=logging.StreamHandler(sys.__stdout__)
    formatter=logging.Formatter('%(levelname) 7s: %(message)s')
    console.setFormatter(formatter)
    console.setLevel(CONSOLE_LEVEL)
    logger.addHandler(console)

  logfile=logging.FileHandler(paths.LOG_FILE, 'w')
  formatter=logging.Formatter('[%(levelname)s] - %(asctime)s - %(filename)s:%(lineno)i:%(funcName)s %(message)s', '')
  logfile.setFormatter(formatter)
  logfile.setLevel(FILE_LEVEL)
  logger.addHandler(logfile)

  logging.info('*** QuickNXS %s Logging started ***'%str_version)

  # define numpy warning behavior
  global nplogger
  old_class=logging.getLoggerClass()
  logging.setLoggerClass(NumpyLogger)
  nplogger=logging.getLogger('numpy')
  nplogger.setLevel(logging.DEBUG)
  null_handler=logging.StreamHandler(StringIO())
  null_handler.setLevel(logging.CRITICAL)
  nplogger.addHandler(null_handler)
  logging.setLoggerClass(old_class)
  seterr(divide='call', over='call', under='ignore', invalid='call')
  seterrcall(numpy_logger)

  # write information on program exit
  sys.excepthook=excepthook_overwrite
  atexit.register(goodby)
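The numpy_logger callback registered above is defined elsewhere in QuickNXS; a plausible sketch, assuming it simply forwards to the dedicated 'numpy' logger created in setup_system (a hypothetical reconstruction, not the project's actual code):

def numpy_logger(err_type, flag):
    # forward NumPy floating point errors to the dedicated 'numpy' logger
    nplogger.debug('NumPy floating point error (%s), flag %s', err_type, flag)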
Example #6
def get_mirrors(upper_triangle):
    """
    Given three or six integers/rationals that represent the
    angles between the mirrors (a rational p means the angle is π/p),
    return a 3x3 or 4x4 matrix whose rows are the normal vectors of the mirrors.
    """
    # Error handler invoked when the input Coxeter matrix is invalid.
    def err_handler(err_type, flag):
        print("Invalid input Coxeter diagram.")
        sys.exit(1)

    np.seterrcall(err_handler)
    np.seterr(all="call")

    coxeter_matrix = np.array(fill_matrix(upper_triangle)).astype(float)
    C = -np.cos(np.pi / coxeter_matrix)
    M = np.zeros_like(C)

    M[0, 0] = 1
    M[1, 0] = C[0, 1]
    M[1, 1] = np.sqrt(1 - M[1, 0]*M[1, 0])
    M[2, 0] = C[0, 2]
    M[2, 1] = (C[1, 2] - M[1, 0]*M[2, 0]) / M[1, 1]
    M[2, 2] = np.sqrt(1 - M[2, 0]*M[2, 0] - M[2, 1]*M[2, 1])
    if len(coxeter_matrix) == 4:
        M[3, 0] = C[0, 3]
        M[3, 1] = (C[1, 3] - M[1, 0]*M[3, 0]) / M[1, 1]
        M[3, 2] = (C[2, 3] - M[2, 0]*M[3, 0] - M[2, 1]*M[3, 1]) / M[2, 2]
        M[3, 3] = np.sqrt(1 - M[3, 0]*M[3, 0] - M[3, 1]*M[3, 1] - M[3, 2]*M[3, 2])
    return M
Example #7
    def compute_micro(self,
                      delta: Optional[Array] = None
                      ) -> Tuple[Array, List[Error]]:
        """Compute micro moments."""
        errors: List[Error] = []
        assert self.moments is not None

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.append(
                exceptions.MicroMomentsFloatingPointError()))

            # compute probabilities with the outside option eliminated
            probabilities, _ = self.compute_probabilities(
                delta, eliminate_outside=True)

            # compute the micro moments
            micro = np.zeros((self.moments.MM, 1), options.dtype)
            for m, moment in enumerate(self.moments.micro_moments):
                assert isinstance(moment, ProductsAgentsCovarianceMoment)
                z = probabilities.T @ self.products.X2[:, [moment.X2_index]]
                d = self.agents.demographics[:, [moment.demographics_index]]
                demeaned_z = z - z.T @ self.agents.weights
                demeaned_d = d - d.T @ self.agents.weights
                micro[m] = demeaned_z.T @ (self.agents.weights *
                                           demeaned_d) - moment.value
            return micro, errors
Example #8
    def solve(
            self, costs: Array, prices: Array, iteration: Iteration, firms_index: int = 0) -> (
            Tuple[Array, Array, List[Error], bool, int, int]):
        """Solve for synthetic prices and shares. By default, use unchanged firm IDs."""
        errors: List[Error] = []

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
            np.seterrcall(lambda *_: errors.append(exceptions.SyntheticPricesFloatingPointError()))

            # solve the fixed point problem
            prices, converged, iterations, evaluations = self.compute_equilibrium_prices(
                costs, iteration, firms_index, prices
            )
            if not converged:
                errors.append(exceptions.SyntheticPricesConvergenceError())

            # switch to identifying floating point errors with synthetic share computation
            np.seterrcall(lambda *_: errors.append(exceptions.SyntheticSharesFloatingPointError()))

            # compute the associated shares
            delta = self.update_delta_with_variable('prices', prices)
            mu = self.update_mu_with_variable('prices', prices)
            shares = self.compute_probabilities(delta, mu) @ self.agents.weights
            return prices, shares, errors, converged, iterations, evaluations
Example #9
File: Tool.py  Project: AMaywurm/RepTate
    def __init__(self, name="Tool", parent_app=None):
        """
        **Constructor**

        The following variables should be set by the particular realization of the Tool:
            - parameters     (dict): Parameters of the Tool

        Keyword Arguments:
            - name {str} -- Name of Tool (default: {"Tool"})
        """
        super().__init__()

        self.name = name
        self.parent_application = parent_app
        self.parameters = OrderedDict(
        )  # keep the dictionary key in order for the parameter table
        self.active = True  #defines if the Tool is plotted
        self.applytotheory = True  # Do we also apply the tool to the theory?

        self.do_cite("")

        if CmdBase.mode == CmdMode.GUI:
            self.print_signal.connect(
                self.print_qtextbox
            )  # Asynchronous print when using multithread

        # LOGGING STUFF
        self.logger = logging.getLogger(self.parent_application.logger.name +
                                        '.' + self.name)
        self.logger.debug('New ' + self.toolname + ' Tool')
        np.seterr(all="call")
        np.seterrcall(self.write)
Example #10
    def compute_tilde_costs(
            self, costs_type: str,
            costs_bounds: Bounds) -> Tuple[Array, Array, List[Error]]:
        """Compute transformed marginal costs."""
        errors: List[Error] = []

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(
                lambda *_: errors.append(exceptions.CostsFloatingPointError()))

            # compute marginal costs
            eta, eta_errors = self.compute_eta()
            errors.extend(eta_errors)
            costs = self.products.prices - eta

            # clip marginal costs that are outside of acceptable bounds
            clipped_costs = (costs < costs_bounds[0]) | (costs >
                                                         costs_bounds[1])
            if clipped_costs.any():
                costs = np.clip(costs, *costs_bounds)

            # take the log of marginal costs under a log-linear specification
            if costs_type == 'linear':
                tilde_costs = costs
            else:
                assert costs_type == 'log'
                if np.any(costs <= 0):
                    errors.append(exceptions.NonpositiveCostsError())
                with np.errstate(all='ignore'):
                    tilde_costs = np.log(costs)
            return tilde_costs, clipped_costs, errors
Example #11
File: colorize.py  Project: zplab/zplib
def blend(top, bottom, top_alpha=None, bottom_alpha=None, mode='normal', input_max=1):
    """Blend two RGB[A] arrays, using normal or screen-blending mode.

    Parameters:
        top, bottom: arrays of shape (x, y, 3) or (x, y, 4) for RGB or RGBA images.
        top_alpha, bottom_alpha: if not None, ignore any alpha channel in image
            and use this value instead (slightly faster if all alpha is equal)
        mode: 'normal', 'screen', 'multiply', or 'overlay'
        input_max: maximum value for the input images and top_opacity value
            (e.g. 1, 255, 65535).

    Returns: image, alpha
        image: array of shape (x, y, 3) with dtype matching the bottom parameter
        alpha: scalar (if RGB arrays or top_alpha and bottom_alpha are specified)
            or array of shape (x, y, 1). To make an RGBA image from the latter,
            just do: numpy.concatenate([image, alpha], axis=-1)
    """
    bottom = numpy.asarray(bottom)
    dtype = bottom.dtype
    assert mode in ('normal', 'screen', 'multiply', 'overlay')
    top, top_alpha = _prepare_and_premultiply(top, top_alpha, input_max)
    bottom, bottom_alpha = _prepare_and_premultiply(bottom, bottom_alpha, input_max)
    out_alpha = top_alpha + bottom_alpha - top_alpha * bottom_alpha
    if mode == 'normal':
        out = top + bottom * (1 - top_alpha)
    elif mode == 'screen':
        out = top + bottom - top * bottom
    elif mode == 'multiply':
        out = top * bottom + top * (1 - bottom_alpha) + bottom * (1 - top_alpha)
        out.clip(0, 1, out=out)
    elif mode == 'overlay':
        mult_mask = (2 * bottom <= bottom_alpha)[:, :, 0]
        out = numpy.empty_like(bottom)
        t = top[mult_mask]
        if top_alpha.ndim > 0:
            ta = top_alpha[mult_mask]
        else:
            ta = top_alpha
        b = bottom[mult_mask]
        if bottom_alpha.ndim > 0:
            ba = bottom_alpha[mult_mask]
        else:
            ba = bottom_alpha
        out[mult_mask] = 2 * t * b + t * (1 - ba) + b * (1 - ta)

        mult_mask = ~mult_mask
        t = top[mult_mask]
        if top_alpha.ndim > 0:
            ta = top_alpha[mult_mask]
        b = bottom[mult_mask]
        if bottom_alpha.ndim > 0:
            ba = bottom_alpha[mult_mask]
        out[mult_mask] = t * (1 + ba) + b * (1 + ta) - 2 * t * b - ta * ba
        out.clip(0, 1, out=out)
    def on_invalid(err, flag):
        out[numpy.isnan(out)] = 0
    numpy.seterrcall(on_invalid)
    with numpy.errstate(invalid='call'):
        out /= out_alpha
    return (out * input_max).astype(dtype), (out_alpha * input_max).astype(dtype)
Example #12
def get_mirrors(coxeter_diagram):
    """
    Given a Coxeter diagram consisting of integers/rationals that represent the angles
    between the mirrors (a rational p means the angle is π/p), return a square matrix
    whose rows are the normal vectors of the mirrors. This matrix is not unique; here
    we use a lower triangular one to simplify the computations.
    """

    # Error handler invoked when the input Coxeter matrix is invalid.
    def err_handler(err_type, flag):
        print(
            "Invalid input Coxeter diagram. This diagram does not give a finite \
symmetry group of an uniform polytope. See \
https://en.wikipedia.org/wiki/Coxeter_group#Symmetry_groups_of_regular_polytopes \
for a complete list of valid Coxeter diagrams.")
        sys.exit(1)

    np.seterrcall(err_handler)
    np.seterr(all="call")

    coxeter_matrix = np.array(make_symmetry_matrix(coxeter_diagram)).astype(float)
    C = -np.cos(np.pi / coxeter_matrix)
    M = np.zeros_like(C)
    n = len(M)
    # the first normal vector is simply (1, 0, ...)
    M[0, 0] = 1
    # in the i-th row, the j-th entry can be computed via the (j, j) entry.
    for i in range(1, n):
        for j in range(i):
            M[i, j] = (C[i, j] - np.dot(M[j, :j], M[i, :j])) / M[j, j]
        # the (i, i) entry is used to normalize this vector
        M[i, i] = np.sqrt(1 - np.dot(M[i, :i], M[i, :i]))

    return M
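The loop above performs an entry-by-entry Cholesky factorization of the Gram matrix C: each row of M is a unit normal, and M @ M.T reproduces the prescribed cosines. A self-contained sketch of the same construction for the cube's symmetry group (Coxeter diagram 4-3-2, i.e. pairwise angles π/4, π/3, π/2), using np.linalg.cholesky directly; the factorization fails exactly when the diagram does not define a finite group:

import numpy as np

# full symmetric Coxeter matrix of the cube group B3; diagonal entries are 1
p = np.array([[1.0, 4.0, 2.0],
              [4.0, 1.0, 3.0],
              [2.0, 3.0, 1.0]])
C = -np.cos(np.pi / p)       # Gram matrix of the mirror normals
M = np.linalg.cholesky(C)    # lower triangular; rows are the unit normals
assert np.allclose(M @ M.T, C)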
Example #13
    def compute_prices(self, iteration: Iteration, firm_ids: Optional[Array],
                       ownership: Optional[Array], costs: Optional[Array],
                       prices: Optional[Array]) -> Tuple[Array, List[Error]]:
        """Estimate equilibrium prices. By default, use unchanged firm IDs, use unchanged prices as starting values,
        and compute marginal costs.
        """
        errors: List[Error] = []
        ownership_matrix = self.get_ownership_matrix(firm_ids, ownership)
        if costs is None:
            costs, errors = self.compute_costs()

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.append(
                exceptions.EquilibriumPricesFloatingPointError()))

            # compute equilibrium prices
            prices, converged, *_ = self.compute_equilibrium_prices(
                costs, iteration, ownership_matrix, prices)
            if not converged:
                errors.append(exceptions.EquilibriumPricesConvergenceError())
            return prices, errors
Example #14
    def __init__(self, cache_size=1024):

        self._reg = np.zeros(32, dtype=np.uint32)

        self._hi = np.uint32(0)
        self._lo = np.uint32(0)
        self._pc = np.uint32(0)
        self.instr_c = 0
        self.epc = np.uint32(0)
        self.cause = np.uint32(0)
        self.badvaddr = np.uint32(0)
        self.status = np.uint32(0)
        self.instr = Instr()

        self.ir = np.uint32(0)

        self.mem = np.empty(cache_size, dtype='uint8')

        self.flush_cache()

        self.over = False

        self.ops = {
            name.lower(): getattr(self, "_{}".format(name.lower()))
            for name, _ in MIPSI.__members__.items()
        }

        np.seterr(over="call")
        np.seterrcall(self.errcall)
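With over="call", NumPy invokes the registered callback whenever a scalar operation overflows, which is how the simulator can detect 32-bit wrap-around. A standalone sketch of the mechanism (the simulator's actual errcall is not shown; note that NumPy checks overflow for scalar integer arithmetic, not for whole-array integer operations):

import numpy as np

def errcall(err_type, flag):
    print('overflow detected: %s (flag %s)' % (err_type, flag))

np.seterr(over='call')
np.seterrcall(errcall)
np.uint32(0xFFFFFFFF) + np.uint32(1)  # wraps to 0 and triggers the callback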
Example #15
    def compute_xi_by_theta_jacobian(
            self,
            parameters: Parameters,
            delta: Optional[Array] = None) -> Tuple[Array, List[Error]]:
        """Use the Implicit Function Theorem to compute the Jacobian of xi (equivalently, of delta) with respect to
        theta. By default, use unchanged delta values.
        """
        errors: List[Error] = []
        if delta is None:
            assert self.delta is not None
            delta = self.delta

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.append(
                exceptions.XiByThetaJacobianFloatingPointError()))

            # compute the Jacobian
            probabilities, conditionals = self.compute_probabilities(delta)
            shares_by_xi_jacobian = self.compute_shares_by_xi_jacobian(
                probabilities, conditionals)
            shares_by_theta_jacobian = self.compute_shares_by_theta_jacobian(
                parameters, delta, probabilities, conditionals)
            xi_by_theta_jacobian, replacement = approximately_solve(
                shares_by_xi_jacobian, -shares_by_theta_jacobian)
            if replacement:
                errors.append(
                    exceptions.SharesByXiJacobianInversionError(
                        shares_by_xi_jacobian, replacement))
            return xi_by_theta_jacobian, errors
Example #16
def setup_system():
    logger=logging.getLogger()
    logger.setLevel(min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL))

    # no console logger for windows (win32gui)
    console=logging.StreamHandler(sys.__stdout__)
    formatter=logging.Formatter('%(levelname) 7s: %(message)s')
    console.setFormatter(formatter)
    console.setLevel(CONSOLE_LEVEL)
    logger.addHandler(console)

    logging.getLogger('matplotlib').setLevel(logging.WARNING)
    if min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL)>logging.DEBUG:
        logging.getLogger('numba').setLevel(logging.WARNING)
    logging.info('*** GenX %s Logging started ***'%str_version)

    # define numpy warning behavior
    global nplogger
    old_class=logging.getLoggerClass()
    logging.setLoggerClass(NumpyLogger)
    nplogger=logging.getLogger('numpy')
    nplogger.setLevel(logging.DEBUG)
    null_handler=logging.StreamHandler(StringIO())
    null_handler.setLevel(logging.CRITICAL)
    nplogger.addHandler(null_handler)
    logging.setLoggerClass(old_class)
    seterr(divide='call', over='call', under='ignore', invalid='call')
    # warnings.filterwarnings(action="error", category=ComplexWarning)
    logging.captureWarnings(True)
    seterrcall(numpy_logger)

    # write information on program exit
    # sys.excepthook=excepthook_overwrite
    atexit.register(genx_exit_message)
Example #17
    def solve_equilibrium(
            self, costs: Array, prices: Optional[Array], iteration: Optional[Iteration]) -> (
            Tuple[Array, Array, Array, List[Error], bool, int, int]):
        """If not already estimated, compute equilibrium prices along with associated delta and shares."""
        errors: List[Error] = []

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
            np.seterrcall(lambda *_: errors.append(exceptions.EquilibriumPricesFloatingPointError()))

            # solve the fixed point problem if prices haven't already been estimated
            if iteration is None:
                assert prices is not None
                converged = True
                iterations = evaluations = 0
            else:
                prices, converged, iterations, evaluations = self.compute_equilibrium_prices(costs, iteration)
                if not converged:
                    errors.append(exceptions.EquilibriumPricesConvergenceError())

            # switch to identifying floating point errors with equilibrium share computation
            np.seterrcall(lambda *_: errors.append(exceptions.EquilibriumSharesFloatingPointError()))

            # compute the associated shares
            delta = self.update_delta_with_variable('prices', prices)
            mu = self.update_mu_with_variable('prices', prices)
            shares = self.compute_probabilities(delta, mu) @ self.agents.weights
            return prices, shares, delta, errors, converged, iterations, evaluations
Example #18
def get_txs():
    import logging.config
    from .logging_config import config
    logging.config.dictConfig(config)
    logging.captureWarnings(True)

    import numpy

    def err_handler(type_, flag):
        logger.warning("Floating point error (%s), with flag %s" %
                       (type_, flag))

    numpy.seterrcall(err_handler)
    numpy.seterr(all='call')

    import argparse
    parser = argparse.ArgumentParser(description='Assign OFX transactions')
    parser.add_argument('--threshold',
                        default=95,
                        help="Percentage to assume transaction predicted.")
    parser.add_argument('paths', nargs='*', help="OFX files to process")

    args = parser.parse_args()

    ofx_roots = []
    for path in args.paths:
        with open(path) as file_:
            ofx_roots.append((xmltodict.parse(file_.read()), path))

    txs = [tr for root, path in ofx_roots for tr in load_transactions(root)]
    return args, txs, ofx_roots
Example #19
def error_handler(error_type, flag):
    print("{} (flag: {}) encountered.".format(error_type, flag))
    if input("Abort (q) or ignore for this session (press enter) ?") == 'q':
        raise Exception("Abort.")
    else:
        np.seterr(divide='warn')
        np.seterrcall(error_handler)
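A hypothetical registration of this handler; once the user presses enter to ignore, the else branch above downgrades divide errors to 'warn' for the rest of the session:

np.seterr(divide='call')
np.seterrcall(error_handler)
np.ones(2) / np.zeros(2)  # prompts: Abort (q) or ignore for this session?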
Example #20
 def _optional(self):
     # Turn on: inf as NaN.
     pd.options.mode.use_inf_as_na = True
     # Handle numpy errors.
     np.seterr(all='call')
     self._check_results_size(self.project_path)
     self._np_error_stat = {}
     np.seterrcall(self._np_error_callback)
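_np_error_callback is assumed by the snippet; a sketch consistent with the _np_error_stat dict initialised above (hypothetical, not the project's actual method):

 def _np_error_callback(self, err_type, flag):
     # tally NumPy floating point errors by type for later reporting
     self._np_error_stat[err_type] = self._np_error_stat.get(err_type, 0) + 1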
Example #21
 def wrapper(*args: Any, **kwargs: Any) -> Any:
     """Configure NumPy to detect numerical errors."""
     detector = NumericalErrorDetector(self.error)
     with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
         np.seterrcall(detector)
         returned = decorated(*args, **kwargs)
     if detector.detected is not None:
         returned[-1].append(detector.detected)
     return returned
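The wrapper assumes a NumericalErrorDetector class; a minimal sketch consistent with how it is used here (registered via np.seterrcall, then inspected through .detected) — a hypothetical reconstruction, the real implementation lives in pyblp:

class NumericalErrorDetector:
    def __init__(self, error):
        self.error = error    # error class to report on detection
        self.detected = None  # set when NumPy reports a floating point error
    def __call__(self, *_):
        self.detected = self.error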
Example #22
    def compute_micro_by_theta_jacobian(
            self, delta: Array,
            xi_jacobian: Array) -> Tuple[Array, List[Error]]:
        """Compute the Jacobian of micro moments with respect to theta."""
        errors: List[Error] = []
        assert self.moments is not None

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(lambda *_: errors.append(
                exceptions.MicroMomentsByThetaJacobianFloatingPointError()))

            # compute probabilities with the outside option eliminated and their tensor derivative with respect to xi
            probabilities, conditionals = self.compute_probabilities(
                delta, eliminate_outside=True)
            probabilities_tensor, _ = self.compute_probabilities_by_xi_tensor(
                probabilities, conditionals)

            # pre-transpose the tensor derivatives
            probabilities_tensor = probabilities_tensor.swapaxes(1, 2)

            # compute the Jacobian
            micro_jacobian = np.zeros((self.moments.MM, self.parameters.P))
            for p, parameter in enumerate(self.parameters.unfixed):
                # derivatives with respect to linear parameters are zero
                if isinstance(parameter, LinearCoefficient):
                    continue

                # compute the tangent of probabilities (with the outside option removed from the choice set) with
                #   respect to the parameter
                probabilities_tangent, _ = self.compute_probabilities_by_parameter_tangent(
                    parameter, probabilities, conditionals, delta)

                # fill the gradient of micro moments with respect to the parameter moment-by-moment
                for m, moment in enumerate(self.moments.micro_moments):
                    assert isinstance(moment, ProductsAgentsCovarianceMoment)
                    z_tangent = probabilities_tangent.T @ self.products.X2[:, [
                        moment.X2_index
                    ]]
                    z_jacobian = np.squeeze(
                        probabilities_tensor
                        @ self.products.X2[:, [moment.X2_index]])
                    d = self.agents.demographics[:,
                                                 [moment.demographics_index]]
                    demeaned_z_tangent = z_tangent - z_tangent.T @ self.agents.weights
                    demeaned_z_jacobian = z_jacobian - z_jacobian @ self.agents.weights
                    weighted_demeaned_d = self.agents.weights * (
                        d - d.T @ self.agents.weights)
                    micro_jacobian[m, p] = (
                        demeaned_z_tangent.T @ weighted_demeaned_d +
                        (demeaned_z_jacobian @ weighted_demeaned_d).T
                        @ xi_jacobian[:, [p]])
            return micro_jacobian, errors
Example #23
def setup_logging(console_level=logging.INFO,
                  log_filename="polar2grid.log",
                  log_numpy=True):
    """Setup the logger to the console to the logging level defined in the
    command line (default INFO).  Sets up a file logging for everything,
    regardless of command line level specified.  Adds extra logger for
    tracebacks to go to the log file if the exception is caught.  See
    `exc_handler` for more information.

    :param console_level: Python logging level integer (ex. logging.INFO).
    :param log_filename: Log messages to console and specified log_filename (None for no file log)
    :param log_numpy: Tell numpy to log invalid values encountered
    """
    # set the root logger to DEBUG so that handlers can have all possible messages to filter
    root_logger = logging.getLogger("")
    root_logger.setLevel(min(console_level, logging.DEBUG))

    # Console output is minimal
    console = logging.StreamHandler(sys.stderr)
    console_format = "%(levelname)-8s : %(message)s"
    console.setFormatter(logging.Formatter(console_format))
    console.setLevel(console_level)
    console.addFilter(SatPyWarningFilter())
    if console_level > logging.DEBUG:
        # if we are only showing INFO/WARNING/ERROR messages for P2G then
        # filter out messages from these packages
        console.addFilter(
            ThirdPartyFilter([
                "satpy", "pyresample", "pyspectral", "trollimage", "pyorbital",
                "trollsift"
            ]))
    root_logger.addHandler(console)

    # Log file messages have a lot more information
    if log_filename:
        file_handler = logging.FileHandler(log_filename)
        file_format = "[%(asctime)s] : %(levelname)-8s : %(name)s : %(funcName)s : %(message)s"
        file_handler.setFormatter(logging.Formatter(file_format))
        file_handler.setLevel(logging.DEBUG)
        root_logger.addHandler(file_handler)

        # Make a traceback logger specifically for adding tracebacks to log file
        traceback_log = logging.getLogger("traceback")
        traceback_log.propagate = False
        traceback_log.setLevel(logging.ERROR)
        traceback_log.addHandler(file_handler)

    if log_numpy:
        import numpy

        class TempLog(object):
            def write(self, msg):
                logging.getLogger("numpy").debug(msg)

        numpy.seterr(invalid="log")
        numpy.seterrcall(TempLog())
Example #24
def normdiff(array1, array2):
    """Calculates normalized difference index array from input arrays"""
    log = _FPErr_Log(
        "NaN generated while calculating normalized difference index: ")
    np.seterrcall(log)
    np.seterr(invalid='log')
    normalizeddiff = np.divide(
        array1.astype(np.float32) - array2.astype(np.float32),
        array1.astype(np.float32) + array2.astype(np.float32))
    return normalizeddiff
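seterr(invalid='log') requires the object registered with seterrcall to expose a write method, which NumPy calls with the error message. A minimal sketch of the _FPErr_Log helper consistent with that contract (hypothetical; the real class is not shown):

import sys

class _FPErr_Log:
    def __init__(self, prefix):
        self.prefix = prefix
    def write(self, msg):
        # NumPy calls this in 'log' mode; prepend context and emit
        sys.stderr.write(self.prefix + msg + '\n')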
Example #25
    def __enter__(self):
        # Start time; needed to log how long our computation takes.
        self.tstart = time.time()
        self.counting = True

        numpy.seterrcall(lambda type_, flag: logger.error("Floating point \
error (%s) with flag %s", type_, flag))
        numpy.seterr(all='call')

        return logger
Example #27
    def solve_supply(
            self, initial_tilde_costs: Array, xi_jacobian: Array,
            costs_type: str, costs_bounds: Bounds,
            compute_jacobian: bool) -> Tuple[Array, Array, Array, List[Error]]:
        """Compute transformed marginal costs for this market. Then, if compute_jacobian is True, compute the Jacobian
        of omega (equivalently, of transformed marginal costs) with respect to theta. If necessary, replace null
        elements in transformed marginal costs with their last values before computing their Jacobian.
        """
        errors: List[Error] = []

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call',
                         over='call',
                         under='ignore',
                         invalid='call'):
            np.seterrcall(
                lambda *_: errors.append(exceptions.CostsFloatingPointError()))

            # compute marginal costs
            eta, eta_errors = self.compute_eta()
            errors.extend(eta_errors)
            costs = self.products.prices - eta

            # clip marginal costs that are outside of acceptable bounds
            clipped_costs = (costs < costs_bounds[0]) | (costs >
                                                         costs_bounds[1])
            if clipped_costs.any():
                costs = np.clip(costs, *costs_bounds)

            # take the log of marginal costs under a log-linear specification
            if costs_type == 'linear':
                tilde_costs = costs
            else:
                assert costs_type == 'log'
                if np.any(costs <= 0):
                    errors.append(exceptions.NonpositiveCostsError())
                with np.errstate(all='ignore'):
                    tilde_costs = np.log(costs)

        # if the gradient is to be computed, replace invalid transformed marginal costs with their last computed
        #   values before computing their Jacobian, which is zero for clipped marginal costs
        omega_jacobian = np.full((self.J, self.parameters.P), np.nan,
                                 options.dtype)
        if compute_jacobian:
            valid_tilde_costs = tilde_costs.copy()
            bad_tilde_costs_index = ~np.isfinite(tilde_costs)
            valid_tilde_costs[bad_tilde_costs_index] = initial_tilde_costs[
                bad_tilde_costs_index]
            omega_jacobian, jacobian_errors = self.compute_omega_by_theta_jacobian(
                valid_tilde_costs, xi_jacobian, costs_type)
            errors.extend(jacobian_errors)
            omega_jacobian[clipped_costs.flat] = 0
        return tilde_costs, omega_jacobian, clipped_costs, errors
Example #28
    def solve_merger(self, iteration, firms_index, prices, costs):
        """Market-specific computation for Results.solve_merger."""
        errors = set()

        # configure NumPy to identify floating point errors
        with np.errstate(divide='call', over='call', under='ignore', invalid='call'):
            np.seterrcall(lambda *_: errors.add(exceptions.ChangedPricesFloatingPointError))
            prices, converged = self.compute_bertrand_nash_prices(iteration, firms_index, prices, costs)[:2]

        # determine whether the fixed point converged
        if not converged:
            errors.add(exceptions.ChangedPricesConvergenceError)
        return prices, errors
Example #29
    def close_loggers():
        """
        To run once everything has been completed.
        """

        message = "Shutting down program"
        setup_logger.info(message)
        logging.shutdown()
        if file_handler:
            core_logger.removeHandler(file_handler)
        np.seterrcall(old_np_error_call)
        sys.stderr = old_stderr
        sys.stdout = old_stdout
Example #30
def main_func(argv: List[str]):
    """
    Main function that should handle all the top-level processing for this program

    :param argv: List of arguments passed to the program (meant to be sys.argv)
    """

    # Perform argument parsing and program setup
    parsed_args, user_args = setup_func(argv, get_mama_parser)

    # Set Numpy error handling to shunt error messages to a logging function
    np.seterr(all='call')
    np.seterrcall(numpy_err_handler)

    # Attempt to print package version info (pandas has a nice version info summary)
    if logging.root.level <= logging.DEBUG:
        logging.debug("Printing Pandas' version summary:")
        with contextlib.redirect_stdout(io.StringIO()) as f:
            pd.show_versions()
        logging.debug("%s\n", f.getvalue())

    # Execute the rest of the program, but catch and log exceptions before failing
    try:

        # Validate user inputs and create internal dictionary
        iargs = validate_inputs(parsed_args, user_args)

        # Run the MAMA pipeline
        result_sumstats = mama_pipeline(
            iargs[SUMSTATS_MAP], iargs['ld_scores'], iargs['snp_list'],
            iargs[COL_MAP], iargs[RE_MAP], iargs[FILTER_MAP],
            iargs[REG_LD_COEF_OPT], iargs[REG_SE2_COEF_OPT],
            iargs[REG_INT_COEF_OPT], iargs[REG_LD_COEF_SCALE_COEF],
            iargs['use_standardized_units'], iargs[HARM_FILENAME_FSTR],
            iargs[REG_FILENAME_FSTR], iargs['input_sep'])

        # Write out the summary statistics to disk
        logging.info("Writing results to disk.")
        for (ancestry, phenotype), ss_df in result_sumstats.items():
            filename = f"{iargs['out']}_{ancestry}_{phenotype}{RESULTS_SUFFIX}"
            logging.info("\t%s", filename)
            write_sumstats_to_file(filename, ss_df)

        # Log any remaining information TODO(jonbjala) Timing info?
        logging.info("\nExecution complete.\n")

    # Disable pylint error since we do actually want to capture all exceptions here
    except Exception as exc:  # pylint: disable=broad-except
        logging.exception(exc)
        sys.exit(1)
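numpy_err_handler is defined elsewhere in MAMA; a sketch consistent with the logging-centric style of main_func (hypothetical, not the project's actual handler):

def numpy_err_handler(err_type, flag):
    # route NumPy floating point errors through the program's logging setup
    logging.warning('NumPy floating point error (%s), flag %s', err_type, flag)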
Example #31
@contextlib.contextmanager  # required for the generator below to act as a context manager
def errstate(*args, **kwds):
    """Context manager like np.errstate that can also be used as a decorator.
    """
    call = kwds.pop('call', None)
    old_errstate = np.geterr()
    try:
        old_errstate = np.seterr(*args, **kwds)
        if call is not None:
            old_call = np.seterrcall(call)
        yield np.geterr()
    finally:
        np.seterr(**old_errstate)
        if call is not None:
            np.seterrcall(old_call)
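A hypothetical use of this helper: temporarily route divide-by-zero through a callback, with the previous seterr/seterrcall settings restored on exit.

import numpy as np

def on_divide(err_type, flag):
    print('caught %s (flag %s)' % (err_type, flag))

with errstate(divide='call', call=on_divide):
    np.ones(3) / np.zeros(3)  # invokes on_divide instead of warning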
Example #32
def setup_logging(console_level=logging.INFO, log_filename="polar2grid.log", log_numpy=True):
    """Setup the logger to the console to the logging level defined in the
    command line (default INFO).  Sets up a file logging for everything,
    regardless of command line level specified.  Adds extra logger for
    tracebacks to go to the log file if the exception is caught.  See
    `exc_handler` for more information.

    :param console_level: Python logging level integer (ex. logging.INFO).
    :param log_filename: Log messages to console and specified log_filename (None for no file log)
    :param log_numpy: Tell numpy to log invalid values encountered
    """
    # set the root logger to DEBUG so that handlers can have all possible messages to filter
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)

    # Console output is minimal
    console = logging.StreamHandler(sys.stderr)
    console_format = "%(levelname)-8s : %(message)s"
    console.setFormatter(logging.Formatter(console_format))
    console.setLevel(console_level)
    console.addFilter(SatPyWarningFilter())
    if console_level > logging.DEBUG:
        # if we are only showing INFO/WARNING/ERROR messages for P2G then
        # filter out messages from these packages
        console.addFilter(ThirdPartyFilter(['satpy', 'pyresample', 'pyspectral', 'trollimage',
                                            'pyorbital', 'trollsift']))
    root_logger.addHandler(console)

    # Log file messages have a lot more information
    if log_filename:
        file_handler = logging.FileHandler(log_filename)
        file_format = "[%(asctime)s] : PID %(process)6d : %(levelname)-8s : %(name)s : %(funcName)s : %(message)s"
        file_handler.setFormatter(logging.Formatter(file_format))
        file_handler.setLevel(logging.DEBUG)
        root_logger.addHandler(file_handler)

        # Make a traceback logger specifically for adding tracebacks to log file
        traceback_log = logging.getLogger('traceback')
        traceback_log.propagate = False
        traceback_log.setLevel(logging.ERROR)
        traceback_log.addHandler(file_handler)

    if log_numpy:
        import numpy
        class TempLog(object):
            def write(self, msg):
                logging.getLogger("numpy").debug(msg)
        numpy.seterr(invalid="log")
        numpy.seterrcall(TempLog())
Example #33
class BinaryCrossEntropy(LossFunction):
    """Binary cross-entropy"""
    fp_warned = False

    @staticmethod
    def fp_warn(err, flag):
        """Warn once if zero encountered in np.log"""
        # pylint: disable = unused-argument
        if not BinaryCrossEntropy.fp_warned:
            frameinfo = getframeinfo(currentframe())
            sys.stderr.write(
                f"Warning: {frameinfo.filename}: {frameinfo.lineno}: 0 encountered in np.log; "
                "ensure that model predictions are probabilities\n")
            BinaryCrossEntropy.fp_warned = True

    np.seterrcall(fp_warn.__func__)

    @staticmethod
    def loss(y_true, y_pred):
        assert all(y_pred >= 0) and all(y_pred <= 1)
        with np.errstate(invalid="call", divide="call"):
            losses = -y_true * np.log(y_pred) - (1 - y_true) * np.log(1 -
                                                                      y_pred)
        losses[np.isnan(
            losses
        )] = 0  # to handle indeterminate case where y_pred[i] = y_true[i]
        losses[np.isinf(
            losses
        )] = 23  # to handle case where y_pred[i] = 1 - y_true[i]; -log(1e-10) ~ 23
        return losses
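A hypothetical check of the special cases handled above (assuming numpy is imported as np and the class is defined as shown): the second element yields NaN (prediction equals the label at an endpoint) and is mapped to 0, while the third yields infinity and is mapped to 23.

y_true = np.array([1.0, 1.0, 0.0])
y_pred = np.array([0.9, 1.0, 1.0])
print(BinaryCrossEntropy.loss(y_true, y_pred))  # ~ [0.105, 0.0, 23.0]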
Example #34
File: EM.py  Project: baturay/RML-AC
    def __init__(self, _mData):
        self.mData = _mData

        self.lInitialCenters = []  # [ centers as [values] ]
        
        # iteration information
        self.numSteps = 0
        self.lLastCenters = []  # [ centers as [values] ]
        self.lCenters = [] # [ centers as [values] ]
        self.lUnUsedCenterInds = [] # indices of data already chosen as centers

        self.bPPC = False # use PPC

        # init Cij to 0's
        self.mCij =  [ [ 0 for i in range(len(_mData.data)) ]
                       for j in range(len(_mData.data)) ]

        # matrix prob/log_likelihood of instance i in cluster j
        self.mGammas = []
        self.mLikelihood_il = []

        self.bVerbose = False

        self.sErrInfo = ""
        self.saved_handler = np.seterrcall(self)
        self.save_err = np.seterr(all='log')
        self.numErrs = 0

        self.bEMLikelihoodEachStep = False
        self.dEMLikelihood = 0
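Registering self with np.seterrcall under seterr(all='log') means NumPy will call the object's write method on each error, so the EM class must define one; a sketch consistent with the fields initialised above (hypothetical, not the project's actual method):

    def write(self, msg):
        # called by NumPy in 'log' mode; record the message and count it
        self.sErrInfo = msg
        self.numErrs += 1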
Example #35
def get_mirrors(coxeter_diagram):
    """
    Given three or six or ten integers/rationals that represent
    the angles between the mirrors (a rational p means the
    angle is π/p), return a square matrix whose rows
    are the normal vectors of the mirrors.
    """

    # Error handler invoked when the input Coxeter matrix is invalid.
    def err_handler(err_type, flag):
        print(
            "Invalid input Coxeter diagram. This diagram does not give a finite \
symmetry group of an uniform polytope. See \
https://en.wikipedia.org/wiki/Coxeter_group#Symmetry_groups_of_regular_polytopes \
for a complete list of valid Coxeter diagrams.")
        sys.exit(1)

    np.seterrcall(err_handler)
    np.seterr(all="call")

    coxeter_matrix = np.array(make_symmetry_matrix(coxeter_diagram)).astype(float)
    C = -np.cos(np.pi / coxeter_matrix)
    M = np.zeros_like(C)

    M[0, 0] = 1
    M[1, 0] = C[0, 1]
    M[1, 1] = np.sqrt(1 - M[1, 0] * M[1, 0])
    M[2, 0] = C[0, 2]
    M[2, 1] = (C[1, 2] - M[1, 0] * M[2, 0]) / M[1, 1]
    M[2, 2] = np.sqrt(1 - M[2, 0] * M[2, 0] - M[2, 1] * M[2, 1])

    if len(coxeter_matrix) > 3:
        M[3, 0] = C[0, 3]
        M[3, 1] = (C[1, 3] - M[1, 0] * M[3, 0]) / M[1, 1]
        M[3, 2] = (C[2, 3] - M[2, 0] * M[3, 0] - M[2, 1] * M[3, 1]) / M[2, 2]
        M[3, 3] = np.sqrt(1 - np.dot(M[3, :3], M[3, :3]))

    if len(coxeter_matrix) == 5:
        M[4, 0] = C[4, 0]
        M[4, 1] = (C[4, 1] - M[1, 0] * M[4, 0]) / M[1, 1]
        M[4, 2] = (C[4, 2] - M[2, 0] * M[4, 0] - M[2, 1] * M[4, 1]) / M[2, 2]
        M[4, 3] = (C[4, 3] - M[3, 0] * M[4, 0] - M[3, 1] * M[4, 1] -
                   M[3, 2] * M[4, 2]) / M[3, 3]
        M[4, 4] = np.sqrt(1 - np.dot(M[4, :4], M[4, :4]))

    return M
Example #36
    def push_err(self, arg):
        """Set numpy numerical error handling via a stack."""
        try:
            import numpy
        except ImportError:
            raise UsageError("could not import numpy.")

        sentinel = object()

        args = parse_argstring(self.push_err, arg)
        kwds = {}
        errcall = sentinel
        for key in ['all', 'divide', 'over', 'under', 'invalid']:
            value = getattr(args, key)
            if value is not None:
                kwds[key] = value
        if args.call_func is not None:
            if args.no_call_func:
                raise UsageError("You cannot specify both a --call-func and "
                                 "--no-call-func at the same time.")
            global_ns = self.shell.user_global_ns
            local_ns = self.shell.user_ns
            try:
                errcall = eval(args.call_func, global_ns, local_ns)
            except Exception as e:
                raise UsageError(
                    "Could not find function {0!r}\n{1}: {2}".format(
                        args.call_func, e.__class__.__name__, e))
        elif args.no_call_func:
            errcall = None

        old_options = numpy.geterr()
        old_errcall = numpy.geterrcall()
        numpy.seterr(**kwds)
        if errcall is not sentinel:
            try:
                numpy.seterrcall(errcall)
            except ValueError as e:
                raise UsageError(str(e))
        stack = getattr(self, '_numpy_err_stack', [])
        stack.append((old_options, old_errcall))
        self._numpy_err_stack = stack
        if not args.quiet:
            print_numpy_err(numpy.geterr(), numpy.geterrcall())
Example #37
def identify_pinwheels(re_contours,  im_contours, intersections):
    """
    Locates the pinwheels from the intersection of the real and
    imaginary contours of a polar OR map.
    """
    warning_counter = WarningCounter()
    pinwheels = []
    np.seterrcall(warning_counter)
    for (re_ind, im_ind) in intersections:
        re_contour = re_contours[re_ind]
        im_contour = im_contours[im_ind]
        np.seterr(divide='call', invalid='call')
        x, y = find_intersections(re_contour, im_contour)
        np.seterr(divide='raise', invalid='raise')
        pinwheels += zip(x,y)

    warning_counter.warn()
    return pinwheels
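This function and the method in the next example both assume a WarningCounter: a callable that tallies floating point errors raised while finding intersections and reports them once at the end. A minimal sketch (hypothetical, not the project's actual class):

from collections import defaultdict

class WarningCounter:
    def __init__(self):
        self.counts = defaultdict(int)
    def __call__(self, err_type, flag):
        # registered via np.seterrcall; invoked on each 'call'-mode error
        self.counts[err_type] += 1
    def warn(self):
        for err_type, n in self.counts.items():
            print('%d %s errors encountered while finding intersections' % (n, err_type))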
Example #38
    def identify_pinwheels(self, re_contours, im_contours, intersections, silence_warnings=True):
        """
        Locates the pinwheels from the intersection of the real and
        imaginary contours of a polar OR map.
        """
        warning_counter = WarningCounter()
        pinwheels = []
        np.seterrcall(warning_counter)
        for (re_ind, im_ind) in intersections:
            re_contour = re_contours[re_ind]
            im_contour = im_contours[im_ind]
            np.seterr(divide="call", invalid="call")
            x, y = self.find_intersections(re_contour, im_contour)
            np.seterr(divide="raise", invalid="raise")
            pinwheels += zip(x, y)

        if not silence_warnings:
            warning_counter.warn()
        return pinwheels
Example #39
def setup_logging(console_level=logging.INFO, log_filename="polar2grid.log", log_numpy=True):
    """Setup the logger to the console to the logging level defined in the
    command line (default INFO).  Sets up a file logging for everything,
    regardless of command line level specified.  Adds extra logger for
    tracebacks to go to the log file if the exception is caught.  See
    `exc_handler` for more information.

    :param console_level: Python logging level integer (ex. logging.INFO).
    :param log_filename: Log messages to console and specified log_filename (None for no file log)
    :param log_numpy: Tell numpy to log invalid values encountered
    """
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)

    # Console output is minimal
    console = logging.StreamHandler(sys.stderr)
    console_format = "%(levelname)-8s : %(message)s"
    console.setFormatter(logging.Formatter(console_format))
    console.setLevel(console_level)
    root_logger.addHandler(console)

    # Log file messages have a lot more information
    if log_filename:
        file_handler = logging.FileHandler(log_filename)
        file_format = "[%(asctime)s] : PID %(process)6d : %(levelname)-8s : %(name)s : %(funcName)s : %(message)s"
        file_handler.setFormatter(logging.Formatter(file_format))
        file_handler.setLevel(logging.DEBUG)
        root_logger.addHandler(file_handler)

        # Make a traceback logger specifically for adding tracebacks to log file
        traceback_log = logging.getLogger('traceback')
        traceback_log.propagate = False
        traceback_log.setLevel(logging.ERROR)
        traceback_log.addHandler(file_handler)

    if log_numpy:
        import numpy
        class TempLog(object):
            def write(self, msg):
                logging.getLogger("numpy").debug(msg)
        numpy.seterr(invalid="log")
        numpy.seterrcall(TempLog())
Example #40
def magic_pop_err(self, arg):
    """Pop the last set of numpy numerical error handling settings from the
    stack.
    """
    try:
        import numpy
    except ImportError:
        raise UsageError("could not import numpy.")
    args = parse_argstring(magic_pop_err, arg)

    stack = getattr(self, '_numpy_err_stack', [])
    if stack:
        kwds, errcall = stack.pop()
        numpy.seterr(**kwds)
        numpy.seterrcall(errcall)
    elif not args.quiet:
        print "At the end of the stack."
        print
    self._numpy_err_stack = stack
    if not args.quiet:
        print_numpy_err(numpy.geterr(), numpy.geterrcall())
Example #41
def run(model_creator_class):
    """Main entry point for specific models. model_creator is an instance of a
    class used to set up the model and the data."""
    get_options()
    if not is_mpi_slave(options):
        timer = SimpleTimer()
    prev_handler = np.seterrcall(float_err_handler)
    prev_err = np.seterr(all='call')
    np.seterr(under='ignore')
    random.seed(options.seed)
    np.random.seed(options.seed)
    model_creator = model_creator_class(options)
    model = model_creator.get_model()
    if not is_mpi_slave(options):
        _print_sim_context(model.dataset)
    _run_models([model], model.dataset)
    ul.tempfeeder_exp().close()
Example #42
import numpy as np

np.geterrcall()  # we did not yet set a handler, returns None
oldsettings = np.seterr(all='call')


def err_handler(err_type, flag):
    print("Floating point error (%s), with flag %s" % (err_type, flag))


oldhandler = np.seterrcall(err_handler)
np.array([1, 2, 3]) / 0.0
cur_handler = np.geterrcall()
cur_handler is err_handler
Example #43
import numpy as np
import math
import fitting as fit
import scipy.cluster.hierarchy as hcluster
import fastcluster as fc
import scipy.spatial.distance as ssd
import scipy.misc as deriv
np.seterr(all='call')
def errorhandler(errstr, errflag):
	#print errstr
	return [-1,-1,1],[0,0,0]
np.seterrcall(errorhandler)
def clusterFit(f):
	disabled=False
	try: 
		popt,pcov=fit.calc_fitVal(f.rmsSet[:,0],f.rmsSet[:,2])
		rs=RSquared2(f.rmsSet[:,0],f.rmsSet[:,2],popt)
		if rs>0.95 and popt[2]>0.85:
			final_result=np.array([0,0,0,0,0,0])
			final_result=np.vstack([final_result,(popt[0],popt[1],popt[2],rs,np.min(f.rmsSet[:,0]),np.max(f.rmsSet[:,0]))])
			final_result=np.delete(final_result,0,0)
			return final_result
		else:
			disabled=True
			return main_solver(f)
	except:
		if not disabled:
			return main_solver(f)


Example #44
File: k2sc.py  Project: petigura/k2sc
def detrend(dataset, args):
    """
    Needs to have args defined
    """

    ## Setup the logger
    ## ----------------
    logger  = logging.getLogger('Worker %i' % mpi_rank)
    logger.name = '<{:d}>'.format(dataset.epic)

    np.seterrcall(lambda e,f: logger.info(e))
    np.seterr(invalid='ignore')

    ## Main variables
    ## --------------
    Result  = namedtuple('SCResult', 'detrender pv tr_time tr_position cdpp_r cdpp_t cdpp_c warn')
    results = []  # a list of Result tuples, one per aperture
    masks   = []  # a list of light curve masks, one per aperture 

    ## Initialise utility variables
    ## ----------------------------
    ds   = dataset
    info = logger.info

    ## Periodic signal masking
    ## -----------------------
    if args.p_mask_center and args.p_mask_period and args.p_mask_duration:
        ds.mask_periodic_signal(
            args.p_mask_center, args.p_mask_period, args.p_mask_duration
            )

    ## Initial outlier and period detection
    ## ------------------------------------
    ## We carry out an initial outlier and period detection using
    ## a default GP hyperparameter vector based on campaign 4 fits
    ## done using (almost) noninformative priors.

    for iset in range(ds.nsets):
        flux = ds.fluxes[iset]
        inputs = np.transpose([ds.time,ds.x,ds.y])
        detrender = Detrender(
            flux, inputs, mask=isfinite(flux), splits=args.splits, 
            kernel=BasicKernelEP(), tr_nrandom=args.tr_nrandom,
            tr_nblocks=args.tr_nblocks, tr_bspan=args.tr_bspan
            )
    
        ttrend,ptrend = detrender.predict(
            detrender.kernel.pv0+1e-5, components=True
            )

        cflux = flux - ptrend + median(ptrend) - ttrend + median(ttrend)
        cflux /= nanmedian(cflux)

        ## Iterative sigma-clipping
        ## ------------------------
        info('Starting initial outlier detection')
        fmask  = isfinite(cflux)
        omask  = fmask.copy()
        i, nm  = 0, None
        while nm != omask.sum() and i<10:
            nm = omask.sum()
            _, sigma = medsig(cflux[omask])
            omask[fmask] &= (cflux[fmask] < 1+5*sigma) & (cflux[fmask] > 1-5*sigma)
            i += 1
        masks.append(fmask)
        ofrac = (~omask).sum() / omask.size
        if ofrac < 0.25:
            masks[-1] &= omask
            info('  Flagged %i (%4.1f%%) outliers.', (~omask).sum(), 100 * ofrac)
        else:
            info('  Found %i (%4.1f%%) outliers. Not flagging..', (~omask).sum(), 100 * ofrac)

        ## Lomb-Scargle period search
        ## --------------------------
        info('Starting Lomb-Scargle period search')
        mask  = masks[-1]
        nflux = flux - ptrend + nanmedian(ptrend)
        ntime = ds.time - ds.time.mean()
        pflux = np.poly1d(np.polyfit(ntime[mask], nflux[mask], 9))(ntime)
        period, fap = psearch(ds.time[mask], (nflux-pflux)[mask], args.ls_min_period, args.ls_max_period)
        
        if fap < 1e-50:
            ds.is_periodic = True
            ds.ls_fap    = fap
            ds.ls_period = period
        
    ## Kernel selection
    ## ----------------
    args.kernel='basic'
    if args.kernel:
        info('Overriding automatic kernel selection, using %s kernel as given in the command line', args.kernel)
        if 'periodic' in args.kernel and not args.kernel_period:
            logger.critical('Need to give period (--kernel-period) if overriding automatic kernel detection with a periodic kernel. Quitting.')
            exit(1)
        kernel = kernels[args.kernel](period=args.kernel_period)
    else:
        info('  Using %s position kernel', args.default_position_kernel)
        if ds.is_periodic:
            info('  Found periodicity p = {:7.2f} (fap {:7.4e} < 1e-50), will use a quasiperiodic kernel'.format(ds.ls_period, ds.ls_fap))
        else:
            info('  No strong periodicity found, using a basic kernel')

        if args.default_position_kernel.lower() == 'sqrexp':
            kernel = QuasiPeriodicKernel(period=ds.ls_period)   if ds.is_periodic else BasicKernel() 
        else:
            kernel = QuasiPeriodicKernelEP(period=ds.ls_period) if ds.is_periodic else BasicKernelEP()


    ## Detrending
    ## ----------
    for iset in range(ds.nsets):
        if ds.nsets > 1:
            logger.name = 'Worker {:d} <{:d}-{:d}>'.format(mpi_rank, dataset.epic, iset+1)
        np.random.seed(args.seed)
        tstart = time()
        inputs = np.transpose([ds.time,ds.x,ds.y])
        detrender = Detrender(ds.fluxes[iset], inputs, mask=masks[iset],
                              splits=args.splits, kernel=kernel, tr_nrandom=args.tr_nrandom,
                              tr_nblocks=args.tr_nblocks, tr_bspan=args.tr_bspan)

        de = DiffEvol(detrender.neglnposterior, kernel.bounds, args.de_npop)

        ## Period population generation
        ## ----------------------------
        if isinstance(kernel, QuasiPeriodicKernel):
            de._population[:,2] = np.clip(normal(kernel.period, 0.1*kernel.period, size=de.n_pop),
                                          args.ls_min_period, args.ls_max_period)

        ## Global hyperparameter optimisation
        ## ----------------------------------
        info('Starting global hyperparameter optimisation using DE')
        tstart_de = time()
        for i,r in enumerate(de(args.de_niter)):
            info('  DE iteration %3i -ln(L) %4.1f', i, de.minimum_value)
            tcur_de = time()
            if ((de._fitness.ptp() < 3) or (tcur_de - tstart_de > args.de_max_time)) and (i>2):
                break
        info('  DE finished in %i seconds', tcur_de-tstart_de)
        info('  DE minimum found at: %s', np.array_str(de.minimum_location, precision=3, max_line_width=250))
        info('  DE -ln(L) %4.1f', de.minimum_value)

        ## Local hyperparameter optimisation
        ## ---------------------------------
        info('Starting local hyperparameter optimisation')
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=RuntimeWarning, append=True)
                pv, warn = detrender.train(de.minimum_location)
        except ValueError as e:
            logger.error('Local optimiser failed, %s', e)
            logger.error('Skipping the file')
            return
        info('  Local minimum found at: %s', np.array_str(pv, precision=3))

        ## Trend computation
        ## -----------------
        (mt,tt),(mp,tp) = map(lambda a: (nanmedian(a), a-nanmedian(a)), detrender.predict(pv, components=True))

        ## Iterative sigma-clipping
        ## ------------------------
        info('Starting final outlier detection')
        flux = detrender.data.unmasked_flux
        cflux = flux-tp-tt
        cflux /= nanmedian(cflux)

        fmask = isfinite(cflux)
        mhigh = zeros_like(fmask)
        mlow  = zeros_like(fmask)
        mask  = fmask.copy()
        i, nm = 0, None
        while nm != mask.sum() and i<10:
            nm = mask.sum()
            _, sigma = medsig(cflux[mask])
            mhigh[fmask] = cflux[fmask] > 1+5*sigma
            mlow[fmask]  = cflux[fmask] < 1-5*sigma
            mask &= fmask & (~mlow) & (~mhigh)
            i += 1
        ds.mflags[iset][~fmask] |= M_NOTFINITE
        ds.mflags[iset][mhigh]  |= M_OUTLIER_U
        ds.mflags[iset][mlow]   |= M_OUTLIER_D
        
        info('  %5i too high', mhigh.sum())
        info('  %5i too low',  mlow.sum())
        info('  %5i not finite', (~fmask).sum())

        ## Detrending and CDPP computation
        ## -------------------------------
        info('Computing time and position trends')
        dd = detrender.data
        cdpp_r = cdpp(dd.masked_time,   dd.masked_flux)
        cdpp_t = cdpp(dd.unmasked_time, dd.unmasked_flux-tp,    exclude=~dd.mask)
        cdpp_c = cdpp(dd.unmasked_time, dd.unmasked_flux-tp-tt, exclude=~dd.mask)
        results.append(Result(detrender, pv, tt+mt, tp+mp, cdpp_r, cdpp_t, cdpp_c, warn))
        info('  CDPP - raw - %6.3f', cdpp_r)
        info('  CDPP - position component removed - %6.3f', cdpp_t)
        info('  CDPP - full reduction - %6.3f', cdpp_c)
        info('Detrending time %6.3f', time()-tstart)

    info('Finished')
    return dataset, results
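
A note on helpers: medsig is used by both clipping passes above but is not
defined in this excerpt. A common definition, and the assumption made in this
sketch, is the median paired with a robust sigma derived from the median
absolute deviation:

import numpy as np

def medsig(a):
    """Return the median and a robust sigma estimate (1.4826 * MAD)."""
    med = np.nanmedian(a)
    sig = 1.4826 * np.nanmedian(np.abs(a - med))
    return med, sig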
Example #45
0
def main(args):

  # Make numpy report all floating point errors to the log
  # NOTE: warnings were coming up due to tiny data ranges (below machine epsilon)
  np.seterr(all='log')
  np.seterrcall(FPlogger()) # pass an object that implements the write method

  # define some variable sets to plot
  varsets = {
    'N': [
        'NMOBIL', 'NRESORB', 'NUPTAKESALL', 'NUPTAKEL', 'VEGNSUM', 'NETNMIN',
        'LTRFALNALL', 'AVLNSUM', 'AVLNINPUT', 'AVLNLOST', 'ORGNLOST'
    ],
    'C': [
        'NEP', 'NPPALL', 'GPPALL', 'VEGCSUM', 'LTRFALCALL', 'SOMCSHLW',
        'SOMCDEEP', 'SOMCMINEA', 'SOMCMINEC', 'RHMOIST', 'RHQ10', 'SOILLTRFCN'
    ],
    'E': [
        'SNOWFALL','RAINFALL','EETTOTAL','PETTOTAL','TAIR','SOILTAVE',
        'SOILVWC','RZTHAWPCT','ALD',
    ]
  }

  if args.report:
    report_on_file(args)
    sys.exit(0)

  if 'E' == args.varset:
    logging.info("Make sure the cmt file is the right one!")

  logging.debug("Loading netcdf file (%s)..." % args.inputfile)
  dsA = nc.Dataset(args.inputfile)
  titlestring = "%s" % (args.inputfile)



  if args.stitch:
    logging.info("Attempting to stitch stages %s onto inputfile before displaying..." % (args.stitch))

    logging.debug("Create a temporary file to concatenate everything into...")
    tmpdata = nc.Dataset('/tmp/junk.nc', 'w')

    logging.info("Make sure the right files exist...")
    inputdir = os.path.split(args.inputfile)[0]
    bn = os.path.basename(args.inputfile)  # e.g.: 'cmtbgc_yly-eq.nc'
    for stage in args.stitch:
      sn = "%s-%s.nc" % (bn[0:10], stage)
      if not os.path.exists(os.path.join(inputdir,sn)):
        logging.error("File %s does not exist in %s!" % (sn, inputdir))
        logging.error("Cannot perform file stitching. Quitting...")
        sys.exit(-1)
      else:
        logging.info("File exists for stitching...")
        titlestring += "\n%s" % (os.path.join(inputdir,sn))


    logging.debug("Copy the dimensions from dataset A in to the temporary file...")
    for d in dsA.dimensions:
      if 'tstep' == d:
        tmpdata.createDimension(d, None)
      else:
        tmpdata.createDimension(d, len(dsA.dimensions[d]))

    logging.info("Copy values from first input file to tmpdata...")
    if not 'YEAR' in tmpdata.variables:
      tmpdata.createVariable('YEAR', 'i', dsA.variables['YEAR'].dimensions)
    tmpdata.variables['YEAR'][:] = dsA.variables['YEAR'][:]

    for v in varsets[args.varset]:
      print(v)
      if not v in tmpdata.variables:
        tmpdata.createVariable(v, 'f', dsA.variables[v].dimensions)
      if dsA.variables[v].dimensions[0] == 'tstep':
        tmpdata.variables[v][:] = dsA.variables[v][:]
        logging.info("tmpdata.variables[%s].shape: %s" % (v, tmpdata.variables[v].shape))


    logging.debug("Process each requested stage to stitch together...")
    stage_end_indices = ''
    for stage in args.stitch:
      seidx = len(tmpdata.dimensions['tstep'])
      # build a space-delimited string of end-of-stage indices
      stage_end_indices += '%i ' % seidx

      logging.info("First getting time axis data...")
      if not 'YEAR' in tmpdata.variables:
        tmpdata.createVariable('YEAR', 'f', dsA.variables['YEAR'].dimensions)

      tmpdata.variables['YEAR'][seidx:] = get_more_data(stage, 'YEAR', args)

      logging.info("Next, getting all other variables in the var set...")
      for v in varsets[args.varset]:
        if not v in tmpdata.variables:
          tmpdata.createVariable(v, 'f', dsA.variables[v].dimensions)
        if dsA.variables[v].dimensions[0] == 'tstep':
          tmpdata.variables[v][seidx:] = get_more_data(stage, v, args)
          logging.info("tmpdata.variables[%s].shape: %s" % (v, tmpdata.variables[v].shape))

    del dsA
    dsA = tmpdata


  logging.debug("Accquiring figure and subplot objects...")
  fig, axar = plt.subplots(6, 2, sharex=True)

  logging.debug("Setup figure title...")
  fig.suptitle(titlestring)

  # Would like to label xaxis with these:
  xdata = dsA.variables['YEAR'][:]

  logging.info("%s years: [%s ... %s]" % (len(xdata), xdata[0:3], xdata[-3:]))

  logging.debug("Plot each variable in the variable set...")
  for i, v in enumerate(varsets[args.varset]):
    row = i % 6
    col = 0 if i < 6 else 1
    ax = axar[row, col]

    logging.debug( "subplot [%s, %s] %s, dims: %s" % (row, col, v, dsA.variables[v].dimensions))

    #logging.debug("choose data to plot based on variable's dimensions...")
    if dsA.variables[v].dimensions == ('tstep','pft','vegpart'):
      data2plot = round_tiny_range(dsA.variables[v][:,args.pft,0])
      linecollection = ax.plot(data2plot)
      ax.set_title('%s %s %s '%(v, 'PFT', args.pft), fontdict={'fontsize':8})

    elif dsA.variables[v].dimensions == ('tstep','pft'):
      data2plot = round_tiny_range(dsA.variables[v][:,args.pft])
      linecollection = ax.plot(data2plot)
      ax.set_title('%s %s %s '%(v, 'PFT', args.pft), fontdict={'fontsize':8})

    elif dsA.variables[v].dimensions == ('tstep',):
      data2plot = round_tiny_range(dsA.variables[v][:])
      linecollection = ax.plot(data2plot)
      ax.set_title(v, fontdict={'fontsize':8})

    elif dsA.variables[v].dimensions == ('tstep','soilayer'):
      data2plot = round_tiny_range(dsA.variables[v][:,0])
      linecollection = ax.plot(data2plot)
      ax.set_title('%s %s %s '%(v, 'layer', 0), fontdict={'fontsize':8})

    else:
      logging.error("unknown dimensions for variable %s." % v)

    logging.debug("setting the max number of ticks for x and y axes...")
    ax.yaxis.set_major_locator(MaxNLocator(nbins=4, prune='both'))
    ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='both'))

    if args.stitch:
      logging.debug("Setting the end-of-stage marker lines...")
      for seidx in stage_end_indices.split():
        ax.axvline(int(seidx), color='red')



  for row in axar:
    for ax in row:
      if 0 == len(ax.lines):
        logging.debug("Turn off empty axes...")
        ax.set_visible(False)

      logging.debug("Adjust font size for axes tick labels")
      plt.setp(ax.get_yticklabels(), fontsize=8)
      plt.setp(ax.get_xticklabels(), fontsize=8)

  topadj = 0.92 - 0.025*len(titlestring.splitlines())
  print(topadj)
  fig.subplots_adjust(top=topadj)
  fig.subplots_adjust(hspace=.5)

  if args.save:
    saved_file_name = "plot_cmt.png"
    print "Savging plot as '%s'..." % saved_file_name
    plt.savefig(saved_file_name, dpi=72)

  if args.display:
    print "Showing plot..."
    plt.show()
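
main() above references two helpers defined elsewhere in the script: FPlogger,
which must expose a write method so that np.seterr(all='log') can route
floating point messages into the log, and round_tiny_range, which (per the
NOTE at the top of main) guards against data ranges below machine epsilon.
A minimal sketch of both, assuming they behave as the comments describe:

import logging
import numpy as np

class FPlogger(object):
    """Object with a write() method, as required by np.seterr(all='log')."""
    def write(self, msg):
        logging.debug("numpy: %s", msg)

def round_tiny_range(data):
    """Flatten data whose total range is below machine epsilon so matplotlib
    does not warn about degenerate y-limits."""
    data = np.asarray(data, dtype=float)
    if data.size and np.ptp(data) < np.finfo(float).eps:
        return np.full_like(data, data.mean())
    return data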
Example #46
0
def main():
	import optparse
	import logging
	import sys

	parser = optparse.OptionParser()
	parser.add_option('--threads', dest='threads', default=1, type=int, help='Use this many concurrent processors')
	parser.add_option('-v', '--verbose', dest='verbose', action='count', default=0,
					  help='Make more verbose')

	parser.add_option('--grid', '-g', dest='gridn', type=int, default=5, help='Dust parameter grid size')
	parser.add_option('--steps', '-s', dest='steps', type=int, default=10, help='Number of optimization steps')
	parser.add_option('--suffix', dest='suffix', default='', help='Output file suffix')

	parser.add_option('--no-100', dest='no100', action='store_true', default=False,
					  help='Omit PACS-100 data?')

	parser.add_option('--callgrind', dest='callgrind', action='store_true', default=False, help='Turn on callgrind around tractor.optimize()')

	parser.add_option('--resume', '-r', dest='resume', type=int, default=-1, help='Resume from a previous run at the given step?')

	parser.add_option('--zoom', dest='zoom', type=float, default=1, help='Scale down the model to only touch the (1/zoom x 1/zoom) central region of the images')

	parser.add_option('--damp', dest='damp', type=float, default=1., help='LSQR damping')

	opt,args = parser.parse_args()

	if opt.verbose == 0:
		lvl = logging.INFO
		log_init(2)
	else:
		lvl = logging.DEBUG
		log_init(3)
	
	logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)

	if opt.threads > 1 and False:
		global dpool
		import debugpool
		dpool = debugpool.DebugPool(opt.threads)
		Time.add_measurement(debugpool.DebugPoolMeas(dpool))
		mp = multiproc(pool=dpool)
	else:
		print('N threads', opt.threads)
		mp = multiproc(opt.threads)  # , wrap_all=True

	if opt.callgrind:
		import callgrind
	else:
		callgrind = None

	np.seterrcall(np_err_handler)
	np.seterr(all='call')
	#np.seterr(all='raise')

	if opt.resume > -1:
		pfn = 'herschel-%02i%s.pickle' % (opt.resume, opt.suffix)
		print('Reading from', pfn)
		tractor = unpickle_from_file(pfn)
		tractor.mp = mp

		ds = tractor.getCatalog()[0]
		print('DustSheet:', ds)

		# derivs = ds.getParamDerivatives(tim)
		# dim = np.zeros(tim.shape)
		# #for k,deriv in enumerate(derivs[:40]):
		# for k,deriv in enumerate(derivs[::10]):
		# 	dim[:,:] = 0
		# 	deriv.addTo(dim)
		# 	plt.clf()
		# 	plt.imshow(dim, interpolation='nearest', origin='lower')
		# 	plt.savefig('deriv-%04i.png' % k)

		#tim = tractor.getImages()[0]
		# for it,tim in enumerate(tractor.getImages()):
		# 	X = ds._getTransformation(tim)
		# 	# #print 'X', X
		# 	keys = X.keys()
		# 	keys.sort()
		# 	# for k in keys[::10]:
		# 	# for k in keys[:40]:
		# 	for k in keys[::202]:
		# 		I,G,nil,nil = X[k]
		# 		rim = np.zeros_like(tim.getImage())
		# 		rim.ravel()[I] = G
		# 		plt.clf()
		# 		plt.imshow(rim, interpolation='nearest', origin='lower')
		# 		plt.colorbar()
		# 		plt.savefig('rim-%i-%04i.png' % (it,k))
		# 		print 'pix', k
		# sys.exit(0)

		makeplots(tractor, opt.resume, opt.suffix)
		step0 = opt.resume + 1

	else:
		step0 = 0
		tractor = create_tractor(opt)
		tractor.mp = mp

		# zero out invvar outside the model bounds.
		ds = tractor.getCatalog()[0]
		rd = ds.getRaDecCorners()
		for i,tim in enumerate(tractor.getImages()):
			poly = np.array([tim.getWcs().positionToPixel(RaDecPos(rdi[0], rdi[1])) for rdi in rd])
			poly = poly[:-1,:]
			print('Model bounding box in image', tim.name, 'coordinates:')
			print(poly.shape)
			print(poly)
			H,W = tim.shape
			xx,yy = np.meshgrid(np.arange(W), np.arange(H))
			inside = point_in_poly(xx, yy, poly)
			iv = tim.getInvvar()
			iv[(inside == 0)] = 0.
			tim.setInvvar(iv)

		print('Precomputing transformations...')
		ds = tractor.getCatalog()[0]

		# Split the grid-spread matrix into strips...
		async_results = []
		for im in tractor.getImages():
			args = []
			H,W = ds.shape
			dy = 10
			y = 0
			while y <= H:
				args.append((ds, im, y, min(H, y+dy)))
				y += dy
			async_results.append(mp.map_async(_map_trans, args))
		# Glue to strips back together...
		XX = []
		for ar in async_results:
			Xblocks = ar.get()
			X = Xblocks[0]
			for xi in Xblocks[1:]:
				X.update(xi)
			XX.append(X)
			
		for im,X in zip(tractor.getImages(), XX):
			ds._normalizeTransformation(im, X)
			ds._setTransformation(im, X)
		print('done precomputing.')

		makeplots(tractor, 0, opt.suffix)
		pfn = 'herschel-%02i%s.pickle' % (0, opt.suffix)
		pickle_to_file(tractor, pfn)
		print('Wrote', pfn)

	for im in tractor.getImages():
		im.freezeAllBut('sky')

	for i in range(step0, opt.steps):
		if callgrind:
			callgrind.callgrind_start_instrumentation()

		tractor.optimize(damp=opt.damp, alphas=[1e-3, 1e-2, 0.1, 0.3, 1., 3., 10., 30., 100.])

		if callgrind:
			callgrind.callgrind_stop_instrumentation()

		makeplots(tractor, 1 + i, opt.suffix)
		pfn = 'herschel-%02i%s.pickle' % (1 + i, opt.suffix)
		pickle_to_file(tractor, pfn)
		print('Wrote', pfn)
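
np_err_handler is registered above (and again in Example #56 below) but never
defined in these excerpts. A plausible minimal version, which reports the
error kind and where it was raised (the actual project may log or raise
instead):

import traceback

def np_err_handler(errtype, flag):
    # Called by NumPy for each floating point error while np.seterr(all='call')
    # is active.
    print('NumPy floating point error: %s (flag %s)' % (errtype, flag))
    traceback.print_stack()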
Example #47
0
import pickle
import gzip
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
import time
from importlib import import_module

import utils  # project-local helper providing param_file() and logfilename()

comm = MPI.COMM_WORLD
rank = comm.rank

# Start debugging mode when an error is raised
def debugger(errtype, flag):
    print('In debugger!')
    #~ import ipdb
    #~ ipdb.set_trace()
np.seterrcall(debugger)    
np.seterr(all='call')

# Parameters are read from the second command line argument
param = import_module(utils.param_file())
experiment_module = import_module(param.c.experiment.module)
experiment_name = param.c.experiment.name

c = param.c
if rank==0:
    logfilepath = utils.logfilename('')+'/'
else:
    logfilepath = None
c.logfilepath = comm.bcast(logfilepath, root=0)
c.display = False # Keep logfile small.
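
The debugger callback above is a stub with its ipdb lines commented out. To
actually stop in a debugger on floating point errors, a sketch using the
standard library's pdb instead of ipdb:

import pdb
import numpy as np

def debugger(errtype, flag):
    # errtype is a string such as 'divide by zero'; flag is an integer bitmask.
    print('NumPy error: %s (flag %s) -- entering debugger' % (errtype, flag))
    pdb.set_trace()

np.seterrcall(debugger)
np.seterr(all='call')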
Example #48
0
File: Factor.py  Project: Sayan-Paul/kod
def _bivar_factor_operation(phi1, phi2, operation, n_jobs=1):
    """
    Return the product or quotient of two factors, depending on operation.

    Parameters
    ----------
    phi1: factors

    phi2: factors

    operation: M | D
            M: multiplies phi1 and phi2
            D: divides phi1 by phi2
    """
    try:
        from joblib import Parallel, delayed
        use_joblib = True
    except ImportError:
        use_joblib = False

    def err_handler(err_type, flag):
        raise Exceptions.InvalidValueError(err_type)

    np.seterrcall(err_handler)
    np.seterr(divide='raise', over='raise', under='raise', invalid='call')

    phi1_vars = list(phi1.variables)
    phi2_vars = list(phi2.variables)
    common_var_list = [var for var in phi1_vars if var in phi2_vars]
    if common_var_list:
        variables = phi1_vars
        variables.extend([var for var in phi2.variables
                         if var not in common_var_list])
        cardinality = list(phi1.cardinality)
        cardinality.extend(phi2.get_cardinality(var) for var in phi2.variables
                           if var not in common_var_list)

        phi1_indexes = list(range(len(phi1.variables)))
        phi2_indexes = [variables.index(var) for var in phi2.variables]
        values = []
        # NB: concatenating 1-D arrays must use axis=0
        phi1_cumprod = np.delete(np.concatenate(
            (np.array([1]), np.cumprod(phi1.cardinality[::-1])), axis=0)[::-1], 0)
        phi2_cumprod = np.delete(np.concatenate(
            (np.array([1]), np.cumprod(phi2.cardinality[::-1])), axis=0)[::-1], 0)

        if operation == 'M':
            if use_joblib and n_jobs != 1:
                values = Parallel(n_jobs=n_jobs, backend='threading')(
                    delayed(_parallel_helper_m)(index, phi1, phi2,
                                                phi1_indexes, phi2_indexes,
                                                phi1_cumprod, phi2_cumprod)
                    for index in product(*[range(card) for card in cardinality]))
            else:
                # TODO: @ankurankan Make this cleaner
                indexes = np.array(list(map(list, product(*[range(card) for card in cardinality]))))
                values = (phi1.values[np.sum(indexes[:, phi1_indexes] * phi1_cumprod, axis=1).ravel()] *
                          phi2.values[np.sum(indexes[:, phi2_indexes] * phi2_cumprod, axis=1).ravel()])

        elif operation == 'D':
            if use_joblib and n_jobs != 1:
                values = Parallel(n_jobs, backend='threading')(
                    delayed(_parallel_helper_d)(index, phi1, phi2,
                                                phi1_indexes, phi2_indexes,
                                                phi1_cumprod, phi2_cumprod)
                    for index in product(*[range(card) for card in cardinality]))
            else:
                # TODO: @ankurankan Make this cleaner and handle case of division by zero
                for index in product(*[range(card) for card in cardinality]):
                    index = np.array(index)
                    try:
                        values.append(phi1.values[np.sum(index[phi1_indexes] * phi1_cumprod)] /
                                      phi2.values[np.sum(index[phi2_indexes] * phi2_cumprod)])
                    except (Exceptions.InvalidValueError, FloatingPointError):
                        # zero division error should return 0 if both operands
                        # equal to 0. Ref Koller page 365, Fig 10.7
                        values.append(0)

        phi = Factor(variables, cardinality, values)
        return phi
    else:
        values = np.zeros(phi1.values.shape[0] * phi2.values.shape[0])
        phi2_shape = phi2.values.shape[0]
        if operation == 'M':
            for value_index in range(phi1.values.shape[0]):
                values[value_index * phi2_shape: (value_index + 1) * phi2_shape] = (phi1.values[value_index] *
                                                                                    phi2.values)
        elif operation == 'D':
            # reference: Koller, Definition 10.7
            raise ValueError("Factors Division not defined for factors with no"
                             " common scope")
        variables = phi1_vars + phi2_vars
        cardinality = list(phi1.cardinality) + list(phi2.cardinality)
        phi = Factor(variables, cardinality, values)
        return phi
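
The phi1_cumprod / phi2_cumprod arrays built above are row-major strides: the
dot product of a multi-index with them gives that assignment's position in the
flattened values array. A standalone worked example of the same construction:

import numpy as np

cardinality = np.array([2, 3, 4])   # three variables with 2, 3 and 4 states
# Same construction as phi1_cumprod above; yields the C-order strides [12, 4, 1].
strides = np.concatenate((np.array([1]), np.cumprod(cardinality[::-1])))[:-1][::-1]
index = np.array([1, 2, 3])         # the assignment (x0=1, x1=2, x2=3)
flat = np.sum(index * strides)      # 1*12 + 2*4 + 3*1 = 23
assert flat == np.ravel_multi_index(index, cardinality)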
Example #49
0
File: demo_sfnn.py  Project: TZ2016/cgt
import cgt
from cgt.core import get_surrogate_func
from cgt import nn
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
from scipy.special import expit as sigmoid
from param_collection import ParamCollection
from cgt.distributions import gaussian_diagonal
from demo_char_rnn import Table, make_rmsprop_state


def err_handler(err_type, flag):
    print(err_type, flag)
    # raise FloatingPointError('refer to err_handler for more details')
np.seterr(divide='call', over='call', invalid='call')
np.seterrcall(err_handler)
np.set_printoptions(precision=4, suppress=True)
print(cgt.get_config(True))


def generate_examples(N, x, y, p_y):
    X = x * np.ones((N, x.size))
    Y = y * np.ones((N, y.size))
    for i, p in enumerate(p_y):
        if p is not None:
            Y[:, i] = 0.
            Y[:, i][:int(N*p)] = 1.
    np.random.shuffle(Y)
    return X, Y
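
generate_examples tiles x and y into N rows and, for each output dimension i
with p_y[i] not None, sets exactly int(N * p_y[i]) of the (row-shuffled)
labels in that column to 1. A quick illustrative call:

import numpy as np

np.random.seed(0)
X, Y = generate_examples(4, np.array([0.5]), np.zeros(2), [0.5, None])
# X is four copies of x; Y[:, 0] contains exactly two ones (rows shuffled),
# while Y[:, 1] keeps the constant value taken from y.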

Example #50
0
        global_ns = get_shell(self).user_global_ns
        local_ns = get_shell(self).user_ns
        try:
            errcall = eval(args.call_func, global_ns, local_ns)
        except Exception as e:
            raise UsageError('Could not find function %r.\n%s: %s' % 
                (args.call_func, e.__class__.__name__, e))
    elif args.no_call_func:
        errcall = None

    old_options = numpy.geterr()
    old_errcall = numpy.geterrcall()
    numpy.seterr(**kwds)
    if errcall is not sentinel:
        try:
            numpy.seterrcall(errcall)
        except ValueError as e:
            raise UsageError(str(e))
    stack = getattr(self, '_numpy_err_stack', [])
    stack.append((old_options, old_errcall))
    self._numpy_err_stack = stack
    if not args.quiet:
        print_numpy_err(numpy.geterr(), numpy.geterrcall())


@magic_arguments()
@argument('-q', '--quiet', action='store_true',
    help="Do not print the new settings.")
def magic_pop_err(self, arg):
    """ Pop the last set of numpy numerical error handling settings from the
    stack.
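
The push/pop magics above keep an explicit stack of (geterr(), geterrcall())
pairs. For changes scoped to a single block of code, numpy's errstate context
manager performs the same save and restore automatically, including the error
callback via its call keyword; a sketch:

import numpy as np

def handler(errtype, flag):
    print('caught: %s' % errtype)

# Both the seterr settings and the seterrcall handler are restored on exit.
with np.errstate(divide='call', call=handler):
    np.array([1.0]) / 0.0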
Example #51
0
#from collections import OrderedDict

from user import *
from ops import Ops
from fns import Fns

from libpyparsing.pyparsing import *  #TODO: import only what I need

#The few functions below were taken from fourFn.py and SimpleCalc.py in the examples of the pyparsing lib.
#I have commented out some of the lines and modified others. The original files can be found at the following links:
#http://pyparsing.wikispaces.com/file/view/fourFn.py/30154950/fourFn.py
#http://pyparsing.wikispaces.com/file/view/SimpleCalc.py/30112812/SimpleCalc.py


log = Log() #required for numpy exceptions
np.seterrcall(log)
np.seterr(all="log")
#ParserElement.enablePackrat() #WARNING: MIGHT BREAK STUFF


exprStack = []
def pushFirst(strg, loc, toks):
    exprStack.append(toks[0])

def pushUMinus(strg, loc, toks):
    for t in toks:
        if t == '-':
            exprStack.append('unary -')
        else:
            break

bnf = None
Example #52
0
import numpy as np


def err_handler(err_type, flag):
    print("Floating point error (%s), with flag %s" % (err_type, flag))


saved_handler = np.seterrcall(err_handler)
save_err = np.seterr(all='call')
np.array([1, 2, 3]) / 0.0
np.seterrcall(saved_handler)
np.seterr(**save_err)


class Log(object):
    @staticmethod
    def write(msg):
        print("LOG: %s" % msg)


log = Log()
saved_handler = np.seterrcall(log)
save_err = np.seterr(all='log')
np.array([1, 2, 3]) / 0.0
np.seterrcall(saved_handler)
np.seterr(**save_err)
Example #53
0
def assign_variables(assignment_expressions, df, locals_dict, df_alias=None, trace_rows=None):
    """
    Evaluate a set of variable expressions from a spec in the context
    of a given data table.

    Expressions are evaluated using Python's eval function.
    Expressions have access to the variables in locals_dict, and to df itself
    (as 'df', or under df_alias if one is given). They also have access to
    previously assigned targets under the assigned target name.

    Lowercase variables starting with an underscore are temp variables
    (e.g. _local_var) and are not returned except in trace_results.

    Uppercase variables starting with an underscore are temp scalar variables
    (e.g. _LOCAL_SCALAR) and are not returned except in trace_assigned_locals.
    This is useful for defining general-purpose local constants in an expression file.

    Users should take care that expressions (other than temp scalar variables)
    result in a pandas Series (scalars are automatically promoted to Series).

    Parameters
    ----------
    assignment_expressions : pandas.DataFrame of target assignment expressions
        target: target column names
        expression: pandas or python expression to evaluate
    df : pandas.DataFrame
    locals_dict : dict
        This is a dictionary of local variables that will be the environment
        for an evaluation of "python" expressions.
    df_alias : str, optional
        Name under which df is made available to the expressions (default 'df').
    trace_rows : series or array of bools to use as mask to select target rows to trace

    Returns
    -------
    variables : pandas.DataFrame
        Will have the index of `df` and columns named by target and containing
        the result of evaluating expression
    trace_results : pandas.DataFrame or None
        a dataframe containing the eval result values for each assignment expression
    trace_assigned_locals : OrderedDict or None
        the values assigned to temp scalar targets, keyed by target name
    """

    np_logger = NumpyLogger(logger)

    def is_throwaway(target):
        return target == '_'

    def is_temp_scalar(target):
        return target.startswith('_') and target.isupper()

    def is_temp(target):
        return target.startswith('_')

    def to_series(x):
        if x is None or np.isscalar(x):
            return pd.Series([x] * len(df.index), index=df.index)
        return x

    assert assignment_expressions.shape[0] > 0

    trace_assigned_locals = trace_results = None
    if trace_rows is not None:
        # convert to numpy array so we can slice ndarrays as well as series
        trace_rows = np.asanyarray(trace_rows)
        if trace_rows.any():
            trace_results = OrderedDict()
            trace_assigned_locals = OrderedDict()

    # avoid touching caller's passed-in locals_d parameter (they may be looping)
    _locals_dict = local_utilities()
    if locals_dict is not None:
        _locals_dict.update(locals_dict)
    if df_alias:
        _locals_dict[df_alias] = df
    else:
        _locals_dict['df'] = df
    local_keys = list(_locals_dict.keys())

    # build a dataframe of eval results for non-temp targets
    # since we allow targets to be recycled, we want to only keep the last usage
    variables = OrderedDict()

    # need to be able to identify which variables causes an error, which keeps
    # this from being expressed more parsimoniously
    for e in zip(assignment_expressions.target, assignment_expressions.expression):
        target, expression = e

        assert isinstance(target, str), \
            "expected target '%s' for expression '%s' to be string not %s" % \
            (target, expression, type(target))

        if target in local_keys:
            logger.warning("assign_variables target obscures local_d name '%s'", str(target))

        if is_temp_scalar(target) or is_throwaway(target):
            try:
                x = eval(expression, globals(), _locals_dict)
            except Exception as err:
                logger.error("assign_variables error: %s: %s", type(err).__name__, str(err))
                logger.error("assign_variables expression: %s = %s", str(target), str(expression))
                raise err

            if not is_throwaway(target):
                _locals_dict[target] = x
                if trace_assigned_locals is not None:
                    trace_assigned_locals[uniquify_key(trace_assigned_locals, target)] = x
            continue

        try:

            # FIXME - log any numpy warnings/errors but don't raise
            np_logger.target = str(target)
            np_logger.expression = str(expression)
            saved_handler = np.seterrcall(np_logger)
            save_err = np.seterr(all='log')

            # FIXME should whitelist globals for security?
            globals_dict = {}
            expr_values = to_series(eval(expression, globals_dict, _locals_dict))

            np.seterr(**save_err)
            np.seterrcall(saved_handler)

        except Exception as err:
            logger.error("assign_variables error: %s: %s", type(err).__name__, str(err))
            logger.error("assign_variables expression: %s = %s", str(target), str(expression))
            raise err

        if not is_temp(target):
            variables[target] = expr_values

        if trace_results is not None:
            trace_results[uniquify_key(trace_results, target)] = expr_values[trace_rows]

        # update locals to allows us to ref previously assigned targets
        _locals_dict[target] = expr_values

    if trace_results is not None:

        trace_results = pd.DataFrame.from_dict(trace_results)

        trace_results.index = df[trace_rows].index

        # add df columns to trace_results
        trace_results = pd.concat([df[trace_rows], trace_results], axis=1)

    # we stored result in dict - convert to df
    variables = util.df_from_dict(variables, index=df.index)

    return variables, trace_results, trace_assigned_locals
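
NumpyLogger is not defined in this excerpt. All the code above requires is an
object with a write method (for np.seterr(all='log')) plus the target and
expression attributes that are set before each eval; a minimal sketch
consistent with that usage:

class NumpyLogger(object):
    """Route numpy 'log' mode messages to a logger, tagged with the
    target/expression being evaluated when the message fired."""
    def __init__(self, logger):
        self.logger = logger
        self.target = ''
        self.expression = ''

    def write(self, msg):
        self.logger.warning("numpy: %s in expression: %s = %s",
                            msg.rstrip(), self.target, self.expression)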
Example #54
0
logging.basicConfig(filename=conf_dir+"pychemqt.log", filemode="w",
                    level=loglevel, datefmt="%d-%b-%Y %H:%M:%S", format=fmt)
logging.info(
    QtWidgets.QApplication.translate("pychemqt", "Starting pychemqt"))


# Derive numpy error log to pychemqt log
class NumpyErrorLog(object):
    """Numpy error message catch and send to pychemqt log
    Use debug level for this messages"""
    @staticmethod
    def write(msg):
        logging.debug(msg)

from numpy import seterr, seterrcall  # noqa
seterrcall(NumpyErrorLog)
seterr(all='log')
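
# With the two lines above in place, any floating point problem is routed to
# logging.debug via NumpyErrorLog. Illustrative (not part of pychemqt):
#   from numpy import array
#   array([1.0]) / 0.0   # logged as something like
#                        # "Warning: divide by zero encountered in divide"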


class SplashScreen(QtWidgets.QSplashScreen):
    """Class to define a splash screen to show loading progress"""
    def __init__(self):
        QtWidgets.QSplashScreen.__init__(
            self,
            QtGui.QPixmap(os.environ["pychemqt"] + "/images/splash.jpg"))
        QtWidgets.QApplication.flush()

    def showMessage(self, msg):
        """Procedure to update message in splash"""
        align = QtCore.Qt.Alignment(QtCore.Qt.AlignBottom |
                                    QtCore.Qt.AlignRight |
Example #55
0
def initialize(log_file, level='INFO', modules=[]):
  '''
    Start logging of all modules of the plot-script.
  '''
  global logger
  if level=='DEBUG':
    level=logging.DEBUG
  else:
    level=logging.INFO
  file_handle=logging.FileHandler(log_file, 'w')
  file_handle.setLevel(level)
  formatter=logging.Formatter("%(asctime)s %(levelname) 8s %(message)s")
  # add the formatter to the file handler
  file_handle.setFormatter(formatter)
  console_handle=logging.StreamHandler()
  console_handle.setLevel(logging.INFO)
  logger=logging.getLogger() # get the root logger
  logger.setLevel(logging.DEBUG)
  logger.addHandler(console_handle)
  logger.addHandler(file_handle)
  sys.stdout=RedirectOutput(sys.stdout, logger.info)
  sys.stderr=RedirectOutput(sys.stderr, logger.error, connect_on_keyword=[('Warning', logger.warning)])
  try:
    import numpy
  except ImportError:
    pass
  else:
    # log numpy errors as warnings
    numpy.seterrcall(numpy_error_handler)
    numpy.seterr(all='call')
  # redirect warnings to the logger
  warnings.resetwarnings()
  warnings.simplefilter('always')
  try:
    logging.captureWarnings(True)
  except AttributeError:
    pass
  if level==logging.DEBUG:
    # In complete debug mode function calls of defined modules get logged, too
    logger.debug("Beginning initialize logging for all modules...")
    #sys.exc_clear=log_call(sys.exc_clear)
    for module in modules:
      if module.startswith('*'):
        if module.endswith('*'):
          module=module.strip('*')
          log_decorator=log_both
        else:
          module=module.strip('*')
          log_decorator=log_input
      elif module.endswith('*'):
        module=module.strip('*')
        log_decorator=log_output
      else:
        log_decorator=log_call
      if len(module.split('.'))>1:
        imported_module=__import__(module, globals(), locals(),
                                   fromlist=(module.split('.')[-1]))
      else:
        imported_module=__import__(module, globals(), locals())
      logger.debug('    logging module %s' % imported_module.__name__)
      logon(imported_module, log_decorator=log_decorator)
    logger.debug("... ready initializing the debug system.")
    decorators.logger=logger
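
numpy_error_handler is not defined in this excerpt; given the comment that
numpy errors should be logged as warnings, a minimal sketch would be:

def numpy_error_handler(errtype, flag):
  # invoked by numpy for every floating point error (numpy.seterr(all='call'))
  logger.warning('numpy floating point error: %s (flag %s)', errtype, flag)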
Example #56
0
def check_priors():
	np.seterrcall(np_err_handler)
	np.seterr(all='call')


	import logging
	import sys
	lvl = logging.DEBUG
	log_init(3)
	logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)

	if True:
		# Check two-pixel priors: smoothness.
		H,W = 1,2
		logsa = np.zeros((H,W)) + np.log(1e-3)
		logt  = np.zeros((H,W)) + np.log(17.)
		emis  = np.zeros((H,W)) + 2.
		dwcs = Tan(11.2, 41.9, 1, 1, 1e-3, 0, 0, 1e-3, W, H)

		ds = DustSheet(logsa, logt, emis, dwcs)
		cat = Catalog()
		cat.append(ds)
		tractor = Tractor()
		tractor.setCatalog(cat)

		p0 = tractor.getParams()
		print('lnp0', tractor.getLogProb())

		if True:
			# check getLogProb()
			for j,xx in enumerate([
				np.linspace(np.log(1e-5), np.log(1e-1), 20),
				np.linspace(np.log(1e-5), np.log(1e-1), 20),
				np.linspace(np.log(10.), np.log(20.), 20),
				np.linspace(np.log(10.), np.log(20.), 20),
				np.linspace(0., 4., 20),
				np.linspace(0., 4., 20),
				]):
				pp = []
				for x in xx:
					tractor.setParam(j, x)
					p = tractor.getLogProb()
					pp.append(p)
				tractor.setParam(j, p0[j])
				plt.clf()
				plt.plot(xx, pp, 'ro-')
				plt.title(ds.getParamNames()[j])
				plt.savefig('p%i.png' % (20 + j))
			

			# set the absolute priors to have little effect and repeat.
			ds.prior_logt_std = np.log(100.)
			ds.prior_emis_std = np.log(100.)
			for j,xx in enumerate([
				np.linspace(np.log(1e-5), np.log(1e-1), 20),
				np.linspace(np.log(1e-5), np.log(1e-1), 20),
				np.linspace(np.log(10.), np.log(20.), 20),
				np.linspace(np.log(10.), np.log(20.), 20),
				np.linspace(0., 4., 20),
				np.linspace(0., 4., 20),
				]):
				pp = []
				for x in xx:
					tractor.setParam(j, x)
					p = tractor.getLogProb()
					pp.append(p)
				tractor.setParam(j, p0[j])
				plt.clf()
				plt.plot(xx, pp, 'ro-')
				plt.title(ds.getParamNames()[j])
				plt.savefig('p%i.png' % (30 + j))



			# revert the priors
			ds.prior_logt_std = np.log(1.2)
			ds.prior_emis_std = np.log(0.5)

		# check getLogPriorChi.
		for j,(ip,val) in enumerate([
			(0, np.log(1e-1)),
			(1, np.log(1e-5)),
			(2, np.log(5.)),
			(3, np.log(5.)),
			(2, np.log(30.)),
			(3, np.log(30.)),
			(4, 1.),
			(5, 1.),
			(4, 3.),
			(5, 3.),
			]):
			print()
			print()
			print('Setting', ds.getParamNames()[ip], 'from', p0[ip], 'to', val)

			tractor.setParams(p0)
			tractor.setParam(ip, val)
			xx = [val]
			xxall = [tractor.getParams()]
			pp = [tractor.getLogProb()]
			for i in range(10):
				tractor.optimize()  # damp=1e-3
				xx.append(tractor.getParams()[ip])
				pp.append(tractor.getLogProb())
				xxall.append(tractor.getParams())
			plt.clf()
			plt.plot(xx, pp, 'ro-')
			plt.axvline(val, color='r', lw=2, alpha=0.5)
			plt.title(ds.getParamNames()[ip])
			plt.savefig('p%i.png' % (j+40))

			plt.clf()
			xxall = np.vstack(xxall)
			print('xxall', xxall.shape)
			for i in range(6):
				#plt.subplot(1,3,(i/2)+1)
				#plt.plot(xxall[:,i], pp, 'ro-')
				#if i == ip:
				#		plt.axvline(xxall[0,i], color='r', lw=2, alpha=0.5)
				#plt.title(ds.getParamNames()[i])
				plt.subplot(3,1,(i//2)+1)
				c = 'b'
				if i == ip:
					c = 'r'
				plt.plot(xxall[:,i], 'o-', color=c)
				plt.title(ds.getParamNames()[i])
			plt.savefig('p%i.png' % (j+50))

	if False:
		# Check single-pixel priors: getLogPrior()
		N = 1

		H,W = N,N
		logsa = np.zeros((H,W)) + np.log(1e-3)
		logt  = np.zeros((H,W)) + np.log(17.)
		emis  = np.zeros((H,W)) + 2.
		dwcs = Tan(11.2, 41.9, 1, 1, 1e-3, 0, 0, 1e-3, N, N)

		ds = DustSheet(logsa, logt, emis, dwcs)
		cat = Catalog()
		cat.append(ds)
		tractor = Tractor()
		tractor.setCatalog(cat)

		p0 = tractor.getParams()
		print('lnp0', tractor.getLogProb())

		# no prior on solid angle
		# N(log(17.), log(1.2)) on T
		# N(2, 0.5) on emis
		for j,xx in enumerate([
			np.linspace(np.log(1e-5), np.log(1e-1), 20),
			np.linspace(np.log(10.), np.log(20.), 20),
			np.linspace(0., 4., 20),
			]):
			pp = []
			for x in xx:
				tractor.setParam(j, x)
				p = tractor.getLogProb()
				pp.append(p)
			tractor.setParam(j, p0[j])
			plt.clf()
			plt.plot(xx, pp, 'ro-')
			plt.title(ds.getParamNames()[j])
			plt.savefig('p%i.png' % j)

		# Check single-pixel priors: getPriorChi()
		for j,(ip,val) in enumerate([
			(0, 1e-2),
			(1, np.log(5.)),
			(1, np.log(30.)),
			(2, 1.),
			(2, 3.),
			]):
			print()
			print()
			print('Setting', ds.getParamNames()[ip], 'to', val)

			tractor.setParams(p0)
			tractor.setParam(ip, val)
			xx = [val]
			pp = [tractor.getLogProb()]
			for i in range(10):
				tractor.optimize(damp=1e-3)
				xx.append(tractor.getParams()[ip])
				pp.append(tractor.getLogProb())
			plt.clf()
			plt.plot(xx, pp, 'ro-')
			plt.title(ds.getParamNames()[ip])
			plt.savefig('p%i.png' % (j+10))
def catch_nans():
    np.seterr(invalid="call")
    np.seterrcall(examine_nan)
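
examine_nan is registered by catch_nans but not shown. A sketch that drops
into the debugger the first time an invalid operation (one that would produce
a NaN) occurs:

import pdb

def examine_nan(errtype, flag):
    # catch_nans() routes only 'invalid' errors here; inspect the offending frame.
    print('Invalid floating point operation: %s (flag %s)' % (errtype, flag))
    pdb.set_trace()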