Example 1
def assert_almost_equal_inf(x, y, decimal=6, msg=None):
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    assert_equal(np.isposinf(x), np.isposinf(y))
    assert_equal(np.isneginf(x), np.isneginf(y))
    assert_equal(np.isnan(x), np.isnan(y))
    assert_almost_equal(x[np.isfinite(x)], y[np.isfinite(y)])
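A short, self-contained usage sketch (not from the original test suite) of the pattern above: infinities and NaNs must match position-by-position, and only the finite entries are compared approximately.

import numpy as np
from numpy.testing import assert_equal, assert_almost_equal

x = np.array([1.0, np.inf, -np.inf, np.nan, 2.0000001])
y = np.array([1.0, np.inf, -np.inf, np.nan, 2.0])

# The same checks the helper performs, spelled out inline:
assert_equal(np.isposinf(x), np.isposinf(y))
assert_equal(np.isneginf(x), np.isneginf(y))
assert_equal(np.isnan(x), np.isnan(y))
assert_almost_equal(x[np.isfinite(x)], y[np.isfinite(y)], decimal=6)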
Example 2
def non_matches(arr, val):
    '''
    Given an ndarray and an arbitrary value, including np.nan,
    np.inf, etc., return an ndarray that contains only the
    elements that are *not* equal to val.

    :param arr: n-dimensional numpy array
    :type arr: numpy.ndarray
    :param val: value, including special values such as numpy.nan, numpy.inf and -numpy.inf
    :type val: ANY.
    '''
    
    # Special value?
    if np.isfinite(val):
        # No, just a normal value:
        return arr[arr != val]
    # val is a special value, such as numpy.nan.
    # Create an ndarray with True/False entries that reflect
    # which entries are not equal to val. Check +inf and -inf
    # before the generic isinf test, otherwise those branches
    # can never be reached:
    elif np.isnan(val):
        cond = np.logical_not(np.isnan(arr))
    elif np.isposinf(val):
        cond = np.logical_not(np.isposinf(arr))
    elif np.isneginf(val):
        cond = np.logical_not(np.isneginf(arr))
    else:
        # Any remaining non-finite value: generic inf check.
        cond = np.logical_not(np.isinf(arr))

    # Use the True/False ndarray as a mask over arr:
    return arr[cond]
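A minimal, self-contained sketch (not part of the original module) of the masking idea used above: build a boolean array with np.isnan/np.isposinf and index with its negation.

import numpy as np

arr = np.array([1.0, np.nan, np.inf, -np.inf, 2.0])

# Drop NaNs: True/False mask, then boolean indexing.
no_nan = arr[np.logical_not(np.isnan(arr))]        # [ 1. inf -inf  2.]

# Drop only +inf, keeping -inf.
no_posinf = arr[np.logical_not(np.isposinf(arr))]  # [ 1. nan -inf  2.]

print(no_nan)
print(no_posinf)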
Example 3
    def test_init_owa_inf(self):
        r"""Test of initialization and __init__ -- OWA.

        Method: OpticalSystem offers an affordance for setting OWA = +Infinity
        from a JSON specs-file: if OWA is supplied as 0, it is set to
        +Infinity.  We instantiate OpticalSystem objects and verify that this
        is done.
        """
        for specs in [specs_default, specs_simple, specs_multi]:
            # the input dict is modified in-place -- so copy it
            our_specs = deepcopy(specs)
            our_specs['OWA'] = 0
            for syst in our_specs['starlightSuppressionSystems']:
                syst['OWA'] = 0
            optsys = self.fixture(**deepcopy(our_specs))
            self.assertTrue(np.isposinf(optsys.OWA.value))
            for syst in optsys.starlightSuppressionSystems:
                self.assertTrue(np.isposinf(syst['OWA'].value))
        # repeat, but allow the special value to propagate up
        for specs in [specs_default, specs_simple, specs_multi]:
            # the input dict is modified in-place -- so copy it
            our_specs = deepcopy(specs)
            for syst in our_specs['starlightSuppressionSystems']:
                syst['OWA'] = 0
            optsys = self.fixture(**deepcopy(our_specs))
            self.assertTrue(np.isposinf(optsys.OWA.value))
Example 4
    def __add__(self, other):

        assert isinstance(other, ShapeFunction), "Can only add other shape function"

        assert self.name == other.name, "Cannot add shapes of different features"

        new_splits = self.splits.copy()
        new_vals = self.values.copy()

        for split, val in zip(other.splits, other.values):
            idx = np.searchsorted(new_splits, split, side='right')
            new_val = val
            if split in new_splits:
                idx_2 = np.argwhere(new_splits == split)
                new_vals[idx_2] = new_vals[idx_2] + new_val
            elif idx == len(new_splits) and (~np.isposinf(split)):
                new_splits = np.append(new_splits, split)
                new_vals = np.append(new_vals, new_val)
            elif np.isposinf(split):
                new_vals[-1] = new_vals[-1] + new_val
            else:
                new_splits = np.insert(new_splits, idx, split)
                new_vals = np.insert(new_vals, idx, new_val)

        return ShapeFunction(new_splits, new_vals, self.name)
Example 5
def imagesDiffer(imageArr1, imageArr2, skipMaskArr=None, rtol=1.0e-05, atol=1e-08):
    """Compare the pixels of two image arrays; return True if close, False otherwise
    
    Inputs:
    - image1: first image to compare
    - image2: second image to compare
    - skipMaskArr: pixels to ignore; nonzero values are skipped
    - rtol: relative tolerance (see below)
    - atol: absolute tolerance (see below)
    
    rtol and atol are positive, typically very small numbers.
    The relative difference (rtol * abs(b)) and the absolute difference "atol" are added together
    to compare against the absolute difference between "a" and "b".
    
    Return a string describing the error if the images differ significantly, an empty string otherwise
    """
    retStrs = []
    if skipMaskArr is not None:
        maskedArr1 = numpy.ma.array(imageArr1, copy=False, mask = skipMaskArr)
        maskedArr2 = numpy.ma.array(imageArr2, copy=False, mask = skipMaskArr)
        filledArr1 = maskedArr1.filled(0.0)
        filledArr2 = maskedArr2.filled(0.0)
    else:
        filledArr1 = imageArr1
        filledArr2 = imageArr2

    nan1 = numpy.isnan(filledArr1)
    nan2 = numpy.isnan(filledArr2)
    if numpy.any(nan1 != nan2):
        retStrs.append("NaNs differ")

    posinf1 = numpy.isposinf(filledArr1)
    posinf2 = numpy.isposinf(filledArr2)
    if numpy.any(posinf1 != posinf2):
        retStrs.append("+infs differ")

    neginf1 = numpy.isneginf(filledArr1)
    neginf2 = numpy.isneginf(filledArr2)
    if numpy.any(neginf1 != neginf2):
        retStrs.append("-infs differ")

    # compare values that should be comparable (are neither infinite, nan nor masked)
    valSkipMaskArr = nan1 | nan2 | posinf1 | posinf2 | neginf1 | neginf2
    if skipMaskArr is not None:
        valSkipMaskArr |= skipMaskArr
    valMaskedArr1 = numpy.ma.array(imageArr1, copy=False, mask = valSkipMaskArr)
    valMaskedArr2 = numpy.ma.array(imageArr2, copy=False, mask = valSkipMaskArr)
    valFilledArr1 = valMaskedArr1.filled(0.0)
    valFilledArr2 = valMaskedArr2.filled(0.0)
    
    if not numpy.allclose(valFilledArr1, valFilledArr2, rtol=rtol, atol=atol):
        errArr = numpy.abs(valFilledArr1 - valFilledArr2)
        maxErr = errArr.max()
        maxPosInd = numpy.where(errArr==maxErr)
        maxPosTuple = (maxPosInd[1][0], maxPosInd[0][0])
        errStr = "maxDiff=%s at position %s; value=%s vs. %s" % \
            (maxErr, maxPosTuple, valFilledArr1[maxPosInd][0], valFilledArr2[maxPosInd][0])
        retStrs.insert(0, errStr)
    return "; ".join(retStrs)
Example 6
def _generate_colorbar_ticks_label(
    data_transform=False, colorbarlabel=None, trans_base_list=None, forcelabel=None, plotlev=None, plotlab=None
):
    """
    Return (colorbar_ticks,colorbar_labels)
    """
    # data_transform==True and levels!=None
    if data_transform == True:
        if colorbarlabel != None:
            colorbarlabel = pb.iteflat(colorbarlabel)
            transformed_colorbarlabel_ticks, x, y, trans_base_list = mathex.plot_array_transg(
                colorbarlabel, trans_base_list, copy=True
            )

        # Note: the if/else blocks are organized in the first tier by checking
        # whether the two ends are -inf/inf, and in the second tier by checking
        # whether colorbarlabel is None
        if np.isneginf(plotlab[0]) and np.isposinf(plotlab[-1]):
            if colorbarlabel != None:
                ftuple = (transformed_colorbarlabel_ticks, colorbarlabel)
            else:
                ftuple = (plotlev, plotlab[1:-1])
        elif np.isneginf(plotlab[0]) or np.isposinf(plotlab[-1]):
            raise ValueError("It's strange to set only side as infitive")
        else:
            if colorbarlabel != None:
                ftuple = (transformed_colorbarlabel_ticks, colorbarlabel)
            else:
                ftuple = (plotlev, plotlab)

    # data_transform==False
    else:
        if np.isneginf(plotlab[0]) and np.isposinf(plotlab[-1]):
            # if colorbarlabel is forced, then ticks and ticklabels will be forced.
            if colorbarlabel != None:
                ftuple = (colorbarlabel, colorbarlabel)
            # This by default will be done, it's maintained here only for clarity.
            else:
                ftuple = (plotlab[1:-1], plotlab[1:-1])
        elif np.isneginf(plotlab[0]) or np.isposinf(plotlab[-1]):
            raise ValueError("It's strange to set only side as infitive")
        else:
            if colorbarlabel != None:
                ftuple = (colorbarlabel, colorbarlabel)
            else:
                ftuple = (plotlab, plotlab)

    ftuple = list(ftuple)
    if forcelabel != None:
        if len(forcelabel) != len(ftuple[1]):
            raise ValueError(
                """the length of forcelabel and the
                length of the labeled ticks are not equal!"""
            )
        else:
            ftuple[1] = forcelabel

    return ftuple
Example 7
def _transform_data(pdata, levels, data_transform):
    """
    Return [pdata,plotlev,plotlab,extend,trans_base_list];
    if data_transform == False, trans_base_list = None.

    Notes:
    ------
    pdata: data used for contourf plotting.
    plotlev: the levels used in contourf plotting.
    extend: the value for the parameter extend in contourf.
    trans_base_list: cf. mathex.plot_array_transg
    """
    if levels == None:
        ftuple = (pdata, None, None, "neither")
        if data_transform == True:
            raise Warning("Strange levels is None but data_transform is True")
    else:
        if data_transform == True:
            # make the data transform before plotting.
            pdata_trans, plotlev, plotlab, trans_base_list = mathex.plot_array_transg(pdata, levels, copy=True)
            if np.isneginf(plotlab[0]) and np.isposinf(plotlab[-1]):
                ftuple = (pdata_trans, plotlev[1:-1], plotlab, "both")
            elif np.isneginf(plotlab[0]) or np.isposinf(plotlab[-1]):
                raise ValueError(
                    """only one end is set to infinity; please
                    set both ends to infinity if an arrow colorbar is wanted."""
                )
            else:
                ftuple = (pdata_trans, plotlev, plotlab, "neither")
        # data_transform==False
        else:
            plotlev = pb.iteflat(levels)
            plotlab = pb.iteflat(levels)
            if np.isneginf(plotlab[0]) and np.isposinf(plotlab[-1]):
                # here the levels would be like [np.NINF,1,2,3,np.PINF]
                # in following contourf, all values <1 and all values>3 will be
                # automatically plotted in the color of two arrows.
                # easy to see in this example:
                # a=np.tile(np.arange(10),10).reshape(10,10);
                # fig,ax=g.Create_1Axes();
                # cs=ax.contourf(a,levels=np.arange(2,7),extend='both');
                # plt.colorbar(cs)
                ftuple = (pdata, plotlev[1:-1], plotlab, "both")
            elif np.isneginf(plotlab[0]) or np.isposinf(plotlab[-1]):
                raise ValueError(
                    """only one end is set to infinity; please
                    set both ends to infinity if an arrow colorbar is wanted."""
                )
            else:
                ftuple = (pdata, plotlev, plotlab, "neither")
    datalist = list(ftuple)

    if data_transform == True:
        datalist.append(trans_base_list)
    else:
        datalist.append(None)
    return datalist
Example 8
def _diagnose(self):

    # Update log.
    self.logger.debug("diagnose: data: shape: " + str(self.data.shape))
    self.logger.debug("diagnose: data: dtype: " + str(self.data.dtype))
    self.logger.debug("diagnose: data: size: %.2fMB", self.data.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data: nans: " + str(np.sum(np.isnan(self.data))))
    self.logger.debug("diagnose: data: -inf: " + str(np.sum(np.isneginf(self.data))))
    self.logger.debug("diagnose: data: +inf: " + str(np.sum(np.isposinf(self.data))))
    self.logger.debug("diagnose: data: positives: " + str(np.sum(self.data > 0)))
    self.logger.debug("diagnose: data: negatives: " + str(np.sum(self.data < 0)))
    self.logger.debug("diagnose: data: mean: " + str(np.mean(self.data)))
    self.logger.debug("diagnose: data: min: " + str(np.min(self.data)))
    self.logger.debug("diagnose: data: max: " + str(np.max(self.data)))

    self.logger.debug("diagnose: data_white: shape: " + str(self.data_white.shape))
    self.logger.debug("diagnose: data_white: dtype: " + str(self.data_white.dtype))
    self.logger.debug("diagnose: data_white: size: %.2fMB", self.data_white.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data_white: nans: " + str(np.sum(np.isnan(self.data_white))))
    self.logger.debug("diagnose: data_white: -inf: " + str(np.sum(np.isneginf(self.data_white))))
    self.logger.debug("diagnose: data_white: +inf: " + str(np.sum(np.isposinf(self.data_white))))
    self.logger.debug("diagnose: data_white: positives: " + str(np.sum(self.data_white > 0)))
    self.logger.debug("diagnose: data_white: negatives: " + str(np.sum(self.data_white < 0)))
    self.logger.debug("diagnose: data_white: mean: " + str(np.mean(self.data_white)))
    self.logger.debug("diagnose: data_white: min: " + str(np.min(self.data_white)))
    self.logger.debug("diagnose: data_white: max: " + str(np.max(self.data_white)))

    self.logger.debug("diagnose: data_dark: shape: " + str(self.data_dark.shape))
    self.logger.debug("diagnose: data_dark: dtype: " + str(self.data_dark.dtype))
    self.logger.debug("diagnose: data_dark: size: %.2fMB", self.data_dark.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data_dark: nans: " + str(np.sum(np.isnan(self.data_dark))))
    self.logger.debug("diagnose: data_dark: -inf: " + str(np.sum(np.isneginf(self.data_dark))))
    self.logger.debug("diagnose: data_dark: +inf: " + str(np.sum(np.isposinf(self.data_dark))))
    self.logger.debug("diagnose: data_dark: positives: " + str(np.sum(self.data_dark > 0)))
    self.logger.debug("diagnose: data_dark: negatives: " + str(np.sum(self.data_dark < 0)))
    self.logger.debug("diagnose: data_dark: mean: " + str(np.mean(self.data_dark)))
    self.logger.debug("diagnose: data_dark: min: " + str(np.min(self.data_dark)))
    self.logger.debug("diagnose: data_dark: max: " + str(np.max(self.data_dark)))

    self.logger.debug("diagnose: theta: shape: " + str(self.theta.shape))
    self.logger.debug("diagnose: theta: dtype: " + str(self.theta.dtype))
    self.logger.debug("diagnose: theta: size: %.2fMB", self.theta.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: theta: nans: " + str(np.sum(np.isnan(self.theta))))
    self.logger.debug("diagnose: theta: -inf: " + str(np.sum(np.isneginf(self.theta))))
    self.logger.debug("diagnose: theta: +inf: " + str(np.sum(np.isposinf(self.theta))))
    self.logger.debug("diagnose: theta: positives: " + str(np.sum(self.theta > 0)))
    self.logger.debug("diagnose: theta: negatives: " + str(np.sum(self.theta < 0)))
    self.logger.debug("diagnose: theta: mean: " + str(np.mean(self.theta)))
    self.logger.debug("diagnose: theta: min: " + str(np.min(self.theta)))
    self.logger.debug("diagnose: theta: max: " + str(np.max(self.theta)))

    self.logger.info("diagnose [ok]")
Example 9
    def test_neginf(self):
        arr = np.empty(100)
        arr[:] = -np.inf
        for np_func, acml_func in self.vector_funcs:
            np_out = np_func(arr)
            acml_out = acml_func(arr)

            equal_nan = np.isnan(np_out) == np.isnan(acml_out)
            equal_posinf = np.isposinf(np_out) == np.isposinf(acml_out)
            equal_neginf = np.isneginf(np_out) == np.isneginf(acml_out)
            self.assertTrue( np.alltrue(equal_nan), msg="NaN-test failed for %s" % acml_func)
            self.assertTrue( np.alltrue(equal_posinf), msg="posinf-test failed for %s" % acml_func)
            self.assertTrue( np.alltrue(equal_neginf), msg="neginf-test failed for %s" % acml_func)
Example 10
def traverse_data(obj, is_numpy=is_numpy, use_numpy=True):
    """ Recursively traverse an object until a flat list is found.

    If NumPy is available, the flat list is converted to a numpy array
    and passed to transform_array() to handle ``nan``, ``inf``, and
    ``-inf``.

    Otherwise, iterate through all items, converting non-JSON items.

    Args:
        obj (list) : a list of values or lists
        is_numpy (bool, optional): Whether NumPy is available
            (default: True if NumPy is importable)
        use_numpy (bool, optional): Toggle NumPy as a dependency for testing.
            This argument is only useful for testing (default: True)
    """
    is_numpy = is_numpy and use_numpy
    if is_numpy and all(isinstance(el, np.ndarray) for el in obj):
        return [transform_array(el) for el in obj]
    obj_copy = []
    for item in obj:
        if isinstance(item, (list, tuple)):
            obj_copy.append(traverse_data(item, is_numpy, use_numpy))
        elif isinstance(item, float):
            if np.isnan(item):
                item = 'NaN'
            elif np.isposinf(item):
                item = 'Infinity'
            elif np.isneginf(item):
                item = '-Infinity'
            obj_copy.append(item)
        else:
            obj_copy.append(item)
    return obj_copy
Example 11
    def Draw(self, args=None):
        """Draw the various functions"""

        if not args or "SAME" not in args:
            # make a 'blank' function to occupy the complete range of x values:
            lower_lim = min([lim[0] for lim in self.functions_dict.keys()])
            if np.isneginf(lower_lim):
                lower_lim = -999
            upper_lim = max([lim[1] for lim in self.functions_dict.keys()])
            if np.isposinf(upper_lim):
                upper_lim = 999
            blank = ROOT.TF1("blank" + str(np.random.randint(0, 10000)), "1.5", lower_lim, upper_lim)
            blank.Draw()
            max_value = max([func.GetMaximum(lim[0], lim[1])
                             for lim, func in self.functions_dict.iteritems()]) * 1.1
            blank.SetMaximum(max_value)
            min_value = min([func.GetMinimum(lim[0], lim[1])
                             for lim, func in self.functions_dict.iteritems()]) * 0.9
            blank.SetMinimum(min_value)
            ROOT.SetOwnership(blank, False)  # NEED THIS SO IT ACTUALLY GETS DRAWN. SERIOUSLY, WTF?!
            blank.SetLineColor(ROOT.kWhite)

        # now draw the rest of the functions
        args = "" if not args else args
        for func in self.functions_dict.values():
            func.Draw("SAME" + args)
Example 12
def get_freq_label(lo, hi):
    """Return frequency label given a lo and hi
        frequency pair.
    """
    if np.isposinf(hi):
        hi = r'$\infty$'
    return "%s - %s MHz" % (lo, hi)
Example 13
def encode_fill_value(v, dtype):
    # early out
    if v is None:
        return v
    if dtype.kind == 'f':
        if np.isnan(v):
            return 'NaN'
        elif np.isposinf(v):
            return 'Infinity'
        elif np.isneginf(v):
            return '-Infinity'
        else:
            return float(v)
    elif dtype.kind in 'ui':
        return int(v)
    elif dtype.kind == 'b':
        return bool(v)
    elif dtype.kind in 'SV':
        v = base64.standard_b64encode(v)
        if not PY2:  # pragma: py2 no cover
            v = str(v, 'ascii')
        return v
    elif dtype.kind == 'U':
        return v
    elif dtype.kind in 'mM':
        return int(v.view('u8'))
    else:
        return v
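The float branch above matches the usual JSON spelling of the IEEE special values. A self-contained sketch of just that branch (dtype handling omitted, names hypothetical):

import numpy as np

def encode_float(v):
    # NaN and the infinities become strings; ordinary floats stay numeric.
    if np.isnan(v):
        return 'NaN'
    elif np.isposinf(v):
        return 'Infinity'
    elif np.isneginf(v):
        return '-Infinity'
    return float(v)

for v in (np.nan, np.inf, -np.inf, 1.5):
    print(v, '->', encode_float(v))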
Example 14
    def _update_parameters(self):
        """
        Update parameters of the acquisition required to evaluate the function. In particular:
            * Sample representer points repr_points
            * Compute their log values repr_points_log
            * Compute belief locations logP
        """
        self.repr_points, self.repr_points_log = self.sampler.get_samples(self.num_repr_points, self.proposal_function, self.burn_in_steps)

        if np.any(np.isnan(self.repr_points_log)) or np.any(np.isposinf(self.repr_points_log)):
            raise RuntimeError("Sampler generated representer points with invalid log values: {}".format(self.repr_points_log))

        # Removing representer points that have 0 probability of being the minimum (corresponding to log probability being minus infinity)
        idx_to_remove = np.where(np.isneginf(self.repr_points_log))[0]
        if len(idx_to_remove) > 0:
            idx = list(set(range(self.num_repr_points)) - set(idx_to_remove))
            self.repr_points = self.repr_points[idx, :]
            self.repr_points_log = self.repr_points_log[idx]

        # We predict with the noise as we need to make sure that var is indeed positive definite.
        mu, _ = self.model.predict(self.repr_points)
        # we need a vector
        mu = np.ndarray.flatten(mu)
        var = self.model.predict_covariance(self.repr_points)
        
        self.logP, self.dlogPdMu, self.dlogPdSigma, self.dlogPdMudMu = epmgp.joint_min(mu, var, with_derivatives=True)
        # add a second dimension to the array
        self.logP = np.reshape(self.logP, (self.logP.shape[0], 1))
Example 15
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
    """recursively dig until a flat list is found
    if numpy is available convert the flat list to a numpy array
    and send off to transform_array() to handle nan, inf, -inf
    otherwise iterate through items in array converting non-json items

    Args:
        datum (list) : a list of values or lists
        is_numpy: True if numpy is present (see imports)
        use_numpy: toggle numpy as a dependency for testing purposes
    """
    is_numpy = is_numpy and use_numpy
    if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
        return transform_array(np.asarray(datum))
    datum_copy = []
    for item in datum:
        if isinstance(item, (list, tuple)):
            datum_copy.append(traverse_data(item))
        elif isinstance(item, float):
            if np.isnan(item):
                item = 'NaN'
            elif np.isposinf(item):
                item = 'Infinity'
            elif np.isneginf(item):
                item = '-Infinity'
            datum_copy.append(item)
        else:
            datum_copy.append(item)
    return datum_copy
Example 16
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to the allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.select(is_inside_bounds, logp, tf.fill(tf.shape(X), config.dtype(-np.inf)))

    return logp
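The same bounding idea can be sketched with plain NumPy, which is easier to run than the (older) TensorFlow API used above; np.where plays the role of tf.select here, and the bounds are hypothetical:

import numpy as np

X = np.array([-2.0, 0.5, 3.0])
logp = np.array([-1.0, -0.7, -2.0])
bounds = [(0.0, 1.0), (2.5, np.inf)]  # allowed regions (illustrative)

inside = np.zeros_like(X, dtype=bool)
for l, u in bounds:
    if np.isneginf(l) and not np.isposinf(u):
        inside |= X < u
    elif np.isposinf(u) and not np.isneginf(l):
        inside |= X > l
    else:
        inside |= (X > l) & (X < u)

# Outside every allowed region, the log probability becomes -inf.
print(np.where(inside, logp, -np.inf))  # [-inf -0.7 -2. ]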
Example 17
def check_kurt_expect(distfn, arg, m, v, k, msg):
    if np.isfinite(k):
        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
                err_msg=msg + ' - kurtosis')
    elif not np.isposinf(k):
        npt.assert_(np.isnan(k))
Example 18
def merciless_print(i, node, fn):
    """Debugging theano. Prints inputs and outputs at every point.
    In case NaN, Inf or -Inf is detected, fires up the pdb debugger."""
    print ''
    print '-------------------------------------------------------'
    print 'Node %s' % str(i)
    theano.printing.debugprint(node)
    print 'Inputs : %s' % [input for input in fn.inputs]
    print 'Outputs: %s' % [output for output in fn.outputs]
    print 'Node:'
    for output in fn.outputs:
        try:
            if numpy.isnan(output[0]).any():
                print '*** NaN detected ***'
                theano.printing.debugprint(node)
                print 'Inputs : %s' % [input[0] for input in fn.inputs]
                print 'Outputs: %s' % [output[0] for output in fn.outputs]
                pdb.set_trace()
                raise ValueError('Found NaN in computation!')
            if numpy.isposinf(output[0]).any() or numpy.isneginf(output[0]).any():
                print '*** Inf detected ***'
                theano.printing.debugprint(node)
                print 'Inputs : %s' % [input[0] for input in fn.inputs]
                print 'Outputs: %s' % [output[0] for output in fn.outputs]
                pdb.set_trace()
                raise ValueError('Found Inf in computation!')
        except TypeError:
            logging.debug('Couldn\'t check node for NaN/Inf: {0}'.format(node))
Example 19
    def to_standard_form(self,):
        """
        Return an instance of StandardLP by factoring this problem.
        """
        A = self.A.tocsc(copy=True)
        b = self.b.copy()
        c = self.c.copy()
        r = self.r.copy()
        l = self.l.copy()
        u = self.u.copy()
        f = self.f

        # abort if lower bound equals -Infinity
        if np.isneginf(self.l).any():
            raise ValueError('Lower bounds (l) contains -inf.')


        # shift lower bounds to zero (x <- x-l) so that new problem
        #  has the following form
        #
        #     optimize c^Tx + c^Tl
        #
        #     s.t. b-Al <= Ax <= b-Al+r
        #             0 <=  x <= u-l

        # indices where u is not +inf
        ind = np.where(~np.isposinf(u))[0]
        u[ind] -= l[ind]

        b = b - A.dot(l)
        f += np.dot(c,l)

        # Convert equality constraints to a pair of inequalities
        A = vstack([A,A]) # Double A matrix

        b = np.r_[b,b]
        b[:self.m] *= -1
        b[self.m:] += r

        # add upper bounds
        nubs = len(ind)
        Aubs = coo_matrix((np.ones(nubs), (np.arange(nubs), ind)), shape=(nubs, A.shape[1]))
        b = np.r_[b,u[ind]]
        A = vstack([A,Aubs])

        #  Now lp has the following form,
        #
        #  maximize c^Tx + c^Tl
        #
        #  s.t. -Ax <= -b
        #        Ax <=  b+r-l
        #         x <=  u-l
        #         x >=  0

        assert A.shape[0] == b.shape[0]

        lp = StandardLP(A,b,c,f=f)

        return lp
Example 20
def msr2k(rvnames, rvs, trunclb, truncub, G):
    # robustnes
    klb = trunclb[0]; kub=truncub[0];
    # reliability
    corr = np.eye(len(rvnames))
    probdata = ProbData(names=rvnames, rvs=rvs, corr=corr, nataf=False)
    analysisopt = AnalysisOpt(gradflag='DDM', recordu=False, recordx=False,
            flagsens=False, verbose=False)
    # limit state 1
    def gf1(x, param=None):
        m, C, Sre, Na = x
        K = C*(Sre**m)*(G**m)*(np.pi**(m/2.))*Na
        return K-kub
    def dgdq1(x, param=None):
        m, C, Sre, Na = x
        Srem = Sre**m; Gm = G**m; pim2 = np.pi**(m/2.)
        dgdm = C*np.log(Sre)*Srem*Gm*pim2*Na+C*Srem*np.log(G)*Gm*pim2*Na+\
               C*Srem*Gm*np.log(np.pi)*pim2*0.5*Na
        dgdC = Srem*Gm*pim2*Na
        dgdSre = C*m*(Sre**(m-1.))*Gm*pim2*Na
        dgdNa = C*Srem*Gm*pim2
        return [dgdm, dgdC, dgdSre, dgdNa]
    gfunc1 = Gfunc(gf1, dgdq1)
    formBeta1 = CompReliab(probdata, gfunc1, analysisopt)

    # limit state 2
    def gf2(x, param=None):
        m, C, Sre, Na = x
        K = C*(Sre**m)*(G**m)*(np.pi**(m/2))*Na
        return klb-K
    def dgdq2(x, param=None):
        m, C, Sre, Na = x
        Srem = Sre**m; Gm = G**m; pim2 = np.pi**(m/2)
        dgdm = C*np.log(Sre)*Srem*Gm*pim2*Na+C*Srem*np.log(G)*Gm*pim2*Na+\
               C*Srem*Gm*np.log(np.pi)*pim2*0.5*Na
        dgdC = Srem*Gm*pim2*Na
        dgdSre = C*m*(Sre**(m-1.))*Gm*pim2*Na
        dgdNa = C*Srem*Gm*pim2
        return [-dgdm, -dgdC, -dgdSre, -dgdNa]
    gfunc2 = Gfunc(gf2, dgdq2)
    formBeta2 = CompReliab(probdata, gfunc2, analysisopt)

    # system reliability
    try:
        if np.isneginf(klb):
            formresults = formBeta1.form_result()
            pf = formresults.pf1
        elif np.isposinf(kub):
            formresults = formBeta2.form_result()
            pf = formresults.pf1
        else:
            sysBeta = SysReliab([formBeta1, formBeta2], [2])
            sysformres = sysBeta.mvn_msr(sysBeta.syscorr)
            pf = sysformres.pf
        # formresults = formBeta2.form_result()
        # pf = formresults.pf1
    except np.linalg.LinAlgError:
        pf = 0.
    return pf
Example 21
def integrate_fejer2_pinf(f, a, b = None, exponent = None, *args, **kwargs):
    """Fejer2 integration from a to +oo."""
    if isposinf(a):
        return 0,0
    if exponent is None:
        exponent = params.integration_infinite.exponent
    vt = VarTransformReciprocal_PInf(a, U = b, exponent = exponent)
    return _integrate_with_vartransform(f, vt, integrate_fejer2, *args, **kwargs)
Example 22
def build_finalize_min(dshape):
    dtype = numpy_dtype(dshape.measure)
    missing = _dynd_missing_types[dtype]
    if np.issubdtype(dtype, np.floating):
        return lambda x: np.where(np.isposinf(x), missing, x)
    else:
        value = np.iinfo(dtype).max
        return lambda x: np.where(x == value, missing, x)
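In the float branch above, +inf is the identity of a running minimum, so any cell still at +inf never saw a value and is mapped to the "missing" sentinel. A NumPy-only sketch of that replacement, with a hypothetical sentinel:

import numpy as np

x = np.array([1.5, np.inf, 3.0])   # +inf marks cells that never saw a value
missing = np.nan                   # hypothetical missing sentinel for floats

print(np.where(np.isposinf(x), missing, x))   # [1.5 nan 3. ]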
Example 23
    def __call__(self, vals, maxdist=None):
        """
        Evaluate interpolator for values given at the source points.

        You can interpolate multiple datasets of source values (``vals``) at
        once: the ``vals`` array should have the shape (number of source
        points, number of source datasets). If you want to interpolate only one
        set of source values, ``vals`` can have the shape (number of source
        points, 1) or just (number of source points,) - which is a flat/1-D
        array. The output will have the same number of dimensions as ``vals``,
        i.e. it will be a flat 1-D array in case ``vals`` is a 1-D array.

        Parameters
        ----------
        vals : ndarray of float, shape (numsourcepoints, ...)
            Values at the source points which to interpolate

        maxdist : float
            the maximum distance up to which points will be included into the
            interpolation calculation

        Returns
        -------
        output : ndarray of float with shape (numtargetpoints,...)

        """
        self._check_shape(vals)

        weights = 1.0 / self.dists ** self.p

        # if maxdist is given, exclude points beyond it; otherwise all points are used
        if maxdist is not None:
            outside = self.dists > maxdist
            weights[outside] = 0

        # take care of point coincidence
        weights[np.isposinf(weights)] = 1e12

        # shape handling (time, ensemble etc)
        wshape = weights.shape
        weights.shape = wshape + ((vals.ndim - 1) * (1,))

        # expand vals to trg grid
        trgvals = vals[self.ix]

        # nan handling
        if self.remove_missing:
            isnan = np.isnan(trgvals)
            weights = np.broadcast_to(weights, isnan.shape)
            masked_weights = np.ma.array(weights, mask=isnan)

            interpol = (np.nansum(weights * trgvals, axis=1) /
                        np.sum(masked_weights, axis=1))
        else:
            interpol = (np.sum(weights * trgvals, axis=1) /
                        np.sum(weights, axis=1))

        return interpol
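The np.isposinf guard above exists because a target point that coincides with a source point has distance zero, so 1/d**p overflows to +inf; capping it at a large finite number lets that source value dominate without producing NaNs. A minimal sketch of just the weighting step:

import numpy as np

dists = np.array([0.0, 1.0, 2.0])   # first source point coincides with the target
p = 2.0

with np.errstate(divide='ignore'):
    weights = 1.0 / dists ** p       # division by zero -> +inf

weights[np.isposinf(weights)] = 1e12  # cap so the coincident point dominates
print(weights)                        # [1.e+12 1.e+00 2.5e-01]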
Example 24
    def PlanToIK(self, robot, goal_pose, ranker=ik_ranking.JointLimitAvoidance,
                 num_attempts=1, **kw_args):
        from openravepy import (IkFilterOptions,
                                IkParameterization,
                                IkParameterizationType)

        # FIXME: Currently meta-planners duplicate IK ranking in each planning
        # thread. It should be possible to fix this by IK ranking once, then
        # calling PlanToConfiguration in separate threads.

        # Find an unordered list of IK solutions.
        with robot.GetEnv():
            manipulator = robot.GetActiveManipulator()
            ik_param = IkParameterization(
                goal_pose, IkParameterizationType.Transform6D)
            ik_solutions = manipulator.FindIKSolutions(
                ik_param, IkFilterOptions.CheckEnvCollisions,
                ikreturn=False, releasegil=True
            )

        if ik_solutions.shape[0] == 0:
            raise PlanningError('There is no IK solution at the goal pose.')

        # Sort the IK solutions in ascending order by the costs returned by the
        # ranker. Lower cost solutions are better and infinite cost solutions
        # are assumed to be infeasible.
        scores = ranker(robot, ik_solutions)
        ranked_indices = numpy.argsort(scores)
        ranked_indices = ranked_indices[~numpy.isposinf(scores[ranked_indices])]
        ranked_ik_solutions = ik_solutions[ranked_indices, :]

        if ranked_ik_solutions.shape[0] == 0:
            raise PlanningError('All IK solutions have infinite cost.')

        # Sequentially plan to the solutions in descending order of cost.
        planner = self.delegate_planner or robot.planner
        p = openravepy.KinBody.SaveParameters

        with robot.CreateRobotStateSaver(p.ActiveDOF):
            robot.SetActiveDOFs(manipulator.GetArmIndices())

            num_attempts = min(ranked_ik_solutions.shape[0], num_attempts)
            for i, ik_sol in enumerate(ranked_ik_solutions[0:num_attempts, :]):
                try:
                    traj = planner.PlanToConfiguration(robot, ik_sol)
                    logger.info('Planned to IK solution %d of %d.',
                                i + 1, num_attempts)
                    return traj
                except PlanningError as e:
                    logger.warning(
                        'Planning to IK solution %d of %d failed: %s',
                        i + 1, num_attempts, e)

        raise PlanningError(
            'Planning to the top {:d} of {:d} IK solutions failed.'
            .format(num_attempts, ranked_ik_solutions.shape[0]))
Example 25
 def compare_with_tolerance(cls, a, b, max_difference):
     if np.isposinf(a):
         if a == b:
             return 0
         return 1
     elif np.isneginf(a):
         if a == b:
             return 0
         return -1
     elif np.isposinf(b):
         return -1
     elif np.isneginf(b):
         return 1
     if np.abs(a-b) < max_difference:
         return 0
     if a < b:
         return -1
     else:
         return 1
Example 26
 def FloatStr(f):
     if f is numpy.NaN or f is numpy.nan:
         return('NaN') # or 'NA'
     if has_pandas and pandas.isnull(f):
         return('NaN')
     if numpy.isposinf(f):
         return('Inf')
     if numpy.isneginf(f):
         return('-Inf')
     return(repr(f))
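One subtlety in the helper above: `f is numpy.nan` only catches the literal numpy.nan object, whereas a NaN produced by arithmetic is a different float object, which is why a value-based check such as pandas.isnull or numpy.isnan is still needed. A quick self-contained illustration:

import numpy

computed_nan = float('inf') - float('inf')   # NaN produced by arithmetic
print(computed_nan is numpy.nan)             # False: a different object
print(numpy.isnan(computed_nan))             # True: value-based check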
Example 27
    def __call__ (self, *args: [numpy.array], last=None) -> numpy.array:
        result = numpy.divide (*args)

        if numpy.isposinf (result) or numpy.isneginf (result) or \
           numpy.isnan (result):

            result = numpy.array (eval (self.default)
                if type (self.default) is str else self.default)

        return result
Example 28
def interp1exp(x,y,xi):
    '''
    linear interpolation with linear extrapolation
    '''
    val=np.interp(xi,x,y,left=np.inf*-1.0,right=np.inf)
    if np.isneginf(val):
        return np.polyval(np.polyfit(x[:2],y[:2],1),xi)
    elif np.isposinf(val):
        return np.polyval(np.polyfit(x[-2:],y[-2:],1),xi)
    return val
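A minimal check of the extrapolation behaviour (assuming only NumPy): np.interp alone clamps outside the data range, while the helper above refits a line through the two nearest points.

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = 2.0 * x                                            # y = 2x

print(np.interp(5.0, x, y))                            # 6.0 (clamped at the edge)
print(np.polyval(np.polyfit(x[-2:], y[-2:], 1), 5.0))  # ~10.0 (linear extrapolation)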
Example 29
 def FloatStr(f):
     if f is numpy.NaN or f is numpy.nan:
         return 'NaN'  # or 'NA'
     if has_pandas and pandas.isnull(f):
         return 'NaN'
     if numpy.isposinf(f):
         return 'Inf'
     if numpy.isneginf(f):
         return '-Inf'
     return repr(f)
Example 30
  def lift(self, n):
    """Returns (positive rate within n largest) / (overall positive rate) for
       each individual.
    
    @return list of counts, in order of individuals
    """
    self.countKernel.prepared_call(self.countGridDim,
                                   self.outputs,
                                   self.trainSet.size,
                                   len(self.trainSet.positives),
                                   self.popSize,
                                   self.thresholds,
                                   self.counts)
    
    driver.Context.synchronize()

    countsMat = driver.from_device(self.counts,
                                   shape=(self.popSize, self.countBlockDim[0]),
                                   dtype=np.uint32)
    #log.debug("counts %r: %s", countsMat.shape, str(countsMat))
    log.debug("count sum over threads: %s", str(countsMat.sum(axis=1)))
    
    self.countSums = countsMat.sum(axis=1)
    
    self.nlargestPositiveRate = np.float32(self.countSums) / n
    log.debug("positive rate (n largest outputs): %s", str(self.nlargestPositiveRate))
    
    overallPositiveRate = float(len(self.trainSet.positives)) / float(self.trainSet.size)
    log.debug("positive rate (overall): %.04f", overallPositiveRate)
    
    lifts = self.nlargestPositiveRate / overallPositiveRate
    
    sortedLifts = sorted(enumerate(lifts), key=lambda (i, l): l, reverse=True)
    topIndex, topLift = sortedLifts[0]
    
    topOutputs = self.outputsMat[topIndex]
    
    nans = np.sum(np.isnan(topOutputs))
    neginfs = np.sum(np.isneginf(topOutputs))
    posinfs = np.sum(np.isposinf(topOutputs))
    omin = np.nanmin(topOutputs)
    omax = np.nanmax(topOutputs)
    threshold = self.thresholdsMat[topIndex]
    
    """
    log.info("The top ANN's outputs are:")
    log.info(
      "  %.02f%% NaN, %.02f%% -inf, %.02f%% +inf, min %.02e, max %.02e, thresh %.02e",
      100.0 * nans / len(topOutputs),
      100.0 * neginfs / len(topOutputs),
      100.0 * posinfs / len(topOutputs),
      omin, omax, threshold)
    """
    
    return lifts
Example 31
                         unpack=True)
    data[:, :, shot, 7] = datload[0:4, :]

time_ms = datload[0, :]
time_s = time_ms * 1e-6
filtcutoff = 1e4
filtcutoffname = '1e4'
for scope in np.arange(8):
    print('On Scope ', scope)
    for shot in np.arange(25):
        for direction in np.arange(3):
            bdotarray = data[direction+1, :, shot, scope] - \
                np.mean(data[direction+1, 0:meancutoff, shot, scope])
            neginfs = np.isneginf(bdotarray)
            bdotarray[np.where(neginfs)] = -maxrange
            posinfs = np.isposinf(bdotarray)
            bdotarray[np.where(posinfs)] = maxrange
            bdot[direction, :, shot, scope] = bdotarray
            if (direction == 0):
                probe_area = r_probe_area
            if (direction == 1):
                probe_area = tz_probe_area
            if (direction == 2):
                probe_area = tz_probe_area
            barray = sp.cumtrapz(bdotarray / probe_area, time_s) * 1e4  # Gauss
            b[direction, :, shot, scope] = barray
            bfilt1[direction, :, shot,
                   scope] = butter_highpass_filter(barray,
                                                   filtcutoff,
                                                   125e6,
                                                   order=3)
Example 32
def calc_bhhh_hessian_approximation(orig_nest_coefs,
                                    index_coefs,
                                    design,
                                    choice_vec,
                                    rows_to_obs,
                                    rows_to_nests,
                                    ridge=None,
                                    weights=None,
                                    use_jacobian=True,
                                    *args,
                                    **kwargs):
    """
    Parameters
    ----------
    orig_nest_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each nesting coefficient being estimated. If 2D, should
        have 1 column for each set of nesting coefficients being used to
        predict the probabilities of each alternative being chosen. There
        should be one row per nesting coefficient. Elements denote the inverse
        of the scale coefficients for each of the lower level nests.
    index_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each utility coefficient being estimated
        (i.e. num_features). If 2D, should have 1 column for each set of
        coefficients being used to predict the probabilities of choosing each
        alternative. There should be one row per index coefficient.
    design : 2D ndarray.
        There should be one row per observation per available alternative.
        There should be one column per utility coefficient being estimated. All
        elements should be ints, floats, or longs.
    choice_vec : 1D ndarray.
        All elements should be ints, floats, or longs. Each element represents
        whether the individual associated with the given row chose the
        alternative associated with the given row.
    rows_to_obs : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per observation. This matrix maps the rows of the design
        matrix to the unique observations (on the columns).
    rows_to_nests : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per nest. This matrix maps the rows of the design matrix to
        the unique nests (on the columns).
    ridge : int, float, long, or None, optional.
        Determines whether or not ridge regression is performed. If an int,
        float or long is passed, then that scalar determines the ridge penalty
        for the optimization. Default == None. Note that if this parameter is
        passed, the values of the BHHH matrix MAY BE INCORRECT since it is not
        100% clear how penalization affects the information matrix.
    use_jacobian : bool, optional.
        Determines whether or not the jacobian will be used when calculating
        the gradient. When performing model estimation, `use_jacobian` should
        be `True` if the values that are actually being estimated are the
        logit of the nest coefficients. Default `== True`.

    Returns
    -------
    bhhh_matrix : 2D ndarray.
       The negative of the sum of the outer products of the gradient of the
       log-likelihood function for each observation.
    """
    # Calculate the weights for the sample
    if weights is None:
        weights = np.ones(design.shape[0])
    weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)

    # Transform the nest coefficients into their "always positive" versions
    nest_coefs = naturalize_nest_coefs(orig_nest_coefs)

    # Get the vectors and matrices needed to calculate the gradient
    vector_dict = prep_vectors_for_gradient(nest_coefs, index_coefs, design,
                                            choice_vec, rows_to_obs,
                                            rows_to_nests)

    # Calculate the index for each alternative for each person
    sys_utility = design.dot(index_coefs)

    # Calculate w_ij
    long_w = sys_utility / vector_dict["long_nest_params"]
    # Guard against overflow
    inf_index = np.isposinf(long_w)
    long_w[inf_index] = max_comp_value

    ##########
    # Calculate d_log_likelihood_d_nest_params
    ##########
    # Calculate the term that only depends on nest level values
    log_exp_sums = np.log(vector_dict["ind_sums_per_nest"])
    # Guard against overflow
    log_exp_sums[np.isneginf(log_exp_sums)] = -1 * max_comp_value

    # Calculate the first term of the derivative of the log-likelihood
    # with respect to the nest parameters. Note we do not sum this object since
    # we want the values at the 'individual' level, which they already are.
    nest_gradient_term_1 = ((vector_dict["obs_to_chosen_nests"] -
                             vector_dict["nest_choice_probs"]) * log_exp_sums)

    # Calculate the second term of the derivative of the log-likelihood
    # with respect to the nest parameters
    half_deriv = (
        (vector_dict["long_probs"] -
         vector_dict["long_chosen_nest"] * vector_dict["prob_given_nest"]) *
        long_w)[:, None]
    # "Spread out" the second term across the appropriate nests
    spread_half_deriv = rows_to_nests.multiply(half_deriv)
    # Aggregate the spread out half-derivatives to the individual level
    # This object should have shape (num_obs, num_nests)
    nest_gradient_term_2 = rows_to_obs.transpose().dot(spread_half_deriv).A

    # Calculate the third term of the derivative of the log-likelihood
    # with respect to the nest parameters
    nest_gradient_term_3a = (
        choice_vec -
        vector_dict["long_chosen_nest"] * vector_dict["prob_given_nest"])

    nest_gradient_term_3b = ((-1 * nest_gradient_term_3a * long_w) /
                             vector_dict["long_nest_params"])

    # Guard against overflow
    inf_idx = np.isposinf(nest_gradient_term_3b)
    nest_gradient_term_3b[inf_idx] = max_comp_value

    neg_inf_idx = np.isneginf(nest_gradient_term_3b)
    nest_gradient_term_3b[neg_inf_idx] = -1 * max_comp_value

    # Get the nest-wide version of this piece of the gradient
    spread_out_term_3b = rows_to_nests.multiply(nest_gradient_term_3b[:, None])
    nest_gradient_term_3 = rows_to_obs.transpose().dot(spread_out_term_3b).A

    # Combine the terms. Note the "nest_coefs * (1 - nest_coefs)" is due to the
    # fact that we're estimating the logit of the nest coefficients instead of
    # the nest coefficient itself. We therefore need to multiply by
    # d_nest_coef_d_estimated_variable to get the correct gradient.
    # d_nest_coef_d_estimated_variable == nest_coefs * (1 - nest_coefs).
    # As with the various nest_gradient_terms, the nest_gradient should be of
    # shape (num_obs, num_nests)
    if use_jacobian:
        jacobian = (nest_coefs * (1.0 - nest_coefs))[None, :]
    else:
        jacobian = 1

    nest_gradient = (
        (nest_gradient_term_1 + nest_gradient_term_2 + nest_gradient_term_3) *
        jacobian)

    ##########
    # Calculate d_loglikelihood_d_beta
    ##########
    beta_gradient_term_1 = (
        vector_dict["scaled_y"] - vector_dict["p_tilde_given_nest"] +
        vector_dict["p_tilde_given_nest"] * vector_dict["long_nest_params"] -
        vector_dict["long_probs"])[:, None]
    #####
    # Calculate the derivative with respect to beta
    #####
    beta_gradient = rows_to_obs.T.dot(beta_gradient_term_1 * design)

    #####
    # Combine the gradient pieces
    #####
    gradient_matrix = np.concatenate((nest_gradient, beta_gradient), axis=1)

    #####
    # Compute and return the outer product of each row of the gradient
    # with itself. Then sum these individual matrices together. The line below
    # does the same computation just with less memory and time.
    bhhh_matrix =\
        gradient_matrix.T.dot(weights_per_obs[:, None] * gradient_matrix)

    if ridge is not None:
        # The rationale behind adding 2 * ridge is that the information
        # matrix should approximate the hessian and in the hessian we subtract
        # 2 * ridge at the end. We add 2 * ridge here, since we will multiply
        # by negative one afterwards. I don't know if this is the correct way
        # to calculate the Fisher Information Matrix in ridge regression
        # models.
        bhhh_matrix += 2 * ridge

    # Note the "-1" is because we are approximating the Fisher information
    # matrix, which has a negative one in front of it.
    # Note that if we were using the bhhh_matrix to calculate the robust
    # covariance matrix, then we would not multiply by negative one here.
    return -1 * bhhh_matrix
Example 33
def calc_nested_probs(nest_coefs,
                      index_coefs,
                      design,
                      rows_to_obs,
                      rows_to_nests,
                      chosen_row_to_obs=None,
                      return_type="long_probs",
                      *args,
                      **kwargs):
    """
    Parameters
    ----------
    nest_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each nesting coefficient being estimated. If 2D, should
        have 1 column for each set of nesting coefficients being used to
        predict the probabilities of each alternative being chosen. There
        should be one row per nesting coefficient. Elements denote the inverse
        of the scale coefficients for each of the lower level nests.
    index_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each utility coefficient being estimated (i.e.
        num_features). If 2D, should have 1 column for each set of coefficients
        being used to predict the probabilities of each alternative being
        chosen. There should be one row per index coefficient.
    design : 2D ndarray.
        There should be one row per observation per available alternative.
        There should be one column per utility coefficient being estimated. All
        elements should be ints, floats, or longs.
    rows_to_obs : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per observation. This matrix maps the rows of the design
        matrix to the unique observations (on the columns).
    rows_to_nests : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per nest. This matrix maps the rows of the design matrix to
        the unique nests (on the columns).
    chosen_row_to_obs : 2D scipy sparse array, or None, optional.
        There should be one row per observation per available alternative and
        one column per observation. This matrix indicates, for each observation
        (on the columns), which rows of the design matrix were the realized
        outcome. If an array is passed then an array of shape
        (num_observations,) can be returned and each element will be the
        probability of the realized outcome of the given observation.
        Default == None.
    return_type : str, optional.
        Indicates what object(s) are to be returned from the function. Valid
        values are: `['long_probs', 'chosen_probs', 'long_and_chosen_probs',
        'all_prob_dict']`. If `long_probs`, the long format probabilities (a 1D
        numpy array with one element per observation per available alternative)
        will be returned. If `chosen_probs`, a 1D numpy array with one element
        per observation will be returned, where the values are the
        probabilities of the chosen alternative for the given observation. If
        `long_and_chosen_probs`, a tuple of chosen_probs and long_probs will be
        returned. If `all_prob_dict`, a dictionary will be returned. The values
        will all be 1D numpy arrays of probabilities dictated by the value's
        corresponding key. The keys will be `long_probs`, `nest_choice_probs`,
        `prob_given_nest`, and `chosen_probs`. If chosen_row_to_obs is None,
        then `chosen_probs` will be None. If `chosen_row_to_obs` is passed,
        then `chosen_probs` will be a 1D array as described above.
        `nest_choice_probs` is of the same shape as `rows_to_nests` and it
        denotes the probability of each individual choosing each of the
        possible nests. `prob_given_nest` is of the same shape as `long_probs`
        and it denotes the probability of the individual associated with a
        given row choosing the alternative associated with that row, given that
        the individual chooses the nest that contains the given alternative.
        Default == `long_probs`.

    Returns
    -------
    See above for documentation of the `return_type` kwarg.
    """
    # Check for 2D index coefficients or nesting coefficients
    try:
        assert len(index_coefs.shape) <= 2
        assert (len(index_coefs.shape) == 1) or (index_coefs.shape[1] == 1)
        assert len(nest_coefs.shape) <= 2
        assert (len(nest_coefs.shape) == 1) or (nest_coefs.shape[1] == 1)
    except AssertionError:
        msg = "Support for 2D index_coefs or nest_coefs not yet implemented."
        raise NotImplementedError(msg)

    # Check for kwarg validity
    valid_return_types = [
        'long_probs', 'chosen_probs', 'long_and_chosen_probs', 'all_prob_dict'
    ]
    if return_type not in valid_return_types:
        msg = "return_type must be one of the following values: "
        raise ValueError(msg + str(valid_return_types))

    chosen_probs_needed = ['chosen_probs', 'long_and_chosen_probs']
    if chosen_row_to_obs is None and return_type in chosen_probs_needed:
        msg = "chosen_row_to_obs is None AND return_type in {}."
        raise ValueError(
            msg.format(chosen_probs_needed) + "\nThis is invalid.")

    # Calculate the index for each alternative for each individual, V = X*beta
    index_vals = design.dot(index_coefs)

    # Get the long format nest parameters for each row of the design matrix
    long_nest_coefs = rows_to_nests.dot(nest_coefs)

    # Calculate the scaled index values (index / nest_param = V / lambda)
    scaled_index = index_vals / long_nest_coefs

    # Guard against overflow
    pos_inf_idx = np.isposinf(scaled_index)
    neg_inf_idx = np.isneginf(scaled_index)
    scaled_index[pos_inf_idx] = max_comp_value
    scaled_index[neg_inf_idx] = -1 * max_comp_value

    # Calculate the e^(scaled-index) = exp(V / lambda)
    exp_scaled_index = np.exp(scaled_index)

    # Guard against overflow
    inf_idx = np.isposinf(exp_scaled_index)
    exp_scaled_index[inf_idx] = max_comp_value
    # Guard against underflow. Note that I'm not sure this is the best place or
    # best way to perform such guarding. If all of an observation's indices
    # suffer underflow, then we'll have 0 / 0 when calculating the
    # probabilities and I should use L'Hopital's rule to get the correct
    # probability. However, replacing underflowed values here may result in
    # incorrectly assigning probabilities of either zero for all alternatives
    # or 1 / num_alternatives for all alternatives.
    zero_idx = (exp_scaled_index == 0)
    exp_scaled_index[zero_idx] = min_comp_value

    # Calculate the log-sum for each nest, for each observation. Note that the
    # "*" is used to compute the dot product between the mapping matrix which
    # is a scipy.sparse matrix and the second term which is a scipy sparse
    # matrix. Note the dimensions of ind_log_sums_per_nest are (obs, nests).
    # Calculates sum _{j \in C_m} exp(V_{ij} / \lambda_m) for each nest m.
    ind_exp_sums_per_nest = (rows_to_obs.T *
                             rows_to_nests.multiply(exp_scaled_index[:, None]))
    # Ensure that ind_exp_sums_per_nest is an ndarray
    if isinstance(ind_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
        ind_exp_sums_per_nest = np.asarray(ind_exp_sums_per_nest)
    elif issparse(ind_exp_sums_per_nest):
        ind_exp_sums_per_nest = ind_exp_sums_per_nest.toarray()
    # Guard against overflow
    inf_idx = np.isposinf(ind_exp_sums_per_nest)
    ind_exp_sums_per_nest[inf_idx] = max_comp_value

    # Get the long-format representation of ind_log_sums_per_nest. Each row
    # will have two columns, one for each nest. The entries of the matrix will
    # be the log-sum for each nest, for the individual associated with the
    # given row. The "*" is used to perform the dot product since rows_to_obs
    # is a sparse matrix & ind_exp_sums_per_nest is a dense numpy matrix.
    long_exp_sums_per_nest = rows_to_obs.dot(ind_exp_sums_per_nest)
    if isinstance(long_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
        long_exp_sums_per_nest = np.asarray(long_exp_sums_per_nest)

    # Get the relevant log-sum for each row of the long-format data
    # Note the .A converts the numpy matrix into a numpy array
    # This is sum _{j \in C_m} exp(V_{ij} / \lambda_m) for the nest
    # belonging to each row
    long_exp_sums = (rows_to_nests.multiply(long_exp_sums_per_nest).sum(
        axis=1).A).ravel()

    # Get the denominators for each individual
    ind_denom = (np.power(ind_exp_sums_per_nest,
                          nest_coefs[None, :]).sum(axis=1))
    # Guard against overflow and underflow
    inf_idx = np.isposinf(ind_denom)
    ind_denom[inf_idx] = max_comp_value

    zero_idx = (ind_denom == 0)
    ind_denom[zero_idx] = min_comp_value

    # Get the long format denominators.
    long_denom = rows_to_obs.dot(ind_denom)
    # Ensure that long_denom is 1D.
    long_denom = long_denom.ravel()

    # Get the long format numerators
    long_numerators = (exp_scaled_index * np.power(long_exp_sums,
                                                   (long_nest_coefs - 1)))
    # Guard against overflow and underflow
    inf_idx = np.isposinf(long_numerators)
    long_numerators[inf_idx] = max_comp_value

    zero_idx = (long_numerators == 0)
    long_numerators[zero_idx] = min_comp_value
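
    # Putting the pieces together, the long-format probability computed below
    # is the standard nested logit formula:
    #     P(i) = exp(V_i / lambda_m)
    #            * (sum_{j in C_m} exp(V_j / lambda_m)) ** (lambda_m - 1)
    #            / sum_{n} (sum_{j in C_n} exp(V_j / lambda_n)) ** (lambda_n)
    # where C_m is the nest containing alternative i.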

    # Calculate and return the long-format probabilities
    long_probs = (long_numerators / long_denom).ravel()
    # Guard against underflow
    long_probs[np.where(long_probs == 0)] = min_comp_value

    # If desired, isolate the probabilities of the chosen alternatives
    if chosen_row_to_obs is None:
        chosen_probs = None
    else:
        # chosen_probs will be of shape (num_observations,)
        chosen_probs = (chosen_row_to_obs.transpose().dot(long_probs))
        chosen_probs = np.asarray(chosen_probs).ravel()

    # Return the long form and chosen probabilities if desired
    if return_type == 'long_and_chosen_probs':
        return chosen_probs, long_probs
    # If working with predictions, return just the long form probabilities
    elif return_type == 'long_probs':
        return long_probs
    # If estimating the model and storing fitted probabilities or testing the
    # model on data for which we know the chosen alternative, just return the
    # chosen probabilities.
    elif return_type == 'chosen_probs':
        return chosen_probs
    # If we want all the factors of the probability (e.g. as when calculating
    # the gradient)
    elif return_type == 'all_prob_dict':
        # Create the dictionary of the various probabilities to be returned
        prob_dict = {}
        prob_dict["long_probs"] = long_probs
        prob_dict["chosen_probs"] = chosen_probs

        # Calculate the 'prob_given_nest' array
        prob_given_nest = exp_scaled_index / long_exp_sums
        # Guard against underflow
        zero_idx = (prob_given_nest == 0)
        prob_given_nest[zero_idx] = min_comp_value

        # Calculate the 'nest_choice_probs'. Note ind_denom has shape
        # (num_obs,), so it is explicitly broadcast (via ind_denom[:, None])
        # across the nest columns.
        nest_choice_probs = (
            np.power(ind_exp_sums_per_nest, nest_coefs[None, :]) /
            ind_denom[:, None])
        # Guard against underflow
        zero_idx = (nest_choice_probs == 0)
        nest_choice_probs[zero_idx] = min_comp_value
        # Return the dictionary.
        prob_dict["prob_given_nest"] = prob_given_nest
        prob_dict["nest_choice_probs"] = nest_choice_probs
        prob_dict["ind_sums_per_nest"] = ind_exp_sums_per_nest

        return prob_dict
Example no. 34
0
    def check(self, data=None, dtype=None, dtypes=None):
        """Check the special function against the data."""

        if self.knownfailure:
            import pytest
            pytest.xfail(reason=self.knownfailure)

        if data is None:
            data = self.data

        if dtype is None:
            dtype = data.dtype
        else:
            data = data.astype(dtype)

        rtol, atol = self.get_tolerances(dtype)

        # Apply given filter functions
        if self.param_filter:
            param_mask = np.ones((data.shape[0], ), np.bool_)
            for j, filter in zip(self.param_columns, self.param_filter):
                if filter:
                    param_mask &= list(filter(data[:, j]))
            data = data[param_mask]

        # Pick parameters from the correct columns
        params = []
        for idx, j in enumerate(self.param_columns):
            if np.iscomplexobj(j):
                j = int(j.imag)
                params.append(data[:, j].astype(complex))
            elif dtypes and idx < len(dtypes):
                params.append(data[:, j].astype(dtypes[idx]))
            else:
                params.append(data[:, j])

        # Helper for evaluating results
        def eval_func_at_params(func, skip_mask=None):
            if self.vectorized:
                got = func(*params)
            else:
                got = []
                for j in range(len(params[0])):
                    if skip_mask is not None and skip_mask[j]:
                        got.append(np.nan)
                        continue
                    got.append(
                        func(*tuple([params[i][j]
                                     for i in range(len(params))])))
                got = np.asarray(got)
            if not isinstance(got, tuple):
                got = (got, )
            return got

        # Evaluate function to be tested
        got = eval_func_at_params(self.func)

        # Grab the correct results
        if self.result_columns is not None:
            # Correct results passed in with the data
            wanted = tuple([data[:, icol] for icol in self.result_columns])
        else:
            # Function producing correct results passed in
            skip_mask = None
            if self.nan_ok and len(got) == 1:
                # Don't spend time evaluating what doesn't need to be evaluated
                skip_mask = np.isnan(got[0])
            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)

        # Check the validity of each output returned
        assert_(len(got) == len(wanted))

        for output_num, (x, y) in enumerate(zip(got, wanted)):
            if np.issubdtype(x.dtype,
                             np.complexfloating) or self.ignore_inf_sign:
                pinf_x = np.isinf(x)
                pinf_y = np.isinf(y)
                minf_x = np.isinf(x)
                minf_y = np.isinf(y)
            else:
                pinf_x = np.isposinf(x)
                pinf_y = np.isposinf(y)
                minf_x = np.isneginf(x)
                minf_y = np.isneginf(y)
            nan_x = np.isnan(x)
            nan_y = np.isnan(y)

            olderr = np.seterr(all='ignore')
            try:
                abs_y = np.absolute(y)
                abs_y[~np.isfinite(abs_y)] = 0
                diff = np.absolute(x - y)
                diff[~np.isfinite(diff)] = 0

                rdiff = diff / np.absolute(y)
                rdiff[~np.isfinite(rdiff)] = 0
            finally:
                np.seterr(**olderr)
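
            # A point "passes" when it lies within atol + rtol * |wanted| of
            # the expected value and its +inf / -inf / nan pattern matches the
            # expected one.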

            tol_mask = (diff <= atol + rtol * abs_y)
            pinf_mask = (pinf_x == pinf_y)
            minf_mask = (minf_x == minf_y)

            nan_mask = (nan_x == nan_y)

            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)

            point_count = bad_j.size
            if self.nan_ok:
                bad_j &= ~nan_x
                bad_j &= ~nan_y
                point_count -= (nan_x | nan_y).sum()

            if not self.distinguish_nan_and_inf and not self.nan_ok:
                # If nan's are okay we've already covered all these cases
                inf_x = np.isinf(x)
                inf_y = np.isinf(y)
                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
                bad_j &= ~both_nonfinite
                point_count -= both_nonfinite.sum()

            if np.any(bad_j):
                # Some bad results: inform what, where, and how bad
                msg = [""]
                msg.append("Max |adiff|: %g" % diff.max())
                msg.append("Max |rdiff|: %g" % rdiff.max())
                msg.append(
                    "Bad results (%d out of %d) for the following points (in output %d):"
                    % (
                        np.sum(bad_j),
                        point_count,
                        output_num,
                    ))
                for j in np.nonzero(bad_j)[0]:
                    j = int(j)
                    fmt = lambda x: "%30s" % np.array2string(x[j],
                                                             precision=18)
                    a = "  ".join(map(fmt, params))
                    b = "  ".join(map(fmt, got))
                    c = "  ".join(map(fmt, wanted))
                    d = fmt(rdiff)
                    msg.append("%s => %s != %s  (rdiff %s)" % (a, b, c, d))
                assert_(False, "\n".join(msg))
Example no. 35
0
    def step(
        self,
        input_activities=None,
        # input_pool,
        n_inputs=0,
        # n_max_inputs,
        # new_input_indices,
        raw_val=0.,
    ):
        """
        Incrementally build the set of categories.

        Parameters
        ----------
        input_activities: array of floats
            The under-construction array of input activities
            for this time step.
        n_inputs : int
            The number of inputs currently assigned.
        #n_max_inputs : int
        #     The maximum number of inputs possible.
        #new_input_indices: list of tuples of (int, int)
        #    Tuples of (child_index, parent_index). Each time a new child
        #    node is added, it is recorded on this list.
        raw_val: float or string, or convertible to string
            The new piece of data to add to the history of observations.

        Returns
        -------
        input_activities: array of floats
            The full, padded array of input activities, updated.
        n_inputs: int
            The number of input activity elements that are currently
            being used.
        """
        self.timestep += 1

        # Determine whether the observation is string or numerical.
        is_string = True
        try:
            float_val = float(raw_val)
            if np.isnan(float_val):
                val = "NaN"
            elif np.isposinf(float_val):
                val = "positive_infinity"
            elif np.isneginf(float_val):
                val = "negative_infinity"
            else:
                val = float_val
                is_string = False
        except ValueError:
            val = str(raw_val)

        # input_activities is modified by calls to categorize()
        if is_string:
            self.string_cats.add(val)
            self.string_cats.categorize(val, input_activities)
        else:
            self.numeric_cats.add(val)
            self.numeric_cats.categorize(val, input_activities)

        if self.timestep % self.split_frequency == 0:
            # Try to grow new categories.
            # success, n_inputs, new_input_indices = self.numeric_cats.grow(
            #     input_pool, new_input_indices)
            # success, n_inputs, new_input_indices = self.string_cats.grow(
            #     input_pool, new_input_indices)
            n_inputs = self.numeric_cats.grow(n_inputs)
            n_inputs = self.string_cats.grow(n_inputs)

        return input_activities, n_inputs
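
# Aside: a minimal, self-contained sketch of the numeric/string classification
# heuristic used in step() above. The function name is illustrative and is not
# part of the original class.
import numpy as np

def classify_observation(raw_val):
    """Return (value, is_string) using the same NaN/inf handling as step()."""
    try:
        float_val = float(raw_val)
    except ValueError:
        return str(raw_val), True
    if np.isnan(float_val):
        return "NaN", True
    if np.isposinf(float_val):
        return "positive_infinity", True
    if np.isneginf(float_val):
        return "negative_infinity", True
    return float_val, False

print(classify_observation("3.7"))          # (3.7, False)
print(classify_observation(float("inf")))   # ('positive_infinity', True)
print(classify_observation("apple"))        # ('apple', True)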
Example no. 36
0
from typing import List, Any
import numpy as np

AR_c: np.ndarray[Any, np.dtype[np.complex128]]
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
AR_O: np.ndarray[Any, np.dtype[np.object_]]

np.fix(AR_c)  # E: incompatible type
np.fix(AR_m)  # E: incompatible type
np.fix(AR_M)  # E: incompatible type

np.isposinf(AR_c)  # E: incompatible type
np.isposinf(AR_m)  # E: incompatible type
np.isposinf(AR_M)  # E: incompatible type
np.isposinf(AR_O)  # E: incompatible type

np.isneginf(AR_c)  # E: incompatible type
np.isneginf(AR_m)  # E: incompatible type
np.isneginf(AR_M)  # E: incompatible type
np.isneginf(AR_O)  # E: incompatible type
Example no. 37
0
def estimate_deltas(
    G,
    intervened_node: str,
    n_timesteps: int,
    start_year: int,
    start_month: int,
    country: Optional[str] = "South Sudan",
    state: Optional[str] = None,
):
    """ Utility function that estimates Rate of Change (deltas) for the
    intervened node per timestep. This will use the units that the CAG
    was parameterized with. WARNING: The state and country should be same as what was
    passed to G.parameterize() or else you could get mismatched data.

    Deltas are estimated by percent change between each time step. (i.e,
    (current - next)/current). Heuristics are in place to handle NAN and INF
    values. If changed from 0 to 0 (NAN case), then delta = 0. If increasing
    from 0 (+INF case), then delta = positive absolute mean of all finite
    deltas. If decreasing from 0 (-INF case), then delta = negative absolute
    mean of all finite deltas.

    See function get_true_values to see how the data is aggregated to fill in
    values for missing time points which calculating the deltas.

    Args:
        G: A completely parameterized and quantified CAG with indicators,
        estimated transition matrix, and indicator values.

        intervened_node: A string of the full name of the node we are
        intervening on.

        n_timesteps: Number of time steps.

        start_year: The starting year (e.g, 2012).

        start_month: The starting month (1-12).

    Returns:
        1D numpy array of deltas.
    """

    intervener_indicator = list(
        G.nodes(data=True)[intervened_node]["indicators"].keys())[0]

    query_base = " ".join([
        f"select * from indicator",
        f"where `Variable` like '{intervener_indicator}'",
    ])

    query_parts = {"base": query_base}

    if country is not None:
        check_q = query_parts["base"] + f"and `Country` is '{country}'"
        check_r = list(engine.execute(check_q))
        if check_r == []:
            warnings.warn(
                f"Selected Country not found for {intervener_indicator}! Using default settings (South Sudan)"
            )
            query_parts["country"] = f"and `Country` is 'South Sudan'"
        else:
            query_parts["country"] = f"and `Country` is '{country}'"
    if state is not None:
        check_q = query_parts["base"] + f"and `State` is '{state}'"
        check_r = list(engine.execute(check_q))
        if check_r == []:
            warnings.warn(
                f"Selected State not found for {intervener_indicator}! Using default settings (Aggregration over all States)"
            )
            query_parts["state"] = ""
        else:
            query_parts["state"] = f"and `State` is '{state}'"

    unit = list(
        G.nodes(data=True)[intervened_node]["indicators"].values())[0].unit

    int_vals = np.zeros(n_timesteps + 1)
    int_vals[0] = list(
        G.nodes(data=True)[intervened_node]["indicators"].values())[0].mean
    year = start_year
    month = start_month
    for j in range(1, n_timesteps + 1):
        query_parts["year"] = f"and `Year` is '{year}'"
        query_parts["month"] = f"and `Month` is '{month}'"

        query = " ".join(query_parts.values())
        results = list(engine.execute(query))

        if results != []:
            int_vals[j] = np.mean(
                [float(r["Value"]) for r in results if r["Unit"] == unit])

            if month == 12:
                year = year + 1
                month = 1
            else:
                month = month + 1
            continue

        query_parts["month"] = ""
        query = " ".join(query_parts.values())
        results = list(engine.execute(query))

        if results != []:
            int_vals[j] = np.mean(
                [float(r["Value"]) for r in results if r["Unit"] == unit])

            if month == 12:
                year = year + 1
                month = 1
            else:
                month = month + 1
            continue

        query_parts["year"] = ""
        query = " ".join(query_parts.values())
        results = list(engine.execute(query))

        if results != []:
            int_vals[j] = np.mean(
                [float(r["Value"]) for r in results if r["Unit"] == unit])

            if month == 12:
                year = year + 1
                month = 1
            else:
                month = month + 1
            continue

    per_ch = np.roll(int_vals, -1) - int_vals

    per_ch = per_ch / int_vals

    per_mean = np.abs(np.mean(per_ch[np.isfinite(per_ch)]))

    per_ch[np.isnan(per_ch)] = 0
    per_ch[np.isposinf(per_ch)] = per_mean
    per_ch[np.isneginf(per_ch)] = -per_mean

    return np.delete(per_ch, -1)
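
# Aside: a small, self-contained illustration (with made-up numbers) of the
# NAN / +INF / -INF heuristics described in the docstring above.
import numpy as np

int_vals = np.array([4.0, 2.0, 0.0, 0.0, 2.0])  # hypothetical indicator values

with np.errstate(divide="ignore", invalid="ignore"):
    per_ch = (np.roll(int_vals, -1) - int_vals) / int_vals  # percent change per step

per_mean = np.abs(np.mean(per_ch[np.isfinite(per_ch)]))  # abs mean of finite deltas

per_ch[np.isnan(per_ch)] = 0             # 0 -> 0: no change
per_ch[np.isposinf(per_ch)] = per_mean   # increasing from 0
per_ch[np.isneginf(per_ch)] = -per_mean  # decreasing from 0

print(np.delete(per_ch, -1))  # -> [-0.5, -1.0, 0.0, 0.16666667] (approximately)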
Example no. 38
0
def convert_to_large_num(vec):
    vec[np.isposinf(vec)] = np.finfo(np.float64).max
    vec[np.isneginf(vec)] = np.finfo(np.float64).min
    return vec
Example no. 39
0
functions = [
    lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),
    lambda x: x.sum(axis=0),
    lambda x: x.max(axis=0),
    lambda x: x.sum(axis=(1, 2)),
    lambda x: x.astype(np.complex128),
    lambda x: x.map_blocks(lambda x: x * 2),
    lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=True),
    lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=False),
    lambda x: x.round(1),
    lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),
    lambda x: abs(x),
    lambda x: x > 0.5,
    lambda x: x.rechunk((4, 4, 4)),
    lambda x: x.rechunk((2, 2, 1)),
    lambda x: np.isneginf(x),
    lambda x: np.isposinf(x),
]


@pytest.mark.parametrize("func", functions)
def test_basic(func):
    x = da.random.random((2, 3, 4), chunks=(1, 2, 2))
    x[x < 0.8] = 0

    y = x.map_blocks(sparse.COO.from_numpy)

    xx = func(x)
    yy = func(y)

    assert_eq(xx, yy)
Example no. 40
0
def upgrade():
    bind = op.get_bind()

    sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(
        bind, checkfirst=True)

    # MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
    # ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
    # does not support such a SQL statement. So first add a column with schema-level
    # default value setting, then remove it by `batch_op.alter_column()`.
    with op.batch_alter_table("trial_intermediate_values") as batch_op:
        batch_op.add_column(
            sa.Column(
                "intermediate_value_type",
                sa.Enum("FINITE",
                        "INF_POS",
                        "INF_NEG",
                        "NAN",
                        name="trialintermediatevaluetype"),
                nullable=False,
                server_default="FINITE",
            ), )
    with op.batch_alter_table("trial_intermediate_values") as batch_op:
        batch_op.alter_column(
            "intermediate_value_type",
            existing_type=sa.Enum("FINITE",
                                  "INF_POS",
                                  "INF_NEG",
                                  "NAN",
                                  name="trialintermediatevaluetype"),
            existing_nullable=False,
            server_default=None,
        )

    session = orm.Session(bind=bind)
    try:
        records = session.query(IntermediateValueModel).all()
        mapping = []
        for r in records:
            value: float
            if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
                    r.intermediate_value):
                value = float("inf")
            elif np.isclose(r.intermediate_value,
                            RDB_MIN_FLOAT) or np.isneginf(
                                r.intermediate_value):
                value = float("-inf")
            elif np.isnan(r.intermediate_value):
                value = float("nan")
            else:
                value = r.intermediate_value
            (
                stored_value,
                float_type,
            ) = IntermediateValueModel.intermediate_value_to_stored_repr(value)
            mapping.append({
                "trial_intermediate_value_id": r.trial_intermediate_value_id,
                "intermediate_value_type": float_type,
                "intermediate_value": stored_value,
            })
        session.bulk_update_mappings(IntermediateValueModel, mapping)
        session.commit()
    except SQLAlchemyError as e:
        session.rollback()
        raise e
    finally:
        session.close()
Example no. 41
0
def bvnu( dh, dk, r ):
    '''
    A function for computing bivariate normal probabilities.
    It calculates the probability that x > dh and y > dk. 
    parameters:  
      dh 1st lower integration limit
      dk 2nd lower integration limit
      r   correlation coefficient
    Example: p = bvnu( -3, -1, .35 )
    Note: to compute the probability that x < dh and y < dk, 
    use bvnu( -dh, -dk, r ). 
    '''
    if np.isposinf(dh) or np.isposinf(dk):
        return 0
    if np.isneginf(dh):
        if np.isneginf(dk):
            return 1
        else:
            return phid(-dk)
    elif np.isneginf(dk):
        return phid(-dh)
    elif r==0:
        return phid(-dh) * phid(-dk)
    else:
        tp = 2*np.pi;
        h = dh;
        k = dk;
        hk = h*k;
        bvn = 0; 
        if abs(r) < 0.3:
            # Gauss Legendre points and weights, n =  6    
            w = np.array([0.1713244923791705, 0.3607615730481384, 0.4679139345726904]);
            x = np.array([0.9324695142031522, 0.6612093864662647, 0.2386191860831970]);
        elif abs(r) < 0.75:
            # Gauss Legendre points and weights, n = 12
            w = np.array([.04717533638651177, 0.1069393259953183, 0.1600783285433464, 
                            0.2031674267230659, 0.2334925365383547, 0.2491470458134029]);
            x = np.array([0.9815606342467191, 0.9041172563704750, 0.7699026741943050, 
                            0.5873179542866171, 0.3678314989981802, 0.1252334085114692]);
        else:
            # Gauss Legendre points and weights, n = 20
            w = np.array([.01761400713915212, .04060142980038694, .06267204833410906, 
                            .08327674157670475, 0.1019301198172404, 0.1181945319615184,
                            0.1316886384491766, 0.1420961093183821, 0.1491729864726037,
                            0.1527533871307259]);
            x = np.array([0.9931285991850949, 0.9639719272779138, 0.9122344282513259,
                        0.8391169718222188, 0.7463319064601508, 0.6360536807265150,
                        0.5108670019508271, 0.3737060887154196, 0.2277858511416451,
                        0.07652652113349733]);
        w = np.concatenate([w, w])
        x = np.concatenate([1-x, 1+x])
        if abs(r) < 0.925:
            hs = (h*h + k*k)/2.0
            asr = np.arcsin(r)/2.0
            sn = np.sin(asr*x); # vector
            bvn = np.inner(np.exp((sn*hk-hs)/(1-np.square(sn))), w);
            bvn = bvn*asr/tp + phid(-h)*phid(-k);
        else: # extra complexity to handle highly correlated case
            if r < 0:
                k = -k; hk = -hk
            if abs(r) < 1:
                ass = 1-r**2; # 'as' is a reserved word in Python, hence renamed to 'ass'
                a = np.sqrt(ass);
                bs = (h-k)**2;
                asr = -( bs/ass + hk )/2;
                c = (4-hk)/8 ;
                d = (12-hk)/80;
                if asr > -100:
                    bvn = a*np.exp(asr)*(1-c*(bs-ass)*(1-d*bs)/3+c*d*ass**2);
                if hk  > -100:
                    b = np.sqrt(bs);
                    sp = np.sqrt(tp)*phid(-b/a);
                    bvn = bvn - np.exp(-hk/2)*sp*b*( 1 - c*bs*(1-d*bs)/3 );
                a = a/2;
                xs = np.square(a*x);
                asr = -( bs/xs + hk )/2;
                ix = ( asr > -100 );
                xs = xs[ix];
                sp = ( 1 + c*xs * (1+5*d*xs) );
                rs = np.sqrt(1-xs);
                ep = np.exp( -(hk/2)*xs / np.square(1+rs)) / rs;
                ip = np.inner(np.exp(asr[ix]) * (sp-ep), w[ix])
                bvn = (a*ip - bvn)/tp;
            if r > 0:
                bvn =  bvn + phid( -max( h, k ) ); 
            elif h >= k:
                bvn = -bvn;
            else:
                if h < 0:
                    L = phid(k)-phid(h);
                else:
                    L = phid(-h)-phid(-k);
                bvn =  L - bvn;
        p = max([0, min([1, bvn])]);
        return p
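
# Aside: a brief sanity check of bvnu, assuming that the helper phid used above
# is the standard normal CDF (phid is not defined in this snippet).
from scipy.stats import norm

def phid(z):
    return norm.cdf(z)

print(bvnu(0.0, 0.0, 0.0))  # 0.25: with r = 0 the joint survival factorises
print(bvnu(-3, -1, 0.35))   # close to 0.84, since P(x > -3) is nearly 1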
Example no. 42
0
    def test_degenerate_cases(self):
        """Test that we return the correct values when our distribution doesn't vary"""
        predictions = np.array([[1, 0], [1, 1]])  # first component is always 1
        for inf in [
                EmpiricalInferenceResults(d_t=1,
                                          d_y=2,
                                          pred=np.mean(predictions, axis=0),
                                          pred_dist=predictions,
                                          inf_type='coefficient'),
                NormalInferenceResults(d_t=1,
                                       d_y=2,
                                       pred=np.mean(predictions, axis=0),
                                       pred_stderr=np.std(predictions, axis=0),
                                       inf_type='coefficient')
        ]:
            zs = inf.zstat()
            pv = inf.pvalue()
            # test value 0 is less than estimate of 1 and variance is 0, so z score should be inf
            assert np.isposinf(zs[0])
            # predictions in column 1 have nonzero variance, so the zstat should always be some finite value
            assert np.isfinite(zs[1])
            assert pv[
                0] == 0  # pvalue should be zero when test value is greater or less than all samples

            test_point = np.array([1, 0.5])
            zs = inf.zstat(test_point)
            pv = inf.pvalue(test_point)
            # test value 1 is equal to the estimate of 1 and variance is 0, so z score should be nan
            assert np.isnan(zs[0])
            # predictions in column 1 have nonzero variance, so the zstat should always be some finite value
            assert np.isfinite(zs[1])
            # pvalue is also nan when variance is 0 and the point tested is equal to the estimate
            assert np.isnan(pv[0])
            # pvalue for second column should be greater than zero since some points are on either side
            # of the tested value
            assert 0 < pv[1] <= 1

            test_point = np.array([2, 1])
            zs = inf.zstat(test_point)
            pv = inf.pvalue(test_point)
            # test value 2 is greater than estimate of 1 and variance is 0, so z score should be -inf
            assert np.isneginf(zs[0])
            # predictions in column 1 have nonzero variance, so the zstat should always be some finite value
            assert np.isfinite(zs[1])
            assert pv[
                0] == 0  # pvalue should be zero when test value is greater or less than all samples

            pop = PopulationSummaryResults(np.mean(predictions,
                                                   axis=0).reshape(1, 2),
                                           np.std(predictions,
                                                  axis=0).reshape(1, 2),
                                           d_t=1,
                                           d_y=2,
                                           alpha=0.05,
                                           value=0,
                                           decimals=3,
                                           tol=0.001)
            pop._print(
            )  # verify that we can access all attributes even in degenerate case
            pop.summary()
Example no. 43
0
# True

print(math.isinf(1e100))
# False

print(math.isinf(-1e1000))
# True

a = np.array([1, np.inf, -np.inf])
print(a)
# [  1.  inf -inf]

print(np.isinf(a))
# [False  True  True]

print(np.isposinf(a))
# [False  True False]

print(np.isneginf(a))
# [False False  True]

print(np.isfinite(a))
# [ True False False]

print(np.isinf(1e1000))
# True

print(np.nan_to_num(a))
# [ 1.00000000e+000  1.79769313e+308 -1.79769313e+308]

print(np.nan_to_num(a, posinf=1e100, neginf=-1e100))
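# approximately: [ 1.e+000  1.e+100 -1.e+100]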
Example no. 44
0
def histplot_withsub_raw(datas, bins, weights=None, usererror=None, labels=None, scale=1., removenorm=None, **kwargs):
    bins = np.array(copy.deepcopy(bins))
    bins = bins[bins > 160]
    # bins = bins[bins < 2100]
    settings = {
        "xlabel" : r"$m_{T, Vh} [GeV]$",
        "ylabel": 'Arbitrary unit',
        "title1": r"$\mathbf{ATLAS}$",# \newline Ptl next-leading, full cuts, 2 b-tags $",
        "title1_1": r"$\mathit{Internal}$",
        "title2": r"$\mathit{\sqrt{s}=13\:TeV,36.1\:fb^{-1}}$",# Ptl next-leading, full cuts, 2 b-tags $",
        #"title3": r"$\mathbf{2\;lep.,2\;b-tag}$",
        "title3": "0 lep., 2 b-tag",
        "title4": "0 lep., 2 b-tag",
        "filename": "deltatest2",
        "log_y":False,
        "norm":False,
        "central":"none",
        "upper_y": 1.6, 
        "do_errorbar": False
        }
    for each_key, each_value in kwargs.items():
        settings[each_key] = each_value
    fig, (ax1, ax2) = plt.subplots(2, 1, gridspec_kw={'height_ratios':[3, 1]}, figsize=(10, 10))
    fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.06)

    if weights is None:
        weights = []
        for each in datas:
            weights.append(np.ones(len(each)))

    if removenorm:
        for i in range(len(weights)):
            totalweightbefore = np.sum(weights[i])
            weights[i] = np.array(weights[i]) / np.sum(weights[i])
            if usererror is not None:
                if usererror[i] is not None:
                    usererror[i] = usererror[i]/totalweightbefore

    sigmas = []
    weight_in_binses = []
    for i in range(len(datas)):
        datas[i] = np.array(datas[i])
        event_location = np.digitize(datas[i]/scale, bins)
        sigma2 = []
        weight_in_bins = []
        for j in range(np.size(bins) - 1):
            bin_weight = weights[i][np.where(event_location == j+1)[0]]
            if not (usererror is not None and usererror[i] is not None):
                sigma2.append(np.sum(bin_weight**2.))
            weight_in_bins.append(np.sum(bin_weight))
            if usererror is not None:
                if usererror[i] is not None:
                    binederror = usererror[i][np.where(event_location == j+1)[0]]
                    sigma2.append(np.sum(binederror**2))

        sigmas.append(np.array(sigma2)**0.5)
        weight_in_binses.append(np.array(weight_in_bins))

    colors = ['b', 'g', 'r', 'c', 'm', 'y']
    # print(np.array(datas).shape, np.array(weights).shape)
    # print(np.array(datas[1]).shape, np.array(weights[1]).shape)
    # print(np.array(datas[0]).shape, np.array(weights[0]).shape)
    for i in range(len(datas)):
        datas[i] = np.array(datas[i])/scale
    ax1.hist(datas, bins, histtype='step', fill=False, color=colors[0:len(datas)], weights=weights)
    bins = np.array(bins)
    bin_centre = []
    for i in range(len(datas)):
        bin_centre = (bins[0:-1] + bins[1:])/2
        ax1.errorbar(bin_centre, weight_in_binses[i], xerr=0.0001, yerr=sigmas[i], fmt='.', color=colors[i], label=str(labels[i]))

    handles, lelabels = ax1.get_legend_handles_labels()
    sys_patch = mpatches.Patch(color='black', hatch='/////', fill=False, linewidth=0, label='MC uncertainty')
    handles.append(sys_patch)
    ax1.legend(handles=handles, loc='upper right',prop={'size': 20}, frameon=False)
    
    ymin, ymax = ax1.get_ylim()
    ax1.set_ylim([0,ymax* settings["upper_y"]])
    ax1.text(0.05, 1.55 / 1.7, settings['title1'], fontsize=25, transform=ax1.transAxes, style='italic', fontweight='bold')
    ax1.text(0.227, 1.55/ 1.7, settings['title1_1'], fontsize=25, transform=ax1.transAxes)
    ax1.text(0.05, 1.40 / 1.7, settings['title2'], fontsize=20, transform=ax1.transAxes, style='italic', fontweight='bold')
    ax1.text(0.05, 1.23 / 1.7, settings['title3'], fontsize=18, weight='bold', style='italic', transform=ax1.transAxes)
    ax1.text(0.05, 1.12 / 1.7, settings['title4'], fontsize=18, weight='bold', style='italic', transform=ax1.transAxes)
    ax1.set_ylabel(settings['ylabel'], fontsize=20)
    if settings['log_y']:
        ax1.set_yscale('log')
        ax1.set_ylim([0.1, 10**(math.log10(ymax) * settings["upper_y"])])
        ax1.yaxis.set_major_locator(matplotlib.ticker.LogLocator(base=10,numticks=100))
        ax1.minorticks_on()
    ax1.get_xaxis().set_ticks([])


    i = -1
    for each_x, each_error, each_y_mc in zip(bin_centre, sigmas[0], weight_in_binses[0]):
        i += 1
        if each_y_mc <= 0:
            continue
        ax2.add_patch(
            matplotlib.patches.Rectangle(
                (each_x - (bins[i+1]-bins[i]) / 2., - each_error/each_y_mc), # x, y
                bins[i+1]-bins[i],        # width
                each_error/each_y_mc*2,        # height
                color='black', alpha=0.5,
                hatch='/////', fill=False, linewidth=0,
                ))


    i = 0
    centerheight = []
    for each_height, each_label in zip(weight_in_binses, labels):
        if i == 0:
            centerheight = np.array(each_height)
            i += 1
            continue
        # for j in range(len(centerheight)):
        #     if centerheight[j] <= 0:
        #         centerheight[j] = 0
        # new_each_height = np.array(each_height)/centerheight
        new_each_height = []
        for eachn, eachd in zip(each_height, centerheight):
            if eachn == 0:
                new_each_height.append(0)
            elif eachd <= 0:
                new_each_height.append(10)
            else:
                new_each_height.append(eachn/eachd)
        new_each_height  = np.array(new_each_height)
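        # Replace any remaining NaN or +/-inf ratios with finite sentinel
        # values so the histogram call below never receives non-finite weights.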
        if i != 0:
            new_each_height[np.isnan(new_each_height)] = -10
            new_each_height[np.isposinf(new_each_height)] = 2
            new_each_height[np.isneginf(new_each_height)] = -2
        else:
            new_each_height[np.isnan(new_each_height)] = 1
            new_each_height[np.isinf(new_each_height)] = 1
        ax2.hist(bin_centre, bins, weights=new_each_height-1, label=each_label, histtype=u'step', color=colors[i])
        i += 1
    ax2.set_ylim([-1, 1])
    ax2.plot([bins[0], bins[np.size(bins)-1]], [0, 0], linestyle='--', color='k')
    ax2.set_ylabel(settings["central"] + "/MC" + "-1", fontsize=20)
    ax2.set_xlabel(settings['xlabel'], fontsize=20)

    fig.savefig(settings['filename'] + '.pdf', bbox_inches='tight', pad_inches = 0.25)
    plt.close(fig)
Example no. 45
0
def _diagnose(self):
    # Update log.
    self.logger.debug("diagnose: data: shape: " + str(self.data.shape))
    self.logger.debug("diagnose: data: dtype: " + str(self.data.dtype))
    self.logger.debug("diagnose: data: size: %.2fMB",
                      self.data.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data: nans: " +
                      str(np.sum(np.isnan(self.data))))
    self.logger.debug("diagnose: data: -inf: " +
                      str(np.sum(np.isneginf(self.data))))
    self.logger.debug("diagnose: data: +inf: " +
                      str(np.sum(np.isposinf(self.data))))
    self.logger.debug("diagnose: data: positives: " +
                      str(np.sum(self.data > 0)))
    self.logger.debug("diagnose: data: negatives: " +
                      str(np.sum(self.data < 0)))
    self.logger.debug("diagnose: data: mean: " + str(np.mean(self.data)))
    self.logger.debug("diagnose: data: min: " + str(np.min(self.data)))
    self.logger.debug("diagnose: data: max: " + str(np.max(self.data)))

    self.logger.debug("diagnose: data_white: shape: " +
                      str(self.data_white.shape))
    self.logger.debug("diagnose: data_white: dtype: " +
                      str(self.data_white.dtype))
    self.logger.debug("diagnose: data_white: size: %.2fMB",
                      self.data_white.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data_white: nans: " +
                      str(np.sum(np.isnan(self.data_white))))
    self.logger.debug("diagnose: data_white: -inf: " +
                      str(np.sum(np.isneginf(self.data_white))))
    self.logger.debug("diagnose: data_white: +inf: " +
                      str(np.sum(np.isposinf(self.data_white))))
    self.logger.debug("diagnose: data_white: positives: " +
                      str(np.sum(self.data_white > 0)))
    self.logger.debug("diagnose: data_white: negatives: " +
                      str(np.sum(self.data_white < 0)))
    self.logger.debug("diagnose: data_white: mean: " +
                      str(np.mean(self.data_white)))
    self.logger.debug("diagnose: data_white: min: " +
                      str(np.min(self.data_white)))
    self.logger.debug("diagnose: data_white: max: " +
                      str(np.max(self.data_white)))

    self.logger.debug("diagnose: data_dark: shape: " +
                      str(self.data_dark.shape))
    self.logger.debug("diagnose: data_dark: dtype: " +
                      str(self.data_dark.dtype))
    self.logger.debug("diagnose: data_dark: size: %.2fMB",
                      self.data_dark.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: data_dark: nans: " +
                      str(np.sum(np.isnan(self.data_dark))))
    self.logger.debug("diagnose: data_dark: -inf: " +
                      str(np.sum(np.isneginf(self.data_dark))))
    self.logger.debug("diagnose: data_dark: +inf: " +
                      str(np.sum(np.isposinf(self.data_dark))))
    self.logger.debug("diagnose: data_dark: positives: " +
                      str(np.sum(self.data_dark > 0)))
    self.logger.debug("diagnose: data_dark: negatives: " +
                      str(np.sum(self.data_dark < 0)))
    self.logger.debug("diagnose: data_dark: mean: " +
                      str(np.mean(self.data_dark)))
    self.logger.debug("diagnose: data_dark: min: " +
                      str(np.min(self.data_dark)))
    self.logger.debug("diagnose: data_dark: max: " +
                      str(np.max(self.data_dark)))

    self.logger.debug("diagnose: theta: shape: " + str(self.theta.shape))
    self.logger.debug("diagnose: theta: dtype: " + str(self.theta.dtype))
    self.logger.debug("diagnose: theta: size: %.2fMB",
                      self.theta.nbytes * 9.53674e-7)
    self.logger.debug("diagnose: theta: nans: " +
                      str(np.sum(np.isnan(self.theta))))
    self.logger.debug("diagnose: theta: -inf: " +
                      str(np.sum(np.isneginf(self.theta))))
    self.logger.debug("diagnose: theta: +inf: " +
                      str(np.sum(np.isposinf(self.theta))))
    self.logger.debug("diagnose: theta: positives: " +
                      str(np.sum(self.theta > 0)))
    self.logger.debug("diagnose: theta: negatives: " +
                      str(np.sum(self.theta < 0)))
    self.logger.debug("diagnose: theta: mean: " + str(np.mean(self.theta)))
    self.logger.debug("diagnose: theta: min: " + str(np.min(self.theta)))
    self.logger.debug("diagnose: theta: max: " + str(np.max(self.theta)))

    self.logger.info("diagnose [ok]")
Example no. 46
0
    def build(self,
              results_file,
              model_file,
              show_fits=False,
              file_id=None,
              pool=None):
        print('-' * 75)
        print('Building transmission loss priors from file:', results_file)

        az_dirs = ['S', 'SW', 'W', 'NW', 'N', 'NE', 'E', 'SE']

        # read in data, convert tloss to dB relative to 1 km, and wrap azimuths to [-180.0:180.0]
        print('\t' + "Reading in data...")
        rngs, az, tloss = np.loadtxt(results_file, unpack=True)

        output_rngs = np.sort(np.unique(rngs)[::5])

        az[az > 180.0] -= 360.0
        az[az < -180.0] += 360.0

        tloss = 10.0 * np.log10(tloss)
        tloss[np.isneginf(tloss)] = min(tloss[np.isfinite(tloss)])
        tloss[np.isposinf(tloss)] = max(tloss[np.isfinite(tloss)])
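        # (10 * log10 of a zero transmission loss value is -inf; clamping the
        # non-finite dB values to the finite extremes keeps the KDE fits below
        # well defined.)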

        tloss_vals = np.linspace(
            min(tloss) - 2.5,
            max(tloss) + 2.5,
            len(output_rngs) * 2)
        pdf_vals = np.empty(
            (self.az_bin_cnt, len(output_rngs), len(tloss_vals)))

        for az_index in range(self.az_bin_cnt):
            center = -180 + 360.0 / self.az_bin_cnt * az_index
            if az_index == 0:
                az_mask = np.logical_or(az >= 180.0 - self.az_bin_wdth / 2.0,
                                        az <= -180.0 + self.az_bin_wdth / 2.0)
            else:
                az_mask = np.logical_and(center - self.az_bin_wdth / 2.0 <= az,
                                         az <= center + self.az_bin_wdth / 2.0)

            if show_fits:
                f, ((ax1, ax2)) = plt.subplots(2, 1, figsize=(7.5, 10))

                ax1.set_xlabel('Range [km]')
                ax1.set_ylabel('Transmission Loss [dB]')
                ax1.set_xlim([0.0, 1000.0])
                ax1.set_ylim([min(tloss) - 5.0, max(tloss) + 5.0])

                ax2.set_xlabel('Range [km]')
                ax2.set_ylabel('Transmission Loss [dB]')
                ax2.set_xlim([0.0, 1000.0])
                ax2.set_ylim([min(tloss) - 5.0, max(tloss) + 5.0])

                plt.suptitle(
                    "Stochastic Transmission Loss Model \n Azimuth: " +
                    az_dirs[az_index],
                    fontsize=18)
                plt.show(block=False)

                ax1.plot(rngs[az_mask][::11],
                         tloss[az_mask][::11],
                         'ko',
                         markersize=1)
                plt.pause(0.001)

            print('\t' + "Propagation direction (" + az_dirs[az_index] +
                  ")..." + '\t',
                  end=' ')
            prog_bar.prep(50)

            # Define tloss pdf at each range point from KDE
            for nr, rng_val in enumerate(output_rngs):
                masked_tloss = tloss[np.logical_and(az_mask, rngs == rng_val)]

                if np.std(masked_tloss) < 0.01:
                    pdf_vals[az_index][nr] = norm.pdf(
                        tloss_vals, loc=np.mean(masked_tloss), scale=0.01)
                else:
                    kernel = gaussian_kde(masked_tloss)
                    pdf_vals[az_index][nr] = kernel.evaluate(tloss_vals)

                prog_bar.increment(
                    int(
                        np.floor((50.0 * (nr + 1)) / len(output_rngs)) -
                        np.floor((50.0 * nr) / len(output_rngs))))

                if show_fits:
                    ax2.scatter([rng_val] * len(tloss_vals),
                                tloss_vals,
                                c=pdf_vals[az_index][nr],
                                cmap=cm.nipy_spectral_r,
                                marker='o',
                                s=[12.5] * len(tloss_vals),
                                alpha=0.5,
                                edgecolor='none')
                    plt.pause(0.001)

            prog_bar.close()
            if show_fits:
                plt.close()

        priors = [0] * 3
        priors[0] = output_rngs
        priors[1] = tloss_vals
        priors[2] = pdf_vals

        pickle.dump(priors, open(model_file, "wb"))
        print(' ')
Example no. 47
0
def onp_isposinf(x):
    return onp.isposinf(x)
Example no. 48
0
def load_picoscope(shot_number,
                   maxrange=1,
                   scopenum=4,
                   time_range=[-2.0, 198.0],
                   location='',
                   plot=False):
    def butter_highpass(cutoff, fs, order=5):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = signal.butter(order,
                             normal_cutoff,
                             btype='highpass',
                             analog=False)
        return b, a

    def butter_highpass_filter(data, cutoff, fs, order=5):
        b, a = butter_highpass(cutoff, fs, order=order)
        y = signal.filtfilt(b, a, data)
        return y

    if (type(scopenum) == int):
        if scopenum == 1:
            scopename = '03102020pico1/'
        elif scopenum == 2:
            scopename = '03102020pico2/'
        elif scopenum == 3:
            scopename = '03102020pico3/'
        elif scopenum == 4:
            scopename = '03102020pico4/'
        elif scopenum == 5:
            scopename = '03102020pico5/'
        elif scopenum == 6:
            scopename = '03102020pico6/'
        elif scopenum == 7:
            scopename = '03102020pico7/'
        else:
            scopename = '03102020pico8/'
    else:
        print(f'scopenum is not an int, {scopenum}')
        sys.exit()

    probe_dia = 0.003175  #m (1/8'' probe)
    probe_dia = 0.00158755  #m (1/16'' probe)
    ##hole_sep = 0.001016     #m (1/16''probe)  ## Apparently unused variable
    r_probe_area = np.pi * (probe_dia / 2)**2
    #tz_probe_area = probe_dia*hole_sep  ## Apparently unused variable
    startintg_index = 0  #3000
    meancutoff = 1000
    ##### load file
    # The location and filename lines must be updated to your system.
    location = '/Volumes/CarFlor/Research/Data/2020/03102020/'
    filename = '20200310-0001 ('

    print(location + scopename + filename + str(shot_number) + ').txt')
    try:
        data = np.loadtxt(location + scopename + filename + str(shot_number) +
                          ').txt',
                          skiprows=2,
                          unpack=True)
    except OSError as err:
        print(
            "Double check you have updated the location variable to your OS system; mac, pc: ",
            err)
    ##### return data
    dataraw = data

    print(dataraw.shape)
    Bdotraw1 = dataraw[1, :]
    Bdotraw2 = dataraw[2, :]
    Bdotraw3 = dataraw[3, :]
    #Bdotraw4 = dataraw[4, :]
    data = data[:, startintg_index:]

    time_ms = data[0, :]
    time_s = time_ms * 1e-6
    timeB_s = time_s[1:]
    timeB_ms = time_ms[1:]
    timeraw = dataraw[0, :]

    Bdot1 = data[1, :] - np.mean(data[1, 0:meancutoff])
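    # Replace any +/-inf samples (e.g. over-range readings exported by the
    # scope) with the full-scale value +/-maxrange; the same treatment is
    # applied to each channel below.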
    neginfs = np.isneginf(Bdot1)
    Bdot1[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot1)
    Bdot1[np.where(posinfs)] = maxrange

    Bdot2 = data[2, :] - np.mean(data[2, 0:meancutoff])
    neginfs = np.isneginf(Bdot2)
    Bdot2[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot2)
    Bdot2[np.where(posinfs)] = maxrange

    Bdot3 = data[3, :] - np.mean(data[3, 0:meancutoff])
    neginfs = np.isneginf(Bdot3)
    Bdot3[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot3)
    Bdot3[np.where(posinfs)] = maxrange

    #### 03102020 does not use the fourth pico port for magnetic data
    """Bdot4 = data[4,:] - np.mean(data[4, 0:meancutoff])
    neginfs = np.isneginf(Bdot4)
    Bdot4[np.where(neginfs)] = -maxrange
    posinfs = np.isposinf(Bdot4)
    Bdot4[np.where(posinfs)] = maxrange"""

    B1 = sp.cumtrapz(Bdot1 / r_probe_area, time_s) * 1e4  #Gauss
    B2 = sp.cumtrapz(Bdot2 / r_probe_area, time_s) * 1e4  #Gauss
    B3 = sp.cumtrapz(Bdot3 / r_probe_area, time_s) * 1e4  #Gauss
    #B4 = sp.cumtrapz(Bdot4/r_probe_area,time_s)*1e4 #Gauss
    #Bt7 = 3.162*sp.cumtrapz(Btdot7/tz_probe_area,time_s)*1e4#Gauss
    #Bt9 = 3.162*sp.cumtrapz(Btdot9/tz_probe_area,time_s)*1e4#Gauss
    #Bz7 = sp.cumtrapz(Bzdot7/tz_probe_area,time_s)*1e4#Gauss
    #Bz9 = sp.cumtrapz(Bzdot9/tz_probe_area,time_s)*1e4#Gauss
    #filtering

    #fps = 30
    #sine_fq = 10 #Hz
    #duration = 10 #seconds
    #sine_5Hz = sine_generator(fps,sine_fq,duration)
    #sine_fq = 1 #Hz
    #duration = 10 #seconds
    #sine_1Hz = sine_generator(fps,sine_fq,duration)

    #sine = sine_5Hz + sine_1Hz

    #filtered_sine = butter_highpass_filter(sine.data,10,fps)

    #Integration and Calibration
    #Bx =sp.cumtrapz(Bxdot/probe_area,time_s)
    #Bx = 3.162*Bx/1.192485591065652224e-03

    #By =sp.cumtrapz(Bydot/probe_area,time_s)
    #By = 3.162*By/1.784763055992550198e-03

    #Bz =sp.cumtrapz(Bzdot/probe_area,time_s)
    #Bz = 3.162*Bz/1.297485014039849059e-03
    #meanBx = np.mean(Bx)
    # Filtering
    B1filt = butter_highpass_filter(B1, 5e4, 125e6, order=3)
    B2filt = butter_highpass_filter(B2, 5e4, 125e6, order=3)
    B3filt = butter_highpass_filter(B3, 5e4, 125e6, order=3)
    #B4filt = butter_highpass_filter(B4, 5e4, 125e6, order = 3)
    #Btot = np.sqrt(Bxfilt**2+Byfilt**2+Bzfilt**2)
    #Btotave=Btotave+Btot

    #if plot:
    #    plt.figure(1)
    #    plt.plot(time,data[1,:])
    #    plt.figure(2)
    #    plt.plot(time[1:],Btot)

    return time_ms, time_s, timeB_s, timeB_ms, Bdot1, Bdot2, Bdot3, B1, B2, B3, B1filt, B2filt, B3filt, Bdotraw1, Bdotraw2, Bdotraw3, timeraw
Example no. 49
0
def plot_interval_censored_lifetimes(lower_bound,
                                     upper_bound,
                                     entry=None,
                                     left_truncated=False,
                                     sort_by_lower_bound=True,
                                     event_observed_color="#A60628",
                                     event_right_censored_color="#348ABD",
                                     ax=None,
                                     **kwargs):
    """
    Returns a lifetime plot for interval censored data.

    Parameters
    -----------
    lower_bound: (n,) numpy array or pd.Series
      the start of the period the subject experienced the event in.
    upper_bound: (n,) numpy array or pd.Series
      the end of the period the subject experienced the event in. If the value is equal to the corresponding value in lower_bound, then
      the individual's event was observed (not censored).
    entry: (n,) numpy array or pd.Series
      offsetting the births away from t=0. This could be from left-truncation, or delayed entry into study.
    left_truncated: boolean
      if entry is provided, and the data is left-truncated, this will display additional information in the plot to reflect this.
    sort_by_lower_bound: boolean
      sort by the lower_bound vector
    event_observed_color: str
      default: "#A60628"
    event_right_censored_color: str
      default: "#348ABD"
      applies to any individual with an upper bound of infinity.

    Returns
    -------
    ax:

    Examples
    ---------
    .. code:: python

        import pandas as pd
        import numpy as np
        from lifelines.plotting import plot_interval_censored_lifetimes
        df = pd.DataFrame({'lb':[20,15,30, 10, 20, 30], 'ub':[25, 15, np.inf, 20, 20, np.inf]})
        ax = plot_interval_censored_lifetimes(lower_bound=df['lb'], upper_bound=df['ub'])
    """
    from matplotlib import pyplot as plt

    if ax is None:
        ax = plt.gca()

    # If lower_bounds is pd.Series with non-default index, then use index values as y-axis labels.
    label_plot_bars = type(lower_bound) is pd.Series and type(
        lower_bound.index) is not pd.RangeIndex

    N = lower_bound.shape[0]
    if N > 25:
        warnings.warn(
            "For less visual clutter, you may want to subsample to less than 25 individuals."
        )

    assert upper_bound.shape[0] == N

    if sort_by_lower_bound:
        ix = np.argsort(lower_bound, 0)
        upper_bound = _iloc(upper_bound, ix)
        lower_bound = _iloc(lower_bound, ix)
        if entry is not None:
            entry = _iloc(entry, ix)

    if entry is None:
        entry = np.zeros(N)

    for i in range(N):
        if np.isposinf(_iloc(upper_bound, i)):
            c = event_right_censored_color
            ax.hlines(i,
                      _iloc(entry, i),
                      _iloc(lower_bound, i),
                      color=c,
                      lw=1.5)
        else:
            c = event_observed_color
            ax.hlines(i,
                      _iloc(entry, i),
                      _iloc(upper_bound, i),
                      color=c,
                      lw=1.5)
            if _iloc(lower_bound, i) == _iloc(upper_bound, i):
                ax.scatter(_iloc(lower_bound, i), i, color=c, marker="o", s=13)
            else:
                ax.scatter(_iloc(lower_bound, i), i, color=c, marker=">", s=13)
                ax.scatter(_iloc(upper_bound, i), i, color=c, marker="<", s=13)

        if left_truncated:
            ax.hlines(i, 0, _iloc(entry, i), color=c, lw=1.0, linestyle="--")

    if label_plot_bars:
        ax.set_yticks(range(0, N))
        ax.set_yticklabels(lower_bound.index)
    else:
        from matplotlib.ticker import MaxNLocator

        ax.yaxis.set_major_locator(MaxNLocator(integer=True))

    ax.set_xlim(0)
    ax.set_ylim(-0.5, N)
    return ax
Example no. 50
0
def imagesDiffer(image0, image1, skipMask=None, rtol=1.0e-05, atol=1e-08):
    """!Compare the pixels of two image or mask arrays; return True if close, False otherwise

    @param[in] image0  image 0, an lsst.afw.image.Image, lsst.afw.image.Mask,
        or transposed numpy array (see warning)
    @param[in] image1  image 1, an lsst.afw.image.Image, lsst.afw.image.Mask,
        or transposed numpy array (see warning)
    @param[in] skipMask  mask of pixels to skip, or None to compare all pixels;
        an lsst.afw.image.Mask, lsst.afw.image.Image, or transposed numpy array (see warning);
        all non-zero pixels are skipped
    @param[in] rtol  maximum allowed relative tolerance; more info below
    @param[in] atol  maximum allowed absolute tolerance; more info below

    The images are nearly equal if all pixels obey:
        |val1 - val0| <= rtol*|val1| + atol
    or, for float types, if nan/inf/-inf pixels match.

    @warning the comparison equation is not symmetric, so in rare cases the assertion
    may give different results depending on which image comes first.

    @warning the axes of numpy arrays are transposed with respect to Image and Mask data.
    Thus for example if image0 and image1 are both lsst.afw.image.ImageD with dimensions (2, 3)
    and skipMask is a numpy array, then skipMask must have shape (3, 2).

    @return a string which is non-empty if the images differ

    @throw TypeError if the dimensions of image0, image1 and skipMask do not match,
    or any are not of a numeric data type.
    """
    errStrList = []
    imageArr0 = image0.getArray() if hasattr(image0, "getArray") else image0
    imageArr1 = image1.getArray() if hasattr(image1, "getArray") else image1
    skipMaskArr = skipMask.getArray() if hasattr(skipMask, "getArray") else skipMask

    # check the inputs
    arrArgNameList = [
        (imageArr0, image0, "image0"),
        (imageArr1, image1, "image1"),
    ]
    if skipMask is not None:
        arrArgNameList.append((skipMaskArr, skipMask, "skipMask"))
    for i, (arr, arg, name) in enumerate(arrArgNameList):
        try:
            assert arr.dtype.kind in ("b", "i", "u", "f", "c")
        except Exception:
            raise TypeError(f"{name!r}={arg!r} is not a supported type")
        if i != 0:
            if arr.shape != imageArr0.shape:
                raise TypeError(f"{name} shape = {arr.shape} != {imageArr0.shape} = image0 shape")

    # np.allclose mis-handled unsigned ints in numpy 1.8
    # and subtraction doesn't give the desired answer in any case
    # so cast unsigned arrays into int64 (there may be a simple
    # way to safely use a smaller data type but I've not found it)
    if imageArr0.dtype.kind == "u":
        imageArr0 = imageArr0.astype(
            np.promote_types(imageArr0.dtype, np.int8))
    if imageArr1.dtype.kind == "u":
        imageArr1 = imageArr1.astype(
            np.promote_types(imageArr1.dtype, np.int8))

    if skipMaskArr is not None:
        skipMaskArr = np.array(skipMaskArr, dtype=bool)
        maskedArr0 = np.ma.array(imageArr0, copy=False, mask=skipMaskArr)
        maskedArr1 = np.ma.array(imageArr1, copy=False, mask=skipMaskArr)
        filledArr0 = maskedArr0.filled(0.0)
        filledArr1 = maskedArr1.filled(0.0)
    else:
        skipMaskArr = None
        filledArr0 = imageArr0
        filledArr1 = imageArr1

    try:
        np.array([np.nan], dtype=imageArr0.dtype)
        np.array([np.nan], dtype=imageArr1.dtype)
    except Exception:
        # one or both images does not support non-finite values (nan, etc.)
        # so just use value comparison
        valSkipMaskArr = skipMaskArr
    else:
        # both images support non-finite values, of which numpy has exactly three: nan, +inf and -inf;
        # compare those individually in order to give useful diagnostic output
        nan0 = np.isnan(filledArr0)
        nan1 = np.isnan(filledArr1)
        if np.any(nan0 != nan1):
            errStrList.append("NaNs differ")

        posinf0 = np.isposinf(filledArr0)
        posinf1 = np.isposinf(filledArr1)
        if np.any(posinf0 != posinf1):
            errStrList.append("+infs differ")

        neginf0 = np.isneginf(filledArr0)
        neginf1 = np.isneginf(filledArr1)
        if np.any(neginf0 != neginf1):
            errStrList.append("-infs differ")

        valSkipMaskArr = nan0 | nan1 | posinf0 | posinf1 | neginf0 | neginf1
        if skipMaskArr is not None:
            valSkipMaskArr |= skipMaskArr

    # compare values that should be comparable (are finite and not masked)
    valMaskedArr0 = np.ma.array(imageArr0, copy=False, mask=valSkipMaskArr)
    valMaskedArr1 = np.ma.array(imageArr1, copy=False, mask=valSkipMaskArr)
    valFilledArr0 = valMaskedArr0.filled(0.0)
    valFilledArr1 = valMaskedArr1.filled(0.0)

    if not np.allclose(valFilledArr0, valFilledArr1, rtol=rtol, atol=atol):
        errArr = np.abs(valFilledArr0 - valFilledArr1)
        maxErr = errArr.max()
        maxPosInd = np.where(errArr == maxErr)
        maxPosTuple = (maxPosInd[1][0], maxPosInd[0][0])
        errStr = f"maxDiff={maxErr} at position {maxPosTuple}; " \
                 f"value={valFilledArr0[maxPosInd][0]} vs. {valFilledArr1[maxPosInd][0]}"
        errStrList.insert(0, errStr)

    return "; ".join(errStrList)
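
# --- Illustrative sketch (not part of the snippet above; arrays are hypothetical).
# The comparison rule used by the function is numpy's allclose criterion,
# |val1 - val0| <= rtol*|val1| + atol, applied only to pixels that are finite in
# both images; nan/+inf/-inf pixels must match position-for-position.
import numpy as np

img0 = np.array([[1.0, np.nan, np.inf], [2.0, 3.0, -np.inf]])
img1 = np.array([[1.0 + 1e-7, np.nan, np.inf], [2.0, 3.0, -np.inf]])

nonfinite_match = np.array_equal(np.isnan(img0), np.isnan(img1)) \
    and np.array_equal(np.isposinf(img0), np.isposinf(img1)) \
    and np.array_equal(np.isneginf(img0), np.isneginf(img1))
finite = np.isfinite(img0) & np.isfinite(img1)
values_close = np.allclose(img0[finite], img1[finite], rtol=1e-5, atol=1e-8)
print(nonfinite_match and values_close)  # True for these arrays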
  def VerifySampleAndPdfConsistency(self, pspherical, rtol=0.075):
    """Verifies samples are consistent with the PDF using importance sampling.

    In particular, we verify an estimate of the surface area of the n-dimensional
    hypersphere, and the surface areas of the spherical caps demarcated by
    a handful of survival rates.

    Args:
      pspherical: A `PowerSpherical` distribution instance.
      rtol: Relative difference tolerable.
    """
    dim = tf.compat.dimension_value(pspherical.event_shape[-1])
    nsamples = int(1e5)
    samples = pspherical.sample(
        sample_shape=[nsamples], seed=test_util.test_seed())
    samples = tf.debugging.check_numerics(samples, 'samples')
    log_prob = pspherical.log_prob(samples)
    log_prob, samples = self.evaluate([log_prob, samples])
    # Check that the log_prob is not nan or +inf. It can be -inf since
    # if we sample a direction diametrically opposite to the mean direction,
    # we'll get an inner product of -1.
    self.assertFalse(np.any(np.isnan(log_prob)))
    self.assertFalse(np.any(np.isposinf(log_prob)))
    log_importance = -log_prob
    sphere_surface_area_estimate, importance = self.evaluate([
        tf.reduce_mean(tf.math.exp(log_importance), axis=0),
        tf.exp(log_importance)])
    true_sphere_surface_area = 2 * (np.pi)**(dim / 2) * self.evaluate(
        tf.exp(-tf.math.lgamma(dim / 2)))
    # Broadcast to correct size
    true_sphere_surface_area += np.zeros_like(sphere_surface_area_estimate)
    # Highly concentrated distributions do not get enough coverage to provide
    # a reasonable full-sphere surface area estimate. These are covered below
    # by CDF-based hypersphere cap surface area estimates.
    # Because the PowerSpherical distribution has zero mass at
    # -`mean_direction` (and points close to -`mean_direction` due to floating
    # point), we only compute this at concentration = 0, which has guaranteed
    # mass everywhere.
    self.assertAllClose(
        true_sphere_surface_area[0],
        sphere_surface_area_estimate[0], rtol=rtol)

    # Assert the surface area of the hyperspherical cap for several CDF values
    # in [.05, .4] (h must be greater than 0 for the hypersphere cap surface
    # area calculation to hold).
    for survival_rate in 0.95, .9, .75, .6:
      cdf = (1 - survival_rate)
      mean_dir = self.evaluate(pspherical.mean_direction)
      dotprods = np.sum(samples * mean_dir, -1)
      # Empirical estimate of the effective dot-product of the threshold that
      # selects for a given CDF level, that is the cosine of the largest
      # passable angle, or the minimum cosine for a within-CDF sample.
      dotprod_thresh = np.percentile(
          dotprods, 100 * survival_rate, axis=0, keepdims=True)
      # We mask this sum because it is possible for the log_prob to be -inf when
      # a sample lies at (or numerically very close to) -mean_dir.
      importance_masked = np.ma.array(
          importance, mask=dotprods <= dotprod_thresh)
      sphere_cap_surface_area_ests = (
          cdf * (importance_masked).sum(0) /
          (dotprods > dotprod_thresh).sum(0))
      h = (1 - dotprod_thresh)
      self.assertGreaterEqual(h.min(), 0)  # h must be >= 0 for the eqn below
      true_sphere_cap_surface_area = (
          0.5 * true_sphere_surface_area *
          self.evaluate(tf.math.betainc((dim - 1) / 2, 0.5, 2 * h - h**2)))
      if dim == 3:  # For 3-d we have a simpler form we can double-check.
        self.assertAllClose(2 * np.pi * h, true_sphere_cap_surface_area)

      self.assertAllClose(
          true_sphere_cap_surface_area,
          sphere_cap_surface_area_ests +
          np.zeros_like(true_sphere_cap_surface_area),
          rtol=rtol)
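
# --- Illustrative sketch (assumptions noted): a quick numpy/scipy check of the
# cap-area formula used in the test above.  For an n-sphere of surface area S, the
# area of the cap of height h is 0.5 * S * I_{2h - h^2}((dim-1)/2, 1/2), where I is
# the regularized incomplete beta function; for dim == 3 this reduces to 2*pi*h.
import numpy as np
from scipy.special import betainc, gammaln

dim = 3
h = np.linspace(0.05, 0.5, 5)
sphere_area = 2 * np.pi ** (dim / 2) * np.exp(-gammaln(dim / 2))  # 4*pi for dim == 3
cap_area = 0.5 * sphere_area * betainc((dim - 1) / 2, 0.5, 2 * h - h ** 2)
assert np.allclose(cap_area, 2 * np.pi * h)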
Esempio n. 52
0
    def get_eps(self, delta):  # minimize over \lambda
        if not self.flag:
            self.build_zeroth_oracle()
            self.flag = True

        if delta < 0 or delta > 1:
            print("Error! delta is a probability and must be between 0 and 1")
        if delta == 0:
            return self.RDP_inf
        else:

            def fun(x):  # the input is the RDP's \alpha
                if x <= 1:
                    return np.inf
                else:
                    return np.log(1 / delta) / (x - 1) + self.evalRDP(x)

            def fun_int(i):  # the input is the RDP's \alpha as an integer
                if i <= 1 or i >= len(self.RDPs_int):
                    return np.inf
                else:
                    return np.log(1 / delta) / (i - 1) + self.RDPs_int[i - 1]

            # When do we have computational constraints?
            # Only when we have subsampled items.

            # Keep expanding while the forward difference at self.m is still
            # negative (the minimum has not yet been bracketed) and fun(self.m) is finite
            while (self.m < self.m_max) and (not np.isposinf(fun(
                    self.m))) and (fun_int(self.m - 1) - fun_int(self.m - 2) <
                                   0):
                # If so, double m and expand logBinomC until the forward difference becomes positive

                if self.flag_subsample:

                    # The following line is m^2 time.
                    self.logBinomC = utils.get_binom_coeffs(self.m * 2 + 1)

                    # Update deltas_caches
                    for key, val in self.deltas_cache.items():
                        if type(key) is tuple:
                            func_tmp = key[0]
                        else:
                            func_tmp = key
                        cgf = lambda x: x * func_tmp(x + 1)
                        deltas, signs_deltas = utils.get_forward_diffs(
                            cgf, self.m * 2)

                        self.deltas_cache[key] = [deltas, signs_deltas]

                new_alphas = range(self.m + 1, self.m * 2 + 1, 1)
                self.alphas = np.concatenate(
                    (self.alphas, np.array(new_alphas)))  # array of integers
                self.m = self.m * 2

            mm = np.max(self.alphas)

            rdp_int_new = np.zeros_like(self.alphas, float)

            for key, val in self.cache.items():
                idx = self.idxhash[key]
                rdp = self.RDPs[idx]
                newarray = np.zeros_like(self.alphas, float)
                for j in range(2, mm + 1, 1):
                    newarray[j - 1] = rdp(1.0 * j)
                newarray[0] = newarray[1]
                coeff = self.coeffs[idx]
                rdp_int_new += newarray * coeff
                self.cache[key] = newarray

            self.RDPs_int = rdp_int_new

            # # update the integer CGF and the cache for each function
            # rdp_int_new = np.zeros_like(self.RDPs_int)
            # for key,val in self.cache.items():
            #     idx = self.idxhash[key]
            #     rdp = self.RDPs[idx]
            #     newarray = np.zeros_like(self.RDPs_int)
            #     for j in range(self.m):
            #         newarray[j] = rdp(1.0*(j+self.m+1))
            #
            #     coeff = self.coeffs[idx]
            #     rdp_int_new += newarray * coeff
            #     self.cache[key] = np.concatenate((val, newarray))
            #
            # # update the corresponding quantities
            # self.RDPs_int = np.concatenate((self.RDPs_int, rdp_int_new))

            #self.m = self.m*2

            bestint = np.argmin(
                np.log(1 / delta) /
                (self.alphas[1:] - 1) + self.RDPs_int[1:]) + 1

            if bestint == self.m - 1:
                if self.verbose:
                    print('Warning: Reach quadratic upper bound: m_max.')
                # In this case, we match the maximum quadratic upper bound.
                # Fix it by calling O(1) upper bounds and doing a logarithmic search
                cur = fun(bestint)
                while (not np.isposinf(cur)
                       ) and fun(bestint - 1) - fun(bestint - 2) < -1e-8:
                    bestint = bestint * 2
                    cur = fun(bestint)
                    if bestint > self.m_lin_max and self.approx == True:
                        print('Warning: Reach linear upper bound: m_lin_max.')
                        return cur

                results = minimize_scalar(fun,
                                          method='Bounded',
                                          bounds=[self.m - 1, bestint + 2],
                                          options={'disp': False})
                if results.success:
                    return results.fun
                else:
                    return None
                #return fun(bestint)

            if bestint == 0:
                if self.verbose:
                    print('Warning: Smallest alpha = 1.')

            # find the best integer alpha.
            bestalpha = self.alphas[bestint]

            results = minimize_scalar(fun,
                                      method='Bounded',
                                      bounds=[bestalpha - 1, bestalpha + 1],
                                      options={'disp': False})
            # the while loop above ensures that bestint+2 is at most m, and also bestint is at least 0.
            if results.success:
                return results.fun
            else:
                # There are cases when a certain \delta is not feasible.
                # For example, let p and q be uniform; the privacy R.V. is either 0 or \infty, and unless all \infty
                # events are taken care of by \delta, \epsilon cannot be < \infty
                return -1
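
# --- Illustrative sketch (standalone, not the class above): the conversion used in
# get_eps is eps(delta) = min_{alpha > 1} [ log(1/delta)/(alpha - 1) + RDP(alpha) ].
# Here RDP(alpha) = alpha / (2 * sigma**2) is the well-known RDP curve of the
# Gaussian mechanism with noise multiplier sigma; sigma and delta are hypothetical.
import numpy as np
from scipy.optimize import minimize_scalar

sigma, delta = 2.0, 1e-5
rdp = lambda alpha: alpha / (2.0 * sigma ** 2)
fun = lambda alpha: np.inf if alpha <= 1 else np.log(1 / delta) / (alpha - 1) + rdp(alpha)

res = minimize_scalar(fun, method='Bounded', bounds=(1.0001, 1000.0))
print(res.fun)  # an (eps, delta)-DP guarantee for the chosen sigma and delta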
Esempio n. 53
0
def calc_nested_gradient(orig_nest_coefs,
                         index_coefs,
                         design,
                         choice_vec,
                         rows_to_obs,
                         rows_to_nests,
                         ridge=None,
                         weights=None,
                         use_jacobian=True,
                         *args,
                         **kwargs):
    """
    Parameters
    ----------
    orig_nest_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each nesting coefficient being estimated. If 2D, should
        have 1 column for each set of nesting coefficients being used to
        predict the probabilities of each alternative being chosen. There
        should be one row per nesting coefficient. Elements denote the logit of
        the inverse of the scale coefficients for each lower level nest.
    index_coefs : 1D or 2D ndarray.
        All elements should be ints, floats, or longs. If 1D, should have 1
        element for each utility coefficient being estimated
        (i.e. num_features). If 2D, should have 1 column for each set of
        coefficients being used to predict the probabilities of choosing each
        alternative. There should be one row per index coefficient.
    design : 2D ndarray.
       There should be one row per observation per available alternative. There
       should be one column per utility coefficient being estimated. All
       elements should be ints, floats, or longs.
    choice_vec : 1D ndarray.
        All elements should be ints, floats, or longs. Each element represents
        whether the individual associated with the given row chose the
        alternative associated with the given row.
    rows_to_obs : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per observation. This matrix maps the rows of the design
        matrix to the unique observations (on the columns).
    rows_to_nests : 2D scipy sparse array.
        There should be one row per observation per available alternative and
        one column per nest. This matrix maps the rows of the design matrix to
        the unique nests (on the columns).
    ridge : int, float, long, or None, optional.
        Determines whether or not ridge regression is performed. If an int,
        float or long is passed, then that scalar determines the ridge penalty
        for the optimization. Default `== None`.
    weights : 1D ndarray or None.
        Allows for the calculation of weighted log-likelihoods. The weights can
        represent various things. In stratified samples, the weights may be
        the proportion of the observations in a given strata for a sample in
        relation to the proportion of observations in that strata in the
        population. In latent class models, the weights may be the probability
        of being a particular class.
    use_jacobian : bool, optional.
        Determines whether or not the jacobian will be used when calculating
        the gradient. When performing model estimation, `use_jacobian` should
        be `True` if the values being estimated are actually the logit of the
        nest coefficients. Default `== True`.

    Returns
    -------
    gradient : 1D numpy array.
       The gradient of the log-likelihood with respect to the given nest
       coefficients and index coefficients.
    """
    # Calculate the weights for the sample
    if weights is None:
        weights = np.ones(design.shape[0])
    weights_per_obs = np.max(rows_to_obs.toarray() * weights[:, None], axis=0)

    # Transform the nest coefficients into their "always positive" versions
    nest_coefs = naturalize_nest_coefs(orig_nest_coefs)

    # Get the vectors and matrices needed to calculate the gradient
    vector_dict = prep_vectors_for_gradient(nest_coefs, index_coefs, design,
                                            choice_vec, rows_to_obs,
                                            rows_to_nests)

    # Calculate the index for each alternative for each person
    sys_utility = design.dot(index_coefs)

    # Calculate w_ij
    long_w = sys_utility / vector_dict["long_nest_params"]
    # Guard against overflow
    inf_index = np.isposinf(long_w)
    long_w[inf_index] = max_comp_value

    ##########
    # Calculate d_log_likelihood_d_nest_params
    ##########
    # Calculate the term that only depends on nest level values
    log_exp_sums = np.log(vector_dict["ind_sums_per_nest"])
    # Guard against overflow
    log_exp_sums[np.isneginf(log_exp_sums)] = -1 * max_comp_value

    # Calculate the first term of the derivative of the log-likelihood
    # with respect to the nest parameters
    nest_gradient_term_1 = ((vector_dict["obs_to_chosen_nests"] -
                             vector_dict["nest_choice_probs"]) * log_exp_sums *
                            weights_per_obs[:, None]).sum(axis=0)

    # Calculate the second term of the derivative of the log-likelihood
    # with respect to the nest parameters
    half_deriv = (
        (vector_dict["long_probs"] -
         vector_dict["long_chosen_nest"] * vector_dict["prob_given_nest"]) *
        long_w * weights)
    nest_gradient_term_2 = (
        rows_to_nests.transpose().dot(half_deriv)[:, None]).ravel()

    # Calculate the third term of the derivative of the log-likelihood
    # with respect to the nest parameters
    nest_gradient_term_3a = (
        choice_vec -
        vector_dict["long_chosen_nest"] * vector_dict["prob_given_nest"])
    nest_gradient_term_3b = ((-1 * nest_gradient_term_3a * long_w * weights) /
                             vector_dict["long_nest_params"])
    # Guard against overflow
    inf_idx = np.isposinf(nest_gradient_term_3b)
    nest_gradient_term_3b[inf_idx] = max_comp_value

    neg_inf_idx = np.isneginf(nest_gradient_term_3b)
    nest_gradient_term_3b[neg_inf_idx] = -1 * max_comp_value

    # Get the nest-wide version of this piece of the gradient
    nest_gradient_term_3 = (
        rows_to_nests.transpose().dot(nest_gradient_term_3b)).ravel()

    # Combine the three terms. Note the "nest_coefs * (1 - nest_coefs)" is due to
    # the fact that we're estimating the logit of the nest coefficients instead
    # of the nest coefficient itself. We therefore need to multiply by
    # d_nest_coef_d_estimated_variable to get the correct gradient.
    # d_nest_coef_d_estimated_variable == nest_coefs * (1 - nest_coefs).
    if use_jacobian:
        jacobian = nest_coefs * (1.0 - nest_coefs)
    else:
        jacobian = 1
    nest_gradient = (
        (nest_gradient_term_1 + nest_gradient_term_2 + nest_gradient_term_3) *
        jacobian)[None, :]

    ##########
    # Calculate d_loglikelihood_d_beta
    ##########
    beta_gradient_term_1 = (
        (vector_dict["scaled_y"] - vector_dict["p_tilde_given_nest"] +
         vector_dict["p_tilde_given_nest"] * vector_dict["long_nest_params"] -
         vector_dict["long_probs"]) * weights)[None, :]
    #####
    # Calculate the derivative with respect to beta
    #####
    beta_gradient = beta_gradient_term_1.dot(design)

    #####
    # Combine the gradient pieces and account for ridge parameter
    #####
    gradient = np.concatenate((nest_gradient, beta_gradient), axis=1).ravel()

    if ridge is not None:
        # Note that the 20 is used in place of 'infinity' since I would really
        # like to specify the expected value of the nest coefficient to 1, but
        # that would make the logit of the nest parameter infinity. Instead I
        # use 20 as a close enough value-- (1 + exp(-20))**-1 is approx. 1.
        params = np.concatenate(((20 - orig_nest_coefs), index_coefs), axis=0)

        gradient -= 2 * ridge * params

    return gradient
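
# --- Illustrative sketch: the overflow guard used repeatedly above, shown on a
# hypothetical array.  max_comp_value stands in for whatever large finite cap the
# caller chooses.
import numpy as np

max_comp_value = 1e300
term = np.array([0.5, np.inf, -np.inf, 2.0])
term[np.isposinf(term)] = max_comp_value
term[np.isneginf(term)] = -1 * max_comp_value
print(term)  # +/-inf replaced by +/-max_comp_value; finite values untouched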
Esempio n. 54
0
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
    '''standardized multivariate normal cumulative distribution function

    This is a wrapper for scipy.stats.kde.mvn.mvndst which calculates
    a rectangular integral over a standardized multivariate normal
    distribution.

    This function assumes a standardized scale, that is, the variance in each dimension
    is one, but the correlation can be arbitrary (the covariance matrix equals the correlation matrix)

    Parameters
    ----------
    lower, upper : array_like, 1d
       lower and upper integration limits with length equal to the number
       of dimensions of the multivariate normal distribution. It can contain
       -np.inf or np.inf for open integration intervals
    corrcoef : float or array_like
       specifies correlation matrix in one of three ways, see notes
    optional keyword parameters to influence integration
        * maxpts : int, maximum number of function values allowed. This
             parameter can be used to limit the time. A sensible
             strategy is to start with `maxpts` = 1000*N, and then
             increase `maxpts` if ERROR is too large.
        * abseps : float absolute error tolerance.
        * releps : float relative error tolerance.

    Returns
    -------
    cdfvalue : float
        value of the integral

    Notes
    -----
    The correlation matrix corrcoef can be given in 3 different ways
    If the multivariate normal is two-dimensional then only the
    correlation coefficient needs to be provided.
    For general dimension the correlation matrix can be provided either
    as a one-dimensional array of the upper triangular correlation
    coefficients stacked by rows, or as full square correlation matrix

    See Also
    --------
    mvnormcdf : cdf of multivariate normal distribution without
        standardization

    Examples
    --------

    >>> print(mvstdnormcdf([-np.inf,-np.inf], [0.0,np.inf], 0.5))
    0.5
    >>> corr = [[1.0, 0, 0.5],[0,1,0],[0.5,0,1]]
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-6))
    0.166666399198
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr, abseps=1e-8))
    something wrong completion with ERROR > EPS and MAXPTS function values used;
                        increase MAXPTS to decrease ERROR; 1.048330348e-006
    0.166666546218
    >>> print(mvstdnormcdf([-np.inf,-np.inf,-100.0], [0.0,0.0,0.0], corr,
    ...                    maxpts=100000, abseps=1e-8))
    0.166666588293

    '''
    n = len(lower)
    # don't know if converting to array is necessary,
    # but it makes ndim check possible
    lower = np.array(lower)
    upper = np.array(upper)
    corrcoef = np.array(corrcoef)

    correl = np.zeros(n * (n - 1) // 2)  # dtype necessary?

    if (lower.ndim != 1) or (upper.ndim != 1):
        raise ValueError('can handle only 1D bounds')
    if len(upper) != n:
        raise ValueError('bounds have different lengths')
    if n == 2 and corrcoef.size == 1:
        correl = corrcoef
        # print 'case scalar rho', n
    elif corrcoef.ndim == 1 and len(corrcoef) == n * (n - 1) / 2.0:
        # print 'case flat corr', corrcoeff.shape
        correl = corrcoef
    elif corrcoef.shape == (n, n):
        # print 'case square corr',  correl.shape
        for ii in range(n):
            for jj in range(ii):
                correl[jj + ((ii - 2) * (ii - 1)) // 2] = corrcoef[ii, jj]
    else:
        raise ValueError('corrcoef has incorrect dimension')

    if 'maxpts' not in kwds:
        if n > 2:
            kwds['maxpts'] = 10000 * n

    lowinf = np.isneginf(lower)
    uppinf = np.isposinf(upper)
    infin = 2.0 * np.ones(n)

    np.putmask(infin, lowinf, 0)  # infin.putmask(0,lowinf)
    np.putmask(infin, uppinf, 1)  # infin.putmask(1,uppinf)
    # this has to be last
    np.putmask(infin, lowinf * uppinf, -1)

    ##    #remove infs
    ##    np.putmask(lower,lowinf,-100)# infin.putmask(0,lowinf)
    ##    np.putmask(upper,uppinf,100) #infin.putmask(1,uppinf)

    # print lower,',',upper,',',infin,',',correl
    # print correl.shape
    # print kwds.items()
    error, cdfvalue, inform = scipy.stats.kde.mvn.mvndst(
        lower, upper, infin, correl, **kwds)
    if inform:
        print('something wrong', informcode[inform], error)
    return cdfvalue
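
# --- Illustrative sketch: how the `infin` codes above are built for mvndst.
# The default 2 denotes a finite [lower, upper] interval, 0 means the lower limit
# is -inf, 1 means the upper limit is +inf, and -1 means both limits are infinite.
# The bounds below are hypothetical.
import numpy as np

lower = np.array([-np.inf, -1.0, -np.inf])
upper = np.array([0.0, np.inf, np.inf])
lowinf = np.isneginf(lower)
uppinf = np.isposinf(upper)
infin = 2.0 * np.ones(len(lower))
np.putmask(infin, lowinf, 0)
np.putmask(infin, uppinf, 1)
np.putmask(infin, lowinf & uppinf, -1)  # must come last
print(infin)  # [ 0.  1. -1.]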
Esempio n. 55
0
def imagesDiffer(imageArr1,
                 imageArr2,
                 skipMaskArr=None,
                 rtol=1.0e-05,
                 atol=1e-08):
    """Compare the pixels of two image arrays; return True if close, False otherwise
    
    Inputs:
    - image1: first image to compare
    - image2: second image to compare
    - skipMaskArr: pixels to ignore; nonzero values are skipped
    - rtol: relative tolerance (see below)
    - atol: absolute tolerance (see below)
    
    rtol and atol are positive, typically very small numbers.
    The relative difference (rtol * abs(b)) and the absolute difference "atol" are added together
    to compare against the absolute difference between "a" and "b".
    
    Return a string describing the error if the images differ significantly, an empty string otherwise
    """
    retStrs = []
    if skipMaskArr is not None:
        maskedArr1 = numpy.ma.array(imageArr1, copy=False, mask=skipMaskArr)
        maskedArr2 = numpy.ma.array(imageArr2, copy=False, mask=skipMaskArr)
        filledArr1 = maskedArr1.filled(0.0)
        filledArr2 = maskedArr2.filled(0.0)
    else:
        filledArr1 = imageArr1
        filledArr2 = imageArr2

    nan1 = numpy.isnan(filledArr1)
    nan2 = numpy.isnan(filledArr2)
    if numpy.any(nan1 != nan2):
        retStrs.append("NaNs differ")

    posinf1 = numpy.isposinf(filledArr1)
    posinf2 = numpy.isposinf(filledArr2)
    if numpy.any(posinf1 != posinf2):
        retStrs.append("+infs differ")

    neginf1 = numpy.isneginf(filledArr1)
    neginf2 = numpy.isneginf(filledArr2)
    if numpy.any(neginf1 != neginf2):
        retStrs.append("-infs differ")

    # compare values that should be comparable (are neither infinite, nan nor masked)
    valSkipMaskArr = nan1 | nan2 | posinf1 | posinf2 | neginf1 | neginf2
    if skipMaskArr is not None:
        valSkipMaskArr |= skipMaskArr
    valMaskedArr1 = numpy.ma.array(imageArr1, copy=False, mask=valSkipMaskArr)
    valMaskedArr2 = numpy.ma.array(imageArr2, copy=False, mask=valSkipMaskArr)
    valFilledArr1 = valMaskedArr1.filled(0.0)
    valFilledArr2 = valMaskedArr2.filled(0.0)

    if not numpy.allclose(valFilledArr1, valFilledArr2, rtol=rtol, atol=atol):
        errArr = numpy.abs(valFilledArr1 - valFilledArr2)
        maxErr = errArr.max()
        maxPosInd = numpy.where(errArr == maxErr)
        maxPosTuple = (maxPosInd[1][0], maxPosInd[0][0])
        errStr = "maxDiff=%s at position %s; value=%s vs. %s" % \
            (maxErr, maxPosTuple, valFilledArr1[maxPosInd][0], valFilledArr2[maxPosInd][0])
        retStrs.insert(0, errStr)
    return "; ".join(retStrs)
Esempio n. 56
0
    def _display_on_map_static(self,
                               index,
                               map_name,
                               palette="RdYlBu",
                               **kwargs):
        try:
            import escher
            if os.path.exists(map_name):
                map_json = map_name
                map_name = None
            else:
                map_json = None

            values = self.normalized_gaps

            values = values[~numpy.isnan(values)]
            values = values[~numpy.isinf(values)]

            data = self.solutions.iloc[index]
            # Find values above decimal precision
            data = data[numpy.abs(data.normalized_gaps.astype(float)) >
                        non_zero_flux_threshold]
            # Remove NaN rows
            data = data[~numpy.isnan(data.normalized_gaps.astype(float))]

            reaction_data = dict(data.normalized_gaps)
            for rid, gap in six.iteritems(reaction_data):
                if numpy.isposinf(gap):
                    gap = numpy.max(values)
                elif numpy.isneginf(gap):
                    gap = numpy.min(values)

                reaction_data[rid] = gap

            scale = self.plot_scale(palette)
            reaction_data['min'] = min(numpy.abs(values) * -1)
            reaction_data['max'] = max(numpy.abs(values))

            reaction_scale = [
                dict(type='min', color=scale[0][1], size=24),
                dict(type='value',
                     value=scale[0][0],
                     color=scale[0][1],
                     size=21),
                dict(type='value',
                     value=scale[1][0],
                     color=scale[1][1],
                     size=16),
                dict(type='value',
                     value=scale[2][0],
                     color=scale[2][1],
                     size=8),
                dict(type='value',
                     value=scale[3][0],
                     color=scale[3][1],
                     size=16),
                dict(type='value',
                     value=scale[4][0],
                     color=scale[4][1],
                     size=21),
                dict(type='max', color=scale[4][1], size=24)
            ]

            builder = escher.Builder(map_name=map_name,
                                     map_json=map_json,
                                     reaction_data=reaction_data,
                                     reaction_scale=reaction_scale)

            if in_ipnb():
                from IPython.display import display
                display(builder.display_in_notebook())
            else:
                builder.display_in_browser()

        except ImportError:
            print("Escher must be installed in order to visualize maps")
Esempio n. 57
0
  def testKernelResultsUsingTruncatedDistribution(self):
    def log_prob(x):
      return array_ops.where(
          x >= 0.,
          -x - x**2,  # Non-constant gradient.
          array_ops.fill(x.shape, math_ops.cast(-np.inf, x.dtype)))
    # This log_prob has the property that it is likely to attract
    # the flow toward, and below, zero...but for x <=0,
    # log_prob(x) = -inf, which should result in rejection, as well
    # as a non-finite log_prob.  Thus, this distribution gives us an opportunity
    # to test out the kernel results' ability to correctly capture rejections due
    # to finite AND non-finite reasons.
    # Why use a non-constant gradient?  This ensures the leapfrog integrator
    # will not be exact.

    num_results = 1000
    # Large step size, will give rejections due to integration error in addition
    # to rejection due to going into a region of log_prob = -inf.
    step_size = 0.1
    num_leapfrog_steps = 5
    num_chains = 2

    with self.test_session(graph=ops.Graph()) as sess:

      # Start multiple independent chains.
      initial_state = ops.convert_to_tensor([0.1] * num_chains)

      states, kernel_results = hmc.sample_chain(
          num_results=num_results,
          target_log_prob_fn=log_prob,
          current_state=initial_state,
          step_size=step_size,
          num_leapfrog_steps=num_leapfrog_steps,
          seed=42)

      states_, kernel_results_ = sess.run([states, kernel_results])
      pstates_ = kernel_results_.proposed_state

      neg_inf_mask = np.isneginf(kernel_results_.proposed_target_log_prob)

      # First:  Test that the mathematical properties of the above log prob
      # function in conjunction with HMC show up as expected in kernel_results_.

      # We better have log_prob = -inf some of the time.
      self.assertLess(0, neg_inf_mask.sum())
      # We better have some rejections due to something other than -inf.
      self.assertLess(neg_inf_mask.sum(), (~kernel_results_.is_accepted).sum())
      # We better have accepted a decent amount, even near end of the chain.
      self.assertLess(
          0.1, kernel_results_.is_accepted[int(0.9 * num_results):].mean())
      # We better not have any NaNs in states or log_prob.
      # We may have some NaN in grads, which involve multiplication/addition due
      # to gradient rules.  This is the known "NaN grad issue with tf.where."
      self.assertAllEqual(np.zeros_like(states_),
                          np.isnan(kernel_results_.proposed_target_log_prob))
      self.assertAllEqual(np.zeros_like(states_),
                          np.isnan(states_))
      # We better not have any +inf in states, grads, or log_prob.
      self.assertAllEqual(np.zeros_like(states_),
                          np.isposinf(kernel_results_.proposed_target_log_prob))
      self.assertAllEqual(
          np.zeros_like(states_),
          np.isposinf(kernel_results_.proposed_grads_target_log_prob[0]))
      self.assertAllEqual(np.zeros_like(states_),
                          np.isposinf(states_))

      # Second:  Test that kernel_results is congruent with itself and
      # acceptance/rejection of states.

      # Proposed state is negative iff proposed target log prob is -inf.
      np.testing.assert_array_less(pstates_[neg_inf_mask], 0.)
      np.testing.assert_array_less(0., pstates_[~neg_inf_mask])

      # Acceptance probs are zero whenever proposed state is negative.
      acceptance_probs = np.exp(np.minimum(
          kernel_results_.log_accept_ratio, 0.))
      self.assertAllEqual(
          np.zeros_like(pstates_[neg_inf_mask]),
          acceptance_probs[neg_inf_mask])

      # The move is accepted ==> state = proposed state.
      self.assertAllEqual(
          states_[kernel_results_.is_accepted],
          pstates_[kernel_results_.is_accepted],
      )
      # The move was rejected <==> state[t] == state[t - 1].
      for t in range(1, num_results):
        for i in range(num_chains):
          if kernel_results_.is_accepted[t, i]:
            self.assertNotEqual(states_[t, i], states_[t - 1, i])
          else:
            self.assertEqual(states_[t, i], states_[t - 1, i])
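
# --- Illustrative sketch: the truncated target used in the test above, written in
# plain numpy.  For x < 0 the log-density is -inf, which is what forces the HMC
# proposals into the rejection path being exercised by the test.
import numpy as np

def log_prob(x):
    x = np.asarray(x, dtype=float)
    return np.where(x >= 0., -x - x ** 2, -np.inf)

print(log_prob([0.5, -0.5]))        # [-0.75  -inf]
print(np.isneginf(log_prob(-1.0)))  # True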
Esempio n. 58
0
def colormap_image(imin, cmap=None, cbounds=None):

    # check to make sure that imin has 2 dimensions or less
    assert (imin.ndim <= 2)

    if cmap is None:
        cmap = jet(64)

    # copy the image
    im = imin.astype(num.double)

    # create the cbounds argument
    if cbounds is not None:
        # check the cbounds input
        assert (len(cbounds) == 2)
        assert (cbounds[0] <= cbounds[1])
        # threshold at cbounds
        im = im.clip(cbounds[0], cbounds[1])

    neginfidx = num.isneginf(im)
    isneginf = num.any(neginfidx)
    posinfidx = num.isposinf(im)
    isposinf = num.any(posinfidx)
    nanidx = num.isnan(im)
    isnan = num.any(nanidx)

    # scale the image to be between 0 and cmap.N-1
    goodidx = num.logical_not(
        num.logical_or(num.logical_or(neginfidx, posinfidx), nanidx))
    minv = im[goodidx].min()
    maxv = im[goodidx].max()
    if minv == maxv and minv >= 1.:
        minv -= 1.
    elif minv == maxv:
        maxv += 1.
    dv = maxv - minv
    if isposinf:
        maxv = maxv + dv * .025
        im[posinfidx] = maxv
        #print "Some elements of image are +infty"

    if isneginf or isnan:
        minv = minv - dv * .025
        im[neginfidx] = minv
        im[nanidx] = minv
        #print "Some elements of image are -infty or nan"

    im = (im - minv) / (maxv - minv)
    im *= (float(cmap.shape[0] - 1))

    # round
    im = im.round()
    im = im.astype(int)

    # create rgb image
    r = cmap[im, 0]
    g = cmap[im, 1]
    b = cmap[im, 2]
    newshape = r.shape + (1, )
    rgb = num.concatenate((num.reshape(r, newshape), num.reshape(
        g, newshape), num.reshape(b, newshape)), 2)

    # scale to 0 to 255
    rgb *= 255
    rgb = rgb.astype(num.uint8)

    clim = [minv, maxv]

    return rgb, clim
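
# --- Illustrative sketch: the "good pixel" mask built above from isnan/isposinf/
# isneginf is equivalent to numpy's single isfinite test, shown on hypothetical data.
import numpy as np

im = np.array([1.0, np.nan, np.inf, -np.inf, 2.5])
good_combined = ~(np.isnan(im) | np.isposinf(im) | np.isneginf(im))
assert np.array_equal(good_combined, np.isfinite(im))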
Esempio n. 59
0
    def refine(self, refine_results, iter_number):
        """
                    Compute the order, number of nodes, and segment ends required for the new grid
                    and assign them to the transcription of each phase. The method of refinement is
                    different for the first iteration and is done separately.

                    Parameters
                    ----------
                    iter_number: int
                        An integer value representing the iteration of the grid refinement
                    refine_results : dict
                        A dictionary where each key is the path to a phase in the problem, and the
                        associated value holds various properties of that phase needed by the refinement
                        algorithm.  refine_results is returned by check_error.  This method modifies it
                        in place, adding the new_num_segments, new_order, and new_segment_ends.

                    Returns
                    -------
                    refined : dict
                        A dictionary of phase paths : phases which were refined.

        """
        if iter_number == 0:
            self.refine_first_iter(refine_results)
            return

        x_dd = {}
        for phase_path, phase_refinement_results in refine_results.items():
            phase = self.phases[phase_path]
            self.error[phase_path] = refine_results[phase_path][
                'max_rel_error']
            tx = phase.options['transcription']
            gd = tx.grid_data

            num_scalar_states = 0
            for state_name, options in phase.state_options.items():
                shape = options['shape']
                size = np.prod(shape)
                num_scalar_states += size

            seg_order = gd.transcription_order
            seg_ends = gd.segment_ends
            numseg = gd.num_segments

            need_refine = phase_refinement_results['need_refinement']
            if not phase.refine_options['refine'] or not np.any(need_refine):
                refine_results[phase_path]['new_order'] = seg_order
                refine_results[phase_path][
                    'new_num_segments'] = gd.num_segments
                refine_results[phase_path]['new_segment_ends'] = seg_ends
                continue

            left_end_idxs = gd.subset_node_indices['segment_ends'][0::2]
            left_end_idxs = np.append(left_end_idxs,
                                      gd.subset_num_nodes['all'])
            refine_seg_idxs = np.where(need_refine)[0]

            old_left_end_idxs = self.previous_gd[
                phase_path].subset_node_indices['segment_ends'][0::2]
            old_left_end_idxs = np.append(
                old_left_end_idxs,
                self.previous_gd[phase_path].subset_num_nodes['all'])

            # compute curvature
            L, D = interpolation_lagrange_matrix(gd, gd)
            t = phase.get_val(f'timeseries.time')
            x, _, _, x_d = eval_ode_on_grid(phase=phase, transcription=tx)
            x_dd[phase_path] = {}
            P = {}
            P_hat = {}
            R = np.zeros(numseg)

            # Compute the maximum magnitude of the second derivative of each state
            # Find the same value at the same time on the previous solution
            # If the ratio of these two values for a given state is highest, it is stored as the curvature
            for state_name, options in phase.state_options.items():
                x_dd[phase_path][state_name] = D @ x_d[state_name]
                P[state_name] = np.zeros(numseg)
                P_hat[state_name] = np.zeros(numseg)
                for k in np.nditer(refine_seg_idxs):
                    interp = LagrangeBarycentricInterpolant(
                        self.previous_gd[phase_path].
                        node_stau[old_left_end_idxs[
                            self.
                            parent_seg_map[phase_path][k]]:old_left_end_idxs[
                                self.parent_seg_map[phase_path][k] + 1]],
                        options['shape'])
                    interp.setup(
                        x0=-1,
                        xf=1,
                        f_j=self.previous_x_dd[phase_path][state_name]
                        [old_left_end_idxs[self.parent_seg_map[phase_path][k]]:
                         old_left_end_idxs[self.parent_seg_map[phase_path][k] +
                                           1]])
                    P[state_name][k] = np.max(
                        np.absolute(x_dd[phase_path][state_name]
                                    [left_end_idxs[k]:left_end_idxs[k + 1]]))
                    xdd_max_time = gd.node_stau[left_end_idxs[k] + np.argmax(
                        np.absolute(x_dd[phase_path][state_name]
                                    [left_end_idxs[k]:left_end_idxs[k + 1]]))]
                    P_hat[state_name][k] = abs(interp.eval(xdd_max_time))
                    if P[state_name][k] / P_hat[state_name][k] > R[k]:
                        R[k] = P[state_name][k] / P_hat[state_name][k]
            non_smooth_idxs = np.where(
                R > phase.refine_options['smoothness_factor'])[0]
            smooth_need_refine_idxs = np.setdiff1d(refine_seg_idxs,
                                                   non_smooth_idxs)

            mul_factor = np.ones(numseg)
            h = 0.5 * (seg_ends[1:] - seg_ends[:-1])
            H = np.ones(numseg, dtype=int)
            h_prev = 0.5 * (self.previous_gd[phase_path].segment_ends[1:] -
                            self.previous_gd[phase_path].segment_ends[:-1])

            split_parent_seg_idxs = self.parent_seg_map[phase_path][
                smooth_need_refine_idxs]

            q_smooth = (
                np.log(self.error[phase_path][smooth_need_refine_idxs] /
                       self.previous_error[phase_path][split_parent_seg_idxs])
                + 2.5 * np.log(seg_order[smooth_need_refine_idxs] /
                               self.previous_gd[phase_path].
                               transcription_order[split_parent_seg_idxs])
            ) / (np.log(
                (h[smooth_need_refine_idxs] / h_prev[split_parent_seg_idxs])) +
                 np.log(seg_order[smooth_need_refine_idxs] / self.previous_gd[
                     phase_path].transcription_order[split_parent_seg_idxs]))

            q_smooth[q_smooth < 3] = 3.0
            q_smooth[np.isposinf(q_smooth)] = 3.0
            mul_factor[smooth_need_refine_idxs] = (self.error[phase_path][smooth_need_refine_idxs] /
                                                   phase.refine_options['tolerance']) ** \
                                                  (1 / (q_smooth - 2.5))

            new_order = np.ceil(gd.transcription_order *
                                mul_factor).astype(int)
            if gd.transcription == 'gauss-lobatto':
                odd_idxs = np.where(new_order % 2 != 0)
                new_order[odd_idxs] += 1

            split_seg_idxs = np.concatenate([
                np.where(new_order > phase.refine_options['max_order'])[0],
                non_smooth_idxs
            ])

            check_comb_indx = np.where(
                np.logical_and(
                    np.logical_and(
                        np.logical_and(np.invert(need_refine[:-1]),
                                       np.invert(need_refine[1:])),
                        new_order[:-1] == new_order[1:]),
                    new_order[:-1] == phase.refine_options['min_order']))[0]

            reduce_order_indx = np.setdiff1d(np.where(np.invert(need_refine)),
                                             check_comb_indx)

            new_order[split_seg_idxs] = seg_order[split_seg_idxs]
            split_parent_seg_idxs = self.parent_seg_map[phase_path][
                split_seg_idxs]

            q_split = np.log(
                (self.error[phase_path][split_seg_idxs] /
                 self.previous_error[phase_path][split_parent_seg_idxs]) /
                (seg_order[split_seg_idxs] / self.previous_gd[phase_path].
                 transcription_order[split_parent_seg_idxs])**2.5) / np.log(
                     (h[split_seg_idxs] / h_prev[split_parent_seg_idxs]) /
                     (seg_order[split_seg_idxs] / self.previous_gd[phase_path].
                      transcription_order[split_parent_seg_idxs]))

            q_split[q_split < 3] = 3
            q_split[np.isposinf(q_split)] = 3

            H[split_seg_idxs] = np.maximum(
                np.minimum(
                    np.ceil(
                        (self.error[phase_path][split_seg_idxs] /
                         phase.refine_options['tolerance'])**(1 / q_split)),
                    np.ceil(
                        np.log(self.error[phase_path][split_seg_idxs] /
                               phase.refine_options['tolerance']) /
                        np.log(seg_order[split_seg_idxs]))),
                2 * np.ones(split_seg_idxs.size))

            # reduce segment order where error is much below the tolerance
            if reduce_order_indx.size > 0:
                # compute normalization factor beta
                beta = {}
                for state_name, options in phase.state_options.items():
                    beta[state_name] = 0
                    for k in range(0, numseg):
                        beta_seg = np.max(
                            np.abs(x[state_name]
                                   [left_end_idxs[k]:left_end_idxs[k + 1]]))
                        if beta_seg > beta[state_name]:
                            beta[state_name] = beta_seg
                    beta[state_name] += 1

                for k in np.nditer(reduce_order_indx):
                    seg_size = {
                        'radau-ps': seg_order[k] + 1,
                        'gauss-lobatto': seg_order[k]
                    }
                    if seg_order[k] == phase.refine_options['min_order']:
                        continue
                    new_order_state = {}
                    new_order[k] = seg_order[k]
                    a = np.zeros((seg_size[gd.transcription],
                                  seg_size[gd.transcription]))
                    s, _ = lgr(seg_order[k], include_endpoint=True)
                    if gd.transcription == 'gauss-lobatto':
                        s, _ = lgl(seg_order[k])
                    for j in range(0, seg_size[gd.transcription]):
                        roots = s[s != s[j]]
                        Q = np.poly(roots)
                        a[:, j] = Q / np.polyval(Q, s[j])

                    for state_name, options in phase.state_options.items():
                        new_order_state[state_name] = seg_order[k]
                        b = a @ x[state_name][
                            left_end_idxs[k]:left_end_idxs[k + 1]]

                        for i in range(seg_size[gd.transcription] - 1,
                                       phase.refine_options['min_order'], -1):
                            if np.abs(b[i]) / beta[state_name] < phase.refine_options['tolerance']/10 and \
                                    i - 1 < new_order_state[state_name]:
                                new_order_state[state_name] = i - 1
                            else:
                                new_order_state[state_name] = seg_order[k]
                        new_order[k] = max(new_order_state.values())

            # combine unnecessary segments
            merge_seg = np.zeros(numseg, dtype=bool)
            if check_comb_indx.size > 0:
                for k in np.nditer(check_comb_indx):
                    seg_size = {
                        'radau-ps': seg_order[k] + 1,
                        'gauss-lobatto': seg_order[k]
                    }
                    if merge_seg[k]:
                        continue

                    a = np.zeros((seg_size[gd.transcription],
                                  seg_size[gd.transcription]))
                    h_ = np.maximum(h[k], h[k + 1])
                    s, _ = lgr(new_order[k].astype(int), include_endpoint=True)
                    if gd.transcription == 'gauss-lobatto':
                        s, _ = lgl(new_order[k])
                    for j in range(0, seg_size[gd.transcription]):
                        roots = s[s != s[j]]
                        Q = np.poly(roots)
                        a[:, j] = Q / np.polyval(Q, s[j])

                    merge_seg[k + 1] = True

                    for state_name, options in phase.state_options.items():
                        beta = 1 + np.max(
                            x[state_name][left_end_idxs[k]:left_end_idxs[k +
                                                                         1]])
                        c = a @ x[state_name][
                            left_end_idxs[k]:left_end_idxs[k + 1]]
                        b = np.multiply(
                            c.ravel(),
                            np.array([
                                (h_ / h[k])**l
                                for l in range(seg_size[gd.transcription])
                            ]))
                        b_hat = np.multiply(
                            c.ravel(),
                            np.array([
                                (h_ / h[k + 1])**l
                                for l in range(seg_size[gd.transcription])
                            ]))
                        err_val = np.dot(
                            np.absolute(b - b_hat).ravel(),
                            np.array([
                                2**l for l in range(seg_size[gd.transcription])
                            ])) / beta

                        if err_val > phase.refine_options[
                                'tolerance'] / 10 and merge_seg[k + 1]:
                            merge_seg[k + 1] = False

            H[np.where(merge_seg)] = 0

            new_order = np.repeat(new_order, repeats=H)
            new_num_segments = int(np.sum(H))
            new_segment_ends = split_segments(gd.segment_ends, H)

            if gd.transcription == 'gauss-lobatto':
                new_order[new_order %
                          2 == 0] = new_order[new_order % 2 == 0] + 1

            self.parent_seg_map[phase_path] = np.zeros(new_num_segments,
                                                       dtype=int)
            for i in range(1, new_num_segments):
                for j in range(1, numseg):
                    if new_segment_ends[i] == gd.segment_ends[j]:
                        self.parent_seg_map[phase_path][i] = int(j)
                        break
                    elif gd.segment_ends[
                            j - 1] < new_segment_ends[i] < gd.segment_ends[j]:
                        self.parent_seg_map[phase_path][i] = int(j - 1)
                        break

            refine_results[phase_path]['new_order'] = new_order
            refine_results[phase_path]['new_num_segments'] = new_num_segments
            refine_results[phase_path]['new_segment_ends'] = new_segment_ends

            tx.options['order'] = new_order
            tx.options['num_segments'] = new_num_segments
            tx.options['segment_ends'] = new_segment_ends
            tx.init_grid()
            self.previous_x_dd[phase_path] = x_dd[phase_path].copy()
            self.previous_error[phase_path] = self.error[phase_path].copy()
            self.previous_gd[phase_path] = copy.deepcopy(gd)
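
# --- Illustrative sketch: the guard applied to the estimated convergence exponents
# q_smooth and q_split above, on hypothetical values.  Exponents below 3 are floored
# at 3, and +inf (which can arise when the previous error underflows to zero) is
# replaced by the same floor.
import numpy as np

q = np.array([2.1, 5.7, np.inf, 4.0])
q = np.where(np.isposinf(q) | (q < 3.0), 3.0, q)
print(q)  # [3.  5.7 3.  4. ]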
Esempio n. 60
0
    def _pour(self, a, b):
        """Execute the Div tank (operation) in the pour (forward) direction.

    Parameters
    ----------
    a: np.ndarray
      The numerator array.
    b: np.ndarray
      The denominator array

    Returns
    -------
    dict(
      target: np.ndarray
        The result of a/b
      smaller_size_array: np.ndarray
        Either 'a' or 'b' depending on which has fewer elements.
      a_is_smaller: bool
        Whether a is the smaller sized array.
      missing_vals: np.ndarray
        The values from either 'a' or 'b' that were lost when the other array had a zero in that location.
      remainder: np.ndarray
        The remainder of a/b in the case that 'a' and 'b' are of integer type.
    )

    """
        # If they aren't numpy arrays then cast them to arrays.
        if type(a) is not np.ndarray:
            a = np.array(a)
        if type(b) is not np.ndarray:
            b = np.array(b)

        # Find the array with fewer elements and save that.
        a_is_smaller = a.size < b.size
        if a_is_smaller:
            smaller_size_array = ut.maybe_copy(a)
        else:
            smaller_size_array = ut.maybe_copy(b)

        # Do the division
        target = np.array(a / b)

        # Save the values of the larger array whose values are erased by a zero in
        # the smaller array
        if a_is_smaller:
            missing_vals = b[(target == 0)]
        else:
            missing_vals = a[np.isposinf(target) | np.isneginf(target)
                             | np.isnan(target)]

        # Don't allow integer division by zero.
        if a.dtype in (np.int32, np.int64) and b.dtype in (np.int32, np.int64):
            if (b == 0).any():
                raise ZeroDivisionError(
                    "Integer division by zero is not supported.")
            remainder = np.array(np.remainder(a, b))
        else:
            remainder = np.array([], dtype=target.dtype)

        return {
            'target': target,
            'smaller_size_array': smaller_size_array,
            'a_is_smaller': a_is_smaller,
            'missing_vals': missing_vals,
            'remainder': remainder
        }
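
# --- Illustrative sketch: what _pour records as missing_vals when 'a' is not the
# smaller array.  Float division by zero yields +/-inf (or nan for 0/0), and the
# positions of those non-finite results identify the numerator values that cannot
# be recovered from target * b alone.  Arrays are hypothetical.
import numpy as np

a = np.array([1.0, -2.0, 0.0, 3.0])
b = np.array([2.0, 0.0, 0.0, 1.0])
with np.errstate(divide='ignore', invalid='ignore'):
    target = a / b
missing_vals = a[np.isposinf(target) | np.isneginf(target) | np.isnan(target)]
print(target)        # [ 0.5 -inf  nan  3. ]
print(missing_vals)  # [-2.  0.]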