Example #1
def main():
    saved_handler = sp.seterrcall(err_handler)
    saved_err = sp.seterr(all='call')

    print('============ Part 1: Plotting =============================')
    x, y = load_data('ex2/ex2data1.txt')
    plot_data(x, y)
    pl.show()

    print('============ Part 2: Compute Cost and Gradient ============')
    m, n = x.shape
    x = sp.column_stack((sp.ones((m, 1)), x))
    init_theta = sp.asmatrix(sp.zeros((n + 1, 1)))
    cost, grad = cost_function(init_theta, x, y)
    print('Cost at initial theta: %s' % cost)
    print('Gradient at initial theta:\n %s' % grad)

    print('============ Part 3: Optimizing minimize ====================')
    # res = op.minimize(cost_function, init_theta, args=(x, y), jac=True, method='Newton-CG')
    res = op.minimize(cost_function_without_grad, init_theta, args=(x, y), method='Powell')
    # print('Cost at theta found by fmin: %s' % cost)
    print('Result by minimize:\n%s' % res)
    plot_decision_boundary(res.x, x, y)
    pl.show()

    print('============ Part 4: Optimizing fmin ====================')
    res = op.fmin(cost_function_without_grad, init_theta, args=(x, y))
    # print('Cost at theta found by fmin: %s' % cost)
    print('Result by fmin:\n%s' % res)
    plot_decision_boundary(res, x, y)
    pl.show()

    sp.seterrcall(saved_handler)
    sp.seterr(**saved_err)
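For reference, a minimal standalone sketch of the same save/restore error-handling pattern, written against numpy directly (the sp.seterr / sp.seterrcall calls above resolve to the same numpy functions); err_handler here is a hypothetical stand-in for the handler used in the example:

import numpy as np

def err_handler(err_type, flag):
    # called by numpy whenever a floating-point error of the configured kind occurs
    print("Floating point error (%s), flag %s" % (err_type, flag))

saved_handler = np.seterrcall(err_handler)   # install the callback
saved_err = np.seterr(all='call')            # route all FP errors to it

np.float64(1.0) / np.float64(0.0)            # triggers the handler ('divide')

np.seterrcall(saved_handler)                 # restore the previous state
np.seterr(**saved_err)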
Example #2
def cumulative_state_probability(threshold, beta, value):
    """Fragility curve calculation EQRM manual 7.8
    (Note: the eqn in the manual is wrong.  It is correct here.)

    Calculate the cumulative state probability.

    P_cumulative(slight) = P(slight)+P(mid)+P(high)...

    Note that value/threshold is the median, not the mean.

    Since this is cumulative and the values are descending;
    column 0 is Slight
    column 1 is Moderate
    column 2 is Extreme
    column 3 is Complete

    parameters
      threshold - median damage state threshold
      beta - represents uncertainty in the damage state
      value - motion of the structure
          for buildings - either peak spectral displacement or
                              peak spectral acceleration
          for bridges - spectral acceleration at t = 1 sec
    """

    oldsettings = seterr(divide='ignore')
    temp = (1 / beta) * log(value / threshold)
    seterr(**oldsettings)
    return norm.cdf(temp)
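A minimal standalone sketch of the same fragility-curve calculation, with hypothetical thresholds, beta and response value; the errstate block mirrors the divide guard in the function above (a zero threshold or value would otherwise warn):

import numpy as np
from scipy.stats import norm

threshold = np.array([0.5, 1.0, 2.0, 4.0])   # median damage-state thresholds, Slight..Complete
beta = 0.4
value = 1.5

with np.errstate(divide='ignore'):
    p_cumulative = norm.cdf((1.0 / beta) * np.log(value / threshold))

print(p_cumulative)   # descending probabilities, one per damage state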
Example #3
def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins
    r"""
    Calculate the bin-wise deviation between two histograms.
    
    The relative bin deviation between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    
    .. math::
    
        d_{rbd}(H, H') = \sum_{m=1}^M
            \frac{
                \sqrt{(H_m - H'_m)^2}
              }{
                \frac{1}{2}
                \left(
                    \sqrt{H_m^2} +
                    \sqrt{{H'}_m^2}
                \right)
              }
    
    *Attributes:*

    - semimetric (triangle inequality satisfied?)
    
    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-equal histograms:*

    - not applicable 
    
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram, same bins as ``h1``.
    
    Returns
    -------
    relative_bin_deviation : float
        Relative bin deviation between the two histograms.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    numerator = scipy.sqrt(scipy.square(h1 - h2))
    denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2.
    old_err_state = scipy.seterr(invalid='ignore') # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = numerator / denominator
    scipy.seterr(**old_err_state)
    result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
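A minimal standalone sketch of the same bin-wise computation with plain numpy and hypothetical histograms (sqrt(x**2) is just abs(x)); numpy.errstate replaces the seterr save/restore pair:

import numpy as np

# includes a bin that is zero in both histograms, the 0/0 case guarded above
h1 = np.array([1., 4., 0., 2.])
h2 = np.array([2., 3., 0., 2.])

numerator = np.abs(h1 - h2)                 # sqrt((H_m - H'_m)^2)
denominator = (np.abs(h1) + np.abs(h2)) / 2.

with np.errstate(invalid='ignore'):         # allow the 0/0 bins
    ratio = numerator / denominator
ratio[np.isnan(ratio)] = 0                  # define 0/0 as 0

print(ratio.sum())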
Example #4
def calculate_corner_periods(periods, ground_motion, magnitude):
    """
    periods - 1D array, dimension # of periods
    ground_motion - XD array, with last dimension periods.
    magnitude - 1D array, dimension # of events
    from Newmark and Hall 1982

    returns TAV,TVD - the acceleration and velocity dependent
    corner periods
    TAV and TVD have the same dimensions as ground_motion, except
       the last axis (periods) is dropped.
    """
    #reference_periods = 0.3,1.0
    S03 = interp(array(0.3), ground_motion, periods, axis=-1)
    S10 = interp(array(1.0), ground_motion, periods, axis=-1)
    # print 'S03',S03[:,0:5]

    # print 'S10',S10[:,0:5]
    # interpolate the ground motion at reference periods
    oldsettings = seterr(invalid='ignore')
    acceleration_dependent = (S10 / S03)
    seterr(**oldsettings)
    acceleration_dependent[where(S03 < 0.00000000000001)] = 0.0
    acceleration_dependent = acceleration_dependent[..., 0]  # and collapse

    # This assumes ground_motion.shape = (1, events)
    velocity_dependent = (10**((magnitude-5.0)/2))[newaxis,:]
    assert len(acceleration_dependent.shape) == 2
    assert len(velocity_dependent.shape) == 2
    # removed this assert so test_cadell_damage passes.
    # This test passes 4660 sites, instead of the usual 1.
    # Fixing/ getting rid of the test is another option...
    #assert velocity_dependent.shape == acceleration_dependent.shape
    return acceleration_dependent, velocity_dependent
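A minimal standalone sketch of the corner-period formulas with hypothetical spectral values and magnitudes; S03 and S10 stand in for the interpolated spectral accelerations at 0.3 s and 1.0 s:

import numpy as np

S03 = np.array([0.80, 0.00])       # Sa at T = 0.3 s (g)
S10 = np.array([0.35, 0.10])       # Sa at T = 1.0 s (g)
magnitude = np.array([5.5, 6.5])

with np.errstate(invalid='ignore', divide='ignore'):
    TAV = S10 / S03                # acceleration-dependent corner period
TAV[S03 < 1e-14] = 0.0             # guard the zero-motion case, as above

TVD = 10 ** ((magnitude - 5.0) / 2)   # velocity-dependent corner period

print(TAV, TVD)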
Example #5
def FillFD(En_A,T):
	""" fill an array with Fermi-Dirac distribution """
	N = int((len(En_A)-1)/2)
	sp.seterr(over='ignore') ## ignore overflow in exp, not important in this calculation
	if T == 0.0: FD_A = 1.0*sp.concatenate([sp.ones(N),[0.5],sp.zeros(N)])
	else:        FD_A = FermiDirac(En_A,T)
	sp.seterr(over='warn')
	return FD_A
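A minimal standalone sketch, assuming the standard Fermi-Dirac form 1/(exp(E/T)+1) for the FermiDirac helper that the example does not show; the energy grid and temperature are hypothetical:

import numpy as np

def fermi_dirac(En_A, T):
    # assumed standard form n_F(E) = 1 / (exp(E/T) + 1)
    return 1.0 / (np.exp(En_A / T) + 1.0)

En_A = np.linspace(-50.0, 50.0, 2001)    # symmetric grid, odd length
T = 0.05

with np.errstate(over='ignore'):         # exp overflows for large E/T; the result saturates to 0
    FD_A = fermi_dirac(En_A, T)

print(FD_A[0], FD_A[len(En_A) // 2], FD_A[-1])   # ~1, 0.5, ~0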
Example #6
def solve(SA, SD, SAcap, update_function, rtol=0.05, maxits=100):
    """
    #FIXME DSG-EQRM what is the dimensions of these, and the return value?
    SA = demand curve (g)
    SAcap = capacity curve (g)
    SD = x axis (for both capacity and demand) (mm)

    update_function(intersection_point.x) makes a new SA, SD, SAcap.
    It also returns an exit flag.
    It is usually (always?)
    eqrm_code.capacity_spectrum_model.Capacity_spectrum_model.updated_responce

    rtol of 0.05: the process will halt if intersection_x moved by
    less than 5% in the last iteration. All points are then deemed
    to have converged.

    maxits is the maximum iterations. If maxits is exceeded, then
    any points that are not yet deemed to have converged are set
    to (intersection_x + old_intersection_x)/2
    """
    # old terminology, SDcr was intersection_x
    iters = 0
    intersection_x = find_intersection(SD, SA, SAcap)
    exit_flag = False
    while ((iters <= maxits) & (not exit_flag)):
        # if 1:
        # if iters>0:
        # print 'iter'
        #    print iters
        iters += 1  # update number of iterations
        old_intersection_x = intersection_x.copy()  # copy old intersection
        # update curves
        SA, SD, SAcap, exit_flag = update_function(intersection_x)

        # get new intersection
        intersection_x = find_intersection(SD, SA, SAcap)
        oldsettings = seterr(invalid='ignore')
        diff = abs(intersection_x - old_intersection_x) / \
            old_intersection_x  # diff
        seterr(**oldsettings)
        # This is needed in windows to stop nan's setting the diff to -1.#IND
        diff = nan_to_num(diff)
        max_diff = diff.max()  # find the relative change in intersection_x
        if max_diff < rtol:
            exit_flag = True  # check for convergence

    if (iters >= maxits):
        # if iteration doesn't converge, take the average value
        # use average values
        non_convergent = where(diff >= rtol)  # find non_convergent cases
        # x = (x+x_old)/2
        intersection_x[non_convergent] += old_intersection_x[non_convergent]
        intersection_x[non_convergent] *= 0.5
    else:
        non_convergent = array([])
    return intersection_x, non_convergent
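A minimal sketch of the convergence test inside the loop above, with hypothetical intersection points; a zero old value produces the NaN that nan_to_num clears:

import numpy as np

old_x = np.array([10.0, 0.0, 2.0])
new_x = np.array([10.3, 0.0, 1.95])

with np.errstate(invalid='ignore', divide='ignore'):
    diff = np.abs(new_x - old_x) / old_x
diff = np.nan_to_num(diff)        # NaN (and inf) -> finite, as in the loop above

rtol = 0.05
converged = diff.max() < rtol
print(diff, converged)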
Example #7
def FillBE(En_A,T):
	""" fill an array with Bose-Einstein distribution """
	N = int((len(En_A)-1)/2)
	sp.seterr(over='ignore') ## ignore overflow in exp, not important in this calculation
	if T == 0.0: BE_A = -1.0*sp.concatenate([sp.ones(N),[0.5],sp.zeros(N)])
	else:        
		BE_A = BoseEinstein(En_A,T)
		BE_A[N] = -0.5
	sp.seterr(over='warn')
	return BE_A
Example #8
def chi_square(h1, h2): # 23 us @array, 49 us @list \w 100
    r"""
    Chi-square distance.
    
    Measure how unlikely it is that one distribution (histogram) was drawn from the
    other. The Chi-square distance between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    
    .. math::
    
        d_{\chi^2}(H, H') = \sum_{m=1}^M
            \frac{
                (H_m - H'_m)^2
            }{
                H_m + H'_m
            }
    
    *Attributes:*

    - semimetric
    
    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, 2]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-equal histograms:*

    - not applicable     
    
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.
    
    Returns
    -------
    chi_square : float
        Chi-square distance.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    old_err_state = scipy.seterr(invalid='ignore') # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = scipy.square(h1 - h2) / (h1 + h2)
    scipy.seterr(**old_err_state)
    result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
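A minimal standalone sketch of the chi-square distance with hypothetical histograms, including a bin that is zero in both (the 0/0 case defined to contribute 0):

import numpy as np

h1 = np.array([5., 3., 2., 0.])
h2 = np.array([4., 4., 2., 0.])

with np.errstate(invalid='ignore'):
    terms = np.square(h1 - h2) / (h1 + h2)
terms[np.isnan(terms)] = 0

print(terms.sum())   # chi-square distance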
Example #9
def kullback_leibler(h1, h2): # 83 us @array, 109 us @list \w 100 bins
    r"""
    Kullback-Leibler divergence.
    
    Compute how inefficient it would be to code one histogram into another.
    Actually computes :math:`\frac{d_{KL}(h1, h2) + d_{KL}(h2, h1)}{2}` to achieve symmetry.
    
    The Kullback-Leibler divergence between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    
    .. math::
    
        d_{KL}(H, H') = \sum_{m=1}^M H_m\log\frac{H_m}{H'_m}
    
    *Attributes:*

    - quasimetric (but made symmetric)
    
    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-normalized histograms:*

    - not applicable
    
    *Attributes for not-equal histograms:*

    - not applicable
        
    Parameters
    ----------
    h1 : sequence
        The first histogram, where h1[i] > 0 for any i such that h2[i] > 0, normalized.
    h2 : sequence
        The second histogram, where h2[i] > 0 for any i such that h1[i] > 0, normalized, same bins as ``h1``.
    
    Returns
    -------
    kullback_leibler : float
        Kullback-Leibler divergence.

    """
    old_err_state = scipy.seterr(divide='raise')
    try:
        h1, h2 = __prepare_histogram(h1, h2)
        result = (__kullback_leibler(h1, h2) + __kullback_leibler(h2, h1)) / 2.
        scipy.seterr(**old_err_state)
        return result
    except FloatingPointError:
        scipy.seterr(**old_err_state)
        raise ValueError('h1 can only contain zero values where h2 also contains zero values and vice-versa')
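A minimal standalone sketch of the symmetric Kullback-Leibler computation, assuming the summand from the docstring formula for the __kullback_leibler helper (not shown) and hypothetical zero-free normalized histograms:

import numpy as np

def kl(h, g):
    # d_KL(H, H') = sum_m H_m * log(H_m / H'_m), per the docstring formula
    return np.sum(h * np.log(h / g))

h1 = np.array([0.2, 0.5, 0.3])
h2 = np.array([0.3, 0.4, 0.3])

with np.errstate(divide='raise'):        # mirror the zero-bin guard above
    d = (kl(h1, h2) + kl(h2, h1)) / 2.0
print(d)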
Example #10
def nonlin_damp(capacity_parameters, kappa, acceleration, displacement, csm_hysteretic_damping):
    """
    Calculate the damping correction (Fulford 02) using the
    exact capacity curve.
    """
    # print "csm_hysteretic_damping", csm_hysteretic_damping
    DyV, AyV, DuV, AuV, a, b, c = capacity_parameters
    Harea = hyst_area_rand(displacement, acceleration, DyV, AyV, DuV, AuV, csm_hysteretic_damping)
    oldsettings = seterr(invalid="ignore")
    BH = kappa * Harea / (2 * pi * displacement * acceleration)
    seterr(**oldsettings)
    return BH
Example #11
def relative_bin_deviation(h1, h2):  # 79 us @array, 104 us @list \w 100 bins
    """
    Calculate the bin-wise deviation between two histograms.
    The relative bin deviation between two histograms \f$H\f$ and \f$H'\f$ of size
    \f$m\f$ is defined as
    \f[
        d_{rbd}(H, H') = \sum_{m=1}^M
            \frac{
                \sqrt{(H_m - H'_m)^2}
              }{
                \frac{1}{2}
                \left(
                    \sqrt{H_m^2} +
                    \sqrt{{H'}_m^2}
                \right)
              }
    \f]
    
    Attributes:
    - semimetric (triangle inequality satisfied?)
    
    Attributes for normalized histograms:
    - \f$d(H, H')\in[0, \infty)\f$
    - \f$d(H, H) = 0\f$
    - \f$d(H, H') = d(H', H)\f$
    
    Attributes for not-normalized histograms:
    - \f$d(H, H')\in[0, \infty)\f$
    - \f$d(H, H) = 0\f$
    - \f$d(H, H') = d(H', H)\f$
    
    Attributes for not-equal histograms:
    - not applicable 
    
    @param h1 the first histogram
    @type h1 array-like sequence
    @param h2 the second histogram, same bins as h1
    @type h2 array-like sequence
    
    @return relative bin deviation
    @rtype float
    """
    h1, h2 = __prepare_histogram(h1, h2)
    numerator = scipy.sqrt(scipy.square(h1 - h2))
    denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2.0
    old_err_state = scipy.seterr(
        invalid="ignore"
    )  # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = numerator / denominator
    scipy.seterr(**old_err_state)
    result[scipy.isnan(result)] = 0  # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
Example #12
    def compute_sf_entropy(self, sf_group, theoryDotTheory, theoryDotData, T):
        """
        Compute the entropy for a given scale factor.
        """
        try:
            prior_type, prior_params = self.sf_priors[sf_group]
        except KeyError:
            prior_type, prior_params = 'uniform in sf', None

        if prior_type == 'uniform in sf':
            if theoryDotTheory != 0:
                entropy = scipy.log(scipy.sqrt(2*scipy.pi*T/theoryDotTheory))
            else:
                entropy = 0
        elif prior_type == 'gaussian in log sf':
            # This is the integrand for the prior. Note that it's important
            #  that u = 0 corresponds to B_best. This ensures that the
            #  integration doesn't miss the (possibly sharp) peak there.
            try:
                import SloppyCell.misc_c
                integrand = SloppyCell.misc_c.log_gaussian_prior_integrand
            except ImportError:
                logger.warn('Falling back to python integrand on log gaussian '
                            'prior integration.')
                exp = scipy.exp
                def integrand(u, ak, bk, mulB, siglB, T, B_best, lB_best):
                    B = exp(u) * B_best
                    lB = u + lB_best
                    ret = exp(-ak/(2*T) * (B-B_best)**2
                              - (lB-mulB)**2/(2 * siglB**2))
                    return ret

            mulB, siglB = prior_params
            B_best = theoryDotData/theoryDotTheory
            lB_best = scipy.log(B_best)

            # Often we get overflow errors in the integration, but they
            #  don't actually cause a problem. (exp(-inf) is still 0.) This
            #  prevents scipy from printing annoying error messages in this
            #  case.
            prev_err = scipy.seterr(over='ignore')
            int_args = (theoryDotTheory, theoryDotData, mulB, siglB, T, B_best,
                        lB_best)
            ans, temp = scipy.integrate.quad(integrand, -scipy.inf, scipy.inf, 
                                             args = int_args, limit=1000)
            scipy.seterr(**prev_err)
            entropy = scipy.log(ans)
        else:
            raise ValueError('Unrecognized prior type: %s.' % prior_type)

        return entropy
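A minimal standalone sketch of the 'gaussian in log sf' integration with hypothetical parameters, mirroring the pure-Python fallback integrand above; overflow inside the integrand is harmless because exp(-inf) is still 0:

import numpy as np
import scipy.integrate

ak, bk, T = 2.0, 3.0, 1.0          # hypothetical theoryDotTheory, theoryDotData, temperature
mulB, siglB = 0.0, 0.5             # hypothetical prior parameters
B_best = bk / ak
lB_best = np.log(B_best)

def integrand(u):
    B = np.exp(u) * B_best
    lB = u + lB_best
    return np.exp(-ak / (2 * T) * (B - B_best) ** 2
                  - (lB - mulB) ** 2 / (2 * siglB ** 2))

old = np.seterr(over='ignore')     # exp(u) overflows for large u; the integrand still evaluates to 0
ans, err = scipy.integrate.quad(integrand, -np.inf, np.inf, limit=1000)
np.seterr(**old)

print(np.log(ans))                 # the entropy term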
Example #13
def get_interpolated_SD(SD, SAdiff, id0, id1):
    # Select points for interpolation:
    x0 = SD[id0]
    y0 = SAdiff[id0]
    x1 = SD[id1]
    y1 = SAdiff[id1]
    dx = x1 - x0
    dy = y1 - y0

    # interpolate SD
    oldsettings = seterr(invalid='ignore')
    SDcr = x1 - y1 * (dx / dy)
    seterr(**oldsettings)
    return SDcr
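A minimal worked example of the interpolation formula with hypothetical bracketing points where SAdiff changes sign; SDcr is the x-intercept of the straight line through them:

# hypothetical (SD, SAdiff) pairs on either side of the crossing
x0, y0 = 20.0, 0.4
x1, y1 = 30.0, -0.6

dx, dy = x1 - x0, y1 - y0
SDcr = x1 - y1 * (dx / dy)
print(SDcr)   # 24.0: where the line through (x0, y0) and (x1, y1) hits zero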
Example #14
    def _monte_carlo(self, mean, sigma, var_in_last_axis):
        """
        Perform random sampling about mean with sigma.
        self.sample_shape and self._vs() controls the shape of the
        result.
        """
        assert sigma.shape == mean.shape
        variate_site = self._vs(sigma, var_in_last_axis)

        oldsettings = seterr(over='ignore')
        # self.sample_shape and variate_site will have compatible dims
        sample_values = self.val_func(mean[self.sample_shape] + 
                                      variate_site * sigma[self.sample_shape])
        seterr(**oldsettings)
        return sample_values
Example #15
    def test_array_ab_diff(self):
        a = array([7.35287023, 3.98947559, 0.])
        b = array([7.38625883, 3.98947559, 0.])
        oldsettings = seterr(invalid='ignore')
        diff = abs(a - b) / b
        seterr(**oldsettings)
        # diff [ 0.00452037  0.                 NaN]
        # Windows can't handle the NaN,
        # so it has to be set to zero
        diff = nan_to_num(diff)
        # print "diff", diff

        # this would return max_diff -1.#IND if NaN's aren't removed,
        # in windows.
        max_diff = diff.max()
        # print "max_diff", max_diff
        assert max_diff == diff[0]
Example #16
def chi_square(h1, h2):  # 23 us @array, 49 us @list \w 100
    """
    Measure how unlikely it is that one distribution (histogram) was drawn from the
    other. The Chi-square distance between two histograms \f$H\f$ and \f$H'\f$ of size
    \f$m\f$ is defined as
    \f[
        d_{\chi^2}(H, H') = \sum_{m=1}^M
            \frac{
                (H_m - H'_m)^2
            }{
                H_m + H'_m
            }
    \f]
    
    Attributes:
    - semimetric
    
    Attributes for normalized histograms:
    - \f$d(H, H')\in[0, 2]\f$
    - \f$d(H, H) = 0\f$
    - \f$d(H, H') = d(H', H)\f$
    
    Attributes for not-normalized histograms:
    - \f$d(H, H')\in[0, \infty)\f$
    - \f$d(H, H) = 0\f$
    - \f$d(H, H') = d(H', H)\f$
    
    Attributes for not-equal histograms:
    - not applicable     
    
    @param h1 the first histogram
    @type h1 array-like sequence
    @param h2 the second histogram
    @type h2 array-like sequence
    
    @return chi-square distance
    @rtype float    
    """
    h1, h2 = __prepare_histogram(h1, h2)
    old_err_state = scipy.seterr(
        invalid="ignore"
    )  # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = scipy.square(h1 - h2) / (h1 + h2)
    scipy.seterr(**old_err_state)
    result[scipy.isnan(result)] = 0  # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
Example #17
def FillFDplusBE(En_A,T):
	""" fill an array with a som of Bose-Einstein and Fermi-Dirac
	numerically more precise than the sum of the above functions due to a pole in BE """
	N = int((len(En_A)-1)/2)
	sp.seterr(over=  'ignore') ## ignore overflow in sinh
	sp.seterr(divide='ignore') ## we deal with the pole ourselves
	if T == 0.0: FB_A = sp.zeros(len(En_A))
	else:        
		FB_A = 1.0/sp.sinh(En_A/T)
		FB_A[N] = 0.0
	sp.seterr(divide='warn')
	sp.seterr(over=  'warn')
	return FB_A
Example #18
    def test_DLN_monte_carlo2(self):
        # dimensions (2,1,3,4) = 24 elements
        dim = (2,1,3,4)
        count_up = arange(1,24,1)
        log_mean = resize(count_up*10, dim)
        log_sigma = resize(count_up, dim)
        count_up_2 = arange(1,48,2)
        variate = resize(count_up_2, dim)
        var_method = 2

        dist = Distribution_Log_Normal(var_method)
        # Provide a predictable sample
        dist.rvs = lambda size: count_up_2
        sample_values = dist._monte_carlo(log_mean, log_sigma, True)

        oldsettings = seterr(over='ignore')
        actual = exp(log_mean + variate*log_sigma)
        seterr(**oldsettings)
        self.assert_(allclose(sample_values, actual))
        self.assert_(sample_values.shape == dim)
Example #19
def kullback_leibler(h1, h2):  # 83 us @array, 109 us @list \w 100 bins
    """
    Compute how inefficient it would be to code one histogram into another.
    Actually computes \f$\frac{d_{KL}(h1, h2) + d_{KL}(h2, h1)}{2}\f$ to achieve symmetry.
    The Kullback-Leibler divergence between two histograms \f$H\f$ and \f$H'\f$ of size
    \f$m\f$ is defined as
    \f[
        d_{KL}(H, H') = \sum_{m=1}^M H_m\log\frac{H_m}{H'_m}
    \f]
    
    Attributes:
    - quasimetric (but made symmetric)
    
    Attributes for normalized histograms:
    - \f$d(H, H')\in[0, \infty)\f$
    - \f$d(H, H) = 0\f$
    - \f$d(H, H') = d(H', H)\f$
    
    Attributes for not-normalized histograms:
    - not applicable
    
    Attributes for not-equal histograms:
    - not applicable
        
    @param h1 the first histogram, where h1[i] > 0 for any i such that h2[i] > 0, normalized
    @type h1 array-like sequence
    @param h2 the second histogram, where h2[i] > 0 for any i such that h1[i] > 0, normalized, same bins as h1
    @type h2 array-like sequence
    
    @return Kullback-Leibler divergence
    @rtype float
    """
    old_err_state = scipy.seterr(divide="raise")
    try:
        h1, h2 = __prepare_histogram(h1, h2)
        result = (__kullback_leibler(h1, h2) + __kullback_leibler(h2, h1)) / 2.0
        scipy.seterr(**old_err_state)
        return result
    except FloatingPointError:
        scipy.seterr(**old_err_state)
        raise ValueError("h1 can only contain zero values where h2 also contains zero values and vice-versa")
Example #20
    def _propagate_wavenumbers(self, wavenumbers, distances):
        """Calculate the wavenumber offset, delta.

        Arguments
        ---------
        wavenumbers : array
            The wavevector
        distances : array
            An array of thicknesses, ordered from source to terminator

        Returns
        -------
        delta : array
            The phase difference
        """
        # Turn off 'invalid multiplication' error;
        # It's just the 'inf' boundaries.
        olderr = sp.seterr(invalid='ignore')
        deltas = wavenumbers*distances
        # Now turn the error back on
        sp.seterr(**olderr)
        return deltas
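A minimal standalone sketch with hypothetical wavenumbers and layer thicknesses; infinite source/terminator thicknesses produce the invalid products (e.g. 0 * inf) that the example silences:

import numpy as np

wavenumbers = np.array([0.0 + 0.0j, 2.1 + 0.0j, 1.3 + 0.5j])
distances = np.array([np.inf, 100.0, np.inf])   # source and terminator are infinitely thick

olderr = np.seterr(invalid='ignore')
deltas = wavenumbers * distances                # 0 * inf gives NaN without warning
np.seterr(**olderr)
print(deltas)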
Example #21
    def test_cumulative_state_probability(self):
        blocking_block_comments = True
        """Test that cumulative_state_probability works the same way
        as matlab function.

        Test that reduce_cumulative_to_pdf 'looks' right - it should
        look a bit like a bell curve - zero at ends with a max
        somewhere in the middle, and sum=one.
        """

        # test against matlab implementation:
        beta = 0.4
        value = 5.0
        
        threshold = array((0.000001, 0.00001, 1, 1.5, 2, 3, 4, 5, 10, 100, 1000))

        oldsettings = seterr(divide='ignore')
        x = (1/beta)*log(value/threshold)
        seterr(**oldsettings)
        # matlab:
        # Pr11 = normcdf2(1/THE_VUN_T.('beta_nsd_d')*log(SDcrAll./Thresh))

        root2 = sqrt(2)
        y = 0.5*(1 + erf(x/root2))
        # matlab:
        # root2 = sqrt(2);
        # y = 0.5*(1+erf(x/root2))
        y2 = cumulative_state_probability(threshold, beta, value)
        assert allclose(y, y2)

        reduce_cumulative_to_pdf(y)

        # y should now look a bit  like a bell curve - zero at ends
        # increasing to the middle, sum=one.
        assert (y.sum() == 1.0)
        assert y[0] == 0
        assert y[1] < y[2]
        assert y[-3] > y[-2]
Example #22
    def bic(self):
        '''() -> ndarray
        Calculates the Bayesian Information Criterion for all models in
        the range.
        '''
        seterr('ignore')
        temp = []
        # set minimization function to work with the Powell method
        f_temp = lambda x :-self.lik(x)
        #find maximum likelihood for all models
        for i in range(self._min_order,self._max_order):
            #gen params
            param = self.initalize_param(i)[0]
            param = fmin_powell(f_temp, param ,disp=False)
            temp.append([i,nu.copy(param),self.lik(param)])

        #calculate bic
        # BIC, number of gaussians
        out = nu.zeros((len(temp),2))
        for i,j in enumerate(temp):
            out[i,1] = j[2] + j[0]**2*nu.log(self.data.shape[0])
            out[i,0] = j[0]

        return out
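For comparison, a minimal sketch of the textbook BIC formula with hypothetical numbers; note the method above applies its own penalty (n_params**2 * ln(n)) to its lik() values rather than this standard form:

import numpy as np

def bic_score(log_likelihood, n_params, n_samples):
    # textbook Bayesian Information Criterion: k*ln(n) - 2*ln(Lhat)
    return n_params * np.log(n_samples) - 2.0 * log_likelihood

print(bic_score(log_likelihood=-120.3, n_params=3, n_samples=500))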
Example #23
def resolve_collision(A, B, time):
    """Detects and acts upon any collisions in the specified time interval between two entities
    :param A: First entity
    :param B: Second entity
    :param time: time interval over which to check if any collisions occur
    :return: None if no collision will occur in the timeframe, the two collided entities if there is a collision
    """
    # general overview of this function:
    # 1. find if the objects will collide in the given time
    # 2. if yes, calculate collision:
    # 2.1 represent velocities as normal velocity and tangential velocity
    # 2.2 do a 1D collision using the normal velocity
    # 2.3 add the normal and tangential velocities to get the new velocity

    scipy.seterr(divide="raise", invalid="raise")

    # for this function I make one of the objects the frame of reference
    # which means my calculations are much simplified
    displacement = A.displacement - B.displacement
    velocity = A.velocity - B.velocity
    acceleration = A.acceleration - B.acceleration
    radius_sum = A.radius + B.radius

    # this code finds when the two entities will collide. See
    # http://www.gvu.gatech.edu/people/official/jarek/graphics/material/collisionsDeshpandeKharsikarPrabhu.pdf
    # for how I got the algorithm
    a = magnitude(velocity, m / s)**2
    b = m**2 / s * 2 * scipy.dot(displacement.asNumber(m),
                                 velocity.asNumber(m / s))
    c = magnitude(displacement, m)**2 - radius_sum**2

    try:
        t_to_impact = \
            (-b - m**2/s * math.sqrt((b**2 - 4*a*c).asNumber(m**4/s**2))) / (2 * a)
    except:
        return

    if not scipy.isfinite(t_to_impact.asNumber(s)):
        return

    if t_to_impact > time or t_to_impact < 0 * s:
        return

    # at this point, we know there is a collision
    print("Collision:", A.name, "and", B.name, "in", t_to_impact)

    # for this section, basically turn the vectors into normal velocity and tangential velocity,
    # then do a 1D collision calculation, using the normal velocities
    # since a ' (prime symbol) wouldn't work, I've replaced it with a _ in variable names

    n = displacement  # normal vector
    un = n / magnitude(n, m)  # normal unit vector
    unt = copy.deepcopy(un)  # normal tangent vector
    unt[0], unt[1] = -unt[1], unt[0]  # of course the tangent is orthogonal to the normal

    # A's centripetal velocity
    vAn = m / s * scipy.dot(un.asNumber(), A.velocity.asNumber(m / s))
    # A's tangential velocity
    vAt = m / s * scipy.dot(unt.asNumber(), A.velocity.asNumber(m / s))

    # B's centripetal velocity
    vBn = m / s * scipy.dot(un.asNumber(), B.velocity.asNumber(m / s))
    # B's tangential velocity
    vBt = m / s * scipy.dot(unt.asNumber(), B.velocity.asNumber(m / s))

    # tangent velocities are unchanged, nothing happens to them
    vAt_ = vAt
    vBt_ = vBt

    # centripetal velocities are calculated with a simple 1D collision formula
    R = 0.1

    vAn_ = \
        (A.mass_fun() * vAn + B.mass_fun() * vBn + R * B.mass_fun() * (B.velocity - A.velocity)) / \
        (A.mass_fun() + B.mass_fun())

    vBn_ = \
        (A.mass_fun() * vAn + B.mass_fun() * vBn + R * A.mass_fun() * (A.velocity - B.velocity)) / \
        (A.mass_fun() + B.mass_fun())

    # convert scalar normal and tangent velocities to vector quantities
    VAn = vAn_ * un
    VAt = vAt_ * unt

    VBn = vBn_ * un
    VBt = vBt_ * unt

    # move until the point of impact
    A.move(t_to_impact)
    B.move(t_to_impact)

    # add em up to get v'
    A.velocity = VAn + VAt
    B.velocity = VBn + VBt

    # move for the rest of the frame
    A.move(time - t_to_impact)
    B.move(time - t_to_impact)

    return [A.name, B.name]
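A minimal plain-float sketch of the time-to-impact quadratic used above, without the units library; the relative displacement, velocity and radii are hypothetical:

import math

# |d + v*t| = r1 + r2  =>  a*t^2 + b*t + c = 0 with
#   a = |v|^2,  b = 2*d.v,  c = |d|^2 - (r1 + r2)^2
dx, dy = 10.0, 0.0        # relative displacement
vx, vy = -2.0, 0.0        # relative velocity (closing)
radius_sum = 3.0

a = vx * vx + vy * vy
b = 2.0 * (dx * vx + dy * vy)
c = dx * dx + dy * dy - radius_sum ** 2

disc = b * b - 4.0 * a * c
t_to_impact = (-b - math.sqrt(disc)) / (2.0 * a) if disc >= 0 else None
print(t_to_impact)        # 3.5: the earlier root, i.e. first contact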
Example #24
def plot_params(args):
    """Plot alpha, theta, and the emission probabilities"""
    old_err = sp.seterr(under='ignore')
    oldsize = matplotlib.rcParams['font.size']
    K, L = args.emit_probs.shape if not args.continuous_observations else args.means.shape

    # alpha
    #matplotlib.rcParams['font.size'] = 12
    pyplot.figure()
    _, xedges, yedges = sp.histogram2d([0, K], [0, K], bins=[K, K])
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    pyplot.imshow(args.alpha.astype(sp.float64),
                  extent=extent,
                  interpolation='nearest',
                  vmin=0,
                  vmax=1,
                  cmap='OrRd',
                  origin='lower')
    pyplot.xticks(sp.arange(K) + .5, sp.arange(K) + 1)
    pyplot.gca().set_xticks(sp.arange(K) + 1, minor=True)
    pyplot.yticks(sp.arange(K) + .5, sp.arange(K) + 1)
    pyplot.gca().set_yticks(sp.arange(K) + 1, minor=True)
    pyplot.grid(which='minor', alpha=.2)
    for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca(
    ).xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(
            minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
        # label is a Text instance
        line.set_markersize(0)
    pyplot.ylabel('Horizontal parent state')
    pyplot.xlabel('Node state')
    pyplot.title(
        r"Top root transition ($\alpha$) for {approx} iteration {iteration}".
        format(approx=args.approx, iteration=args.iteration))
    b = pyplot.colorbar(shrink=.9)
    b.set_label("Probability")
    outfile = (args.out_params + '_it{iteration}.png').format(param='alpha',
                                                              **args.__dict__)
    pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    # beta
    pyplot.figure()
    _, xedges, yedges = sp.histogram2d([0, K], [0, K], bins=[K, K])
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    pyplot.clf()
    pyplot.imshow(args.beta.astype(sp.float64),
                  extent=extent,
                  interpolation='nearest',
                  vmin=0,
                  vmax=1,
                  cmap='OrRd',
                  origin='lower')
    pyplot.xticks(sp.arange(K) + .5, sp.arange(K) + 1)
    pyplot.gca().set_xticks(sp.arange(K) + 1, minor=True)
    pyplot.yticks(sp.arange(K) + .5, sp.arange(K) + 1)
    pyplot.gca().set_yticks(sp.arange(K) + 1, minor=True)
    pyplot.grid(which='minor', alpha=.2)
    for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca(
    ).xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(
            minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
        # label is a Text instance
        line.set_markersize(0)
    pyplot.ylabel('Vertical parent state')
    pyplot.xlabel('Node state')
    pyplot.title(
        r"Left root transition ($\beta$) for {approx} iteration {iteration}".
        format(approx=args.approx, iteration=args.iteration))
    b = pyplot.colorbar(shrink=.9)
    b.set_label("Probability")
    outfile = (args.out_params + '_it{iteration}.png').format(param='beta',
                                                              **args.__dict__)
    pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    # theta
    if args.separate_theta:
        theta_tmp = args.theta
        for i in range((args.theta.shape)[0]):
            setattr(args, 'theta_%s' % (i + 1), args.theta[i, :, :, :])

    for theta_name in ['theta'] + ['theta_%s' % i for i in range(20)]:
        #print 'trying', theta_name
        if not hasattr(args, theta_name):
            #print 'missing', theta_name
            continue
        _, xedges, yedges = sp.histogram2d([0, K], [0, K], bins=[K, K])
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        if K == 18:
            numx_plots = 6
            numy_plots = 3
        elif K == 15:
            numx_plots = 5
            numy_plots = 3
        else:
            numx_plots = int(ceil(sp.sqrt(K)))
            numy_plots = int(ceil(sp.sqrt(K)))
        matplotlib.rcParams['font.size'] = 8
        fig, axs = pyplot.subplots(numy_plots,
                                   numx_plots,
                                   sharex=True,
                                   sharey=True,
                                   figsize=(numx_plots * 2.5,
                                            numy_plots * 2.5))
        for k in xrange(K):
            pltx, plty = k // numx_plots, k % numx_plots
            #axs[pltx,plty].imshow(args.theta[k,:,:], extent=extent, interpolation='nearest',
            axs[pltx,
                plty].imshow(getattr(args,
                                     theta_name)[:, k, :].astype(sp.float64),
                             extent=extent,
                             interpolation='nearest',
                             vmin=0,
                             vmax=1,
                             cmap='OrRd',
                             aspect='auto',
                             origin='lower')
            #if k < numx_plots:
            #axs[pltx,plty].text(0 + .5, K - .5, 'vp=%s' % (k+1), horizontalalignment='left', verticalalignment='top', fontsize=10)
            axs[pltx, plty].text(0 + .5,
                                 K - .5,
                                 'hp=%s' % (k + 1),
                                 horizontalalignment='left',
                                 verticalalignment='top',
                                 fontsize=10)
            #axs[pltx,plty].xticks(sp.arange(K) + .5, sp.arange(K))
            #axs[pltx,plty].yticks(sp.arange(K) + .5, sp.arange(K))
            axs[pltx, plty].set_xticks(sp.arange(K) + .5)
            axs[pltx, plty].set_xticks(sp.arange(K) + 1, minor=True)
            axs[pltx, plty].set_xticklabels(sp.arange(K) + 1)
            axs[pltx, plty].set_yticks(sp.arange(K) + .5)
            axs[pltx, plty].set_yticks(sp.arange(K) + 1, minor=True)
            axs[pltx, plty].set_yticklabels(sp.arange(K) + 1)
            for line in axs[pltx, plty].yaxis.get_ticklines() + axs[
                    pltx, plty].xaxis.get_ticklines() + axs[
                        pltx, plty].yaxis.get_ticklines(
                            minor=True) + axs[pltx, plty].xaxis.get_ticklines(
                                minor=True):
                line.set_markersize(0)
            axs[pltx, plty].grid(True, which='minor', alpha=.2)

        #fig.suptitle(r"$\Theta$ with fixed parents for {approx} iteration {iteration}".
        #                    format(approx=args.approx, iteration=args.iteration),
        #                    fontsize=14, verticalalignment='top')
        fig.suptitle('Node state',
                     y=.03,
                     fontsize=14,
                     verticalalignment='center')
        #fig.suptitle('Horizontal parent state', y=.5, x=.02, rotation=90,
        fig.suptitle('Vertical parent state',
                     y=.5,
                     x=.02,
                     rotation=90,
                     verticalalignment='center',
                     fontsize=14)
        matplotlib.rcParams['font.size'] = 6.5
        fig.subplots_adjust(wspace=.05, hspace=.05, left=.05, right=.95)
        #b = fig.colorbar(shrink=.9)
        #b.set_label("Probability")
        outfile = (args.out_params + '_vertparent_it{iteration}.png').format(
            param=theta_name, **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

        fig, axs = pyplot.subplots(numy_plots,
                                   numx_plots,
                                   sharex=True,
                                   sharey=True,
                                   figsize=(numx_plots * 2.5,
                                            numy_plots * 2.5))
        for k in xrange(K):
            pltx, plty = k // numx_plots, k % numx_plots
            axs[pltx, plty].imshow(
                getattr(args, theta_name)[k, :, :].astype(sp.float64),
                extent=extent,
                interpolation='nearest',
                #axs[pltx,plty].imshow(args.theta[:,k,:], extent=extent, interpolation='nearest',
                vmin=0,
                vmax=1,
                cmap='OrRd',
                aspect='auto',
                origin='lower')
            #if k < numx_plots:
            axs[pltx, plty].text(0 + .5,
                                 K - .5,
                                 'vp=%s' % (k + 1),
                                 horizontalalignment='left',
                                 verticalalignment='top',
                                 fontsize=10)
            #axs[pltx,plty].xticks(sp.arange(K) + .5, sp.arange(K))
            #axs[pltx,plty].yticks(sp.arange(K) + .5, sp.arange(K))
            axs[pltx, plty].set_xticks(sp.arange(K) + .5)
            axs[pltx, plty].set_xticks(sp.arange(K) + 1, minor=True)
            axs[pltx, plty].set_xticklabels(sp.arange(K) + 1)
            axs[pltx, plty].set_yticks(sp.arange(K) + .5)
            axs[pltx, plty].set_yticks(sp.arange(K) + 1, minor=True)
            axs[pltx, plty].set_yticklabels(sp.arange(K) + 1)
            for line in axs[pltx, plty].yaxis.get_ticklines() + axs[
                    pltx, plty].xaxis.get_ticklines() + axs[
                        pltx, plty].yaxis.get_ticklines(
                            minor=True) + axs[pltx, plty].xaxis.get_ticklines(
                                minor=True):
                line.set_markersize(0)
            axs[pltx, plty].grid(True, which='minor', alpha=.2)

        #fig.suptitle(r"$\Theta$ with fixed parents for {approx} iteration {iteration}".
        #                    format(approx=args.approx, iteration=args.iteration),
        #                    fontsize=14, verticalalignment='top')
        fig.suptitle('Node state',
                     y=.03,
                     fontsize=14,
                     verticalalignment='center')
        fig.suptitle(
            'Horizontal parent state',
            y=.5,
            x=.02,
            rotation=90,
            #fig.suptitle('Vertical parent state', y=.5, x=.02, rotation=90,
            verticalalignment='center',
            fontsize=14)
        matplotlib.rcParams['font.size'] = 6.5
        fig.subplots_adjust(wspace=.05, hspace=.05, left=.05, right=.95)
        #b = fig.colorbar(shrink=.9)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(
            param=theta_name, **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    # emission probabilities
    if args.continuous_observations:
        # plot mean values
        matplotlib.rcParams['font.size'] = 8
        pyplot.figure(figsize=(max(1, round(L / 3.)), max(1, round(K / 3.))))
        print(max(1, round(L / 3.)), max(1, round(K / 3.)))
        pyplot.imshow(args.means.astype(sp.float64),
                      interpolation='nearest',
                      aspect='auto',
                      vmin=0,
                      vmax=args.means.max(),
                      cmap='OrRd',
                      origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l,
                            k,
                            '%.1f' % (args.means[k, l]),
                            horizontalalignment='center',
                            verticalalignment='center',
                            fontsize=5)
        pyplot.yticks(sp.arange(K), sp.arange(K) + 1)
        pyplot.gca().set_yticks(sp.arange(K) + .5, minor=True)
        pyplot.xticks(sp.arange(L),
                      valid_marks,
                      rotation=30,
                      horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L) + .5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca(
        ).xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(
                minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
            # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission Mean")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(
            param='emission_means', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

        # plot variances
        pyplot.figure(figsize=(max(1, round(L / 3.)), max(1, round(K / 3.))))
        print(L / 3, K / 3.)
        pyplot.imshow(args.variances.astype(sp.float64),
                      interpolation='nearest',
                      aspect='auto',
                      vmin=0,
                      vmax=args.variances.max(),
                      cmap='OrRd',
                      origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l,
                            k,
                            '%.1f' % (args.variances[k, l]),
                            horizontalalignment='center',
                            verticalalignment='center',
                            fontsize=5)
        pyplot.yticks(sp.arange(K), sp.arange(K) + 1)
        pyplot.gca().set_yticks(sp.arange(K) + .5, minor=True)
        pyplot.xticks(sp.arange(L),
                      valid_marks,
                      rotation=30,
                      horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L) + .5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca(
        ).xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(
                minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
            # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission Variance")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(
            param='emission_variances', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)
    else:
        matplotlib.rcParams['font.size'] = 8
        pyplot.figure(figsize=(max(1, round(L / 3.)), max(1, round(K / 3.))))
        print(L / 3, K / 3.)
        pyplot.imshow(args.emit_probs.astype(sp.float64),
                      interpolation='nearest',
                      aspect='auto',
                      vmin=0,
                      vmax=1,
                      cmap='OrRd',
                      origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l,
                            k,
                            '%2.0f' % (args.emit_probs[k, l] * 100),
                            horizontalalignment='center',
                            verticalalignment='center')
        pyplot.yticks(sp.arange(K), sp.arange(K) + 1)
        pyplot.gca().set_yticks(sp.arange(K) + .5, minor=True)
        pyplot.xticks(sp.arange(L),
                      valid_marks,
                      rotation=30,
                      horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L) + .5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca(
        ).xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(
                minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
            # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission probabilities")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(
            param='emission', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    #broad_paper_enrichment = sp.array([[16,2,2,6,17,93,99,96,98,2],
    #                               [12,2,6,9,53,94,95,14,44,1],
    #                               [13,72,0,9,48,78,49,1,10,1],
    #                               [11,1,15,11,96,99,75,97,86,4],
    #                               [5,0,10,3,88,57,5,84,25,1],
    #                               [7,1,1,3,58,75,8,6,5,1],
    #                               [2,1,2,1,56,3,0,6,2,1],
    #                               [92,2,1,3,6,3,0,0,1,1],
    #                               [5,0,43,43,37,11,2,9,4,1],
    #                               [1,0,47,3,0,0,0,0,0,1],
    #                               [0,0,3,2,0,0,0,0,0,0],
    #                               [1,27,0,2,0,0,0,0,0,0],
    #                               [0,0,0,0,0,0,0,0,0,0],
    #                               [22,28,19,41,6,5,26,5,13,37],
    #                               [85,85,91,88,76,77,91,73,85,78],
    #                               [float('nan'), float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan')]
    #                            ]) / 100.
    #mapping_from_broad = dict(zip(range(K), (5,2,0,14,4,6,9,1,12,-1,3,12,8,7,10,12,11,13)))
    #broad_paper_enrichment = broad_paper_enrichment[tuple(mapping_from_broad[i] for i in range(K)), :]
    #broad_names = ['Active promoter', 'Weak promoter', 'Inactive/poised promoter', 'Strong enhancer',
    #               'Strong enhancer', 'weak/poised enhancer', 'Weak/poised enhancer', 'Insulator',
    #               'Transcriptional transition', 'Transcriptional elongation', 'Weak transcribed',
    #               'Polycomb repressed', 'Heterochrom; low signal', 'Repetitive/CNV', 'Repetitive/CNV',
    #               'NA', 'NA', 'NA']
    #pyplot.figure(figsize=(L/3,K/3.))
    #print (L/3,K/3.)
    #pyplot.imshow(broad_paper_enrichment, interpolation='nearest', aspect='auto',
    #              vmin=0, vmax=1, cmap='OrRd', origin='lower')
    #for k in range(K):
    #    for l in range(L):
    #        pyplot.text(l, k, '%2.0f' % (broad_paper_enrichment[k,l] * 100), horizontalalignment='center', verticalalignment='center')
    #    pyplot.text(L, k, broad_names[mapping_from_broad[k]], horizontalalignment='left', verticalalignment='center', fontsize=6)
    #pyplot.yticks(sp.arange(K), sp.arange(K)+1)
    #pyplot.gca().set_yticks(sp.arange(K)+.5, minor=True)
    #pyplot.xticks(sp.arange(L), valid_marks, rotation=30, horizontalalignment='right')
    #pyplot.gca().set_xticks(sp.arange(L)+.5, minor=True)
    #pyplot.grid(which='minor', alpha=.2)
    #for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
    ## label is a Text instance
    #    line.set_markersize(0)
    #pyplot.ylabel('Hidden State')
    #pyplot.title("Broad paper Emission probabilities")
    ##b = pyplot.colorbar(shrink=.7)
    ##b.set_label("Probability")
    #pyplot.subplots_adjust(right=.7)
    #outfile = (args.out_params + '_broadpaper.png').format(param='emission', **args.__dict__)
    #pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    pyplot.close('all')
    sp.seterr(**old_err)
    matplotlib.rcParams['font.size'] = oldsize
Example #25
def herm_sqrt_inv(x, zero_tol=1E-15, sanity_checks=False, return_rank=False, sc_data=''):
    if isinstance(x,  mm.eyemat):
        x_sqrt = x
        x_sqrt_i = x
        rank = x.shape[0]
    else:
        try:
            ev = x.diag #simple_diag_matrix
            EV = None
        except AttributeError:
            ev, EV = la.eigh(x)
        
        zeros = ev <= zero_tol #throw away negative results too!
        
        ev_sqrt = sp.sqrt(ev)
        
        err = sp.seterr(divide='ignore', invalid='ignore')
        try:
            ev_sqrt_i = 1 / ev_sqrt
            ev_sqrt[zeros] = 0
            ev_sqrt_i[zeros] = 0
        finally:
            sp.seterr(divide=err['divide'], invalid=err['invalid'])
        
        if EV is None:
            x_sqrt = mm.simple_diag_matrix(ev_sqrt, dtype=x.dtype)
            x_sqrt_i = mm.simple_diag_matrix(ev_sqrt_i, dtype=x.dtype)
        else:
            B = mm.mmul_diag(ev_sqrt, EV.conj().T)
            x_sqrt = EV.dot(B)
            
            B = mm.mmul_diag(ev_sqrt_i, EV.conj().T)
            x_sqrt_i = EV.dot(B)
            
        rank = x.shape[0] - np.count_nonzero(zeros)
        
        if sanity_checks:
            if ev.min() < -zero_tol:
                log.warning("Sanity Fail in herm_sqrt_inv(): Throwing away negative eigenvalues! %s %s",
                            ev.min(), sc_data)
            
            if not np.allclose(x_sqrt.dot(x_sqrt), x):
                log.warning("Sanity Fail in herm_sqrt_inv(): x_sqrt is bad! %s %s",
                            la.norm(x_sqrt.dot(x_sqrt) - x), sc_data)
            
            if EV is None: 
                nulls = sp.zeros(x.shape[0])
                nulls[zeros] = 1
                nulls = sp.diag(nulls)
            else: #if we did an EVD then we use the eigenvectors
                nulls = EV.copy()
                nulls[:, sp.invert(zeros)] = 0
                nulls = nulls.dot(nulls.conj().T)
                
            eye = np.eye(x.shape[0])
            if not np.allclose(x_sqrt.dot(x_sqrt_i), eye - nulls):
                log.warning("Sanity Fail in herm_sqrt_inv(): x_sqrt_i is bad! %s %s",
                            la.norm(x_sqrt.dot(x_sqrt_i) - eye + nulls), sc_data)
    
    if return_rank:
        return x_sqrt, x_sqrt_i, rank
    else:
        return x_sqrt, x_sqrt_i
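A minimal standalone sketch of the dense-matrix branch: eigendecompose, zero the (near-)null eigenvalues, and rebuild the square root and its pseudo-inverse; the matrix and tolerance are hypothetical:

import numpy as np
import scipy.linalg as la

# positive semi-definite Hermitian matrix with one (numerically) zero eigenvalue
A = np.array([[2.0, 1.0, 0.0],
              [1.0, 2.0, 0.0],
              [0.0, 0.0, 0.0]])
zero_tol = 1e-15

ev, EV = la.eigh(A)
zeros = ev <= zero_tol                      # throw away negative results too

ev_sqrt = np.sqrt(np.clip(ev, 0, None))
with np.errstate(divide='ignore', invalid='ignore'):
    ev_sqrt_i = 1.0 / ev_sqrt
ev_sqrt[zeros] = 0
ev_sqrt_i[zeros] = 0                        # pseudo-inverse: drop the null space

A_sqrt = (EV * ev_sqrt).dot(EV.conj().T)    # EV diag(sqrt(ev)) EV^H
A_sqrt_i = (EV * ev_sqrt_i).dot(EV.conj().T)

print(np.allclose(A_sqrt.dot(A_sqrt), A))   # True
print(np.linalg.matrix_rank(A_sqrt_i))      # 2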
Example #26
def main():
    # This configures numpy/scipy to raise an exception in case of errors, instead of printing a warning and going ahead.
    numpy.seterr(all='raise')
    scipy.seterr(all='raise')

    parser = argparse.ArgumentParser(
        description='Runs a set of benchmarks defined in a YAML file.')
    parser.add_argument(
        '--fruit-benchmark-sources-dir',
        help='Path to the fruit sources (used for benchmarking code only)')
    parser.add_argument('--fruit-sources-dir',
                        help='Path to the fruit sources')
    parser.add_argument('--boost-di-sources-dir',
                        help='Path to the Boost.DI sources')
    parser.add_argument(
        '--output-file',
        help=
        'The output file where benchmark results will be stored (1 per line, with each line in JSON format). These can then be formatted by e.g. the format_bench_results script.'
    )
    parser.add_argument(
        '--benchmark-definition',
        help=
        'The YAML file that defines the benchmarks (see fruit_wiki_benchs_fruit.yml for an example).'
    )
    parser.add_argument(
        '--continue-benchmark',
        help=
        'If this is \'true\', continues a previous benchmark run instead of starting from scratch (taking into account the existing benchmark results in the file specified with --output-file).'
    )
    args = parser.parse_args()

    if args.output_file is None:
        raise Exception('You must specify --output_file')
    if args.continue_benchmark == 'true':
        try:
            with open(args.output_file, 'r') as f:
                previous_run_completed_benchmarks = [
                    json.loads(line)['benchmark'] for line in f.readlines()
                ]
        except FileNotFoundError:
            previous_run_completed_benchmarks = []
    else:
        previous_run_completed_benchmarks = []
        run_command('rm', args=['-f', args.output_file])

    fruit_build_dir = tempfile.gettempdir() + '/fruit-benchmark-build-dir'

    with open(args.benchmark_definition, 'r') as f:
        yaml_file_content = yaml.full_load(f)
        global_definitions = yaml_file_content['global']
        benchmark_definitions = expand_benchmark_definitions(
            yaml_file_content['benchmarks'])

    benchmark_index = 0

    for (compiler_executable_name, additional_cmake_args), benchmark_definitions_with_current_config \
            in group_by(benchmark_definitions,
                        lambda benchmark_definition:
                            (benchmark_definition['compiler'], tuple(benchmark_definition['additional_cmake_args']))):

        print(
            'Preparing for benchmarks with the compiler %s, with additional CMake args %s'
            % (compiler_executable_name, additional_cmake_args))
        try:
            # We compute this here (and memoize the result) so that the benchmark's describe() will retrieve the cached
            # value instantly.
            determine_compiler_name(compiler_executable_name)

            # Build Fruit in fruit_build_dir, so that fruit_build_dir points to a built Fruit (useful for e.g. the config header).
            shutil.rmtree(fruit_build_dir, ignore_errors=True)
            os.makedirs(fruit_build_dir)
            modified_env = os.environ.copy()
            modified_env['CXX'] = compiler_executable_name
            run_command('cmake',
                        args=[
                            args.fruit_sources_dir,
                            '-DCMAKE_BUILD_TYPE=Release',
                            *additional_cmake_args,
                        ],
                        cwd=fruit_build_dir,
                        env=modified_env)
            run_command('make', args=make_args, cwd=fruit_build_dir)
        except Exception as e:
            print(
                'Exception while preparing for benchmarks with the compiler %s, with additional CMake args %s.\n%s\nGoing ahead with the rest.'
                % (compiler_executable_name, additional_cmake_args,
                   traceback.format_exc()))
            continue

        for benchmark_definition in benchmark_definitions_with_current_config:
            benchmark_index += 1
            print('%s/%s: %s' % (benchmark_index, len(benchmark_definitions),
                                 benchmark_definition))
            benchmark_name = benchmark_definition['name']

            if (benchmark_name in {
                    'boost_di_compile_time', 'boost_di_run_time',
                    'boost_di_executable_size'
            } and args.boost_di_sources_dir is None):
                raise Exception(
                    'Error: you need to specify the --boost-di-sources-dir flag in order to run Boost.DI benchmarks.'
                )

            if benchmark_name == 'new_delete_run_time':
                benchmark = SimpleNewDeleteRunTimeBenchmark(
                    benchmark_definition,
                    fruit_benchmark_sources_dir=args.
                    fruit_benchmark_sources_dir)
            elif benchmark_name == 'fruit_single_file_compile_time':
                benchmark = FruitSingleFileCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_benchmark_sources_dir=args.
                    fruit_benchmark_sources_dir,
                    fruit_build_dir=fruit_build_dir)
            elif benchmark_name.startswith('fruit_'):
                benchmark_class = {
                    'fruit_compile_time':
                    FruitCompileTimeBenchmark,
                    'fruit_incremental_compile_time':
                    FruitIncrementalCompileTimeBenchmark,
                    'fruit_compile_memory':
                    FruitCompileMemoryBenchmark,
                    'fruit_run_time':
                    FruitRunTimeBenchmark,
                    'fruit_startup_time':
                    FruitStartupTimeBenchmark,
                    'fruit_startup_time_with_normalized_component':
                    FruitStartupTimeWithNormalizedComponentBenchmark,
                    'fruit_executable_size':
                    FruitExecutableSizeBenchmark,
                    'fruit_executable_size_without_exceptions_and_rtti':
                    FruitExecutableSizeBenchmarkWithoutExceptionsAndRtti,
                }[benchmark_name]
                benchmark = benchmark_class(
                    benchmark_definition=benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_dir=fruit_build_dir)
            elif benchmark_name.startswith('boost_di_'):
                benchmark_class = {
                    'boost_di_compile_time':
                    BoostDiCompileTimeBenchmark,
                    'boost_di_incremental_compile_time':
                    BoostDiIncrementalCompileTimeBenchmark,
                    'boost_di_compile_memory':
                    BoostDiCompileMemoryBenchmark,
                    'boost_di_run_time':
                    BoostDiRunTimeBenchmark,
                    'boost_di_startup_time':
                    BoostDiStartupTimeBenchmark,
                    'boost_di_executable_size':
                    BoostDiExecutableSizeBenchmark,
                    'boost_di_executable_size_without_exceptions_and_rtti':
                    BoostDiExecutableSizeBenchmarkWithoutExceptionsAndRtti,
                }[benchmark_name]
                benchmark = benchmark_class(
                    benchmark_definition=benchmark_definition,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name.startswith('simple_di_'):
                benchmark_class = {
                    'simple_di_compile_time':
                    SimpleDiCompileTimeBenchmark,
                    'simple_di_incremental_compile_time':
                    SimpleDiIncrementalCompileTimeBenchmark,
                    'simple_di_compile_memory':
                    SimpleDiCompileMemoryBenchmark,
                    'simple_di_run_time':
                    SimpleDiRunTimeBenchmark,
                    'simple_di_startup_time':
                    SimpleDiStartupTimeBenchmark,
                    'simple_di_executable_size':
                    SimpleDiExecutableSizeBenchmark,
                    'simple_di_executable_size_without_exceptions_and_rtti':
                    SimpleDiExecutableSizeBenchmarkWithoutExceptionsAndRtti,
                    'simple_di_with_interfaces_compile_time':
                    SimpleDiWithInterfacesCompileTimeBenchmark,
                    'simple_di_with_interfaces_incremental_compile_time':
                    SimpleDiWithInterfacesIncrementalCompileTimeBenchmark,
                    'simple_di_with_interfaces_compile_memory':
                    SimpleDiWithInterfacesCompileMemoryBenchmark,
                    'simple_di_with_interfaces_run_time':
                    SimpleDiWithInterfacesRunTimeBenchmark,
                    'simple_di_with_interfaces_startup_time':
                    SimpleDiWithInterfacesStartupTimeBenchmark,
                    'simple_di_with_interfaces_executable_size':
                    SimpleDiWithInterfacesExecutableSizeBenchmark,
                    'simple_di_with_interfaces_executable_size_without_exceptions_and_rtti':
                    SimpleDiWithInterfacesExecutableSizeBenchmarkWithoutExceptionsAndRtti,
                    'simple_di_with_interfaces_and_new_delete_compile_time':
                    SimpleDiWithInterfacesAndNewDeleteCompileTimeBenchmark,
                    'simple_di_with_interfaces_and_new_delete_incremental_compile_time':
                    SimpleDiWithInterfacesAndNewDeleteIncrementalCompileTimeBenchmark,
                    'simple_di_with_interfaces_and_new_delete_compile_memory':
                    SimpleDiWithInterfacesAndNewDeleteCompileMemoryBenchmark,
                    'simple_di_with_interfaces_and_new_delete_run_time':
                    SimpleDiWithInterfacesAndNewDeleteRunTimeBenchmark,
                    'simple_di_with_interfaces_and_new_delete_startup_time':
                    SimpleDiWithInterfacesAndNewDeleteStartupTimeBenchmark,
                    'simple_di_with_interfaces_and_new_delete_executable_size':
                    SimpleDiWithInterfacesAndNewDeleteExecutableSizeBenchmark,
                    'simple_di_with_interfaces_and_new_delete_executable_size_without_exceptions_and_rtti':
                    SimpleDiWithInterfacesAndNewDeleteExecutableSizeBenchmarkWithoutExceptionsAndRtti,
                }[benchmark_name]
                benchmark = benchmark_class(
                    benchmark_definition=benchmark_definition)
            else:
                raise Exception("Unrecognized benchmark: %s" % benchmark_name)

            if benchmark.describe() in previous_run_completed_benchmarks:
                print(
                    "Skipping benchmark that was already run previously (due to --continue-benchmark):",
                    benchmark.describe())
                continue

            try:
                run_benchmark(benchmark,
                              output_file=args.output_file,
                              max_runs=global_definitions['max_runs'],
                              timeout_hours=global_definitions[
                                  'max_hours_per_combination'])
            except Exception as e:
                print(
                    'Exception while running benchmark: %s.\n%s\nGoing ahead with the rest.'
                    % (benchmark.describe(), traceback.format_exc()))
Example #27
0
def coh_tmm(pol, n_list, d_list, th_0, lam_vac):
    """
    Main "coherent transfer matrix method" calc. Given parameters of a stack,
    calculates everything you could ever want to know about how light
    propagates in it. (If performance is an issue, you can delete some of the
    calculations without affecting the rest.)
    pol is light polarization, "s" or "p".
    n_list is the list of refractive indices, in the order that the light would
    pass through them. The 0'th element of the list should be the semi-infinite
    medium from which the light enters, the last element should be the semi-
    infinite medium to which the light exits (if any exits).
    th_0 is the angle of incidence: 0 for normal, pi/2 for glancing.
    Remember, for a dissipative incoming medium (n_list[0] is not real), th_0
    should be complex so that n0 sin(th0) is real (intensity is constant as
    a function of lateral position).
    d_list is the list of layer thicknesses (front to back). Should correspond
    one-to-one with elements of n_list. First and last elements should be "inf".
    lam_vac is vacuum wavelength of the light.
    Outputs the following as a dictionary (see manual for details)
    * r--reflection amplitude
    * t--transmission amplitude
    * R--reflected wave power (as fraction of incident)
    * T--transmitted wave power (as fraction of incident)
    * power_entering--Power entering the first layer, usually (but not always)
      equal to 1-R (see manual).
    * vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling
      amplitudes, respectively, in the n'th medium just after interface with
      (n-1)st medium.
    * kz_list--normal component of complex angular wavenumber for
      forward-traveling wave in each layer.
    * th_list--(complex) propagation angle (in radians) in each layer
    * pol, n_list, d_list, th_0, lam_vac--same as input
    """
    # Convert lists to numpy arrays if they're not already.
    n_list = array(n_list)
    d_list = array(d_list, dtype=float)

    # Input tests
    if ((hasattr(lam_vac, 'size') and lam_vac.size > 1)
          or (hasattr(th_0, 'size') and th_0.size > 1)):
        raise ValueError('This function is not vectorized; you need to run one '
                         'calculation at a time (1 wavelength, 1 angle, etc.)')
    if (n_list.ndim != 1) or (d_list.ndim != 1) or (n_list.size != d_list.size):
        raise ValueError("Problem with n_list or d_list!")
    assert d_list[0] == d_list[-1] == inf, 'd_list must start and end with inf!'
    assert abs((n_list[0]*np.sin(th_0)).imag) < 100*EPSILON, 'Error in n0 or th0!'
    assert is_forward_angle(n_list[0], th_0), 'Error in n0 or th0!'
    num_layers = n_list.size

    # th_list is a list with, for each layer, the angle that the light travels
    # through the layer. Computed with Snell's law. Note that the "angles" may be
    # complex!
    th_list = list_snell(n_list, th_0)

    # kz is the z-component of (complex) angular wavevector for forward-moving
    # wave. Positive imaginary part means decaying.
    kz_list = 2 * np.pi * n_list * cos(th_list) / lam_vac

    # delta is the total phase accrued by traveling through a given layer.
    # Ignore warning about inf multiplication
    olderr = sp.seterr(invalid='ignore')
    delta = kz_list * d_list
    sp.seterr(**olderr)

    # For a very opaque layer, reset delta to avoid divide-by-0 and similar
    # errors. The criterion imag(delta) > 35 corresponds to single-pass
    # transmission < 1e-30 --- small enough that the exact value doesn't
    # matter.
    for i in range(1, num_layers-1):
        if delta[i].imag > 35:
            delta[i] = delta[i].real + 35j
            if 'opacity_warning' not in globals():
                global opacity_warning
                opacity_warning = True
                print("Warning: Layers that are almost perfectly opaque "
                      "are modified to be slightly transmissive, "
                      "allowing 1 photon in 10^30 to pass through. It's "
                      "for numerical stability. This warning will not "
                      "be shown again.")

    # t_list[i,j] and r_list[i,j] are transmission and reflection amplitudes,
    # respectively, coming from i, going to j. Only need to calculate this when
    # j=i+1. (2D array is overkill but helps avoid confusion.)
    t_list = zeros((num_layers, num_layers), dtype=complex)
    r_list = zeros((num_layers, num_layers), dtype=complex)
    for i in range(num_layers-1):
        t_list[i,i+1] = interface_t(pol, n_list[i], n_list[i+1],
                                    th_list[i], th_list[i+1])
        r_list[i,i+1] = interface_r(pol, n_list[i], n_list[i+1],
                                    th_list[i], th_list[i+1])
    # At the interface between the (n-1)st and nth material, let v_n be the
    # amplitude of the wave on the nth side heading forwards (away from the
    # boundary), and let w_n be the amplitude on the nth side heading backwards
    # (towards the boundary). Then (v_n,w_n) = M_n (v_{n+1},w_{n+1}). M_n is
    # M_list[n]. M_0 and M_{num_layers-1} are not defined.
    # My M is a bit different than Sernelius's, but Mtilde is the same.
    M_list = zeros((num_layers, 2, 2), dtype=complex)
    for i in range(1, num_layers-1):
        M_list[i] = (1/t_list[i,i+1]) * np.dot(
            make_2x2_array(exp(-1j*delta[i]), 0, 0, exp(1j*delta[i]),
                           dtype=complex),
            make_2x2_array(1, r_list[i,i+1], r_list[i,i+1], 1, dtype=complex))
    Mtilde = make_2x2_array(1, 0, 0, 1, dtype=complex)
    for i in range(1, num_layers-1):
        Mtilde = np.dot(Mtilde, M_list[i])
    Mtilde = np.dot(make_2x2_array(1, r_list[0,1], r_list[0,1], 1,
                                   dtype=complex)/t_list[0,1], Mtilde)

    # Net complex transmission and reflection amplitudes
    r = Mtilde[1,0]/Mtilde[0,0]
    t = 1/Mtilde[0,0]

    # vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium
    # has no left interface.
    # vw_list = zeros((num_layers, 2), dtype=complex)
    # vw = array([[t],[0]])
    # vw_list[-1,:] = np.transpose(vw)
    # for i in range(num_layers-2, 0, -1):
    #     vw = np.dot(M_list[i], vw)
    #     vw_list[i,:] = np.transpose(vw)

    # Net transmitted and reflected power, as a proportion of the incoming light
    # power.
    R = R_from_r(r)
    # T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1])
    # power_entering = power_entering_from_r(pol, r, n_list[0], th_0)

    # return {'r': r, 't': t, 'R': R, 'T': T, 'power_entering': power_entering,
    #         'vw_list': vw_list, 'kz_list': kz_list, 'th_list': th_list,
    #         'pol': pol, 'n_list': n_list, 'd_list': d_list, 'th_0': th_0,
    #         'lam_vac':lam_vac}
    return {'R': R}
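A minimal usage sketch (not part of the original example) for the coh_tmm variant above, assuming numpy is imported as np; the layer stack, indices and wavelength below are made-up illustrative values:

# Hypothetical usage of the coh_tmm example above (this variant returns only {'R': R}).
import numpy as np
n_stack = [1.0, 1.5 + 0.001j, 1.0]   # air / slightly lossy glass / air
d_stack = [np.inf, 300.0, np.inf]    # thicknesses; outer media are semi-infinite
result = coh_tmm('s', n_stack, d_stack, th_0=0.0, lam_vac=550.0)
print('Reflected fraction R =', result['R'])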
Example #28
0
def coh_tmm(pol, n_list, d_list, th_0, lam_vac, pame_output=False):
    """

    ------- IMPORTANT -----
    pame_output IS FOR USE WITH vector_coh_tmm() AND PLASMONIC ASSAY 
    MODELING ENVIRONMENT.  If left as False, will return data unmolested
    from original implementation of tmm module.  There are several calls to
    coh_tmm from other utilitles herein, such as getting aborbed power in 
    each layer, so this keyword maintains compatibility with both applications.
    ----------------------------------------------

    Main "coherent transfer matrix method" calc. Given parameters of a stack,
    calculates everything you could ever want to know about how light
    propagates in it. (If performance is an issue, you can delete some of the
    calculations without affecting the rest.)
    
    pol is light polarization, "s" or "p".
    
    n_list is the list of refractive indices, in the order that the light would
    pass through them. The 0'th element of the list should be the semi-infinite
    medium from which the light enters, the last element should be the semi-
    infinite medium to which the light exits (if any exits).
    
    th_0 is the angle of incidence: 0 for normal, pi/2 for glancing.
    Remember, for a dissipative incoming medium (n_list[0] is not real), th_0
    should be complex so that n0 sin(th0) is real (intensity is constant as
    a function of lateral position).
    
    d_list is the list of layer thicknesses (front to back). Should correspond
    one-to-one with elements of n_list. First and last elements should be "inf".
    
    lam_vac is vacuum wavelength of the light.
    
    Outputs the following as a dictionary (see manual for details)
    
    * r--reflection amplitude
    * t--transmission amplitude
    * R--reflected wave power (as fraction of incident)
    * T--transmitted wave power (as fraction of incident)
    * A--total absorbed power (as fraction of incident) = (1 - (R+T) )
    * power_entering--Power entering the first layer, usually (but not always)
      equal to 1-R (see manual).
    * vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling
      amplitudes, respectively, in the n'th medium just after interface with
      (n-1)st medium.
    * kz_list--normal component of complex angular wavenumber for
      forward-traveling wave in each layer.
    * th_list--(complex) propagation angle (in radians) in each layer
    * pol, n_list, d_list, th_0, lam_vac--same as input

    """
    #convert lists to numpy arrays if they're not already.
    n_list = array(n_list)
    d_list = array(d_list, dtype=float)

    #input tests
    if ((hasattr(lam_vac, 'size') and lam_vac.size > 1)
            or (hasattr(th_0, 'size') and th_0.size > 1)):
        raise ValueError(
            'This function is not vectorized; you need to run one '
            'calculation at a time (1 wavelength, 1 angle, etc.)')

    if (n_list.ndim != 1) or (d_list.ndim != 1) or (n_list.size !=
                                                    d_list.size):
        raise ValueError("Problem with n_list or d_list!")

    if (d_list[0] != inf) or (d_list[-1] != inf):
        raise ValueError('d_list must start and end with inf!')

    # Should this case exist?  Light couldn't get through a semi-infinite absorbing
    # medium to begin with!
    if abs((n_list[0] * np.sin(th_0)).imag) > 100 * EPSILON:
        raise ValueError('Error in n0 or th0!  Semi-infinite media cannot'
                         ' be absorbing (have imaginary component).')

    num_layers = n_list.size
    #th_list is a list with, for each layer, the angle that the light travels
    #through the layer. Computed with Snell's law. Note that the "angles" may be
    #complex!
    th_list = list_snell(n_list, th_0)

    #kz is the z-component of (complex) angular wavevector for forward-moving
    #wave. Positive imaginary part means decaying.
    kz_list = 2 * np.pi * n_list * cos(th_list) / lam_vac

    #delta is the total phase accrued by traveling through a given layer.
    #ignore warning about inf multiplication
    olderr = sp.seterr(invalid='ignore')
    delta = kz_list * d_list
    sp.seterr(**olderr)

    # For a very opaque layer, reset delta to avoid divide-by-0 and similar
    # errors. The criterion imag(delta) > 35 corresponds to single-pass
    # transmission < 1e-30 --- small enough that the exact value doesn't
    # matter.
    for i in range(1, num_layers - 1):
        if delta[i].imag > 35:
            delta[i] = delta[i].real + 35j
            if 'opacity_warning' not in globals():
                global opacity_warning
                opacity_warning = True
                print("Warning: Layers that are almost perfectly opaque "
                      "are modified to be slightly transmissive, "
                      "allowing 1 photon in 10^30 to pass through. It's "
                      "for numerical stability. This warning will not "
                      "be shown again.")

    #t_list[i,j] and r_list[i,j] are transmission and reflection amplitudes,
    #respectively, coming from i, going to j. Only need to calculate this when
    #j=i+1. (2D array is overkill but helps avoid confusion.)
    t_list = zeros((num_layers, num_layers), dtype=complex)
    r_list = zeros((num_layers, num_layers), dtype=complex)
    for i in range(num_layers - 1):
        t_list[i, i + 1] = interface_t(pol, n_list[i], n_list[i + 1],
                                       th_list[i], th_list[i + 1])
        r_list[i, i + 1] = interface_r(pol, n_list[i], n_list[i + 1],
                                       th_list[i], th_list[i + 1])
    #At the interface between the (n-1)st and nth material, let v_n be the
    #amplitude of the wave on the nth side heading forwards (away from the
    #boundary), and let w_n be the amplitude on the nth side heading backwards
    #(towards the boundary). Then (v_n,w_n) = M_n (v_{n+1},w_{n+1}). M_n is
    #M_list[n]. M_0 and M_{num_layers-1} are not defined.
    #My M is a bit different than Sernelius's, but Mtilde is the same.
    M_list = zeros((num_layers, 2, 2), dtype=complex)
    for i in range(1, num_layers - 1):
        M_list[i] = (1 / t_list[i, i + 1]) * np.dot(
            make_2x2_array(
                exp(-1j * delta[i]), 0, 0, exp(1j * delta[i]), dtype=complex),
            make_2x2_array(
                1, r_list[i, i + 1], r_list[i, i + 1], 1, dtype=complex))
    Mtilde = make_2x2_array(1, 0, 0, 1, dtype=complex)
    for i in range(1, num_layers - 1):
        Mtilde = np.dot(Mtilde, M_list[i])
    Mtilde = np.dot(
        make_2x2_array(1, r_list[0, 1], r_list[0, 1], 1, dtype=complex) /
        t_list[0, 1], Mtilde)

    #Net complex transmission and reflection amplitudes
    r = Mtilde[1, 0] / Mtilde[0, 0]
    t = 1 / Mtilde[0, 0]

    #vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium
    #has no left interface.
    vw_list = zeros((num_layers, 2), dtype=complex)
    vw = array([[t], [0]])
    vw_list[-1, :] = np.transpose(vw)
    for i in range(num_layers - 2, 0, -1):
        vw = np.dot(M_list[i], vw)
        vw_list[i, :] = np.transpose(vw)

    # vn, wn ADDED MYSELF
    vn = vw_list[:, 0]
    wn = vw_list[:, 1]

    #Net transmitted and reflected power, as a proportion of the incoming light
    #power.
    R = R_from_r(r)
    T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1])
    A = 1.0 - (R + T)  # <--- ADDED MYSELF

    power_entering = power_entering_from_r(pol, r, n_list[0], th_0)

    # Need unaltered output to compute absorb_in_each_layer
    # Unchanged return from original source
    # https://github.com/sbyrnes321/tmm/blob/master/tmm_core.py#L304
    unaltered_out = {
        'r': r,
        't': t,
        'R': R,
        'T': T,
        'power_entering': power_entering,
        'vw_list': vw_list,
        'kz_list': kz_list,
        'th_list': th_list,
        'pol': pol,
        'n_list': n_list,
        'd_list': d_list,
        'th_0': th_0,
        'lam_vac': lam_vac
    }

    absorp_in_layers = absorp_in_each_layer(unaltered_out)

    if pame_output:
        # pame /vector_coh_tmm return
        out = {
            'r_amp': r,
            't_amp': t,
            'R': R,
            'T': T,
            'A': A,
            'pe': power_entering,
        }
        # Add quantities that have a value in each layer
        out.update(_flatten('vn', vn))
        out.update(_flatten('wn', wn))
        out.update(_flatten('kz', kz_list))
        # absorption in each layer (special)
        out.update(_flatten('absorb', absorp_in_layers))
        out.update(_flatten('ang_prop', th_list))
        return out

    else:
        return unaltered_out
Example #29
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--obs_length',
                        default=9,
                        type=int,
                        help='observation length')
    parser.add_argument('--pred_length',
                        default=12,
                        type=int,
                        help='prediction length')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    parser.add_argument('--labels',
                        required=False,
                        nargs='+',
                        help='labels of models')
    parser.add_argument('--normalize_scene',
                        action='store_true',
                        help='augment scenes')
    parser.add_argument('--goals',
                        action='store_true',
                        help='Considers goals during prediction')
    parser.add_argument('--unimodal',
                        action='store_true',
                        help='provide unimodal evaluation')
    parser.add_argument('--topk',
                        action='store_true',
                        help='provide topk evaluation')
    parser.add_argument('--multimodal',
                        action='store_true',
                        help='provide multimodal nll evaluation')
    parser.add_argument('--modes',
                        default=1,
                        type=int,
                        help='number of modes to predict')
    args = parser.parse_args()

    scipy.seterr('ignore')

    ## Path to the data folder name to predict
    args.path = 'DATA_BLOCK/' + args.path + '/'

    ## Test_pred: Folders for saving model predictions
    args.path = args.path + 'test_pred/'

    if (not args.unimodal) and (not args.topk) and (not args.multimodal):
        args.unimodal = True  # Compute unimodal metrics by default

    if args.multimodal:
        args.modes = 20

    ## Writes to Test_pred
    ## Does this overwrite existing predictions? No. ###
    datasets = sorted([
        f.split('.')[-2] for f in os.listdir(args.path.replace('_pred', ''))
        if not f.startswith('.') and f.endswith('.ndjson')
    ])

    ## Model names are passed as arguments
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')

        # Loading the appropriate model (functionality only for SGAN and LSTM)
        print("Model Name: ", model_name)
        if 'sgan' in model_name:
            predictor = trajnetbaselines.sgan.SGANPredictor.load(model)
        else:
            predictor = trajnetbaselines.lstm.LSTMPredictor.load(model)

        # On CPU
        device = torch.device('cpu')
        predictor.model.to(device)

        total_scenes = 0
        average = 0
        final = 0
        gt_col = 0.
        pred_col = 0.
        neigh_scenes = 0
        topk_average = 0
        topk_final = 0
        all_goals = {}
        average_nll = 0

        ## Start writing in dataset/test_pred
        for dataset in datasets:
            # Model's name
            name = dataset.replace(
                args.path.replace('_pred', '') + 'test/', '')

            # Copy file from test into test/train_pred folder
            print('processing ' + name)
            if 'collision_test' in name:
                continue

            # Read file from 'test'
            reader = trajnetplusplustools.Reader(
                args.path.replace('_pred', '') + dataset + '.ndjson',
                scene_type='paths')
            ## Necessary modification of train scene to add filename (for goals)
            scenes = [(dataset, s_id, s) for s_id, s in reader.scenes()]
            ## Consider goals
            ## Goal file must be present in 'goal_files/test_private' folder
            ## Goal file must have the same name as corresponding test file
            if args.goals:
                goal_dict = pickle.load(
                    open('goal_files/test_private/' + dataset + '.pkl', "rb"))
                all_goals[dataset] = {
                    s_id: [goal_dict[path[0].pedestrian] for path in s]
                    for _, s_id, s in scenes
                }

            reader_gt = trajnetplusplustools.Reader(
                args.path.replace('_pred', '_private') + dataset + '.ndjson',
                scene_type='paths')
            scenes_gt = [s for _, s in reader_gt.scenes()]
            total_scenes += len(scenes_gt)

            for i, (filename, scene_id, paths) in enumerate(scenes):
                if i % 100 == 0:
                    print("Scenes evaluated: ",
                          '{}/{}'.format(i, len(scenes_gt)))
                ground_truth = scenes_gt[i]

                ## Convert numpy array to Track Rows ##
                ## Extract 1) first_frame, 2) frame_diff 3) ped_ids for writing predictions
                observed_path = paths[0]
                frame_diff = observed_path[1].frame - observed_path[0].frame
                first_frame = observed_path[args.obs_length -
                                            1].frame + frame_diff
                ped_id = observed_path[0].pedestrian

                goals = get_goals(paths, all_goals, filename,
                                  scene_id)  ## Zeros if no goals utilized
                predictions = predictor(paths,
                                        goals,
                                        n_predict=args.pred_length,
                                        obs_length=args.obs_length,
                                        modes=args.modes,
                                        args=args)

                if args.unimodal:  ## Unimodal
                    ## ADE / FDE
                    prediction, neigh_predictions = predictions[0]
                    prediction = np.round(prediction, 2)
                    ## make Track Rows
                    # primary
                    prediction = [
                        trajnetplusplustools.TrackRow(
                            first_frame + i * frame_diff, ped_id,
                            prediction[i, 0], prediction[i, 1], 0)
                        for i in range(len(prediction))
                    ]

                    primary_tracks = [
                        t for t in prediction if t.prediction_number == 0
                    ]
                    frame_gt = [
                        t.frame for t in ground_truth[0]
                    ][args.obs_length:args.obs_length + args.pred_length]
                    frame_pred = [t.frame for t in primary_tracks]

                    ## To verify if same scene
                    if frame_gt != frame_pred:
                        raise Exception('frame numbers are not consistent')

                    average_l2 = trajnetplusplustools.metrics.average_l2(
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length],
                        primary_tracks,
                        n_predictions=args.pred_length)
                    final_l2 = trajnetplusplustools.metrics.final_l2(
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length], primary_tracks)

                    # aggregate FDE and ADE
                    average += average_l2
                    final += final_l2

                    ## Collision Metrics
                    for j in range(1, len(ground_truth)):
                        if trajnetplusplustools.metrics.collision(
                                primary_tracks,
                                ground_truth[j],
                                n_predictions=args.pred_length):
                            gt_col += 1
                            break

                    # ## neighbours (if not empty) [Col-I]
                    if neigh_predictions.shape[1]:
                        neigh_scenes += 1
                        for n in range(neigh_predictions.shape[1]):
                            neigh = neigh_predictions[:, n]
                            neigh = np.round(neigh, 2)
                            neigh_track = [
                                trajnetplusplustools.TrackRow(
                                    first_frame + j * frame_diff, n,
                                    neigh[j, 0], neigh[j, 1], 0)
                                for j in range(len(neigh))
                            ]
                            if trajnetplusplustools.metrics.collision(
                                    primary_tracks,
                                    neigh_track,
                                    n_predictions=args.pred_length):
                                pred_col += 1
                                break

                primary_tracks_all = [
                    trajnetplusplustools.TrackRow(first_frame + i * frame_diff,
                                                  ped_id, x, y, m)
                    for m, (prim, neighs) in predictions.items()
                    for i, (x, y) in enumerate(prim)
                ]

                if args.topk:
                    topk_ade, topk_fde = trajnetplusplustools.metrics.topk(
                        primary_tracks_all,
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length],
                        n_predictions=args.pred_length)
                    topk_average += topk_ade
                    topk_final += topk_fde

                if args.multimodal:
                    nll_val = trajnetplusplustools.metrics.nll(
                        primary_tracks_all,
                        ground_truth[0],
                        n_predictions=args.pred_length,
                        n_samples=20)
                    average_nll += nll_val

        if args.unimodal:
            ## Average ADE and FDE
            average /= total_scenes
            final /= total_scenes
            gt_col /= (total_scenes * 0.01)
            if neigh_scenes == 0:
                pred_col = -1
            else:
                pred_col /= (neigh_scenes * 0.01)

            print('ADE: ', average)
            print('FDE: ', final)
            print("Col-I: ", pred_col)
            print("Col-II: ", gt_col)

        if args.topk:
            topk_average /= total_scenes
            topk_final /= total_scenes
            print('Topk_ADE: ', topk_average)
            print('Topk_FDE: ', topk_final)

        if args.multimodal:
            average_nll /= total_scenes
            print('Average NLL: ', average_nll)
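For reference (not part of the original example), a self-contained sketch of what the aggregated ADE/FDE numbers above measure: the average and final L2 displacement error between a predicted and a ground-truth trajectory over the prediction horizon. The arrays below are made up and plain numpy is used instead of trajnetplusplustools:

# Hypothetical standalone ADE/FDE computation on made-up (x, y) trajectories.
import numpy as np
pred = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, 0.3]])   # predicted positions per timestep
gt = np.array([[0.0, 0.0], [1.1, 0.0], [2.2, 0.2]])     # ground-truth positions per timestep
per_step_err = np.linalg.norm(pred - gt, axis=1)
ade = per_step_err.mean()     # Average Displacement Error
fde = per_step_err[-1]        # Final Displacement Error
print('ADE:', ade, 'FDE:', fde)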
Example #30
0
        # 1500K
        p['S'] = 1.5852424435
        # p.model.chem_eq.DEBUG=True
        p.run_model()

        # [0.00036, 0.99946, 0.00018])
        expected_concentrations = np.array(
            [3.58768646e-04, 9.99461847e-01, 1.79384323e-04])
        n = p['n']
        n_moles = p['n_moles']
        concentrations = n / n_moles

        assert_rel_error(self, concentrations, expected_concentrations, 1e-4)

        expected_n_moles = 0.022726185333
        assert_rel_error(self, n_moles, expected_n_moles, 1e-4)
        assert_rel_error(self, p['gamma'], 1.16381209181, 1e-4)

        # check = p.check_partial_derivatives()


if __name__ == "__main__":

    import numpy as np
    import scipy as sp

    np.seterr(all='raise')
    sp.seterr(all='raise')

    unittest.main()
Example #31
0
def enable_warnings():
    logging.root.setLevel(logging.WARNING)
    scipy.seterr(over='print', divide='print', invalid='print', under='ignore')
Example #32
0
def main():

    scipy.seterr(all='ignore')

    #################################################
    ########## CALCULATE RAW EVENT VECTORS ##########
    #################################################

    db = mysql.connector.connect(
        host="localhost",
        user="******",
        database="monitor_db",
        passwd="D@1syKn0ws"
    )

    # LOAD RAW VECTORS 
    query = "SELECT name, start, end FROM raw_vectors"
    cursor = db.cursor(buffered=True)
    cursor.execute(query)

    raw_vectors = []
    for row in cursor:
        raw_vectors.append({
            'stream_name': row[0],
            'start': row[1],
            'end': row[2]
        })

    for raw_vector in tqdm(raw_vectors, desc='Creating Raw Vectors'):

        stream_name, start, end = raw_vector['stream_name'], raw_vector['start'], raw_vector['end']
        query = "SELECT timestamp, value FROM stream_data WHERE name='%s' and timestamp>=%s and timestamp<=%s ORDER BY timestamp ASC;" % (stream_name, start, end)
        cursor.execute(query)

        vector = []
        for row in cursor:
            vector.append(row[1])

        vector = np.asarray(vector)
        raw_vector['vector'] = vector

    # LOAD EVENTS
    query = "SELECT event, stream, timeStart, timeEnd FROM event"
    cursor = db.cursor()
    cursor.execute(query)

    events = []
    for row in cursor: 
        events.append({
            'event_name': row[0],
            'stream_name': row[1],
            'start': row[2],
            'end': row[3]
        })

    all_corr_data = []
    for event in tqdm(events, desc='Comparing Events'):

        event_name, stream_name, start, end = event['event_name'], event['stream_name'], event['start'], event['end']

        query = "SELECT name, timestamp, value FROM stream_data WHERE name='%s' and timestamp >= '%s' and timestamp <= '%s' ORDER BY timestamp ASC;"  % (stream_name, start, end)
        cursor.execute(query)

        event_vector = []
        for index, row in enumerate(cursor): 
            event_vector.append(row[2])

        event_vector = np.asarray(event_vector)

        corr_data = [] 
        for raw_vector_data in tqdm(raw_vectors, desc='Comparing to Event %s' % event_name):

            rstream_name, rstart, rend, rvector = raw_vector_data['stream_name'], raw_vector_data['start'], raw_vector_data['end'], raw_vector_data['vector']

            if rvector.shape[0] > event_vector.shape[0]:
                rvector = rvector[:event_vector.shape[0]]

            if rvector.shape[0] == event_vector.shape[0]:
                corr = pearsonr(event_vector, rvector)[0]
                corr_data.append([event_name, rstream_name, rstart, rend, corr])

        corr_data = pd.DataFrame(corr_data)
        corr_data = corr_data.rename(columns={0: 'event', 1: 'stream', 2: 'start', 3: 'end', 4:'corr'})
        corr_data = corr_data.dropna(subset=['corr'])
        corr_data = corr_data.sort_values('corr', ascending=False).reset_index(drop=True)
        corr_data['rank'] = corr_data.index + 1
        corr_data = corr_data[['event', 'rank', 'stream', 'start', 'end', 'corr']]
        all_corr_data.append(corr_data.iloc[:99, ])

    all_corr_data = pd.concat(all_corr_data)
    all_corr_data.to_csv(DATA_OUTPUT, index=False, header=False)

    #######################################
    ########## CREATE SQL SCRIPT ##########
    #######################################

    sqlstr = 'USE monitor_db;\n\n'
    sqlstr += 'DROP TABLE IF EXISTS raw_event_vectors;\n\n'

    sqlstr += 'CREATE TABLE raw_event_vectors(\n'
    sqlstr += '\tevent VARCHAR(100),\n'
    sqlstr += '\trank INTEGER,\n'
    sqlstr += '\tstream VARCHAR(100),\n'
    sqlstr += '\ttimeStart INTEGER,\n'
    sqlstr += '\ttimeEnd INTEGER,\n'
    sqlstr += '\tcorrelation FLOAT\n'
    sqlstr += ');\n\n'

    sqlstr += f'LOAD DATA LOCAL INFILE "{DATA_OUTPUT}" INTO TABLE raw_event_vectors FIELDS TERMINATED BY ",";\n\n'

    with open(SCRIPT_OUTPUT, 'w') as w: w.write(sqlstr)
Example #33
0
def coh_tmm(pol, n_list, d_list, th_0, lam_vac):
    """
    This function is vectorized.
    Main "coherent transfer matrix method" calc. Given parameters of a stack,
    calculates everything you could ever want to know about how light
    propagates in it. (If performance is an issue, you can delete some of the
    calculations without affecting the rest.)

    pol is light polarization, "s" or "p".

    n_list is the list of refractive indices, in the order that the light would
    pass through them. The 0'th element of the list should be the semi-infinite
    medium from which the light enters, the last element should be the semi-
    infinite medium to which the light exits (if any exits).

    th_0 is the angle of incidence: 0 for normal, pi/2 for glancing.
    Remember, for a dissipative incoming medium (n_list[0] is not real), th_0
    should be complex so that n0 sin(th0) is real (intensity is constant as
    a function of lateral position).

    d_list is the list of layer thicknesses (front to back). Should correspond
    one-to-one with elements of n_list. First and last elements should be "inf".

    lam_vac is vacuum wavelength of the light.

    Outputs the following as a dictionary (see manual for details)

    * r--reflection amplitude
    * t--transmission amplitude
    * R--reflected wave power (as fraction of incident)
    * T--transmitted wave power (as fraction of incident)
    * power_entering--Power entering the first layer, usually (but not always)
      equal to 1-R (see manual).
    * vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling
      amplitudes, respectively, in the n'th medium just after interface with
      (n-1)st medium.
    * kz_list--normal component of complex angular wavenumber for
      forward-traveling wave in each layer.
    * th_list--(complex) propagation angle (in radians) in each layer
    * pol, n_list, d_list, th_0, lam_vac--same as input

    """
    # convert lists to numpy arrays if they're not already.
    n_list = array(n_list)
    d_list = array(d_list, dtype=float)[:, None]

    # input tests
    if hasattr(th_0, 'size') and th_0.size > 1 and th_0.size != lam_vac.size:
        raise ValueError(
            'This function is not vectorized for angles; you need to run one angle calculation at a time.'
        )
    if n_list.shape[0] != d_list.shape[0]:
        raise ValueError("Problem with n_list or d_list!")
    if (d_list[0] != inf) or (d_list[-1] != inf):
        raise ValueError('d_list must start and end with inf!')
    if any(abs((n_list[0] * np.sin(th_0)).imag) > 100 * EPSILON):
        raise ValueError('Error in n0 or th0!')
    if hasattr(th_0, 'size'):
        th_0 = array(th_0)
    num_layers = n_list.shape[0]
    num_wl = n_list.shape[1]

    # th_list is a list with, for each layer, the angle that the light travels
    # through the layer. Computed with Snell's law. Note that the "angles" may be
    # complex!
    th_list = list_snell(n_list, th_0)

    # kz is the z-component of (complex) angular wavevector for forward-moving
    # wave. Positive imaginary part means decaying.
    kz_list = 2 * np.pi * n_list * cos(th_list) / lam_vac

    # delta is the total phase accrued by traveling through a given layer.
    # ignore warning about inf multiplication
    olderr = sp.seterr(invalid='ignore')
    delta = kz_list * d_list
    sp.seterr(**olderr)

    # For a very opaque layer, reset delta to avoid divide-by-0 and similar
    # errors. The criterion imag(delta) > 35 corresponds to single-pass
    # transmission < 1e-30 --- small enough that the exact value doesn't
    # matter.
    # It DOES matter (for depth-dependent calculations!)
    delta[1:num_layers - 1, :] = np.where(
        delta[1:num_layers - 1, :].imag > 100,
        delta[1:num_layers - 1, :].real + 100j, delta[1:num_layers - 1, :])

    # t_list[i,j] and r_list[i,j] are transmission and reflection amplitudes,
    # respectively, coming from i, going to j. Only need to calculate this when
    # j=i+1. (2D array is overkill but helps avoid confusion.)
    t_list = zeros((num_wl, num_layers, num_layers), dtype=complex)
    r_list = zeros((num_wl, num_layers, num_layers), dtype=complex)

    for i in range(num_layers - 1):
        t_list[:, i, i + 1] = interface_t(pol, n_list[i], n_list[i + 1],
                                          th_list[i], th_list[i + 1])
        r_list[:, i, i + 1] = interface_r(pol, n_list[i], n_list[i + 1],
                                          th_list[i], th_list[i + 1])
    # At the interface between the (n-1)st and nth material, let v_n be the
    # amplitude of the wave on the nth side heading forwards (away from the
    # boundary), and let w_n be the amplitude on the nth side heading backwards
    # (towards the boundary). Then (v_n,w_n) = M_n (v_{n+1},w_{n+1}). M_n is
    # M_list[n]. M_0 and M_{num_layers-1} are not defined.
    # My M is a bit different than Sernelius's, but Mtilde is the same.

    M_list = zeros((num_layers, num_wl, 2, 2), dtype=complex)
    for i in range(1, num_layers - 1):
        A = make_2x2_array(exp(-1j * delta[i]),
                           np.zeros_like(delta[i]),
                           np.zeros_like(delta[i]),
                           exp(1j * delta[i]),
                           dtype=complex)
        B = make_2x2_array(np.ones_like(delta[i]),
                           r_list[:, i, i + 1],
                           r_list[:, i, i + 1],
                           np.ones_like(delta[i]),
                           dtype=complex)
        d = (1 / t_list[:, i, i + 1])

        M_list[i] = np.transpose(
            d * np.transpose(np.matmul(A, B)))  # , (1, 2, 0)), (2, 0, 1))

    Mtilde = make_2x2_array(np.ones_like(delta[i]),
                            np.zeros_like(delta[i]),
                            np.zeros_like(delta[i]),
                            np.ones_like(delta[i]),
                            dtype=complex)
    for i in range(1, num_layers - 1):
        Mtilde = np.matmul(Mtilde, M_list[i])

    A = make_2x2_array(np.ones_like(delta[i]),
                       r_list[:, 0, 1],
                       r_list[:, 0, 1],
                       np.ones_like(delta[i]),
                       dtype=complex)
    d = 1 / t_list[:, 0, 1]
    Mtilde = np.matmul(np.transpose(d * np.transpose(A, (1, 2, 0)), (2, 0, 1)),
                       Mtilde)

    # Net complex transmission and reflection amplitudes
    r = Mtilde[:, 1, 0] / Mtilde[:, 0, 0]
    t = np.ones_like(Mtilde[:, 0, 0]) / Mtilde[:, 0, 0]

    # vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium
    # has no left interface.
    vw_list = zeros((num_layers, num_wl, 2), dtype=complex)
    vw = zeros((num_wl, 2, 2), dtype=complex)
    I = np.identity(2)
    vw[:, 0, 0] = t
    vw[:, 0, 1] = t
    vw_list[-1] = vw[:, 0, :]
    for i in range(num_layers - 2, 0, -1):
        vw = np.matmul(M_list[i], vw)
        vw_list[i, :, :] = vw[:, :, 1]
    vw_list[-1, :, 1] = 0

    # Net transmitted and reflected power, as a proportion of the incoming light
    # power.
    R = R_from_r(r)
    T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1])
    power_entering = power_entering_from_r(pol, r, n_list[0], th_0)

    return {
        'r': r,
        't': t,
        'R': R,
        'T': T,
        'power_entering': power_entering,
        'vw_list': vw_list,
        'kz_list': kz_list,
        'th_list': th_list,
        'pol': pol,
        'n_list': n_list,
        'd_list': d_list,
        'th_0': th_0,
        'lam_vac': lam_vac
    }
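A minimal usage sketch (not part of the original example) for the vectorized coh_tmm above; here n_list is shaped (num_layers, num_wavelengths), lam_vac is an array, and all values are made up:

# Hypothetical usage of the vectorized coh_tmm example above.
import numpy as np
lam = np.array([500.0, 550.0, 600.0])           # vacuum wavelengths
n_stack = np.array([[1.0, 1.0, 1.0],            # incident medium
                    [1.5, 1.52, 1.54],          # film, one index per wavelength
                    [1.0, 1.0, 1.0]])           # exit medium
d_stack = [np.inf, 300.0, np.inf]
out = coh_tmm('s', n_stack, d_stack, th_0=0.0, lam_vac=lam)
print(out['R'])   # one reflectance value per wavelength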
Example #34
0
#!/usr/bin/env python
"""
fitting

Phenome library

biolog data fitting functions
"""
from ductape.common.utils import compress, smooth
from scipy.optimize.minpack import curve_fit
import numpy as np
import logging
# No country for warnings
import scipy as sp
sp.seterr(all='ignore')
#

__author__ = "Marco Galardini"

logger = logging.getLogger('ductape.fitting')

def logistic(x, A, u, d, v, y0):
    '''
    Logistic growth model
    Taken from: "Modeling of the bacterial growth curve."
                (Zwietering et al., 1990)
                PMID: 16348228
    '''
    y = (A / (1 + np.exp( ( ((4 * u)/A) * (d - x) ) + 2 ))) + y0
    return y
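A minimal fitting sketch (not part of the original module) for the Zwietering logistic model above, using scipy's curve_fit on a synthetic growth curve; the data and starting guesses are made up, and note that the parameter v appears in the signature but not in the logistic formula, so its fitted value (and the covariance estimate) is not meaningful:

# Hypothetical fit of the logistic() model above to synthetic data.
import numpy as np
from scipy.optimize import curve_fit

t = np.linspace(0, 48, 25)                               # time points (e.g. hours)
signal = logistic(t, A=1.2, u=0.15, d=8.0, v=1.0, y0=0.05)
signal = signal + np.random.normal(0, 0.02, t.size)      # add measurement noise
p0 = [1.0, 0.1, 5.0, 1.0, 0.0]                           # initial guesses for A, u, d, v, y0
params, cov = curve_fit(logistic, t, signal, p0=p0, maxfev=5000)
print('Fitted A, u, d, v, y0:', params)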
Example #35
0
        else:
            pcov = inf

        if return_full:
            return popt, pcov, infodict, errmsg, ier
        else:
            return popt, pcov

    # End of borrowed scipy fix

import numpy as np
import logging
# No country for warnings
import scipy as sp

sp.seterr(all='ignore')
#

__author__ = "Marco Galardini"

logger = logging.getLogger('ductape.fitting')


def logistic(x, A, u, d, v, y0):
    '''
    Logistic growth model
    Taken from: "Modeling of the bacterial growth curve."
                (Zwietering et al., 1990)
                PMID: 16348228
    '''
    y = (A / (1 + np.exp((((4 * u) / A) * (d - x)) + 2))) + y0
Пример #36
0
from unum.units import m, s, N, kg
import copy
import scipy
import scipy.linalg
import scipy.spatial
import math

scipy.seterr(divide="raise", invalid="raise")
G = 6.673 * 10**-11 * N * (m / kg)**2


def magnitude(vect, unit):
    # shorthand to work around scipy not working with units
    return unit * scipy.linalg.norm(vect.asNumber(unit))


def distance(A, B):
    return m * scipy.spatial.distance.cdist([A.displacement.asNumber(m)],
                                            [B.displacement.asNumber(m)])[0][0]


def speed(A, B):
    return magnitude(A.velocity - B.velocity, m / s)


def velocity(A, B):
    return A.velocity - B.velocity


def altitude(A, B):
    return distance(A, B) - A.radius - B.radius
Example #37
0
def hyst_area_rand(D, A, DyV, AyV, DuV, AuV, csm_hysteretic_damping):
    """
    Function to calculate the hysteresis loop area used in the building damage calcs.
    This version works where the building capacity curves are chosen randomly
    (which means Ay needs to be rescaled by Rcap;)

    In:  DyV, AyV =  yield point (vectors(nev,1))
         DuV, AuV =  ultimate point (vectors(nev,1))
         D, A = peak displacement and accel (vector(nev,1) of events)

    Out: Area = vector of events
         Glenn Fulford: 21/4/02.


This is a diagram, trying to show how the hysteresis area is
calculated if csm_hysteretic_damping = 'curve'.

The diagram shows half the hysteresis area.
y is the yield point
PP is the performance point
x2 is the translation along the displacement axis.
A2 is the area from (y-x2) to PP on the top curve.
A2 is also the area from y to (PP-x2) on the bottom curve.
A1 and A3 are triangular areas.

The hysteresis area = 2(A1+A2-A3)

        y-x2    y          PP
        |       |          |
        |       |          |
        |       |          |
        |       |          | ___.......--=----.------'''
        |       |    _,.,-':'    __..--''
        |       |.-''      |_.-''
        |    _,'|        ,-+
        |  ,'   |     ,-' /|
        | /  A2 |  ,-'   / |
        |/      |,'     /  |
        :i______|______/   |
        +      /      /    |
       /|     /      /     |
      / |    /      /      |
     /  |   /      /       |
    /   |  /      /  A3    |
   /    | /      /         |
  / A1  |/      /          |
 /      +      /           |
/      '|     /            |


    """
    # use a coord system with the origin at the beginning
    # of the hysteresis curve
    # disp('hyst: caprand='); disp(SAcapR(:,1));

    ky = (AyV / DyV)  # slope of linear part of capacity curve

    # x0=D-DyV    # x distance from linear part (<=0 implies point is linear)
    linear_region = where(D <= DyV)

    x2 = D - A / ky  # translation along displacement axis
    if csm_hysteretic_damping == 'trapezoidal':
        # y distance from linear part (<=0 implies point is linear)
        y1 = (A - AyV)
        y1[linear_region] = -1  # avoid NaNs - only Harea3 has any impact there

        x1 = 2 * x2 + y1 / ky

        cc = A - AyV
        bb = ky / (A - AyV)
        aa = -ky / bb

        oldsettings = seterr(under='ignore')
        Harea1 = cc * x1 + aa / bb * (1 - exp(-bb * x1))
        seterr(**oldsettings)
        Harea2 = 0.5 * y1 * y1 / ky
        Harea3 = 2 * x2 * AyV

        Harea = 2 * (Harea1 - Harea2 + Harea3)
        Harea[linear_region] = 0
    elif csm_hysteretic_damping == 'curve':
        cc = AuV
        bb = ky / (AuV - AyV)
        aa = (AyV - AuV) * exp(bb * DyV)

        Harea1 = 0.5 * AyV * DyV
        Harea2 = aa / bb * \
            (exp(-bb * DyV) - exp(-bb * (D + x2))) + cc * (D + x2 - DyV)
        Harea3 = 0.5 * A * (D - x2)

        Harea = 2 * (Harea1 + Harea2 - Harea3)
        Harea[linear_region] = 0

    # Area calculation from
    # ATC 40 Seismic Evaluation and Retrofit of Concrete Buildings 8-15
    # Ed = 4(Ay*Dpi - Dy*Api)

    elif csm_hysteretic_damping == 'parallelogram':
        Harea = 4 * (D * AyV - DyV * A)
        Harea[linear_region] = 0
    else:
        print "csm_hysteretic_damping", csm_hysteretic_damping

    return Harea
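A minimal usage sketch (not part of the original module) for hyst_area_rand above; the capacity-curve and demand values are made up, and the module-level imports used by the function (where, exp from scipy/numpy) are assumed to be in place:

# Hypothetical call of hyst_area_rand for three events with the 'curve' damping model.
import numpy as np
D = np.array([0.02, 0.08, 0.15])     # peak displacement per event
A = np.array([0.10, 0.28, 0.35])     # peak acceleration per event
DyV = np.array([0.05, 0.05, 0.05])   # yield displacement
AyV = np.array([0.25, 0.25, 0.25])   # yield acceleration
DuV = np.array([0.30, 0.30, 0.30])   # ultimate displacement
AuV = np.array([0.45, 0.45, 0.45])   # ultimate acceleration
area = hyst_area_rand(D, A, DyV, AyV, DuV, AuV, 'curve')
print(area)   # zero for events still in the linear region (D <= DyV)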
Example #38
0
def herm_sqrt_inv(x,
                  zero_tol=1E-15,
                  sanity_checks=False,
                  return_rank=False,
                  sc_data=''):
    if isinstance(x, mm.eyemat):
        x_sqrt = x
        x_sqrt_i = x
        rank = x.shape[0]
    else:
        try:
            ev = x.diag  #simple_diag_matrix
            EV = None
        except AttributeError:
            ev, EV = la.eigh(x)

        zeros = ev <= zero_tol  #throw away negative results too!

        ev_sqrt = sp.sqrt(ev)

        err = sp.seterr(divide='ignore', invalid='ignore')
        try:
            ev_sqrt_i = 1 / ev_sqrt
            ev_sqrt[zeros] = 0
            ev_sqrt_i[zeros] = 0
        finally:
            sp.seterr(divide=err['divide'], invalid=err['invalid'])

        if EV is None:
            x_sqrt = mm.simple_diag_matrix(ev_sqrt, dtype=x.dtype)
            x_sqrt_i = mm.simple_diag_matrix(ev_sqrt_i, dtype=x.dtype)
        else:
            B = mm.mmul_diag(ev_sqrt, EV.conj().T)
            x_sqrt = EV.dot(B)

            B = mm.mmul_diag(ev_sqrt_i, EV.conj().T)
            x_sqrt_i = EV.dot(B)

        rank = x.shape[0] - np.count_nonzero(zeros)

        if sanity_checks:
            if ev.min() < -zero_tol:
                log.warning(
                    "Sanity Fail in herm_sqrt_inv(): Throwing away negative eigenvalues! %s %s",
                    ev.min(), sc_data)

            if not np.allclose(x_sqrt.dot(x_sqrt), x):
                log.warning(
                    "Sanity Fail in herm_sqrt_inv(): x_sqrt is bad! %s %s",
                    la.norm(x_sqrt.dot(x_sqrt) - x), sc_data)

            if EV is None:
                nulls = sp.zeros(x.shape[0])
                nulls[zeros] = 1
                nulls = sp.diag(nulls)
            else:  #if we did an EVD then we use the eigenvectors
                nulls = EV.copy()
                nulls[:, sp.invert(zeros)] = 0
                nulls = nulls.dot(nulls.conj().T)

            eye = np.eye(x.shape[0])
            if not np.allclose(x_sqrt.dot(x_sqrt_i), eye - nulls):
                log.warning(
                    "Sanity Fail in herm_sqrt_inv(): x_sqrt_i is bad! %s %s",
                    la.norm(x_sqrt.dot(x_sqrt_i) - eye + nulls), sc_data)

    if return_rank:
        return x_sqrt, x_sqrt_i, rank
    else:
        return x_sqrt, x_sqrt_i
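A minimal usage sketch (not part of the original module) for herm_sqrt_inv above; it assumes the module's own imports (np = numpy, sp = scipy, la = scipy.linalg, and the matrix-helper module mm providing eyemat / simple_diag_matrix / mmul_diag) are available, and uses a made-up rank-deficient Hermitian matrix:

# Hypothetical call of herm_sqrt_inv on a rank-2 Hermitian 3x3 matrix.
import numpy as np
x = np.array([[4.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0]])
x_sqrt, x_sqrt_i, rank = herm_sqrt_inv(x, return_rank=True)
print(rank)                                 # 2: the zero eigenvalue is discarded
print(np.allclose(x_sqrt.dot(x_sqrt), x))   # True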
Example #39
0
def main(image, quantization_level):
    sp.seterr(all="ignore")
    used_qm = MIDDLE_COMPRESSION_QT
    if quantization_level == 1:
        used_qm = LOW_COMPRESSION_QT
    elif quantization_level == 2:
        used_qm = MIDDLE_COMPRESSION_QT
    elif quantization_level == 3:
        used_qm = HIGH_COMPRESSION_QT
    else:
        print("Quantization level can only be between 1 and 3")
        exit(1)

    quantized_image, shape, ac_coefs, dc_coefs = quantization(used_qm, image)

    encoded_qt = encode_quantization_table(used_qm)

    output_file = image.split("/")[-1].split(".")[0] + ".jpg"
    jpg_fd = open(output_file, "wb")

    #write SOI(Start of Image)
    jpg_fd.write(hex_to_bytes("FFD8"))

    #APP0 segment
    jpg_fd.write(hex_to_bytes('FFE000104A46494600010100000100010000'))

    # write Define Quantization Table(DQT)
    jpg_fd.write(hex_to_bytes("FFDB"))

    # write the size of DQT
    b = binary_to_bytes(encoded_qt)
    jpg_fd.write(int_to_bytes(len(b) + 3))
    print(int_to_bytes(len(b) + 3))

    # mode of the DQT: 00h for Y
    jpg_fd.write(hex_to_bytes("00"))

    # write encoded quantization table
    jpg_fd.write(binary_to_bytes(encoded_qt))

    jpg_fd.write(
        hex_to_bytes(
            'FFC401A20000000701010101010000000000000000040503020601000708090A0B0100020203010101010100000000000000010002030405060708090A0B1000020103030204020607030402060273010203110400052112314151061361227181143291A10715B14223C152D1E1331662F0247282F12543345392A2B26373C235442793A3B33617546474C3D2E2082683090A181984944546A4B456D355281AF2E3F3C4D4E4F465758595A5B5C5D5E5F566768696A6B6C6D6E6F637475767778797A7B7C7D7E7F738485868788898A8B8C8D8E8F82939495969798999A9B9C9D9E9F92A3A4A5A6A7A8A9AAABACADAEAFA110002020102030505040506040803036D0100021103042112314105511361220671819132A1B1F014C1D1E1234215526272F1332434438216925325A263B2C20773D235E2448317549308090A18192636451A2764745537F2A3B3C32829D3E3F38494A4B4C4D4E4F465758595A5B5C5D5E5F5465666768696A6B6C6D6E6F6475767778797A7B7C7D7E7F738485868788898A8B8C8D8E8F839495969798999A9B9C9D9E9F92A3A4A5A6A7A8A9AAABACADAEAFA'
        ))

    # write Start of Frame (SOF)
    # write the length of the segment (fixed)
    jpg_fd.write(hex_to_bytes("FFC0000B08"))

    # write height of the image
    jpg_fd.write(int_to_bytes(shape[0]))

    # write width of the image
    jpg_fd.write(int_to_bytes(shape[1]))

    # write number of components and component ids
    # Note: probably fixed
    jpg_fd.write(hex_to_bytes("01011100"))

    #SOS
    jpg_fd.write(hex_to_bytes("FFDA0008010100003F00"))
    a = encode_quantized_image(quantized_image)
    jpg_fd.write(binary_to_bytes(a, add=1))  # reuse the already-encoded data instead of encoding twice

    #EOI
    jpg_fd.write(hex_to_bytes("FFD9"))
    jpg_fd.close()
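The example above relies on helpers defined elsewhere in its module (quantization, encode_quantization_table, encode_quantized_image, binary_to_bytes, hex_to_bytes, int_to_bytes). As an assumption for clarity only, plausible stand-ins for the two byte-level helpers might look like this; JPEG marker-segment lengths and image dimensions are written as big-endian 16-bit values:

# Hypothetical stand-ins for two helpers used by the JPEG-writing example above.
import struct

def hex_to_bytes(hex_string):
    """Convert a hex string such as 'FFD8' into raw bytes."""
    return bytes.fromhex(hex_string)

def int_to_bytes(value):
    """Pack an integer as a big-endian 16-bit value (JPEG lengths and dimensions)."""
    return struct.pack('>H', value)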
Example #40
0
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 04 16:42:15 2015
@author: Jan Øye Lindroos

IO library methods for reading and writing to file
and printing to screen
"""
import os, sys
import scipy as sp

sp.seterr(divide="ignore")  # Inore errors related to log giving inf
import pickle
import time

# Writer class for writing models to file
class writer(object):
    def __init__(self, rank, opt):
        self.rank = rank
        self.path = opt.path
        self.format = opt.fileformat
        if self.format == "hdf5":
            import h5py

            self.filename = opt.run_name + "_" + str(rank) + ".hdf5"

        else:
            self.filename = opt.run_name + "_" + str(rank) + ".dat"
            self.f = open(os.path.join(opt.path, opt.run_name, self.filename), "w", 0)
            self.header = True  # Determines whether the header should be written to file
Example #41
0
            # V_SI = cu(V, 'ft/s', 'm/s')
            # A_SI = cu(A, 'inch**2', 'm**2')

            # print(p['T'], p['P'])
            # print("Ps", Ps_computed, Ps)
            # print("Ts", Ts_computed, Ts)
            # print("gamma", gams_computed, gams)
            # print("V", V_computed, V)
            # print("A", A_computed, A)
            # print("MN", MN_computed, MN)
            # print("rhos", rhos_computed, rhos)
            # print()
            tol = 1.0e-4
            assert_rel_error(self, gams_computed, gams, tol)
            assert_rel_error(self, MN_computed, MN, tol)
            assert_rel_error(self, Ps_computed, Ps, tol)
            assert_rel_error(self, Ts_computed, Ts, tol)
            assert_rel_error(self, hs_computed, hs, tol)
            assert_rel_error(self, rhos_computed, rhos, tol)
            assert_rel_error(self, gams_computed, gams, tol)
            assert_rel_error(self, V_computed, V, tol)
            assert_rel_error(self, A_computed, A, tol)


if __name__ == "__main__":
    import scipy

    np.seterr(all='raise')
    scipy.seterr(all='raise')
    unittest.main()
Пример #42
0
#% in this example to show that the theoretical prices, volatilities, and correlations match up with the observed market data.
#%
#% There is not, to my knowledge, a commodities methodology that incorporates so many market factors across multiple commodities
#% into one simulation. The advantages of such a model allow for more accurate modeling of spark spreads and pricing of deals
#% that are dependent on multiple commodities prices. I have included all files, including excel, associated with this calibration
#% and simulation.
from quantdsl.priceprocess.base import PriceProcess

try:
    from matplotlib import pylab as plt, pylab
except RuntimeError:
    pass
import scipy as np
from scipy.optimize import basinhopping

np.seterr(over='raise')


class SchwartzSmithFromFuturesAndImpliedVols(PriceProcess):
    def simulate_future_prices(self, observation_date, requirements,
                               path_count, calibration_params):

        fixing_dates = set()
        for requirement in requirements:
            fixing_dates.add(requirement[1])

        fixing_dates = sorted(list(fixing_dates))

        allMarketNames = []
        allOptimizedParams = []
        allSeasonalParams = []
Пример #43
0
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--obs_length',
                        default=9,
                        type=int,
                        help='observation length')
    parser.add_argument('--pred_length',
                        default=12,
                        type=int,
                        help='prediction length')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    parser.add_argument('--labels',
                        required=False,
                        nargs='+',
                        help='labels of models')
    parser.add_argument('--sf',
                        action='store_true',
                        help='consider socialforce in evaluation')
    parser.add_argument('--orca',
                        action='store_true',
                        help='consider orca in evaluation')
    parser.add_argument('--kf',
                        action='store_true',
                        help='consider kalman in evaluation')
    parser.add_argument('--normalize_scene',
                        action='store_true',
                        help='augment scenes')
    parser.add_argument('--goals',
                        action='store_true',
                        help='Considers goals during prediction')
    parser.add_argument('--modes',
                        default=1,
                        type=int,
                        help='number of modes to predict')
    args = parser.parse_args()

    scipy.seterr('ignore')

    ## Path to the data folder name to predict
    args.path = 'DATA_BLOCK/' + args.path + '/'

    ## Test_pred : Folders for saving model predictions
    args.path = args.path + 'test_pred/'

    ## Writes to Test_pred
    ### Does this overwrite existing predictions? No. ###
    if not args.disable_write:
        write.main(args)

    ## Evaluates test_pred with test_private
    names = []
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')
        model_name = model_name + '_modes' + str(args.modes)
        names.append(model_name)

    ## labels
    if args.labels:
        labels = args.labels
    else:
        labels = names

    # Initiate Result Table
    table = Table()

    for num, name in enumerate(names):
        print(name)

        result_file = args.path.replace('pred', 'results') + name

        ## If result was pre-calculated and saved, Load
        if os.path.exists(result_file + '/results.pkl'):
            print("Loading Saved Results")
            with open(result_file + '/results.pkl', 'rb') as handle:
                [final_result, sub_final_result,
                 col_result] = pickle.load(handle)
            table.add_result(labels[num], final_result, sub_final_result)
            table.add_collision_entry(labels[num], col_result)

        # ## Else, Calculate results and save
        else:
            list_sub = sorted([
                f for f in os.listdir(args.path + name)
                if not f.startswith('.')
            ])

            ## Simple Collision Test
            col_result = collision_test(list_sub, name, args)
            table.add_collision_entry(labels[num], col_result)

            submit_datasets = [
                args.path + name + '/' + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]
            true_datasets = [
                args.path.replace('pred', 'private') + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]

            ## Evaluate submitted datasets with True Datasets [The main eval function]
            # results = {submit_datasets[i].replace(args.path, '').replace('.ndjson', ''):
            #             eval(true_datasets[i], submit_datasets[i], args)
            #            for i in range(len(true_datasets))}

            results_list = Parallel(n_jobs=4)(
                delayed(eval)(true_datasets[i], submit_datasets[i], args)
                for i in range(len(true_datasets)))
            results = {
                submit_datasets[i].replace(args.path,
                                           '').replace('.ndjson', ''):
                results_list[i]
                for i in range(len(true_datasets))
            }

            # print(results)
            ## Generate results
            final_result, sub_final_result = table.add_entry(
                labels[num], results)

            ## Save results as pkl (to avoid computation again)
            os.makedirs(result_file)
            with open(result_file + '/results.pkl', 'wb') as handle:
                pickle.dump([final_result, sub_final_result, col_result],
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

    ## Make Result Table
    table.print_table()
Пример #44
0
def greentensor(Freq, EpsB, Cell, NX, NY, NZ):
    """Returns the Fourier transform GF of the
       circular extension of the Green's tensor array
    """
    c0 = 299792458.0  # speed of light in vacuum
    Mu0 = 4.0 * sci.pi * 1.0e-7  # vacuum permeability
    Eps0 = 1.0 / (Mu0 * c0 * c0)  # vacuum permittivity
    Omega = 2.0 * sci.pi * Freq
    EtaB = -1.0j * Omega * Eps0 * EpsB
    ZetaB = -1.0j * Omega * Mu0
    KB = Omega * sci.sqrt(Eps0 * EpsB * Mu0)
    G = sci.zeros((NX, NY, NZ, 3, 3), complex)
    GC = sci.zeros((NX * 2, NY * 2, NZ * 2, 3, 3), complex)
    GF = sci.zeros((NX * 2, NY * 2, NZ * 2, 3, 3), complex)
    # 3D arrays of x,y,z coordinates
    xx, yy, zz = sci.mgrid[0:NX * Cell:Cell, 0:NY * Cell:Cell,
                           0:NZ * Cell:Cell]
    dd = sci.zeros((NX, NY, NZ), complex)
    alpha = sci.zeros((NX, NY, NZ), complex)
    beta = sci.zeros((NX, NY, NZ), complex)
    Q11 = sci.zeros((NX, NY, NZ), complex)
    Q12 = sci.zeros((NX, NY, NZ), complex)
    Q13 = sci.zeros((NX, NY, NZ), complex)
    Q21 = sci.zeros((NX, NY, NZ), complex)
    Q22 = sci.zeros((NX, NY, NZ), complex)
    Q23 = sci.zeros((NX, NY, NZ), complex)
    Q31 = sci.zeros((NX, NY, NZ), complex)
    Q32 = sci.zeros((NX, NY, NZ), complex)
    Q33 = sci.zeros((NX, NY, NZ), complex)
    # 3D arrays of distances
    dd = sci.sqrt((xx)**2 + (yy)**2 + (zz)**2)
    dd2 = dd * dd
    # 3D arrays of components of the Q-matrix
    sci.seterr(divide='ignore', invalid='ignore')
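    # NOTE: at the origin cell dd == 0, so the divisions below produce inf/nan;
    # those entries are overwritten by the self-patch value for G[0,0,0,...] further down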
    Q11 = sci.divide(xx * xx, dd2)
    Q12 = sci.divide(xx * yy, dd2)
    Q13 = sci.divide(xx * zz, dd2)
    Q22 = sci.divide(yy * yy, dd2)
    Q23 = sci.divide(yy * zz, dd2)
    Q33 = sci.divide(zz * zz, dd2)
    Q21 = Q12
    Q31 = Q13
    Q32 = Q23
    # alpha and beta scalar multipliers
    alpha = sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
            (-KB**2.0 - sci.divide(1.0j*3.0*KB,dd) + sci.divide(3.0,dd2))
    beta = sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
           (KB**2.0 + sci.divide(1.0j*KB,dd) - sci.divide(1.0,dd2))
    # Green's tensor without self-patch
    G[:, :, :, 0, 0] = Q11 * alpha + beta
    G[:, :, :, 0, 1] = Q12 * alpha
    G[:, :, :, 0, 2] = Q13 * alpha
    G[:, :, :, 1, 0] = Q21 * alpha
    G[:, :, :, 1, 1] = Q22 * alpha + beta
    G[:, :, :, 1, 2] = Q23 * alpha
    G[:, :, :, 2, 0] = Q31 * alpha
    G[:, :, :, 2, 1] = Q32 * alpha
    G[:, :, :, 2, 2] = Q33 * alpha + beta
    G = G * (Cell**3)  # multiplying by the elementary volume
    # self-patch
    G[0,0,0,0,0] = (2./3.)*(1.-1.j*KB*Cell*((3./(4.*sci.pi))**(1./3.)))*\
                   sci.exp(1.j*KB*Cell*((3./(4.*sci.pi))**(1./3.)))-1.0
    G[0, 0, 0, 0, 1] = 0.
    G[0, 0, 0, 0, 2] = 0.
    G[0, 0, 0, 1, 0] = 0.
    G[0, 0, 0, 1, 1] = G[0, 0, 0, 0, 0]
    G[0, 0, 0, 1, 2] = 0.
    G[0, 0, 0, 2, 0] = 0.
    G[0, 0, 0, 2, 1] = 0.
    G[0, 0, 0, 2, 2] = G[0, 0, 0, 0, 0]
    #Circular extension of G
    GC[0:NX, 0:NY, 0:NZ, :, :] = G
    DeltaOp = sci.eye(3, 3)
    s = 0
    while s <= 2:
        ss = 0
        while ss <= 2:
            GC[NX + 1:, 0:NY, 0:NZ, s, ss] = (1 - 2 * DeltaOp[0, s]) * (
                1 - 2 * DeltaOp[0, ss]) * G[:0:-1, :, :, s, ss]
            GC[NX+1:,NY+1:,0:NZ,s,ss] = (1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                                        (1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*G[:0:-1,:0:-1,:,s,ss]
            GC[NX+1:,NY+1:,NZ+1:,s,ss] = (1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                                         (1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*\
                                         (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:0:-1,:0:-1,:0:-1,s,ss]
            GC[0:NX, NY + 1:, 0:NZ, s, ss] = (1 - 2 * DeltaOp[1, s]) * (
                1 - 2 * DeltaOp[1, ss]) * G[:, :0:-1, :, s, ss]
            GC[0:NX,NY+1:,NZ+1:,s,ss] = (1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*\
                                        (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:,:0:-1,:0:-1,s,ss]
            GC[0:NX, 0:NY, NZ + 1:, s, ss] = (1 - 2 * DeltaOp[2, s]) * (
                1 - 2 * DeltaOp[2, ss]) * G[:, :, :0:-1, s, ss]
            GC[NX+1:,0:NY,NZ+1:,s,ss] = (1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                                        (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:0:-1,:,:0:-1,s,ss]
            ss = ss + 1
        s = s + 1
    # FFT of the Green's tensor array
    s = 0
    while s <= 2:
        ss = 0
        while ss <= 2:
            GF[:, :, :, s, ss] = fft.fftn(sci.squeeze(GC[:, :, :, s, ss]))
            ss = ss + 1
        s = s + 1

    return GF
Пример #45
0
def hyst_area_rand(D, A, DyV, AyV, DuV, AuV, csm_hysteretic_damping):
    """
    Function to calculate the hysteresis loop area used in the building damage calcs.
    This version works where the building capacity curves are chosen randomly
    (which means Ay needs to be rescaled by Rcap).

    In:  DyV, AyV =  yield point (vectors(nev,1))
         DuV, AuV =  ultimate point (vectors(nev,1))
         D, A = peak displacement and accel (vector(nev,1) of events)

    Out: Area = vector of events
         Glenn Fulford: 21/4/02.


This is a diagram, trying to show how the hysteresis area is
calculated if csm_hysteretic_damping = 'curve'.

The diagram shows half the hysteresis area.
y is the yield point
PP is the performance point
x2 is the translation along the displacement axis.
A2 is the area from (y-x2) to PP on the top curve.
A2 is also the area from y to (PP-x2) on the bottom curve.
A1 and A3 are triangular areas.

The hysteresis area = 2(A1+A2-A3)

        y-x2    y          PP
        |       |          |
        |       |          |
        |       |          |
        |       |          | ___.......--=----.------'''
        |       |    _,.,-':'    __..--''
        |       |.-''      |_.-''
        |    _,'|        ,-+
        |  ,'   |     ,-' /|
        | /  A2 |  ,-'   / |
        |/      |,'     /  |
        :i______|______/   |
        +      /      /    |
       /|     /      /     |
      / |    /      /      |
     /  |   /      /       |
    /   |  /      /  A3    |
   /    | /      /         |
  / A1  |/      /          |
 /      +      /           |
/      '|     /            |


    """
    # use a coord system with the origin at the beginning
    # of the hysteresis curve
    # disp('hyst: caprand='); disp(SAcapR(:,1));

    ky = (AyV / DyV)  # slope of linear part of capacity curve

    # x0=D-DyV    # x distance from linear part (<=0 implies point is linear)
    linear_region = where(D <= DyV)

    x2 = D - A / ky  # translation along displacement axis
    if csm_hysteretic_damping == 'trapezoidal':
        # y distance from linear part (<=0 implies point is linear)
        y1 = (A - AyV)
        y1[linear_region] = -1  # avoid NaNs - only Harea3 has any impact there

        x1 = 2 * x2 + y1 / ky

        cc = A - AyV
        bb = ky / (A - AyV)
        aa = -ky / bb

        oldsettings = seterr(under='ignore')
        Harea1 = cc * x1 + aa / bb * (1 - exp(-bb * x1))
        seterr(**oldsettings)
        Harea2 = 0.5 * y1 * y1 / ky
        Harea3 = 2 * x2 * AyV

        Harea = 2 * (Harea1 - Harea2 + Harea3)
        Harea[linear_region] = 0
    elif csm_hysteretic_damping == 'curve':
        cc = AuV
        bb = ky / (AuV - AyV)
        aa = (AyV - AuV) * exp(bb * DyV)

        Harea1 = 0.5 * AyV * DyV
        Harea2 = aa / bb * \
            (exp(-bb * DyV) - exp(-bb * (D + x2))) + cc * (D + x2 - DyV)
        Harea3 = 0.5 * A * (D - x2)

        Harea = 2 * (Harea1 + Harea2 - Harea3)
        Harea[linear_region] = 0

    # Area calculation from
    # ATC 40 Seismic Evaluation and Retrofit of Concrete Buildings 8-15
    # Ed = 4(Ay*Dpi - Dy*Api)

    elif csm_hysteretic_damping == 'parallelogram':
        Harea = 4 * (D * AyV - DyV * A)
        Harea[linear_region] = 0
    else:
        raise ValueError('unknown csm_hysteretic_damping: %s' % csm_hysteretic_damping)

    return Harea
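# A minimal usage sketch with made-up numbers, assuming the module imported
# `where`, `exp` and `seterr` from NumPy (those imports are not shown in this snippet):
import numpy as np

DyV, AyV = np.array([0.02, 0.03]), np.array([0.15, 0.20])   # yield points
DuV, AuV = np.array([0.20, 0.25]), np.array([0.30, 0.35])   # ultimate points
D, A = np.array([0.01, 0.15]), np.array([0.05, 0.25])       # peak displacement / acceleration
Harea = hyst_area_rand(D, A, DyV, AyV, DuV, AuV, 'curve')
# the first event is still in the linear range (D <= DyV), so its area is 0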
Пример #46
0
def plot_params(args):
    """Plot alpha, theta, and the emission probabilities"""
    old_err = sp.seterr(under='ignore')
    oldsize = matplotlib.rcParams['font.size']
    K, L = args.emit_probs.shape if not args.continuous_observations else args.means.shape

    # alpha
    #matplotlib.rcParams['font.size'] = 12
    pyplot.figure()
    _, xedges, yedges = sp.histogram2d([0,K], [0,K], bins=[K,K])
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    pyplot.imshow(args.alpha.astype(sp.float64), extent=extent, interpolation='nearest',
                  vmin=0, vmax=1,  cmap='OrRd', origin='lower')
    pyplot.xticks(sp.arange(K) + .5, sp.arange(K)+1)
    pyplot.gca().set_xticks(sp.arange(K)+1, minor=True)
    pyplot.yticks(sp.arange(K) + .5, sp.arange(K)+1)
    pyplot.gca().set_yticks(sp.arange(K)+1, minor=True)
    pyplot.grid(which='minor', alpha=.2)
    for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
    # label is a Text instance
        line.set_markersize(0)
    pyplot.ylabel('Horizontal parent state')
    pyplot.xlabel('Node state')
    pyplot.title(r"Top root transition ($\alpha$) for {approx} iteration {iteration}".
                        format(approx=args.approx, iteration=args.iteration))
    b = pyplot.colorbar(shrink=.9)
    b.set_label("Probability")
    outfile = (args.out_params + '_it{iteration}.png').format(param='alpha', **args.__dict__)
    pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)


    # beta
    pyplot.figure()
    _, xedges, yedges = sp.histogram2d([0,K], [0,K], bins=[K,K])
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    pyplot.clf()
    pyplot.imshow(args.beta.astype(sp.float64), extent=extent, interpolation='nearest',
                  vmin=0, vmax=1, cmap='OrRd', origin='lower')
    pyplot.xticks(sp.arange(K) + .5, sp.arange(K)+1)
    pyplot.gca().set_xticks(sp.arange(K)+1, minor=True)
    pyplot.yticks(sp.arange(K) + .5, sp.arange(K)+1)
    pyplot.gca().set_yticks(sp.arange(K)+1, minor=True)
    pyplot.grid(which='minor', alpha=.2)
    for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
    # label is a Text instance
        line.set_markersize(0)
    pyplot.ylabel('Vertical parent state')
    pyplot.xlabel('Node state')
    pyplot.title(r"Left root transition ($\beta$) for {approx} iteration {iteration}".
                        format(approx=args.approx, iteration=args.iteration))
    b = pyplot.colorbar(shrink=.9)
    b.set_label("Probability")
    outfile = (args.out_params + '_it{iteration}.png').format(param='beta', **args.__dict__)
    pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)


    # theta
    if args.separate_theta:
        theta_tmp = args.theta
        for i in range((args.theta.shape)[0]):
            setattr(args, 'theta_%s'%(i+1), args.theta[i,:,:,:])

    for theta_name in ['theta'] + ['theta_%s' % i for i in range(20)]:
        #print 'trying', theta_name
        if not hasattr(args, theta_name):
            #print 'missing', theta_name
            continue
        _, xedges, yedges = sp.histogram2d([0,K], [0,K], bins=[K,K])
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        if K == 18:
            numx_plots = 6
            numy_plots = 3
        elif K == 15:
            numx_plots = 5
            numy_plots = 3
        else:
            numx_plots = int(ceil(sp.sqrt(K)))
            numy_plots = int(ceil(sp.sqrt(K)))
        matplotlib.rcParams['font.size'] = 8
        fig, axs = pyplot.subplots(numy_plots, numx_plots, sharex=True, sharey=True, figsize=(numx_plots*2.5,numy_plots*2.5))
        for k in xrange(K):
            pltx, plty = k // numx_plots, k % numx_plots
            #axs[pltx,plty].imshow(args.theta[k,:,:], extent=extent, interpolation='nearest',
            axs[pltx,plty].imshow(getattr(args, theta_name)[:,k,:].astype(sp.float64), extent=extent, interpolation='nearest',
                          vmin=0, vmax=1, cmap='OrRd', aspect='auto', origin='lower')
            #if k < numx_plots:
            #axs[pltx,plty].text(0 + .5, K - .5, 'vp=%s' % (k+1), horizontalalignment='left', verticalalignment='top', fontsize=10)
            axs[pltx,plty].text(0 + .5, K - .5, 'hp=%s' % (k+1), horizontalalignment='left', verticalalignment='top', fontsize=10)
            #axs[pltx,plty].xticks(sp.arange(K) + .5, sp.arange(K))
            #axs[pltx,plty].yticks(sp.arange(K) + .5, sp.arange(K))
            axs[pltx,plty].set_xticks(sp.arange(K) + .5)
            axs[pltx,plty].set_xticks(sp.arange(K)+1, minor=True)
            axs[pltx,plty].set_xticklabels(sp.arange(K) + 1)
            axs[pltx,plty].set_yticks(sp.arange(K) + .5)
            axs[pltx,plty].set_yticks(sp.arange(K)+1, minor=True)
            axs[pltx,plty].set_yticklabels(sp.arange(K) + 1)
            for line in axs[pltx,plty].yaxis.get_ticklines() + axs[pltx,plty].xaxis.get_ticklines() + axs[pltx,plty].yaxis.get_ticklines(minor=True) + axs[pltx,plty].xaxis.get_ticklines(minor=True):
                line.set_markersize(0)
            axs[pltx,plty].grid(True, which='minor', alpha=.2)

        #fig.suptitle(r"$\Theta$ with fixed parents for {approx} iteration {iteration}".
        #                    format(approx=args.approx, iteration=args.iteration),
        #                    fontsize=14, verticalalignment='top')
        fig.suptitle('Node state', y=.03, fontsize=14, verticalalignment='center')
        #fig.suptitle('Horizontal parent state', y=.5, x=.02, rotation=90,
        fig.suptitle('Vertical parent state', y=.5, x=.02, rotation=90,
                     verticalalignment='center', fontsize=14)
        matplotlib.rcParams['font.size'] = 6.5
        fig.subplots_adjust(wspace=.05, hspace=.05, left=.05, right=.95)
        #b = fig.colorbar(shrink=.9)
        #b.set_label("Probability")
        outfile = (args.out_params + '_vertparent_it{iteration}.png').format(param=theta_name, **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)


        fig, axs = pyplot.subplots(numy_plots, numx_plots, sharex=True, sharey=True, figsize=(numx_plots*2.5,numy_plots*2.5))
        for k in xrange(K):
            pltx, plty = k // numx_plots, k % numx_plots
            axs[pltx,plty].imshow(getattr(args, theta_name)[k,:,:].astype(sp.float64), extent=extent, interpolation='nearest',
            #axs[pltx,plty].imshow(args.theta[:,k,:], extent=extent, interpolation='nearest',
                          vmin=0, vmax=1, cmap='OrRd', aspect='auto', origin='lower')
            #if k < numx_plots:
            axs[pltx,plty].text(0 + .5, K - .5, 'vp=%s' % (k+1), horizontalalignment='left', verticalalignment='top', fontsize=10)
            #axs[pltx,plty].xticks(sp.arange(K) + .5, sp.arange(K))
            #axs[pltx,plty].yticks(sp.arange(K) + .5, sp.arange(K))
            axs[pltx,plty].set_xticks(sp.arange(K) + .5)
            axs[pltx,plty].set_xticks(sp.arange(K)+1, minor=True)
            axs[pltx,plty].set_xticklabels(sp.arange(K) + 1)
            axs[pltx,plty].set_yticks(sp.arange(K) + .5)
            axs[pltx,plty].set_yticks(sp.arange(K)+1, minor=True)
            axs[pltx,plty].set_yticklabels(sp.arange(K) + 1)
            for line in axs[pltx,plty].yaxis.get_ticklines() + axs[pltx,plty].xaxis.get_ticklines() + axs[pltx,plty].yaxis.get_ticklines(minor=True) + axs[pltx,plty].xaxis.get_ticklines(minor=True):
                line.set_markersize(0)
            axs[pltx,plty].grid(True, which='minor', alpha=.2)

        #fig.suptitle(r"$\Theta$ with fixed parents for {approx} iteration {iteration}".
        #                    format(approx=args.approx, iteration=args.iteration),
        #                    fontsize=14, verticalalignment='top')
        fig.suptitle('Node state', y=.03, fontsize=14, verticalalignment='center')
        fig.suptitle('Horizontal parent state', y=.5, x=.02, rotation=90,
        #fig.suptitle('Vertical parent state', y=.5, x=.02, rotation=90,
                     verticalalignment='center', fontsize=14)
        matplotlib.rcParams['font.size'] = 6.5
        fig.subplots_adjust(wspace=.05, hspace=.05, left=.05, right=.95)
        #b = fig.colorbar(shrink=.9)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(param=theta_name, **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)


    # emission probabilities
    if args.continuous_observations:
        # plot mean values
        matplotlib.rcParams['font.size'] = 8
        pyplot.figure(figsize=(max(1,round(L/3.)),max(1, round(K/3.))))
        print (max(1,round(L/3.)),max(1, round(K/3.)))
        pyplot.imshow(args.means.astype(sp.float64), interpolation='nearest', aspect='auto',
                      vmin=0, vmax=args.means.max(), cmap='OrRd', origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l, k, '%.1f' % (args.means[k,l]), horizontalalignment='center', verticalalignment='center', fontsize=5)
        pyplot.yticks(sp.arange(K), sp.arange(K)+1)
        pyplot.gca().set_yticks(sp.arange(K)+.5, minor=True)
        pyplot.xticks(sp.arange(L), valid_marks, rotation=30, horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L)+.5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
        # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission Mean")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(param='emission_means', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

        # plot variances
        pyplot.figure(figsize=(max(1,round(L/3.)),max(1, round(K/3.))))
        print (L/3,K/3.)
        pyplot.imshow(args.variances.astype(sp.float64), interpolation='nearest', aspect='auto',
                      vmin=0, vmax=args.variances.max(), cmap='OrRd', origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l, k, '%.1f' % (args.variances[k,l]), horizontalalignment='center', verticalalignment='center', fontsize=5)
        pyplot.yticks(sp.arange(K), sp.arange(K)+1)
        pyplot.gca().set_yticks(sp.arange(K)+.5, minor=True)
        pyplot.xticks(sp.arange(L), valid_marks, rotation=30, horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L)+.5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
        # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission Variance")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(param='emission_variances', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)
    else:
        matplotlib.rcParams['font.size'] = 8
        pyplot.figure(figsize=(max(1,round(L/3.)),max(1, round(K/3.))))
        print (L/3,K/3.)
        pyplot.imshow(args.emit_probs.astype(sp.float64), interpolation='nearest', aspect='auto',
                      vmin=0, vmax=1, cmap='OrRd', origin='lower')
        for k in range(K):
            for l in range(L):
                pyplot.text(l, k, '%2.0f' % (args.emit_probs[k,l] * 100), horizontalalignment='center', verticalalignment='center')
        pyplot.yticks(sp.arange(K), sp.arange(K)+1)
        pyplot.gca().set_yticks(sp.arange(K)+.5, minor=True)
        pyplot.xticks(sp.arange(L), valid_marks, rotation=30, horizontalalignment='right')
        pyplot.gca().set_xticks(sp.arange(L)+.5, minor=True)
        pyplot.grid(which='minor', alpha=.2)
        for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
        # label is a Text instance
            line.set_markersize(0)
        pyplot.ylabel('Hidden State')
        pyplot.title("Emission probabilities")
        #b = pyplot.colorbar(shrink=.7)
        #b.set_label("Probability")
        outfile = (args.out_params + '_it{iteration}.png').format(param='emission', **args.__dict__)
        pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)


    #broad_paper_enrichment = sp.array([[16,2,2,6,17,93,99,96,98,2],
    #                               [12,2,6,9,53,94,95,14,44,1],
    #                               [13,72,0,9,48,78,49,1,10,1],
    #                               [11,1,15,11,96,99,75,97,86,4],
    #                               [5,0,10,3,88,57,5,84,25,1],
    #                               [7,1,1,3,58,75,8,6,5,1],
    #                               [2,1,2,1,56,3,0,6,2,1],
    #                               [92,2,1,3,6,3,0,0,1,1],
    #                               [5,0,43,43,37,11,2,9,4,1],
    #                               [1,0,47,3,0,0,0,0,0,1],
    #                               [0,0,3,2,0,0,0,0,0,0],
    #                               [1,27,0,2,0,0,0,0,0,0],
    #                               [0,0,0,0,0,0,0,0,0,0],
    #                               [22,28,19,41,6,5,26,5,13,37],
    #                               [85,85,91,88,76,77,91,73,85,78],
    #                               [float('nan'), float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan'),float('nan')]
    #                            ]) / 100.
    #mapping_from_broad = dict(zip(range(K), (5,2,0,14,4,6,9,1,12,-1,3,12,8,7,10,12,11,13)))
    #broad_paper_enrichment = broad_paper_enrichment[tuple(mapping_from_broad[i] for i in range(K)), :]
    #broad_names = ['Active promoter', 'Weak promoter', 'Inactive/poised promoter', 'Strong enhancer',
    #               'Strong enhancer', 'weak/poised enhancer', 'Weak/poised enhancer', 'Insulator',
    #               'Transcriptional transition', 'Transcriptional elongation', 'Weak transcribed',
    #               'Polycomb repressed', 'Heterochrom; low signal', 'Repetitive/CNV', 'Repetitive/CNV',
    #               'NA', 'NA', 'NA']
    #pyplot.figure(figsize=(L/3,K/3.))
    #print (L/3,K/3.)
    #pyplot.imshow(broad_paper_enrichment, interpolation='nearest', aspect='auto',
    #              vmin=0, vmax=1, cmap='OrRd', origin='lower')
    #for k in range(K):
    #    for l in range(L):
    #        pyplot.text(l, k, '%2.0f' % (broad_paper_enrichment[k,l] * 100), horizontalalignment='center', verticalalignment='center')
    #    pyplot.text(L, k, broad_names[mapping_from_broad[k]], horizontalalignment='left', verticalalignment='center', fontsize=6)
    #pyplot.yticks(sp.arange(K), sp.arange(K)+1)
    #pyplot.gca().set_yticks(sp.arange(K)+.5, minor=True)
    #pyplot.xticks(sp.arange(L), valid_marks, rotation=30, horizontalalignment='right')
    #pyplot.gca().set_xticks(sp.arange(L)+.5, minor=True)
    #pyplot.grid(which='minor', alpha=.2)
    #for line in pyplot.gca().yaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines() + pyplot.gca().xaxis.get_ticklines(minor=True) + pyplot.gca().yaxis.get_ticklines(minor=True):
    ## label is a Text instance
    #    line.set_markersize(0)
    #pyplot.ylabel('Hidden State')
    #pyplot.title("Broad paper Emission probabilities")
    ##b = pyplot.colorbar(shrink=.7)
    ##b.set_label("Probability")
    #pyplot.subplots_adjust(right=.7)
    #outfile = (args.out_params + '_broadpaper.png').format(param='emission', **args.__dict__)
    #pyplot.savefig(os.path.join(args.out_dir, outfile), dpi=240)

    pyplot.close('all')
    sp.seterr(**old_err)
    matplotlib.rcParams['font.size'] = oldsize
Пример #47
0
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import time
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
from sklearn.kernel_ridge import KernelRidge
import random
import warnings as ws

# Ignore all underflow warnings: values close to zero, no accuracy errors
np.seterr(under='ignore')
sp.seterr(under='ignore')
ws.simplefilter('ignore')


def kfold(dataset, k):
    kf = KFold(n_splits=k, shuffle=True, random_state=random.randint(1, 10))
    return kf


# Load the CSV file
dataset = pd.read_csv('wineQualityReds.csv')
percent = 0.80
dataset_cv = dataset[:int(percent * dataset.shape[0])]
dataset_main = dataset[int(percent * dataset.shape[0]):]

# Get Cross Validation K-Fold
folds = kfold(dataset, 5)
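# The snippet stops before the folds are actually used. A hypothetical
# continuation, assuming the last column of the CSV is the regression target:
X = dataset_cv.iloc[:, :-1].values
y = dataset_cv.iloc[:, -1].values

scores = []
for train_idx, test_idx in folds.split(X):
    model = KernelRidge(alpha=1.0, kernel='rbf')   # illustrative hyperparameters
    model.fit(X[train_idx], y[train_idx])
    scores.append(r2_score(y[test_idx], model.predict(X[test_idx])))
print('mean cross-validated R^2:', np.mean(scores))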
Пример #48
0
#History (version,date, change author)
#
#
#
""" creates Likelihood classes for testing of MCMC,RJMCMC and other fitting methods. Also servers as an example on how to write functions needed to run programs"""

_module_name = 'gauss_param_space'

import numpy as nu
#import pylab as lab
import scipy.stats as stat_dist
from scipy.special import expn, erfinv
from scipy.optimize import fmin_powell
from scipy import seterr

a = seterr('ignore')


#template class things needed for programs to run
class template_class(object):
    #things needed for mcmc or rjmcmc to run
    def __init__(self):
        #initialize and put input data
        pass

    def sampler(self):
        #sampler or proposal distribution for drawing numbers
        pass

    def prior(self):
        #prior, should be a distribution, returning values in [0,1] and 0 for out-of-bounds parameters
Пример #49
0
def relative_bin_deviation(h1, h2):  # 79 us @array, 104 us @list \w 100 bins
    r"""
    Calculate the bin-wise deviation between two histograms.
    
    The relative bin deviation between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    
    .. math::
    
        d_{rbd}(H, H') = \sum_{m=1}^M
            \frac{
                \sqrt{(H_m - H'_m)^2}
              }{
                \frac{1}{2}
                \left(
                    \sqrt{H_m^2} +
                    \sqrt{{H'}_m^2}
                \right)
              }
    
    *Attributes:*

    - a real metric
    
    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    
    *Attributes for not-equal histograms:*

    - not applicable 
    
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram, same bins as ``h1``.
    
    Returns
    -------
    relative_bin_deviation : float
        Relative bin deviation between the two histograms.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    numerator = scipy.sqrt(scipy.square(h1 - h2))
    denominator = (scipy.sqrt(scipy.square(h1)) +
                   scipy.sqrt(scipy.square(h2))) / 2.
    old_err_state = scipy.seterr(
        invalid='ignore'
    )  # divide through zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0
    result = numerator / denominator
    scipy.seterr(**old_err_state)
    result[scipy.isnan(
        result
    )] = 0  # faster than scipy.nan_to_num, which checks for +inf and -inf also
    return scipy.sum(result)
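# A quick numeric check of the formula above (illustrative only), computed with
# plain NumPy instead of the helpers used by the function:
import numpy as np

h1 = np.array([1., 2., 3., 0.])
h2 = np.array([1., 2., 4., 0.])
num = np.sqrt((h1 - h2) ** 2)
den = (np.sqrt(h1 ** 2) + np.sqrt(h2 ** 2)) / 2.
with np.errstate(invalid='ignore'):
    per_bin = num / den          # the shared empty bin gives 0/0 -> nan
per_bin[np.isnan(per_bin)] = 0   # treated as zero deviation, as in the function
print(per_bin.sum())             # ~0.2857: only the third bin differs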
Пример #50
0
from ..physics.Constants import erg_per_ev
from ..physics.SecondaryElectrons import *
import os, re, scipy, itertools, math, copy
from scipy.integrate import quad, trapz, simps

try:
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.rank
    size = MPI.COMM_WORLD.size
except ImportError:
    rank = 0
    size = 1

E_th = [13.6, 24.6, 54.4]

scipy.seterr(all='ignore')

class IntegralTable: 
    def __init__(self, pf, source, grid, logN=None):
        """
        Initialize a table of integral quantities.
        """
        self.pf = pf
        self.src = source
        self.grid = grid
        
        # need to not do this stuff if a table was supplied via source_table
        # best way: save pf in hdf5 file, use it 
        # just override table_* parameters...what else?
        
        
Пример #51
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--obs_length',
                        default=9,
                        type=int,
                        help='observation length')
    parser.add_argument('--pred_length',
                        default=12,
                        type=int,
                        help='prediction length')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    parser.add_argument('--labels',
                        required=False,
                        nargs='+',
                        help='labels of models')
    parser.add_argument('--normalize_scene',
                        action='store_true',
                        help='augment scenes')
    parser.add_argument('--unimodal',
                        action='store_true',
                        help='provide unimodal evaluation')
    parser.add_argument('--topk',
                        action='store_true',
                        help='provide topk evaluation')
    parser.add_argument('--multimodal',
                        action='store_true',
                        help='provide multimodal nll evaluation')
    parser.add_argument('--modes',
                        default=1,
                        type=int,
                        help='number of modes to predict')
    parser.add_argument('--scene_type',
                        default=0,
                        type=int,
                        choices=(0, 1, 2, 3, 4),
                        help='type of scene to evaluate')
    parser.add_argument('--thresh',
                        default=0.0,
                        type=float,
                        help='noise thresh')
    parser.add_argument('--ped_type',
                        default='primary',
                        help='type of ped to add noise to')

    args = parser.parse_args()

    scipy.seterr('ignore')

    ## Path to the data folder name to predict
    args.path = 'DATA_BLOCK/' + args.path + '/'

    ## Test_pred: Folders for saving model predictions
    args.path = args.path + 'test_pred/'

    if (not args.unimodal) and (not args.topk) and (not args.multimodal):
        args.unimodal = True  # Compute unimodal metrics by default

    if args.topk:
        args.modes = 3

    if args.multimodal:
        args.modes = 20

    enable_col1 = True

    ## drop pedestrians that appear post observation
    def drop_post_obs(ground_truth, obs_length):
        obs_end_frame = ground_truth[0][obs_length].frame
        ground_truth = [
            track for track in ground_truth if track[0].frame < obs_end_frame
        ]
        return ground_truth

    ## Writes to Test_pred
    ## Does this overwrite existing predictions? No. ###
    datasets = sorted([
        f.split('.')[-2] for f in os.listdir(args.path.replace('_pred', ''))
        if not f.startswith('.') and f.endswith('.ndjson')
    ])

    ## Model names are passed as arguments
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')

        # Loading the appropriate model (functionality only for SGAN and LSTM)
        print("Model Name: ", model_name)
        if 'sgan' in model_name:
            predictor = trajnetbaselines.sgan.SGANPredictor.load(model)
            goal_flag = predictor.model.generator.goal_flag
        elif 'vae' in model_name:
            predictor = trajnetbaselines.vae.VAEPredictor.load(model)
            goal_flag = predictor.model.goal_flag
        else:
            predictor = trajnetbaselines.lstm.LSTMPredictor.load(model)
            goal_flag = predictor.model.goal_flag

        # On CPU
        device = torch.device('cpu')
        predictor.model.to(device)

        total_scenes = 0
        average = 0
        final = 0
        gt_col = 0.
        pred_col = 0.
        neigh_scenes = 0
        topk_average = 0
        topk_final = 0
        all_goals = {}
        average_nll = 0

        ## Start writing in dataset/test_pred
        for dataset in datasets:
            # Model's name
            name = dataset.replace(
                args.path.replace('_pred', '') + 'test/', '')

            # Copy file from test into test/train_pred folder
            print('processing ' + name)
            if 'collision_test' in name:
                continue

            ## Filter for Scene Type
            reader_tag = trajnetplusplustools.Reader(
                args.path.replace('_pred', '_private') + dataset + '.ndjson',
                scene_type='tags')
            if args.scene_type != 0:
                filtered_scene_ids = [
                    s_id for s_id, tag, s in reader_tag.scenes()
                    if tag[0] == args.scene_type
                ]
            else:
                filtered_scene_ids = [
                    s_id for s_id, _, _ in reader_tag.scenes()
                ]

            # Read file from 'test'
            reader = trajnetplusplustools.Reader(
                args.path.replace('_pred', '') + dataset + '.ndjson',
                scene_type='paths')
            ## Necessary modification of train scene to add filename (for goals)
            scenes = [(dataset, s_id, s) for s_id, s in reader.scenes()
                      if s_id in filtered_scene_ids]

            ## Consider goals
            ## Goal file must be present in 'goal_files/test_private' folder
            ## Goal file must have the same name as corresponding test file
            if goal_flag:
                goal_dict = pickle.load(
                    open('goal_files/test_private/' + dataset + '.pkl', "rb"))
                all_goals[dataset] = {
                    s_id: [goal_dict[path[0].pedestrian] for path in s]
                    for _, s_id, s in scenes
                }

            ## Get Goals
            if goal_flag:
                scene_goals = [
                    np.array(all_goals[filename][scene_id])
                    for filename, scene_id, _ in scenes
                ]
            else:
                scene_goals = [
                    np.zeros((len(paths), 2)) for _, scene_id, paths in scenes
                ]

            print("Getting Predictions")
            scenes = tqdm(scenes)
            ## Get all predictions in parallel. Faster!
            pred_list = Parallel(n_jobs=12)(
                delayed(process_scene)(predictor, model_name, paths,
                                       scene_goal, args)
                for (_, _, paths), scene_goal in zip(scenes, scene_goals))

            ## GT Scenes
            reader_gt = trajnetplusplustools.Reader(
                args.path.replace('_pred', '_private') + dataset + '.ndjson',
                scene_type='paths')
            scenes_gt = [
                s for s_id, s in reader_gt.scenes()
                if s_id in filtered_scene_ids
            ]
            total_scenes += len(scenes_gt)

            print("Evaluating Predictions")
            scenes = tqdm(scenes)
            for (predictions, (_, scene_id, paths),
                 ground_truth) in zip(pred_list, scenes, scenes_gt):

                ## Extract 1) first_frame, 2) frame_diff 3) ped_ids for writing predictions
                observed_path = paths[0]
                frame_diff = observed_path[1].frame - observed_path[0].frame
                first_frame = observed_path[args.obs_length -
                                            1].frame + frame_diff
                ped_id = observed_path[0].pedestrian
                ped_id_ = []
                for j, _ in enumerate(paths[1:]):  ## Only need neighbour ids
                    ped_id_.append(paths[j + 1][0].pedestrian)

                if args.unimodal:  ## Unimodal
                    ## ADE / FDE
                    prediction, neigh_predictions = predictions[0]
                    prediction = np.round(prediction, 2)
                    ## make Track Rows
                    # primary
                    prediction = [
                        trajnetplusplustools.TrackRow(
                            first_frame + i * frame_diff, ped_id,
                            prediction[i, 0], prediction[i, 1], 0)
                        for i in range(len(prediction))
                    ]

                    primary_tracks = [
                        t for t in prediction if t.prediction_number == 0
                    ]
                    frame_gt = [
                        t.frame for t in ground_truth[0]
                    ][args.obs_length:args.obs_length + args.pred_length]
                    frame_pred = [t.frame for t in primary_tracks]

                    ## To verify if same scene
                    if frame_gt != frame_pred:
                        raise Exception('frame numbers are not consistent')

                    average_l2 = trajnetplusplustools.metrics.average_l2(
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length],
                        primary_tracks,
                        n_predictions=args.pred_length)
                    final_l2 = trajnetplusplustools.metrics.final_l2(
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length], primary_tracks)

                    # aggregate FDE and ADE
                    average += average_l2
                    final += final_l2

                    ground_truth = drop_post_obs(ground_truth, args.obs_length)
                    ## Collision Metrics
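                    ## Col-II (below): the predicted primary track collides with a
                    ## ground-truth neighbour. Col-I (further below): it collides with
                    ## one of the model's own predicted neighbours.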
                    for j in range(1, len(ground_truth)):
                        if trajnetplusplustools.metrics.collision(
                                primary_tracks,
                                ground_truth[j],
                                n_predictions=args.pred_length):
                            gt_col += 1
                            break

                    num_gt_neigh = len(ground_truth) - 1
                    num_predicted_neigh = neigh_predictions.shape[1]
                    if num_gt_neigh != num_predicted_neigh:
                        enable_col1 = False
                    # [Col-I] only if neighs in gt = neighs in prediction
                    if enable_col1:
                        neigh_scenes += 1
                        for n in range(neigh_predictions.shape[1]):
                            neigh = neigh_predictions[:, n]
                            neigh = np.round(neigh, 2)
                            neigh_track = [
                                trajnetplusplustools.TrackRow(
                                    first_frame + j * frame_diff, n,
                                    neigh[j, 0], neigh[j, 1], 0)
                                for j in range(len(neigh))
                            ]
                            if trajnetplusplustools.metrics.collision(
                                    primary_tracks,
                                    neigh_track,
                                    n_predictions=args.pred_length):
                                pred_col += 1
                                break

                primary_tracks_all = [
                    trajnetplusplustools.TrackRow(first_frame + i * frame_diff,
                                                  ped_id, x, y, m)
                    for m, (prim, neighs) in predictions.items()
                    for i, (x, y) in enumerate(prim)
                ]

                if args.topk:
                    topk_ade, topk_fde = trajnetplusplustools.metrics.topk(
                        primary_tracks_all,
                        ground_truth[0][args.obs_length:args.obs_length +
                                        args.pred_length],
                        n_predictions=args.pred_length)
                    topk_average += topk_ade
                    topk_final += topk_fde

                if args.multimodal:
                    nll_val = trajnetplusplustools.metrics.nll(
                        primary_tracks_all,
                        ground_truth[0],
                        n_predictions=args.pred_length,
                        n_samples=20)
                    average_nll += nll_val

        if args.unimodal:
            ## Average ADE and FDE
            average /= total_scenes
            final /= total_scenes
            gt_col /= (total_scenes * 0.01)
            if not enable_col1:
                pred_col = -1
            else:
                pred_col /= (neigh_scenes * 0.01)

            print('ADE: ', np.round(average, 3))
            print('FDE: ', np.round(final, 3))
            print("Col-I: ", np.round(pred_col, 2))
            print("Col-II: ", np.round(gt_col, 2))

        if args.topk:
            topk_average /= total_scenes
            topk_final /= total_scenes
            print('Topk_ADE: ', topk_average)
            print('Topk_FDE: ', topk_final)

        if args.multimodal:
            average_nll /= total_scenes
            print('Average NLL: ', average_nll)
Пример #52
0
def disable_warnings():
    scipy.seterr(over='ignore',
                 divide='ignore',
                 invalid='ignore',
                 under='ignore')
    logging.root.setLevel(logging.CRITICAL)
Пример #53
0
    #######################################################################
    # This is the main part of the program, which calls the above functions
    #######################################################################
    # First, initialise some of our variables to be empty
    coords_array = scipy.array([])
    point_handles_array = scipy.array([])
    handle_of_regression_line_plot = []
    ### Set up an initial space to click inside
    axis_range = 10
    ### Make the figure window
    pylab.figure()
    ### Clear the figure window
    pylab.clf() # clf means "clear the figure"
    ### In order to keep the boundaries of the figure fixed in place,
    ### we will draw a white box around the region that we want.
    pylab.plot(axis_range*scipy.array([-1, 1, 1, -1]),
               axis_range*scipy.array([-1, -1, 1, 1]),'w-')
    pylab.axis('equal')  # Make the tick-marks equally spaced on x- and y-axes
    pylab.axis(axis_range*scipy.array([-1, 1, -1, 1]))
    ### Python issues a warning when we try to calculate
    ### the correlation when there are just two points,
    ### as the p-value is zero. This next line hides that warning
    scipy.seterr(invalid="ignore")
    ### Tell Python to call a function every time
    ### when the mouse is pressed in this figure
    pylab.connect('button_press_event', do_this_when_the_mouse_is_clicked)

    clear_the_figure_and_empty_points_list()
    pylab.show()    # This shows the figure window onscreen

Пример #54
0
def main():
    # This configures numpy/scipy to raise an exception in case of errors, instead of printing a warning and going ahead.
    numpy.seterr(all='raise')
    scipy.seterr(all='raise')

    parser = argparse.ArgumentParser(
        description='Runs a set of benchmarks defined in a YAML file.')
    parser.add_argument(
        '--fruit-benchmark-sources-dir',
        help='Path to the fruit sources (used for benchmarking code only)')
    parser.add_argument('--fruit-sources-dir',
                        help='Path to the fruit sources')
    parser.add_argument('--boost-di-sources-dir',
                        help='Path to the Boost.DI sources')
    parser.add_argument(
        '--output-file',
        help=
        'The output file where benchmark results will be stored (1 per line, with each line in JSON format). These can then be formatted by e.g. the format_bench_results script.'
    )
    parser.add_argument(
        '--benchmark-definition',
        help=
        'The YAML file that defines the benchmarks (see fruit_wiki_benchs.yml for an example).'
    )
    args = parser.parse_args()

    if args.output_file is None:
        raise Exception('You must specify --output_file')
    sh.rm('-f', args.output_file)

    fruit_build_tmpdir = tempfile.gettempdir() + '/fruit-benchmark-build-dir'

    with open(args.benchmark_definition, 'r') as f:
        yaml_file_content = yaml.load(f)
        global_definitions = yaml_file_content['global']
        benchmark_definitions = expand_benchmark_definitions(
            yaml_file_content['benchmarks'])

    benchmark_index = 0

    all_compilers = {
        benchmarks_definition['compiler']
        for benchmarks_definition in benchmark_definitions
    }

    for compiler_executable_name in all_compilers:
        print('Preparing for benchmarks with the compiler %s' %
              compiler_executable_name)
        compiler_name = determine_compiler_name(compiler_executable_name)

        # Build Fruit in fruit_build_tmpdir, so that fruit_build_tmpdir points to a built Fruit (useful for e.g. the config header).
        shutil.rmtree(fruit_build_tmpdir, ignore_errors=True)
        os.makedirs(fruit_build_tmpdir)
        modified_env = os.environ.copy()
        modified_env['CXX'] = compiler_executable_name
        sh.cmake(args.fruit_sources_dir,
                 '-DCMAKE_BUILD_TYPE=Release',
                 _cwd=fruit_build_tmpdir,
                 _env=modified_env)
        make_command(_cwd=fruit_build_tmpdir)

        for benchmark_definition in benchmark_definitions:
            if benchmark_definition['compiler'] != compiler_executable_name:
                continue

            # The 'compiler_name' dimension is synthesized automatically from the 'compiler' dimension.
            # We put the compiler name/version in the results because the same 'compiler' value might refer to different compiler versions
            # (e.g. if GCC 6.0.0 is installed when benchmarks are run, then it's updated to GCC 6.0.1 and finally the results are formatted, we
            # want the formatted results to say "GCC 6.0.0" instead of "GCC 6.0.1").
            benchmark_definition = benchmark_definition.copy()
            benchmark_definition['compiler_name'] = compiler_name

            benchmark_index += 1
            print('%s/%s: %s' % (benchmark_index, len(benchmark_definitions),
                                 benchmark_definition))
            benchmark_name = benchmark_definition['name']

            if (benchmark_name in {
                    'boost_di_compile_time', 'boost_di_run_time',
                    'boost_di_executable_size'
            } and args.boost_di_sources_dir is None):
                raise Exception(
                    'Error: you need to specify the --boost-di-sources-dir flag in order to run Boost.DI benchmarks.'
                )

            if benchmark_name == 'new_delete_run_time':
                benchmark = NewDeleteRunTimeBenchmark(
                    benchmark_definition,
                    fruit_benchmark_sources_dir=args.
                    fruit_benchmark_sources_dir)
            elif benchmark_name == 'fruit_single_file_compile_time':
                benchmark = FruitSingleFileCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_benchmark_sources_dir=args.
                    fruit_benchmark_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_compile_time':
                benchmark = FruitCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_run_time':
                benchmark = FruitRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_executable_size':
                benchmark = FruitExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'boost_di_compile_time':
                benchmark = BoostDiCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_run_time':
                benchmark = BoostDiRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_executable_size':
                benchmark = BoostDiExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            else:
                raise Exception("Unrecognized benchmark: %s" % benchmark_name)

            run_benchmark(benchmark,
                          output_file=args.output_file,
                          max_runs=global_definitions['max_runs'])
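The comment near the top of this example explains why each result records a 'compiler_name': if the compiler is upgraded between running the benchmarks and formatting the results, old results should still be labelled with the version that actually ran. A minimal, hypothetical sketch of capturing and caching that version string (not the script's actual determine_compiler_name implementation):

import functools
import subprocess

@functools.lru_cache(maxsize=None)
def compiler_name_sketch(compiler_executable_name):
    # Memoized so that repeated lookups (e.g. from a benchmark's describe()) are instant.
    # Uses the first line of `<compiler> --version`, e.g. "g++ (GCC) 6.0.0".
    output = subprocess.check_output([compiler_executable_name, '--version'],
                                     universal_newlines=True)
    return output.splitlines()[0].strip()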
Example #55
0
def main():
    # This configures numpy/scipy to raise an exception in case of errors, instead of printing a warning and going ahead.
    numpy.seterr(all='raise')
    scipy.seterr(all='raise')

    parser = argparse.ArgumentParser(description='Runs a set of benchmarks defined in a YAML file.')
    parser.add_argument('--fruit-benchmark-sources-dir', help='Path to the fruit sources (used for benchmarking code only)')
    parser.add_argument('--fruit-sources-dir', help='Path to the fruit sources')
    parser.add_argument('--boost-di-sources-dir', help='Path to the Boost.DI sources')
    parser.add_argument('--output-file',
                        help='The output file where benchmark results will be stored (1 per line, with each line in JSON format). These can then be formatted by e.g. the format_bench_results script.')
    parser.add_argument('--benchmark-definition', help='The YAML file that defines the benchmarks (see fruit_wiki_benchs_fruit.yml for an example).')
    parser.add_argument('--continue-benchmark', help='If this is \'true\', continues a previous benchmark run instead of starting from scratch (taking into account the existing benchmark results in the file specified with --output-file).')
    args = parser.parse_args()

    if args.output_file is None:
        raise Exception('You must specify --output_file')
    if args.continue_benchmark == 'true':
        try:
            with open(args.output_file, 'r') as f:
                previous_run_completed_benchmarks = [json.loads(line)['benchmark'] for line in f.readlines()]
        except FileNotFoundError:
            previous_run_completed_benchmarks = []
    else:
        previous_run_completed_benchmarks = []
        run_command('rm', args=['-f', args.output_file])

    fruit_build_tmpdir = tempfile.gettempdir() + '/fruit-benchmark-build-dir'

    with open(args.benchmark_definition, 'r') as f:
        yaml_file_content = yaml.load(f)
        global_definitions = yaml_file_content['global']
        benchmark_definitions = expand_benchmark_definitions(yaml_file_content['benchmarks'])

    benchmark_index = 0

    for (compiler_executable_name, additional_cmake_args), benchmark_definitions_with_current_config \
            in group_by(benchmark_definitions,
                        lambda benchmark_definition:
                            (benchmark_definition['compiler'], tuple(benchmark_definition['additional_cmake_args']))):

        print('Preparing for benchmarks with the compiler %s, with additional CMake args %s' % (compiler_executable_name, additional_cmake_args))
        # We compute this here (and memoize the result) so that the benchmark's describe() will retrieve the cached
        # value instantly.
        determine_compiler_name(compiler_executable_name)

        # Build Fruit in fruit_build_tmpdir, so that fruit_build_tmpdir points to a built Fruit (useful for e.g. the config header).
        shutil.rmtree(fruit_build_tmpdir, ignore_errors=True)
        os.makedirs(fruit_build_tmpdir)
        modified_env = os.environ.copy()
        modified_env['CXX'] = compiler_executable_name
        run_command('cmake',
                    args=[
                        args.fruit_sources_dir,
                        '-DCMAKE_BUILD_TYPE=Release',
                        *additional_cmake_args,
                    ],
                    cwd=fruit_build_tmpdir,
                    env=modified_env)
        run_command('make', args=make_args, cwd=fruit_build_tmpdir)

        for benchmark_definition in benchmark_definitions_with_current_config:
            benchmark_index += 1
            print('%s/%s: %s' % (benchmark_index, len(benchmark_definitions), benchmark_definition))
            benchmark_name = benchmark_definition['name']

            if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
                and args.boost_di_sources_dir is None):
                raise Exception('Error: you need to specify the --boost-di-sources-dir flag in order to run Boost.DI benchmarks.')

            if benchmark_name == 'new_delete_run_time':
                benchmark = NewDeleteRunTimeBenchmark(
                    benchmark_definition,
                    fruit_benchmark_sources_dir=args.fruit_benchmark_sources_dir)
            elif benchmark_name == 'fruit_single_file_compile_time':
                benchmark = FruitSingleFileCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_benchmark_sources_dir=args.fruit_benchmark_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_compile_time':
                benchmark = FruitCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_run_time':
                benchmark = FruitRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_executable_size':
                benchmark = FruitExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'boost_di_compile_time':
                benchmark = BoostDiCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_run_time':
                benchmark = BoostDiRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_executable_size':
                benchmark = BoostDiExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            else:
                raise Exception("Unrecognized benchmark: %s" % benchmark_name)

            if benchmark.describe() in previous_run_completed_benchmarks:
                print("Skipping benchmark that was already run previously (due to --continue-benchmark):", benchmark.describe())
                continue

            run_benchmark(benchmark, output_file=args.output_file, max_runs=global_definitions['max_runs'])
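The first two lines of main() above switch numpy and scipy from printing warnings to raising exceptions on floating-point errors. A small stand-alone illustration of the numpy side of that setting:

import numpy

numpy.seterr(all='raise')
try:
    numpy.float64(1.0) / numpy.float64(0.0)   # would normally warn and return inf
except FloatingPointError as error:
    print('caught:', error)                   # with all='raise' it raises instead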
Example #56
0
    # moduleNames is used for do_coverage
    # test_result is used in distribution.py
    return moduleNames, test_result


if __name__ == '__main__':
    """
    Note, the parameter is currently obsolete
    The eqrm_code directory that will be tested can be passed in as
    a parameter.  This is usefull to test one sandpit from another
    sandpit.  Used in distribution.py
    """
    from os import access, F_OK
    import sys

    seterr(all='warn')

    if len(sys.argv) > 1 and access(sys.argv[1], F_OK):
        path = sys.argv[1]
        # The parameter is currently obsolete (see the docstring above), so the
        # value is deliberately overridden with None.
        path = None
    else:
        path = None
    _, test_result = main(path=path)
    try:
        c_errors_failures = len(test_result.errors) + len(test_result.failures)
    except Exception:
        print("WARNING TEST RESULTS UNKNOWN")
        c_errors_failures = 1
    sys.exit(c_errors_failures)
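The block above turns a unittest result into the process exit code: the number of errors plus failures, so 0 means everything passed. A self-contained sketch of that convention, using a hypothetical test case:

import sys
import unittest

class _ExampleTest(unittest.TestCase):
    def test_passes(self):
        self.assertEqual(1 + 1, 2)

suite = unittest.TestLoader().loadTestsFromTestCase(_ExampleTest)
result = unittest.TextTestRunner().run(suite)
sys.exit(len(result.errors) + len(result.failures))   # 0 on success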
Example #57
0
def ypred_leave_one_out(train, test, percent):
    
    #split to ground truth (y) and feature values (X)
    ytrain, Xtrain = np.hsplit(train, np.array([1]))
    ytest, Xtest = np.hsplit(test, np.array([1]))
    ytrain = np.concatenate(ytrain)
    ytest = np.concatenate(ytest)
    
    #normalize feature matrices
    (mins,maxs) = normalize_by_columns(Xtrain)
    
    #reduce features
    a = maxs-mins
    boolean_array = np.bool_(maxs-mins < 2.2204460492503131e-16)
    #print np.where(boolean_array)
    Xtrain = Xtrain.compress(np.logical_not(boolean_array), axis=1)
    training_n, reduced_feature_n = Xtrain.shape
    
    
    
    seterr(invalid='print',divide='print')
    idx = 0
    C = [(0), (0), (0)]
    E = np.zeros(training_n)
    for idx in range(0, reduced_feature_n):
        xi = Xtrain.T[idx]
        #print xi.shape, idx
        #print ytrain
        #print xi
        #ytrain, xi
        slope, intercept, r_value, p_value, std_err = stats.linregress(xi,ytrain)
        B = [(slope), (intercept), (r_value)]
        #print 'r value', r_value
        #print 'slope', slope
        #print 'intercept', intercept
        C = np.c_[ C, B ]
        #plot(xi,line,'r-',xi,y,'o')
        #show()    
        count = 0
        D = [[(5)]]
        for count in range(0,training_n):
            y_pred = slope*xi[count]+intercept
            D = np.append(D, np.array(y_pred))
        D = np.delete(D,0,0)
        E = np.vstack([E, D])
        idx += 1  # note: redundant, the for loop already advances idx
    C = np.delete(C,0,1)
    E = np.delete(E,0,0)
    E = E.T
    C[2] = np.square(C[2])
    
    #reduce features based on R
    new_feature_number = math.ceil((percent / 100.0) * reduced_feature_n)  # float division so an integer percent is not truncated
    #new_feature_number = 60
    number_of_features_cut = reduced_feature_n - new_feature_number
    index = C[2].argsort()[:number_of_features_cut]
    C = np.delete(C, index, 1)    
    
    
    #normalize test matrices
    normalize_by_columns(Xtest, mins, maxs)
    Xtest = Xtest.compress(np.logical_not(boolean_array), axis=1)
    Xtest = np.delete(Xtest, index, 1)
    Xtrain = np.delete(Xtrain, index, 1)
    
    ##############################
    
    #weight the train and test matrices
    w = C[2]
    Xtrain = w*Xtrain
    Xtest = w*Xtest
    
    
    #add ones to feature matrix for least squares
    # test_n was not defined in this snippet; derive it from the test matrix.
    test_n = Xtest.shape[0]
    Xtrain = np.hstack([np.ones((training_n, 1)), Xtrain])
    Xtest = np.hstack([np.ones((test_n, 1)), Xtest])
    
  
    
    #least squares linear regression
    a = np.linalg.lstsq(Xtrain, ytrain)[0]
    ypred = np.array(np.dot(Xtest,a))
    ytest = np.array(ytest)
    return ypred, new_feature_number
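The final fitting step above prepends a column of ones (the intercept term) and solves an ordinary least-squares problem with numpy. A self-contained sketch of that pattern on synthetic data (not data from the function above):

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(50)
y = 2.0 * x + 1.0 + 0.01 * rng.standard_normal(50)   # roughly y = 2x + 1

# Prepend a column of ones so the first coefficient acts as the intercept.
X_design = np.hstack([np.ones((x.size, 1)), x.reshape(-1, 1)])
coeffs, *_ = np.linalg.lstsq(X_design, y, rcond=None)
y_pred = X_design @ coeffs
print(coeffs)   # close to [1.0, 2.0]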
Example #58
0
def main():
    # This configures numpy/scipy to raise an exception in case of errors, instead of printing a warning and going ahead.
    numpy.seterr(all='raise')
    scipy.seterr(all='raise')

    parser = argparse.ArgumentParser(description='Runs a set of benchmarks defined in a YAML file.')
    parser.add_argument('--fruit-benchmark-sources-dir', help='Path to the fruit sources (used for benchmarking code only)')
    parser.add_argument('--fruit-sources-dir', help='Path to the fruit sources')
    parser.add_argument('--boost-di-sources-dir', help='Path to the Boost.DI sources')
    parser.add_argument('--output-file',
                        help='The output file where benchmark results will be stored (1 per line, with each line in JSON format). These can then be formatted by e.g. the format_bench_results script.')
    parser.add_argument('--benchmark-definition', help='The YAML file that defines the benchmarks (see fruit_wiki_benchs_fruit.yml for an example).')
    parser.add_argument('--continue-benchmark', help='If this is \'true\', continues a previous benchmark run instead of starting from scratch (taking into account the existing benchmark results in the file specified with --output-file).')
    args = parser.parse_args()

    if args.output_file is None:
        raise Exception('You must specify --output_file')
    if args.continue_benchmark == 'true':
        with open(args.output_file, 'r') as f:
            previous_run_completed_benchmarks = [json.loads(line)['benchmark'] for line in f.readlines()]
    else:
        previous_run_completed_benchmarks = []
        sh.rm('-f', args.output_file)

    fruit_build_tmpdir = tempfile.gettempdir() + '/fruit-benchmark-build-dir'

    with open(args.benchmark_definition, 'r') as f:
        yaml_file_content = yaml.load(f)
        global_definitions = yaml_file_content['global']
        benchmark_definitions = expand_benchmark_definitions(yaml_file_content['benchmarks'])

    benchmark_index = 0

    all_compilers = {benchmark_definition['compiler'] for benchmark_definition in benchmark_definitions}

    for compiler_executable_name in all_compilers:
        print('Preparing for benchmarks with the compiler %s' % compiler_executable_name)
        # We compute this here (and memoize the result) so that the benchmark's describe() will retrieve the cached
        # value instantly.
        determine_compiler_name(compiler_executable_name)

        # Build Fruit in fruit_build_tmpdir, so that fruit_build_tmpdir points to a built Fruit (useful for e.g. the config header).
        shutil.rmtree(fruit_build_tmpdir, ignore_errors=True)
        os.makedirs(fruit_build_tmpdir)
        modified_env = os.environ.copy()
        modified_env['CXX'] = compiler_executable_name
        sh.cmake(args.fruit_sources_dir, '-DCMAKE_BUILD_TYPE=Release', _cwd=fruit_build_tmpdir, _env=modified_env)
        make_command(_cwd=fruit_build_tmpdir)

        for benchmark_definition in benchmark_definitions:
            if benchmark_definition['compiler'] != compiler_executable_name:
                continue

            benchmark_index += 1
            print('%s/%s: %s' % (benchmark_index, len(benchmark_definitions), benchmark_definition))
            benchmark_name = benchmark_definition['name']

            if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'}
                and args.boost_di_sources_dir is None):
                raise Exception('Error: you need to specify the --boost-di-sources-dir flag in order to run Boost.DI benchmarks.')

            if benchmark_name == 'new_delete_run_time':
                benchmark = NewDeleteRunTimeBenchmark(
                    benchmark_definition,
                    fruit_benchmark_sources_dir=args.fruit_benchmark_sources_dir)
            elif benchmark_name == 'fruit_single_file_compile_time':
                benchmark = FruitSingleFileCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_benchmark_sources_dir=args.fruit_benchmark_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_compile_time':
                benchmark = FruitCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_run_time':
                benchmark = FruitRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'fruit_executable_size':
                benchmark = FruitExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir)
            elif benchmark_name == 'boost_di_compile_time':
                benchmark = BoostDiCompileTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_run_time':
                benchmark = BoostDiRunTimeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            elif benchmark_name == 'boost_di_executable_size':
                benchmark = BoostDiExecutableSizeBenchmark(
                    benchmark_definition,
                    fruit_sources_dir=args.fruit_sources_dir,
                    fruit_build_tmpdir=fruit_build_tmpdir,
                    boost_di_sources_dir=args.boost_di_sources_dir)
            else:
                raise Exception("Unrecognized benchmark: %s" % benchmark_name)

            if benchmark.describe() in previous_run_completed_benchmarks:
                print("Skipping benchmark that was already run previously (due to --continue-benchmark):", benchmark.describe())
                continue

            run_benchmark(benchmark, output_file=args.output_file, max_runs=global_definitions['max_runs'])
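The --continue-benchmark handling above treats the output file as JSON Lines: one result object per line, with completed benchmarks re-read via the 'benchmark' key. A small sketch of that round trip (the record layout beyond the 'benchmark' key is an assumption, not taken from the script):

import json

records = [{'benchmark': {'name': 'fruit_compile_time'}, 'result': {'seconds': 1.5}}]

with open('/tmp/benchmark_results.jsonl', 'w') as f:
    for record in records:
        f.write(json.dumps(record) + '\n')

with open('/tmp/benchmark_results.jsonl', 'r') as f:
    completed = [json.loads(line)['benchmark'] for line in f]
print(completed)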