Example #1
def residual(x, n, input_moments):
    ph = PhaseType(*decompose(x, n))
    estimated_moments = [
        ph.moment(i + 1) for i in range(len(input_moments))
    ]
    estimated_moments = np.asarray(estimated_moments)
    return np.r_[estimated_moments - input_moments,
                 10 * (1 - ph.pmf0.sum())]
Example #2
    def test_moments_ph_distribution(self):
        source = PhaseType.exponential(3.0)

        self.assertIsInstance(stats.moment(source, 1), np.ndarray)
        self.assertIsInstance(stats.moment(source, 2), np.ndarray)
        assert_almost_equal(stats.moment(source, 1), [1 / 3], 10)
        assert_almost_equal(stats.moment(source, 3), [1 / 3, 2 / 9, 2 / 9], 10)
Example #3
def fit_ph_nonlinear_opt(moments, order=3, x0=None, loss=None):
    """
    Fit a PH distribution to the given moments using non-linear optimization.

    Args:
        moments: raw moments to match (the first element is the mean)
        order: order (number of phases) of the PH distribution, default: 3
        x0: initial guess for the optimizer; a default is used if `None`
        loss: loss function passed to `scipy.optimize.least_squares`

    Returns: PH distribution
    """
    def decompose(x, n):
        # Split the flat parameter vector into the initial probability vector
        # (tau) and the subgenerator matrix (s); each diagonal is minus the
        # sum of its parameter row, whose last element acts as the exit rate.
        tau = x[:n]
        s = np.zeros((n, n))
        for i in range(n):
            row = x[(i + 1) * n:(i + 2) * n]
            s[i] = np.concatenate((row[:i], [-np.sum(row)], row[i:n - 1]))
        return s, tau

    def residual(x, n, input_moments):
        # Residual vector: per-moment mismatches plus a penalty term pushing
        # the initial probabilities (pmf0) to sum to 1.
        ph = PhaseType(*decompose(x, n))
        estimated_moments = [
            ph.moment(i + 1) for i in range(len(input_moments))
        ]
        estimated_moments = np.asarray(estimated_moments)
        return np.r_[estimated_moments - input_moments,
                     10 * (1 - ph.pmf0.sum())]

    def _x(v1, v2, a_order):
        # Flat parameter vector: a_order slots for tau, a_order**2 for rates.
        return [v1] * a_order + [v2] * (a_order * a_order)

    moments = np.asarray(moments)
    normalized_moments, mu = stats.normalize_moments(moments)

    params = {
        'fun': residual,
        'x0': x0 if x0 is not None else np.array(_x(1. / order, 1., order)),
        'bounds': (_x(0., 0., order), _x(1., np.inf, order)),
        'kwargs': {
            'input_moments': normalized_moments,
            'n': order,
        },
    }
    if loss is not None:
        params['loss'] = loss

    result = scipy.optimize.least_squares(**params)
    # noinspection PyUnresolvedReferences
    normalized_subgenerator, pmf0 = decompose(result.x, order)

    # Normalize pmf0: the optimizer may return probabilities whose sum
    # slightly deviates from 1.
    sum_prob = sum(pmf0)
    if np.abs(sum_prob - 1.0) > 1e-18:
        pmf0 = pmf0 / sum_prob

    subgenerator = normalized_subgenerator / mu
    return PhaseType(subgenerator, pmf0)
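
A brief usage sketch (illustrative only, not taken from the library's sources):
fit a PH(2) to the first three raw moments of an exponential distribution with
rate 3 and read back the fitted moments.

# Hypothetical usage; for Exp(3) the raw moments are k! / 3**k.
moments = [1 / 3, 2 / 9, 2 / 9]
ph = fit_ph_nonlinear_opt(moments, order=2)
fitted = [ph.moment(k + 1) for k in range(3)]  # should be close to `moments`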
Example #4
         queue_size_pmf=[
             0.09230753, 0.0630149, 0.07784193, 0.09615768, 0.11878301,
             0.14673196, 0.18125713, 0.22390586
         ],
         queue_size_avg=4.3708,
         queue_size_var=5.2010,
         utilization=0.9587,
         loss_prob=0.2239,
         bandwidth=32.5959,
         response_time=0.163,
         wait_time=0.134,
     ),
     '(MM1NQueue: arrival_rate=42, service_rate=34, capacity=8)'),
 # MAP/PH/1/N representation of M/M/1/N queue:
 (MapPh1NQueue(MarkovArrival.poisson(2),
               PhaseType.exponential(5),
               queue_capacity=4),
  QueueProps(
      arrival_rate=2,
      service_rate=5,
      departure_rate=1.9877,
      system_size_pmf=[0.6025, 0.2410, 0.0964, 0.0385, 0.0154, 0.0062],
      system_size_avg=0.6420,
      system_size_var=0.9624,
      queue_size_pmf=[0.8434, 0.0964, 0.0385, 0.0154, 0.0062],
      queue_size_avg=0.2444,
      queue_size_var=0.4284,
      utilization=0.3975,
      loss_prob=0.0062,
      bandwidth=1.9877,
      response_time=0.323,
Example #5
    # Define subgenerator and probabilities vector elements
    if c > 0:
        p = (-b + 6 * m1 * d + a) / (b + a)
        l1 = (b - a) / c
        l2 = (b + a) / c
    elif c < 0:
        p = (b - 6 * m1 * d + a) / (-b + a)
        l1 = (b + a) / c
        l2 = (b - a) / c
    else:
        p = 0
        l1 = 1 / m1
        l2 = 1 / m1

    # Build the distribution and compute estimation errors:
    ph = PhaseType(sub=np.asarray([[-l1, l1], [0.0, -l2]]),
                   p=np.asarray([p, 1 - p]))
    errors = [abs(m - ph.moment(i + 1)) / m for i, m in enumerate(moments)]
    return ph, np.asarray(errors)


def get_acph2_m2_min(m1: float) -> float:
    """
    Get the minimum feasible m2 (second moment) for ACPH(2) fitting.

    According to [1], m2 has only a lower bound, since the squared coefficient
    of variation (CV^2) must be greater than or equal to 0.5.

    If m1 < 0, a `ValueError` is raised.

    Parameters
    ----------
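
For reference, the bound described in the docstring above follows from the
definition of the squared coefficient of variation, cv^2 = (m2 - m1^2) / m1^2:
requiring cv^2 >= 0.5 gives m2 >= 1.5 * m1^2. A minimal sketch of a body
consistent with that bound (an assumption for illustration; the actual
implementation may differ):

def get_acph2_m2_min_sketch(m1: float) -> float:
    # cv^2 = (m2 - m1^2) / m1^2 >= 0.5  implies  m2 >= 1.5 * m1^2
    if m1 < 0:
        raise ValueError(f"expected non-negative m1, got {m1}")
    return 1.5 * m1 ** 2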
Example #6
import pytest
from numpy.testing import assert_allclose
from pydesim import simulate

from pyqumo.random import Exponential, PhaseType
from pyqumo.qsim import QueueingSystem, QueueingTandemNetwork, \
    tandem_queue_network, tandem_queue_network_with_fixed_service


@pytest.mark.parametrize('arrival,service,stime_limit', [
    (Exponential(5), Exponential(1), 12000),
    (Exponential(3), Exponential(2), 12000),
    (PhaseType.exponential(10.0), PhaseType.exponential(48.0), 4000),
])
def test_mm1_model(arrival, service, stime_limit):
    ret = simulate(QueueingSystem,
                   stime_limit=stime_limit,
                   params={
                       'arrival': arrival,
                       'service': service,
                       'queue_capacity': None,
                   })

    busy_rate = ret.data.server.busy_trace.timeavg()
    system_size = ret.data.system_size_trace.timeavg()
    est_arrival_mean = ret.data.source.intervals.statistic().mean()
    est_departure_mean = ret.data.sink.arrival_intervals.statistic().mean()
    est_service_mean = ret.data.server.service_intervals.mean()
    est_delay = ret.data.source.delays.mean()
    est_sys_wait = ret.data.system_wait_intervals.mean()
    est_queue_wait = ret.data.queue.wait_intervals.mean()
Example #7
def fit_ph(source, order, method='opt', verbose=False, options=None):
    """Fit a PH distribution of a given order from a trace or from another
    distribution (in the latter case, it must provide methods `moment(k)`
    and `generate(n)`).

    Two methods are supported:
    - non-linear optimization
    - G-Fit

    If `source` is a distribution (e.g. another
    `pyqunet.distributions.PhaseType`), then with 'opt' the PH is fitted from
    analytically computed moments, while with 'gfit' a trace is generated
    first and the EM algorithm is run on it.

    Args:
        source: a list of samples (array-like), or a distribution providing
            `moment(k)` and `generate(n)`
        order: an order of PH to fit
        method: 'opt' or 'gfit'
        verbose: if `True`, progress will be printed to the screen
        options (dict): provides additional method-related options:
            - x0: initial guess, default: `None` (OPT, GFIT)
            - loss: loss function, see `scipy.optimize.least_squares` (OPT)
            - numMoments: number of moments to use for fitting, default: 3 (OPT)
            - weights: sample weights vector, default: `None` (GFIT)
            - maxIter: max number of iterations, default: 200 (GFIT)
            - stopCond: stop condition, default: 1e-7 (GFIT)
            - numSamples: number of samples to generate into a trace,
                default: 20,000 (GFIT, used when source is a distribution)

    Returns:
        phase-type distribution, `pyqunet.distributions.PH`
    """
    if options is None:
        options = {}
    if method == 'opt':
        x0 = options.get('x0', None)  # initial guess
        loss = options.get('loss', None)  # 'cauchy', ...
        maxn = options.get('numMoments', 3)  # number of moments
        moments = stats.moment(source, maxn)
        return fit_ph_nonlinear_opt(moments, order, x0, loss)
    elif method == 'gfit':
        x0 = options.get('x0', None)
        weights = options.get('weights', None)
        max_iter = options.get('maxIter', 200)
        stop_cond = options.get('stopCond', 1e-7)
        num_samples = options.get('numSamples', 20000)
        if hasattr(source, 'generate'):
            trace = list(source.generate(num_samples))
        else:
            trace = list(source)
        tau, s = PHFromTrace(trace,
                             order,
                             weights,
                             max_iter,
                             stop_cond,
                             x0,
                             result='vecmat',
                             retlogli=False,
                             verbose=verbose)
        return PhaseType(s, tau)
    else:
        raise ValueError(
            "method '{}' not supported, use 'opt', 'gfit'".format(method))
Example #8
 (HyperErlang([1], [1], [1]), 1, 2, 6, 24,
  '(HyperErlang: probs=[1], shapes=[1], params=[1])', 1e-2, 2e-2),
 (
     HyperErlang(params=[1, 5, 8], shapes=[3, 4, 5], probs=[.2, .5, .3]),
     1.1875,
     2.941,
     12.603,
     72.795,
     '(HyperErlang: probs=[0.2, 0.5, 0.3], shapes=[3, 4, 5], '
     'params=[1, 5, 8])',
     1e-2,
     1e-2,
 ),
 # Phase-type distribution
 (PhaseType.exponential(1.0), 1, 2, 6, 24, '(PH: s=[[-1]], p=[1])',
  1e-2, 2e-2),
 (PhaseType.erlang(
     shape=3, rate=4.2), 0.714286, 0.680272, 0.809848, 1.15693, '(PH: '
  's=[[-4.2, 4.2, 0], [0, -4.2, 4.2], [0, 0, -4.2]], '
  'p=[1, 0, 0])', 1e-2, 2e-2),
 (PhaseType.hyperexponential(rates=[2, 3, 4], probs=[0.5, 0.2, 0.3]),
  0.39167, 0.33194, 0.4475694, 0.837384, '(PH: '
  's=[[-2, 0, 0], [0, -3, 0], [0, 0, -4]], '
  'p=[0.5, 0.2, 0.3])', 1e-2, 2e-2),
 (PhaseType(np.asarray([[-2, 1, 0.2], [0.5, -3, 1], [0.5, 0.5, -4]]),
            np.asarray([0.5, 0.4, 0.1])), 0.718362, 1.01114, 2.12112,
  5.92064, '(PH: '
  's=[[-2, 1, 0.2], [0.5, -3, 1], [0.5, 0.5, -4]], '
  'p=[0.5, 0.4, 0.1])', 1e-2, 2e-2),
 # Choice (discrete) distribution
     # Scalar probabilities and rates:
     drop_prob=[0, 0, 0],
     delivery_prob=[1, 1, 1],
     utilization=[.1, .5, .25],
     # Intervals:
     departure_avg=[1, 1 / 3, 1 / 10],
     arrival_avg=[1, 1 / 3, 1 / 10],
     response_time_avg=[1 / 9, 1 / 3, 1 / 30],
     wait_time_avg=[1 / 90, 1 / 6, 1 / 120],
     delivery_delay_avg=[43 / 90, 11 / 30, 1 / 30]),
 TandemProps(
     arrival=[
         MarkovArrival.poisson(1), None, None,
         HyperExponential([4.0], [1.0])
     ],
     service=PhaseType.exponential(10),
     queue_capacity=np.inf,
     num_stations=4,
     # System and queue sizes:
     system_size_avg=[1 / 9, 1 / 9, 1 / 9, 1],
     system_size_std=[(10**.5) / 9, (10**.5) / 9, (10**.5) / 9, 2**.5],
     queue_size_avg=[1 / 90, 1 / 90, 1 / 90, 1 / 2],
     queue_size_std=[(109**.5) / 90, (109**.5) / 90, (109**.5) / 90,
                     (5**.5) / 2],
     busy_avg=[.1, .1, .1, .5],
     busy_std=[.3, .3, .3, .5],
     # Scalar probabilities and rates:
     drop_prob=[0, 0, 0, 0],
     delivery_prob=[1, 1, 1, 1],
     utilization=[.1, .1, .1, .5],
     # Intervals: