Example #1
 def test_system_size_avg(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     q2 = MapPh1N(MAP.exponential(2.0), PhaseType.exponential(5.0), 10)
     exp_q1 = MM1N(1.0, 2.0, 5)
     exp_q2 = MM1N(2, 5, 10)
     self.assertAlmostEqual(q1.system_size_avg, exp_q1.system_size_avg)
     self.assertAlmostEqual(q2.system_size_avg, exp_q2.system_size_avg)
Example #2
 def test_wait_time(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     q2 = MapPh1N(MAP.exponential(2.0), PhaseType.exponential(5.0), 10)
     exp_q1 = MM1N(1.0, 2.0, 5)
     exp_q2 = MM1N(2, 5, 10)
     self.assertAlmostEqual(q1.wait_time, exp_q1.wait_time)
     self.assertAlmostEqual(q2.wait_time, exp_q2.wait_time)
Example #3
 def test_map_equivalence(self):
     m = MAP([[-10., 1], [2., -3.]], [[9, 0], [0.1, 0.9]])
     samples = list(m.generate(20000))
     self.assertIsInstance(stats.lag(m, 1), np.ndarray)
     self.assertIsInstance(stats.lag(m, 2), np.ndarray)
     self.assertIsInstance(stats.lag(samples, 1), np.ndarray)
     self.assertIsInstance(stats.lag(samples, 2), np.ndarray)
     assert_almost_equal(stats.lag(m, 2), stats.lag(samples, 2), 2)
Example #4
 def test_system_size_pmf(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     q2 = MapPh1N(MAP.exponential(2.0), PhaseType.exponential(5.0), 10)
     exp_q1 = MM1N(1.0, 2.0, 5)
     exp_q2 = MM1N(2, 5, 10)
     assert_almost_equal(q1.system_size_pmf(10), exp_q1.system_size_pmf(10))
     self.assertEqual(len(q1.system_size_pmf()), q1.capacity + 1)
     assert_almost_equal(q2.system_size_pmf(), exp_q2.system_size_pmf())
Example #5
 # Nested helper shown out of its context: `mu`, `D0`, `A` and `b` are defined
 # in the enclosing fitting routine (see `fit_map_horvath` below).
 def residual(xi, n, input_lags):
     d1i = xi.reshape(n, n).transpose() / mu
     m = MAP(D0, d1i, check=False)
     estimated_lags = [m.lag(i + 1) for i in range(len(input_lags))]
     system_diff = np.asarray(A.dot(xi) - b).flatten() * 1000
     lags_diff = np.asarray(input_lags - estimated_lags)
     diff = np.hstack((system_diff, lags_diff))
     return diff
Example #6
 # Nested helper shown out of its context: `decompose` is defined in the
 # enclosing routine (see `fit_map_nonlinear_opt` below).
 def residual(x, n, input_moments, input_lags):
     m = MAP(*decompose(x, n), check=False)
     estimated_moments = [
         m.moment(i + 1) for i in range(len(input_moments))
     ]
     estimated_lags = [m.lag(i + 1) for i in range(len(input_lags))]
     estimated_moments_lags = np.r_[estimated_moments, estimated_lags]
     input_moments_lags = np.r_[input_moments, input_lags]
     return estimated_moments_lags - input_moments_lags
Example #7
 def test_departure_process(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 20)
     q2 = MapPh1N(MAP.erlang(4, 1.0), PhaseType.erlang(3, 2.0), 20)
     self.assertIsInstance(q1.departure, MAP)
     self.assertIsInstance(q2.departure, MAP)
     # Since the queue is very long, the arrival rate will be close to the
     # departure rate.
     self.assertAlmostEqual(q1.departure.rate, q1.arrival_rate, 5)
     self.assertAlmostEqual(q2.departure.rate, q2.arrival_rate, 5)
Example #8
 def test_valid_creation(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     q2 = MapPh1N(MAP.exponential(2.0), PhaseType.exponential(5.0), 4)
     self.assertAlmostEqual(q1.arrival_rate, 1)
     self.assertAlmostEqual(q1.service_rate, 2)
     self.assertEqual(q1.capacity, 5)
     self.assertAlmostEqual(q2.arrival_rate, 2)
     self.assertAlmostEqual(q2.service_rate, 5)
     self.assertAlmostEqual(q2.capacity, 4)
Example #9
    def test_moments_map_arrival(self):
        source = MAP.exponential(3.0)

        self.assertIsInstance(stats.moment(source, 1), np.ndarray)
        self.assertIsInstance(stats.moment(source, 2), np.ndarray)
        # For an exponential with rate 3, m_k = k! / 3**k, so m1 = 1/3, m2 = 2/9.
        assert_almost_equal(stats.moment(source, 1), [1 / 3], 10)
        assert_almost_equal(stats.moment(source, 2), [1 / 3, 2 / 9], 10)
Example #10
    def departure(self):
        # Aliasing matrices from arrival MAP and service PH
        D0 = self.arrival.D0
        D1 = self.arrival.D1
        W = self.arrival.order
        Iw = np.eye(W)
        S = self.service.subgenerator
        tau = self.service.pmf0
        V = self.service.order
        Iv = np.eye(V)
        Ev = np.ones((V, 1))
        M = self.capacity - 1
        B = V * W
        Ob = np.zeros((B, B))

        # Building blocks
        D0_Iv = np.kron(D0, Iv)
        D1_Iv = np.kron(D1, Iv)
        D0_S = np.kron(D0, Iv) + np.kron(Iw, S)
        Ct = np.kron(-S.dot(Ev), tau)
        Iw_Ct = np.kron(Iw, Ct)
        R0 = np.kron(D1, np.kron(tau, Ev))
        Ra = np.kron(D0 + D1, Iv) + np.kron(Iw, S)

        # Building departure D0 and D1
        D0_dep = cbdiag(self.capacity, ((0, D0_S), (1, D1_Iv)))
        D0_dep[M * B:, M * B:] = Ra
        D0_left_col = np.vstack((D0_Iv, ) + (Ob, ) * self.capacity)
        D0_top_row = np.hstack((R0, ) + (Ob, ) * M)
        D0_dep = np.hstack((D0_left_col, np.vstack((D0_top_row, D0_dep))))
        D1_dep = cbdiag(self.capacity + 1, ((-1, Iw_Ct), ))

        return MAP(D0_dep, D1_dep)
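The block construction above can be sanity-checked numerically: the departure
process must itself be a valid MAP (every row of D0 + D1 sums to zero), and for
a long buffer its rate approaches the arrival rate, as the test in Example #7
asserts. A minimal sketch, assuming the `MapPh1N`, `MAP` and `PhaseType`
classes used throughout these examples:

import numpy as np

# Hypothetical check of the departure MAP built above.
q = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 20)
dep = q.departure
# Each row of the combined generator D0 + D1 must sum to zero.
assert np.allclose((dep.D0 + dep.D1).sum(axis=1), 0.0)
# With a long buffer departures are rarely blocked, so the rates nearly match.
assert abs(dep.rate - q.arrival_rate) < 1e-5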
Example #11
def fit_map_nonlinear_opt(moments, lags, order=3, x0=None, loss=None):
    """
    Fits a MAP of the given order to the given moments and lag-k correlations
    using non-linear least-squares optimization.

    Args:
        moments (array-like): moments to match, starting from the first
        lags (array-like): lag-k auto-correlation coefficients to match
        order: an order of the MAP to fit (default: 3)
        x0: initial guess for the optimizer (default: `None`)
        loss: loss function, see `scipy.optimize.least_squares`
            (default: `None`)

    Returns: MAP
    """
    def decompose(x, n):
        a_d0 = np.zeros((n, n))
        a_d1 = x[n * (n - 1):].reshape((n, n))
        for i in range(n):
            row = x[i * (n - 1):(i + 1) * (n - 1)]
            a_d0[i] = np.concatenate(
                (row[:i], [-np.sum(row) - np.sum(a_d1[i])], row[i:n - 1]))
        return a_d0, a_d1

    def residual(x, n, input_moments, input_lags):
        m = MAP(*decompose(x, n), check=False)
        estimated_moments = [
            m.moment(i + 1) for i in range(len(input_moments))
        ]
        estimated_lags = [m.lag(i + 1) for i in range(len(input_lags))]
        estimated_moments_lags = np.r_[estimated_moments, estimated_lags]
        input_moments_lags = np.r_[input_moments, input_lags]
        return estimated_moments_lags - input_moments_lags

    def _x(v1, v2, a_order):
        return [v1] * a_order * (a_order - 1) + [v2] * (a_order * a_order)

    moments = np.asarray(moments)
    lags = np.asarray(lags)
    normalized_moments, mu = stats.normalize_moments(moments)

    params = {
        'fun': residual,
        'x0': x0 if x0 is not None else np.array(_x(1., 1., order)),
        'bounds': (_x(0., 0, order), _x(np.inf, np.inf, order)),
        'kwargs': {
            'input_moments': normalized_moments,
            'input_lags': lags,
            'n': order,
        },
    }
    if loss is not None:
        params['loss'] = loss

    normalized_result = scipy.optimize.least_squares(**params)
    # noinspection PyUnresolvedReferences
    normalized_matrices = decompose(normalized_result.x, order)
    d0, d1 = normalized_matrices[0] / mu, normalized_matrices[1] / mu
    return MAP(d0, d1)
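A hedged usage sketch of `fit_map_nonlinear_opt`, assuming the `MAP` class and
`stats` helpers used in the examples above: moments and lag-k correlations of a
known process are computed analytically and a third-order MAP is fitted to them.

source = MAP.erlang(3, 2.0)                # process to approximate
moments = stats.moment(source, 3)          # first three moments
lags = stats.lag(source, 2)                # lag-1 and lag-2 correlations
fitted = fit_map_nonlinear_opt(moments, lags, order=3, loss='cauchy')
# The fitted process should reproduce the mean closely.
print(fitted.moment(1), source.moment(1))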
Example #12
 def departure(self):
     n = self.capacity
     a = self.arrival_rate
     b = self.service_rate
     d0 = cbdiag(n + 1, [(0, [[-(a + b)]]), (1, [[a]])])
     d0[0, 0] += b
     d0[n, n] += a
     d1 = cbdiag(n + 1, [(-1, [[b]])])
     return MAP(d0, d1)
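`cbdiag` itself is not shown in these examples; from its use in Examples #10
and #12 it appears to build a square block matrix with each given block
repeated along a block diagonal. A minimal sketch of that assumed behaviour,
reproducing the M/M/1/N departure matrices above for capacity 2:

import numpy as np

def cbdiag_sketch(size, blocks):
    # Assumed behaviour of `cbdiag`: a (size x size) block matrix where each
    # (offset, block) pair is repeated along the block diagonal at `offset`.
    blocks = [(off, np.atleast_2d(np.asarray(blk, dtype=float)))
              for off, blk in blocks]
    rows, cols = blocks[0][1].shape
    out = np.zeros((size * rows, size * cols))
    for off, block in blocks:
        for i in range(size):
            j = i + off
            if 0 <= j < size:
                out[i * rows:(i + 1) * rows, j * cols:(j + 1) * cols] = block
    return out

a, b, n = 1.0, 2.0, 2
d0 = cbdiag_sketch(n + 1, [(0, [[-(a + b)]]), (1, [[a]])])
d0[0, 0] += b
d0[n, n] += a
d1 = cbdiag_sketch(n + 1, [(-1, [[b]])])
print(d0)  # rows: [-1, 1, 0], [0, -3, 1], [0, 0, -2]
print(d1)  # rows: [0, 0, 0], [2, 0, 0], [0, 2, 0]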
Example #13
    def test_invalid_creation(self):
        ar1 = MAP.exponential(1.0)
        srv1 = PhaseType.exponential(2.0)

        with self.assertRaises(ValueError):
            MapPh1N(ar1, srv1, -1)
        with self.assertRaises(ValueError):
            MapPh1N(ar1, srv1, 0)
        with self.assertRaises(TypeError):
            MapPh1N(ar1, srv1, '1')
        with self.assertRaises(TypeError):
            MapPh1N(ar1, srv1, 1.2)
        with self.assertRaises(TypeError):
            MapPh1N(ar1, srv1, [1, 2])
Example #14
def fit_map(source,
            order,
            method='opt',
            verbose=False,
            options=None,
            **kwargs):
    """Fit a MAP of a given order from a trace or from another arrival process
    (in the latter case, it must provide methods `moment(k)`, `lag(k)` and
    `generate(n)`).

    The following methods are supported:
    - non-linear optimization over moments and lag-k correlations ('opt')
    - independent fitting of D0 as a PH using moments and of D1 using lag-k
      correlations ('indi')
    - EM procedure ('em')
    - non-linear optimization over CDF expansion components ('opt-cdf',
      see `fit_map_cdf`)

    If `source` is an arrival process, e.g. another `pyqunet.arrivals.MAP`,
    then with 'opt' the MAP is fitted from analytically computed moments and
    lag-k correlations, while with 'em' a trace is generated first and the EM
    algorithm is run on it. Behaviour of 'indi' depends on the selected
    PH-fitting method, see options.

    Args:
        source: a trace (array-like list of samples) or an arrival process
        order: an order of the MAP to fit
        method: 'opt', 'indi', 'em' or 'opt-cdf'
        verbose: if `True`, progress will be printed to the screen
        options (dict): provides additional method-related options:
            - x0: initial guess, default: `None` (OPT, OPT-CDF)
            - loss: loss function, see `scipy.optimize.least_squares`
                (OPT, INDI)
            - numMoments: number of moments to use for fitting, default: 3
                (OPT, INDI)
            - numLags: number of lag-k to use for fitting, default: 2
                (OPT, INDI)
            - maxIter: max number of iterations, default: 200 (EM, INDI)
            - stopCond: stop condition, default: 1e-7 (EM, INDI)
            - numSamples: number of samples to generate into a trace,
                default: 20,000 (EM, and INDI when source is a distribution)
            - numComponents: number of CDF components to match, default: 3
                (OPT-CDF)
            - weights: weights of the CDF components (OPT-CDF)
            - phFitMethod: 'opt' or 'gfit', default: 'opt' (INDI)

    Returns:
        Markovian arrival process, `pyqunet.arrivals.MAP`
    """
    if options is None:
        options = {}
    if method == 'opt':
        x0 = options.get('x0', None)  # initial guess
        loss = options.get('loss', None)  # 'cauchy', ...
        num_moments = options.get('numMoments', 3)
        num_lags = options.get('numLags', 2)
        moments = stats.moment(source, num_moments)
        lags = stats.lag(source, num_lags)
        return fit_map_nonlinear_opt(moments, lags, order, x0, loss)
    elif method == 'em':
        max_iter = options.get('maxIter', 200)
        stop_cond = options.get('stopCond', 1e-7)
        num_samples = options.get('numSamples', 20000)
        if hasattr(source, 'generate'):
            trace = list(source.generate(num_samples))
        else:
            trace = list(source)
        d0, d1 = MAPFromTrace(trace,
                              order,
                              max_iter,
                              stop_cond,
                              initial=None,
                              retlogli=False,
                              verbose=verbose)
        return MAP(d0, d1)
    elif method == 'indi':
        ph_fit_method = options.get('phFitMethod', 'opt')
        num_lags = options.get('numLags', kwargs.get('numLags', 2))
        lags = stats.lag(source, num_lags)
        ph = fit_ph(source, order, ph_fit_method, verbose, options)
        return fit_map_horvath(ph, lags)
    elif method == 'opt-cdf':
        x0 = options.get('x0', None)  # initial guess
        num_components = options.get('numComponents', 3)
        weights = options.get('weights', None)
        return fit_map_cdf(source, num_components, weights, order, x0=x0)
    else:
        raise ValueError(
            "method '{}' not supported, use 'opt', 'gfit'".format(method))
Example #15
 def test_utilization(self):
     q1 = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 20)
     q2 = MapPh1N(MAP.erlang(4, 1.0), PhaseType.erlang(3, 2.0), 20)
     # Utilization equals arrival_rate / service_rate: 1/2 for q1 and
     # (1/4) / (2/3) = 3/8 for q2.
     self.assertAlmostEqual(q1.utilization, 1 / 2)
     self.assertAlmostEqual(q2.utilization, 3 / 8)
Example #16
 def test_service_distribution(self):
     q = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     self.assertIsInstance(q.service, PhaseType)
     self.assertAlmostEqual(q.service.rate, q.service_rate)
Example #17
 def test_arrival_process(self):
     q = MapPh1N(MAP.exponential(1.0), PhaseType.exponential(2.0), 5)
     self.assertIsInstance(q.arrival, MAP)
     self.assertAlmostEqual(q.arrival.rate, q.arrival_rate)
Example #18
def fit_map_horvath(ph, lags):
    """Find D1 matrix using a D0 as subgenerator of the given PH and lags.

    Args:
        ph (`pyqunet.distributions.PH`): a PH distribution approximating D0
        lags (array-like): a list of lag-k auto-correlation coefficients
    """
    if len(lags) > 1:
        return fit_map_horvath(ph, [lags[0]])

    N = ph.order
    D0 = ph.subgenerator
    En = np.ones((N, 1))
    pi = ph.init_probs

    num_lags = len(lags)
    if num_lags == 0:
        D1_row_sum = (-D0).dot(En).reshape(N)
        D1 = np.asarray([D1_row_sum[i] * pi for i in range(N)])
        return MAP(D0, D1)

    ph_moments = stats.moment(ph, 2)
    ph_moments, mu = stats.normalize_moments(ph_moments)

    D0ni = np.linalg.inv(-D0) / mu
    D0ni2 = D0ni.dot(D0ni)
    rate = ph.param * mu
    lag1 = lags[0]

    d = (-D0 * mu).dot(En).reshape((N, 1))
    gamma = pi.dot(D0ni).reshape((1, N))
    block_gamma = cbdiag(N, [(0, gamma)])
    block_eye = np.hstack([np.eye(N)] * N)
    A = np.vstack([block_eye, block_gamma])
    b = np.vstack([d, pi.reshape((N, 1))])

    delta = pow(rate, 2) * pi.dot(D0ni2)
    f = D0ni.dot(En)
    # noinspection PyTypeChecker
    v = lag1 * (2 * pow(rate, 2) * pi.dot(D0ni2).dot(En).reshape(1)[0] - 1) + 1
    c = np.hstack([f[i] * delta for i in range(N)])

    if num_lags == 1:
        A = np.vstack([A, c])
        b = np.vstack([b, [[v]]]).reshape(2 * N + 1)
        ret = scipy.optimize.lsq_linear(A,
                                        b, (0, np.inf),
                                        tol=1e-10,
                                        method='bvls')
        # noinspection PyUnresolvedReferences
        x = ret.x
        assert isinstance(x, np.ndarray)
        D1 = x.reshape((N, N)).transpose() / mu

        try:
            return MAP(D0, D1, rtol=1e-3, atol=1e-4)
        except ValueError:
            if np.abs(lags[0]) < 1e-5:
                return fit_map_horvath(ph, [])
            else:
                return fit_map_horvath(ph, [lags[0] * 0.5])
    else:

        def residual(xi, n, input_lags):
            d1i = xi.reshape(n, n).transpose() / mu
            m = MAP(D0, d1i, check=False)
            estimated_lags = [m.lag(i + 1) for i in range(len(input_lags))]
            system_diff = np.asarray(A.dot(xi) - b).flatten() * 1000
            lags_diff = np.asarray(input_lags - estimated_lags)
            diff = np.hstack((system_diff, lags_diff))
            return diff

        lags = np.asarray(lags)

        params = {
            'fun': residual,
            'x0':
            np.asarray([d[i] * pi for i in range(N)]).transpose().flatten(),
            'bounds': (0, np.inf),
            'kwargs': {
                'input_lags': lags,
                'n': N,
            },
        }

        result = scipy.optimize.least_squares(**params)
        # noinspection PyUnresolvedReferences
        D1 = result.x.reshape(N, N).transpose() / mu
        return MAP(D0, D1, check=False)
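A hedged sketch of calling `fit_map_horvath` directly, assuming `fit_ph` (as
referenced in the 'indi' branch of `fit_map` above) returns a
`pyqunet.distributions.PH`:

trace = list(MAP.erlang(2, 1.0).generate(20000))
ph = fit_ph(trace, 2, 'opt', False, {})    # PH fit supplying the D0 candidate
lags = stats.lag(trace, 1)                 # target lag-1 correlation
m = fit_map_horvath(ph, lags)
print(stats.lag(m, 1), lags)               # lag-1 should roughly agree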
Example #19
def fit_map_cdf(source, num_components, weights=None, order=3, x0=None):
    """
    Fits a MAP approximating the given source MAP by matching weighted
    components of a series expansion of the inter-arrival distribution
    (see `pdf_components` below) using non-linear optimization.

    Args:
        source (`MAP`): the arrival process to approximate
        num_components: number of expansion components to match
        weights: component weights; a scalar is treated as the time point used
            to build the default weights (default: `None`)
        order: an order of the MAP to fit (default: 3)
        x0: initial guess for the optimizer (default: `None`)

    Returns: MAP
    """

    # noinspection PyPep8Naming
    def pdf_components(a_D0, a_D1, n, K=5):
        one = np.ones(n)
        eye = np.eye(n)
        P = -np.linalg.inv(a_D0).dot(a_D1)

        # Firstly, compute steady-state probability vector
        P = P.T - eye
        P[0] = one
        pi = np.zeros(n)
        pi[0] = 1

        pi = np.linalg.solve(P, pi)

        D0_power = eye
        a_result = [0] * K

        for k in range(K):
            D0_power = D0_power.dot(a_D0)
            a_result[k] = pi.dot(D0_power).dot(one)

        return np.array(a_result)

    def get_weights(t=1.0, k=5):
        w = [0] * k
        factorial = 2

        for ki in range(k):
            # noinspection PyTypeChecker
            w[ki] = t**(ki + 2) / factorial
            factorial *= (ki + 3)

        return np.array(w)

    def decompose(x, n):
        a_d0 = np.zeros((n, n))
        a_d1 = x[n * (n - 1):].reshape((n, n))
        for i in range(n):
            row = x[i * (n - 1):(i + 1) * (n - 1)]
            a_d0[i] = np.concatenate(
                (row[:i], [-np.sum(row) - np.sum(a_d1[i])], row[i:n - 1]))
        return a_d0, a_d1

    # noinspection PyPep8Naming
    def residual(x, n, components, ws):
        a_D0, a_D1 = decompose(x, n)
        estimated = pdf_components(a_D0, a_D1, n, components.size)
        return (estimated - components) * ws

    assert isinstance(source, MAP)
    x_size = (2 * order - 1) * order
    map_components = pdf_components(source.D0, source.D1, source.order,
                                    num_components)

    if weights is None:
        weights = get_weights(1, num_components)
    elif isinstance(weights, int) or isinstance(weights, float):
        weights = get_weights(float(weights), num_components)

    params = {
        'fun': residual,
        'x0': x0 if x0 is not None else np.array([1] * x_size),
        'bounds': ([0] * x_size, [np.inf] * x_size),
        'kwargs': {
            'components': map_components,
            'ws': weights,
            'n': order,
        },
    }

    result = scipy.optimize.least_squares(**params)
    # noinspection PyUnresolvedReferences
    ret_D0, ret_D1 = decompose(result.x, order)
    return MAP(ret_D0, ret_D1)
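A hedged sketch of `fit_map_cdf`: approximating a known second-order MAP, with
a scalar `weights` value interpreted as the time point for the default weights:

source = MAP([[-10., 1.], [2., -3.]], [[9., 0.], [0.1, 0.9]])
approx = fit_map_cdf(source, num_components=5, weights=1.0, order=2)
# Compare a few summary statistics of the original and the fitted MAP.
print(source.moment(1), approx.moment(1))
print(stats.lag(source, 1), stats.lag(approx, 1))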