Example #1
from matrixprofile import core  # assuming the package layout used by this repo


def test_get_profile_length():
    a = [1, 2, 3, 4, 5, 6, 7, 8]
    b = [1, 2, 3, 4, 5]
    m = 4
    desired = 8 - 4 + 1
    actual = core.get_profile_length(a, b, m)

    assert desired == actual
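For context, the test pins down the formula: the profile has one entry per window position in the longer series, i.e. len(ts) - m + 1. A minimal illustrative sketch of that relationship (not the library's implementation):

def profile_length_sketch(ts, m):
    # one matrix profile entry per sliding-window position in ts
    return len(ts) - m + 1

assert profile_length_sketch([1, 2, 3, 4, 5, 6, 7, 8], 4) == 5  # 8 - 4 + 1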
Example #2
def stomp(ts, window_size, query=None, n_jobs=1):
    """
    Computes the matrix profile for a single dimensional time series using
    the parallelized STOMP algorithm (by default). Either Ray or Python's
    multiprocessing library may be used. If Ray has been initialized on your
    machine, it takes priority over Python's multiprocessing.

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix profile for.
    window_size : int
        The size of the window to compute the matrix profile over.
    query : array_like
        Optionally, a query can be provided to perform a similarity join.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.
        
        >>> {
        >>>     'mp': The matrix profile,
        >>>     'pi': The matrix profile 1NN indices,
        >>>     'rmp': The right matrix profile,
        >>>     'rpi': The right matrix profile 1NN indices,
        >>>     'lmp': The left matrix profile,
        >>>     'lpi': The left matrix profile 1NN indices,
        >>>     'metric': The distance metric computed for the mp,
        >>>     'w': The window size used to compute the matrix profile,
        >>>     'ez': The exclusion zone used,
        >>>     'join': Flag indicating if a similarity join was computed,
        >>>     'sample_pct': Percentage of samples used in computing the MP,
        >>>     'data': {
        >>>         'ts': Time series data,
        >>>         'query': Query data if supplied
        >>>     }
        >>>     'class': "MatrixProfile"
        >>>     'algorithm': "stomp"
        >>> }

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > query length / 2.
        If ts is not a list or np.array.
        If query is not a list or np.array.
        If ts or query is not one dimensional.

    """
    is_join = core.is_similarity_join(ts, query)
    if not is_join:
        query = ts

    # data conversion to np.array
    ts = core.to_np_array(ts)
    query = core.to_np_array(query)

    if window_size < 4:
        error = "window size must be at least 4."
        raise ValueError(error)

    if window_size > len(query) / 2:
        error = "Time series is too short relative to desired window size"
        raise ValueError(error)

    # multiprocessing or single threaded approach
    if n_jobs != 1:
        n_jobs = core.valid_n_jobs(n_jobs)

    # precompute some common values - profile length, query length etc.
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = len(ts)
    query_length = len(query)
    num_queries = query_length - window_size + 1
    exclusion_zone = int(np.ceil(window_size / 2.0))

    # do not use exclusion zone for join
    if is_join:
        exclusion_zone = 0

    # find skip locations, clean up nan and inf in the ts and query
    skip_locs = core.find_skip_locations(ts, profile_length, window_size)
    ts = core.clean_nan_inf(ts)
    query = core.clean_nan_inf(query)

    # initialize matrices
    matrix_profile = np.full(profile_length, np.inf)
    profile_index = np.full(profile_length, 0)

    # left and right matrix profiles are only computed when this is not a join
    left_matrix_profile = None
    right_matrix_profile = None
    left_profile_index = None
    right_profile_index = None

    if not is_join:
        left_matrix_profile = np.copy(matrix_profile)
        right_matrix_profile = np.copy(matrix_profile)
        left_profile_index = np.copy(profile_index)
        right_profile_index = np.copy(profile_index)

    # precompute some statistics on ts
    data_mu, data_sig = core.moving_avg_std(ts, window_size)
    first_window = query[0:window_size]
    first_product = core.fft_convolve(ts, first_window)

    batch_windows = []
    results = []

    # batch compute with multiprocessing
    args = []
    for start, end in core.generate_batch_jobs(num_queries, n_jobs):
        args.append((start, end, ts, query, window_size, data_length,
                     profile_length, exclusion_zone, is_join, data_mu,
                     data_sig, first_product, skip_locs))
        batch_windows.append((start, end))

    # we are running single threaded stomp - no need to initialize any
    # parallel environments.
    if n_jobs == 1 or len(args) == 1:
        results.append(_batch_compute(args[0]))
    else:
        # parallelize; core.mp_pool() picks the pool implementation (Ray if
        # initialized, else multiprocessing)
        with core.mp_pool()(n_jobs) as pool:
            results = pool.map(_batch_compute, args)

    # now we combine the batch results
    if len(results) == 1:
        result = results[0]
        matrix_profile = result['mp']
        profile_index = result['pi']
        left_matrix_profile = result['lmp']
        left_profile_index = result['lpi']
        right_matrix_profile = result['rmp']
        right_profile_index = result['rpi']
    else:
        for index, result in enumerate(results):
            start = batch_windows[index][0]
            end = batch_windows[index][1]

            # update the matrix profile
            indices = result['mp'] < matrix_profile
            matrix_profile[indices] = result['mp'][indices]
            profile_index[indices] = result['pi'][indices]

            # update the left and right matrix profiles
            if not is_join:
                indices = result['lmp'] < left_matrix_profile
                left_matrix_profile[indices] = result['lmp'][indices]
                left_profile_index[indices] = result['lpi'][indices]

                indices = result['rmp'] < right_matrix_profile
                right_matrix_profile[indices] = result['rmp'][indices]
                right_profile_index[indices] = result['rpi'][indices]

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
        'metric': 'euclidean',
        'w': window_size,
        'ez': exclusion_zone,
        'join': is_join,
        'sample_pct': 1,
        'data': {
            'ts': ts,
            'query': query
        },
        'class': "MatrixProfile",
        'algorithm': "stomp"
    }
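A hedged usage sketch for stomp; the import path is an assumption about this repo's layout, and the toy series is arbitrary:

import numpy as np

from matrixprofile.algorithms.stomp import stomp  # assumed module path

# a noisy sine wave as a toy series
ts = np.sin(np.linspace(0, 40 * np.pi, 1000)) + np.random.uniform(size=1000)
profile = stomp(ts, window_size=32)

# one distance per subsequence position: len(ts) - window_size + 1 entries
assert len(profile['mp']) == len(ts) - 32 + 1
assert profile['algorithm'] == 'stomp'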
Example #3
def scrimp_plus_plus(ts, window_size, query=None, step_size=0.25, sample_pct=0.1,
                     random_state=None, n_jobs=1):
    """SCRIMP++ is an anytime algorithm that computes the matrix profile for a 
    given time series (ts) over a given window size (m). Essentially, it allows
    for an approximate solution to be provided for quicker analysis. In the 
    case of this implementation, sample percentage is used. An approximate
    solution is given based a sample percentage from 0 to 1. The default sample
    percentage is currently 10%.

    This algorithm was created at the University of California Riverside. For
    further academic understanding, please review this paper:

    Matrix Profile XI: SCRIMP++: Time Series Motif Discovery at Interactive
    Speed. Yan Zhu, Chin-Chia Michael Yeh, Zachary Zimmerman, Kaveh Kamgar,
    Eamonn Keogh, ICDM 2018.

    https://www.cs.ucr.edu/~eamonn/SCRIMP_ICDM_camera_ready_updated.pdf

    Parameters
    ----------
    ts : np.ndarray
        The time series to compute the matrix profile for.
    window_size : int
        The window size.
    query : array_like
        Optionally, a query can be provided to perform a similarity join.
    step_size : float, default 0.25
        The sampling interval for the window. The paper suggests 0.25 is the
        most practical. It should be a float value between 0 and 1.
    sample_pct : float, default = 0.1 (10%)
        Number of samples to compute distances for in the MP.
    random_state : int, default None
        Set the random seed generator for reproducible results.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.

        >>> {
        >>>    'mp': The matrix profile,
        >>>    'pi': The matrix profile 1NN indices,
        >>>    'rmp': The right matrix profile,
        >>>    'rpi': The right matrix profile 1NN indices,
        >>>    'lmp': The left matrix profile,
        >>>    'lpi': The left matrix profile 1NN indices,
        >>>    'metric': The distance metric computed for the mp,
        >>>    'w': The window size used to compute the matrix profile,
        >>>    'ez': The exclusion zone used,
        >>>    'join': Flag indicating if a similarity join was computed,
        >>>    'sample_pct': Percentage of samples used in computing the MP,
        >>>    'data': {
        >>>        'ts': Time series data,
        >>>        'query': Query data if supplied
        >>>    }
        >>>    'class': "MatrixProfile"
        >>>    'algorithm': "scrimp++"
        >>> }

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > query length / 2.
        If ts is not a list or np.array.
        If query is not a list or np.array.
        If ts or query is not one dimensional.
        If sample_pct is not between 0 and 1.

    """
    # validate random_state
    if random_state is not None:
        try:
            np.random.seed(random_state)
        except Exception:
            raise ValueError('Invalid random_state value given.')

    ###########################
    # PreSCRIMP
    ###########################
    profile = prescrimp(ts, window_size, query=query, step_size=step_size,
        sample_pct=sample_pct, random_state=random_state, n_jobs=n_jobs)

    # data conversion to np.array
    ts = profile['data']['ts']
    query = profile['data']['query']
    if query is None:
        query = ts

    # precompute some common values - profile length, query length etc.
    step_size = int(math.floor(window_size * step_size))
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = len(ts)
    exclusion_zone = profile['ez']
    window_size = profile['w']

    # precompute some statistics on ts
    data_mu, data_sig = core.moving_avg_std(ts, window_size)

    ###########################
    # SCRIMP
    ###########################

    # build the candidate compute order, then take a random sample of it
    orig_index = np.arange(profile_length)
    compute_order = np.copy(orig_index[orig_index > exclusion_zone])

    # only refine up to the provided sample_pct
    sample_size = int(np.ceil(len(compute_order) * sample_pct))
    compute_order = np.random.choice(compute_order, size=sample_size,
        replace=False)

    # initialize some values
    curlastz = np.zeros(profile_length)
    curdistance = np.zeros(profile_length)
    dist1 = np.full(profile_length, np.inf)
    dist2 = np.full(profile_length, np.inf)

    for idx in compute_order:
        # compute last z
        curlastz[idx] = np.sum(ts[0:window_size] * ts[idx:idx + window_size])
        curlastz[idx+1:] = curlastz[idx] + np.cumsum(
            (ts[window_size:data_length - idx] * ts[idx + window_size:data_length]) -\
            (ts[0:profile_length - idx - 1] * ts[idx:profile_length - 1])
        )

        # compute distances
        curdistance[idx:] = np.sqrt(np.abs(
            2 * (window_size - (curlastz[idx:profile_length + 1] -\
                window_size * (data_mu[idx:] * data_mu[0:profile_length - idx])) /\
                (data_sig[idx:] * data_sig[0:profile_length - idx]))
        ))

        dist1[0:idx - 1] = np.inf
        dist1[idx:] = curdistance[idx:]

        dist2[0:profile_length - idx] = curdistance[idx:]
        dist2[profile_length - idx + 2:] = np.inf

        loc1 = dist1 < profile['mp']
        if loc1.any():
            profile['mp'][loc1] = dist1[loc1]
            profile['pi'][loc1] = orig_index[loc1] - idx

        loc2 = dist2 < profile['mp']
        if loc2.any():
            profile['mp'][loc2] = dist2[loc2]
            profile['pi'][loc2] = orig_index[loc2] + idx


    profile['algorithm'] = 'scrimp++'
    profile['sample_pct'] = sample_pct

    return profile
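A hedged usage sketch; the import path is an assumption. Fixing random_state makes the sampled compute order, and therefore the approximate profile, reproducible:

import numpy as np

from matrixprofile.algorithms.scrimp import scrimp_plus_plus  # assumed path

ts = np.random.uniform(size=2000)

# refine 20% of the candidate indices, reproducibly
profile = scrimp_plus_plus(ts, 64, sample_pct=0.2, random_state=42)

assert profile['algorithm'] == 'scrimp++'
assert profile['sample_pct'] == 0.2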
Example #4
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
    """
    Computes the multidimensional matrix profile with mSTAMP (STOMP based).
    Either Ray or Python's multiprocessing library may be used. If Ray has
    been initialized on your machine, it takes priority over Python's
    multiprocessing.

    Parameters
    ----------
    ts : array_like, shape (n_dim, seq_len)
        The multidimensional time series to compute the multidimensional matrix profile for.
    window_size : int
        The size of the window to compute the matrix profile over.
    return_dimension : bool
        If True, also return the matrix profile dimension. It takes O(d^2 n)
        space to store and O(d^2 n^2) time to compute. (default is False)
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.
        
        >>> {
        >>>     'mp': The matrix profile,
        >>>     'pi': The matrix profile 1NN indices,
        >>>     'rmp': The right matrix profile,
        >>>     'rpi': The right matrix profile 1NN indices,
        >>>     'lmp': The left matrix profile,
        >>>     'lpi': The left matrix profile 1NN indices,
        >>>     'metric': The distance metric computed for the mp,
        >>>     'w': The window size used to compute the matrix profile,
        >>>     'ez': The exclusion zone used,
        >>>     'sample_pct': Percentage of samples used in computing the MP,
        >>>     'data': {
        >>>         'ts': Time series data,
        >>>         'query': Query data if supplied
        >>>     }
        >>>     'class': "MatrixProfile"
        >>>     'algorithm': "stomp_based_mstamp"
        >>> }

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > time series length / 2.
        If ts is not a list or np.array.

    """

    query = ts

    # data conversion to np.array
    ts = core.to_np_array(ts)
    query = core.to_np_array(query)

    if window_size < 4:
        error = "window size must be at least 4."
        raise ValueError(error)

    if ts.ndim == 1:
        ts = np.expand_dims(ts, axis=0)
        query = np.expand_dims(query, axis=0)

    if window_size > query.shape[1] / 2:
        error = "Time series is too short relative to desired window size"
        raise ValueError(error)

    # multiprocessing or single threaded approach
    if n_jobs != 1:
        n_jobs = core.valid_n_jobs(n_jobs)

    # precompute some common values - profile length, query length etc.
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = ts.shape[1]
    query_length = query.shape[1]
    num_queries = query_length - window_size + 1
    exclusion_zone = int(np.ceil(window_size / 2.0))
    num_dim = ts.shape[0]

    # find skip locations, clean up nan and inf in the ts and query
    skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
    ts = core.clean_nan_inf(ts)
    query = core.clean_nan_inf(query)

    # initialize matrices
    matrix_profile = np.full((num_dim, profile_length), np.inf)
    profile_index = np.full((num_dim, profile_length), 0)

    # mstomp is always a self-join, so left and right matrix profiles are computed
    left_matrix_profile = np.copy(matrix_profile)
    right_matrix_profile = np.copy(matrix_profile)
    left_profile_index = np.copy(profile_index)
    right_profile_index = np.copy(profile_index)

    profile_dimension = []
    if return_dimension:
        n_jobs = 1
        for i in range(num_dim):
            profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))

    # precompute some statistics on ts
    data_mu = np.empty((num_dim, profile_length))
    data_sig = np.empty((num_dim, profile_length))
    first_product = np.empty((num_dim, profile_length))
    for i in range(num_dim):
        data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
        first_window = query[i, 0:window_size]
        first_product[i, :] = core.fft_convolve(ts[i, :], first_window)

    batch_windows = []
    results = []

    # batch compute with multiprocessing
    args = []
    for start, end in core.generate_batch_jobs(num_queries, n_jobs):
        args.append((num_dim, start, end, ts, query, window_size, data_length,
                     profile_length, exclusion_zone, data_mu, data_sig,
                     first_product, skip_locs, profile_dimension,
                     return_dimension))
        batch_windows.append((start, end))

    # we are running single threaded stomp - no need to initialize any
    # parallel environments.
    if n_jobs == 1 or len(args) == 1:
        results.append(_batch_compute(args[0]))
    else:
        # parallelize; core.mp_pool() picks the pool implementation (Ray if
        # initialized, else multiprocessing)
        with core.mp_pool()(n_jobs) as pool:
            results = pool.map(_batch_compute, args)

    # now we combine the batch results
    if len(results) == 1:
        result = results[0]
        matrix_profile = result['mp']
        profile_index = result['pi']
        profile_dimension = result['pd']
        left_matrix_profile = result['lmp']
        left_profile_index = result['lpi']
        right_matrix_profile = result['rmp']
        right_profile_index = result['rpi']
    else:
        for index, result in enumerate(results):
            start = batch_windows[index][0]
            end = batch_windows[index][1]

            # update the matrix profile
            indices = result['mp'] < matrix_profile
            matrix_profile[indices] = result['mp'][indices]
            profile_index[indices] = result['pi'][indices]

            # update the left and right matrix profiles
            indices = result['lmp'] < left_matrix_profile
            left_matrix_profile[indices] = result['lmp'][indices]
            left_profile_index[indices] = result['lpi'][indices]

            indices = result['rmp'] < right_matrix_profile
            right_matrix_profile[indices] = result['rmp'][indices]
            right_profile_index[indices] = result['rpi'][indices]

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'pd': profile_dimension,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
        'metric': 'euclidean',
        'w': window_size,
        'ez': exclusion_zone,
        'sample_pct': 1,
        'data': {
            'ts': ts,
            'query': query
        },
        'class': "MatrixProfile",
        'algorithm': "stomp_based_mstamp"
    }
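A hedged usage sketch; the import path is an assumption. Note the (n_dim, seq_len) input shape and the per-dimension rows in the output, per the documented semantics:

import numpy as np

from matrixprofile.algorithms.mstomp import mstomp  # assumed module path

# three dimensions, shape (n_dim, seq_len); the second is a shifted copy
# of the first
seq = np.random.uniform(size=1000)
ts = np.vstack([seq, np.roll(seq, 50), np.random.uniform(size=1000)])

profile = mstomp(ts, window_size=32)

# one matrix profile row per dimension
assert profile['mp'].shape == (3, 1000 - 32 + 1)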
Example #5
def prescrimp(ts, window_size, query=None, step_size=0.25, sample_pct=0.1,
                     random_state=None, n_jobs=1):
    """
    This is the PreScrimp algorithm from the SCRIMP++ paper. It is primarly
    used to compute the approximate matrix profile. In this case we use
    a sample percentage to mock "the anytime/approximate nature".

    Parameters
    ----------
    ts : np.ndarray
        The time series to compute the matrix profile for.
    window_size : int
        The window size.
    query : array_like
        Optionally, a query can be provided to perform a similarity join.
    step_size : float, default 0.25
        The sampling interval for the window. The paper suggests 0.25 is the
        most practical. It should be a float value between 0 and 1.
    sample_pct : float, default = 0.1 (10%)
        Number of samples to compute distances for in the MP.
    random_state : int, default None
        Set the random seed generator for reproducible results.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Note
    ----
    The matrix profiles computed from prescrimp will always be the approximate
    solution.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.
        
        >>> {
        >>>    'mp': The matrix profile,
        >>>    'pi': The matrix profile 1NN indices,
        >>>    'rmp': The right matrix profile,
        >>>    'rpi': The right matrix profile 1NN indices,
        >>>    'lmp': The left matrix profile,
        >>>    'lpi': The left matrix profile 1NN indices,
        >>>    'metric': The distance metric computed for the mp,
        >>>    'w': The window size used to compute the matrix profile,
        >>>    'ez': The exclusion zone used,
        >>>    'join': Flag indicating if a similarity join was computed,
        >>>    'sample_pct': Percentage of samples used in computing the MP,
        >>>    'data': {
        >>>        'ts': Time series data,
        >>>        'query': Query data if supplied
        >>>    }
        >>>    'class': "MatrixProfile"
        >>>    'algorithm': "prescrimp"
        >>> }

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > query length / 2.
        If ts is not a list or np.array.
        If query is not a list or np.array.
        If ts or query is not one dimensional.
        If sample_pct is not between 0 and 1.

    """
    is_join = core.is_similarity_join(ts, query)
    if not is_join:
        query = ts

    # data conversion to np.array
    ts = core.to_np_array(ts)
    query = core.to_np_array(query)

    # validate step_size; it must be strictly positive or the stride below
    # becomes zero
    if not isinstance(step_size, float) or step_size > 1 or step_size <= 0:
        raise ValueError('step_size should be a float between 0 and 1.')

    # validate sample_pct
    if not isinstance(sample_pct, float) or sample_pct > 1 or sample_pct < 0:
        raise ValueError('sample_pct should be a float between 0 and 1.')

    # validate random_state
    if random_state is not None:
        try:
            np.random.seed(random_state)
        except Exception:
            raise ValueError('Invalid random_state value given.')

    if window_size < 4:
        error = "window size must be at least 4."
        raise ValueError(error)

    if window_size > len(query) / 2:
        error = "Time series is too short relative to desired window size"
        raise ValueError(error)

    # precompute some common values - profile length, query length etc.
    step_size = int(math.floor(window_size * step_size))
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = len(ts)
    exclusion_zone = int(np.ceil(window_size / 4.0))

    matrix_profile = np.zeros(profile_length)
    mp_index = np.zeros(profile_length, dtype='int')

    X = np.fft.fft(ts)
    mux, sigx = core.moving_avg_std(ts, window_size)

    dotproduct = np.zeros(profile_length)
    refine_distance = np.full(profile_length, np.inf)
    orig_index = np.arange(profile_length)

    # iterate over indices sampled at the step interval and update the
    # matrix profile
    compute_order = np.arange(0, profile_length, step=step_size)

    for iteration, idx in enumerate(compute_order):
        subsequence = ts[idx:idx + window_size]

        # compute distance profile
        distance_profile = calc_distance_profile(X, subsequence, data_length,
            window_size, mux, sigx)
        
        # apply exclusion zone
        distance_profile = core.apply_exclusion_zone(exclusion_zone, is_join,
            window_size, data_length, idx, distance_profile)

        # find and store nearest neighbor
        if iteration == 0:
            matrix_profile = distance_profile
            mp_index[:] = idx
        else:
            update_pos = distance_profile < matrix_profile
            mp_index[update_pos] = idx
            matrix_profile[update_pos] = distance_profile[update_pos]

        idx_min = np.argmin(distance_profile)
        matrix_profile[idx] = distance_profile[idx_min]
        mp_index[idx] = idx_min
        idx_nn = mp_index[idx]

        # compute the target indices
        idx_diff = idx_nn - idx
        endidx = np.min([
            profile_length - 1,
            idx + step_size - 1,
            profile_length - idx_diff - 1
        ])
        beginidx = np.max([0, idx - step_size + 1, 2 - idx_diff])

        # compute dot product and refine distance for the idx, begin idx 
        # and end idx
        dotproduct = calc_dotproduct_idx(dotproduct, window_size, 
            matrix_profile, idx, sigx, idx_nn, mux)

        dotproduct = calc_dotproduct_end_idx(ts, dotproduct, idx, window_size,
                                             endidx, idx_nn, idx_diff)

        refine_distance = calc_refine_distance_end_idx(
            refine_distance, dotproduct, idx, endidx, mux, sigx, idx_nn,
            idx_diff, window_size)
        
        dotproduct = calc_dotproduct_begin_idx(
            ts, dotproduct, beginidx, idx, idx_diff, window_size, idx_nn)

        refine_distance = calc_refine_distance_begin_idx(
            refine_distance, dotproduct, beginidx, idx, idx_diff, idx_nn, 
            sigx, mux, window_size)

        matrix_profile, mp_index = apply_update_positions(matrix_profile, 
                                                          mp_index, 
                                                          refine_distance, 
                                                          beginidx, 
                                                          endidx, 
                                                          orig_index, idx_diff)

    return {
        'mp': matrix_profile,
        'pi': mp_index,
        'rmp': None,
        'rpi': None,
        'lmp': None,
        'lpi': None,
        'w': window_size,
        'ez': exclusion_zone,
        'join': is_join,
        'sample_pct': sample_pct,
        'metric': 'euclidean',
        'data': {
            'ts': ts,
            'query': query if is_join else None
        },
        'class': 'MatrixProfile',
        'algorithm': 'prescrimp',
    }
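A hedged usage sketch; the import path is an assumption. PreSCRIMP fills only 'mp' and 'pi'; the left/right profiles come back as None:

import numpy as np

from matrixprofile.algorithms.scrimp import prescrimp  # assumed module path

ts = np.random.uniform(size=1500)
profile = prescrimp(ts, 50, step_size=0.25, random_state=7)

assert profile['rmp'] is None and profile['lmp'] is None
assert profile['algorithm'] == 'prescrimp'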