def prepare_transform(self, ts_to_be_rescaled):
        """Prepare the model for temporal resampling by computing DTW alignment path between the reference time series
        and a time series to be rescaled or a set of time series to be rescaled.
        
        If ts_to_be_rescaled contains a single time series, all series from the dataset will be rescaled using the
        DTW path between that time series and the reference one, otherwise, the X array given at transform time
        should have the same number of time series (X.shape[0]) as ts_to_be_rescaled.

        Parameters
        ----------
        ts_to_be_rescaled : numpy.ndarray
            A time series dataset of base modalities of shape (n_ts, sz, d) with
            ``d = self.reference_series_.shape[-1]``
        """
        ts_to_be_rescaled = to_time_series_dataset(ts_to_be_rescaled)
        # Now ts_to_be_rescaled is of shape (n_ts, sz, d)
        # with d = self.reference_series_.shape[-1]
        self.saved_dtw_paths_ = []
        for ts in ts_to_be_rescaled:
            end = first_non_finite_index(ts)
            resampled_ts = _resampled(ts[:end],
                                      n_samples=self.n_samples,
                                      kind=self.interp_kind)
            if self.metric == "dtw":
                path, d = dtw_path(self.reference_series_, resampled_ts)
            elif self.metric == "lrdtw":
                path, d = lr_dtw_path(self.reference_series_,
                                      resampled_ts,
                                      gamma=self.gamma_lr_dtw)
            else:
                raise ValueError("Unknown alignment function")
            self.saved_dtw_paths_.append(path)
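The method above caches one warping path per input series. As a standalone illustration of the same pattern, here is a minimal sketch (assuming only NumPy and tslearn's dtw_path; the series are synthetic) that aligns a series to a reference and averages its values per reference index:

import numpy as np
from tslearn.metrics import dtw_path

reference = np.sin(np.linspace(0, 3, 50)).reshape(-1, 1)
series = np.sin(np.linspace(0.3, 3.3, 50)).reshape(-1, 1)

path, dist = dtw_path(reference, series)  # list of (i, j) index pairs
aligned = np.zeros_like(reference)
counts = np.zeros(len(reference))
for i, j in path:  # a DTW path visits every index of both series
    aligned[i] += series[j]
    counts[i] += 1
aligned /= counts.reshape(-1, 1)  # series warped onto the reference timeline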
Example #2
 def DTW_update(self, s1, s2, w, gl_const=None, scr=None, ims=None):
     """Warp s2 onto s1's timeline and return their weighted average along the DTW path."""
     # Default arguments cannot reference ``self``, so fall back to the
     # instance attributes here instead.
     gl_const = self.gl_const if gl_const is None else gl_const
     scr = self.scr if scr is None else scr
     ims = self.ims if ims is None else ims
     best_path, distance = dtw_path(s1,
                                    s2,
                                    global_constraint=gl_const,
                                    sakoe_chiba_radius=scr,
                                    itakura_max_slope=ims)
     x_cords = []
     y_cords = []
     # print(w)
     for i in best_path:
         x_cords += [round(i[0] * w + i[1] * (1 - w))]
         y_cords += [s1[i[0]] * w + s2[i[1]] * (1 - w)]
     s3 = []
     for j in range(len(s1)):
         sublistj = [
             y_cords[k] for k in np.where(np.isin(np.array(x_cords), j))[0]
         ]
         if len(sublistj) != 0:
             s3 += [sum(sublistj) / len(sublistj)]
         else:
             # No path point maps to index j: carry the previous value forward.
             s3 += [s3[-1]]
     return s3
Example #3
def DynamicTimeWarping(seismic_data, grid, window_width):
    path_info = []
    start = time.time()

    for i in range(0, len(grid), 1):
        reference_trace = seismic_data[:, grid[i][0], grid[i][1]]
        reference_trace = np.trim_zeros(reference_trace, 'b')
        block_size = 0

        for ii in range(0, len(grid), 1):
            if abs(grid[i][0] - grid[ii][0]) <= window_width and abs(
                    grid[i][1] - grid[ii][1]) <= window_width:
                block_size = block_size + 1
                matched_trace = seismic_data[:, grid[ii][0], grid[ii][1]]
                matched_trace = np.trim_zeros(matched_trace, 'b')
                path, sim = metrics.dtw_path(
                    reference_trace, matched_trace
                )  #print(grid[i][0], grid[i][1], grid[ii][0], grid[ii][1], sim)

                #if sim < similarity_score:
                PI = [grid[i][0], grid[i][1], grid[ii][0], grid[ii][1], path]
                path_info.append(PI)
        stop = time.time()
        t = (stop - start) / 60

        clear_output()
        print('reference trace:', grid[i][0], grid[i][1], '(', i, 'out of:',
              len(grid), ')', ' time in minutes: ', t, 'Sub-block size:',
              block_size)

    return path_info
Example #4
def regrid_horizons_DTW(seismic_data, grid, grid_step,
                        filtered_horizons_binary, dense_grid, window_size):
    horizontal_warp_distance = window_size * grid_step
    dense_path_info = []
    start = time.time()

    for i in range(0, len(grid)):
        reference_trace = seismic_data[:, grid[i][0], grid[i][1]]
        reference_trace = np.trim_zeros(reference_trace, 'b')
        block_size = 0

        for iii in range(0, len(dense_grid)):
            if abs(grid[i][0] -
                   dense_grid[iii][0]) <= horizontal_warp_distance and abs(
                       grid[i][1] -
                       dense_grid[iii][1]) <= horizontal_warp_distance:
                block_size = block_size + 1
                matched_trace = seismic_data[:, dense_grid[iii][0],
                                             dense_grid[iii][1]]
                matched_trace = np.trim_zeros(matched_trace, 'b')
                path, sim = metrics.dtw_path(reference_trace, matched_trace)
                PI = [
                    grid[i][0], grid[i][1], dense_grid[iii][0],
                    dense_grid[iii][1], path
                ]
                dense_path_info.append(PI)

        stop = time.time()
        t = (stop - start) / 60
        clear_output()
        print('(', i, 'out of:', len(grid), ')', ' time in minutes: ', t,
              'Sub-block size:', block_size)

    regridded_horizons = []
    it = 0
    for reflector in filtered_horizons_binary:
        new_horizon = []
        it = it + 1
        for i in range(0, len(reflector)):
            events = []
            reference_trace = seismic_data[:, reflector[i][1], reflector[i][2]]
            reference_trace = np.trim_zeros(reference_trace, 'b')
            event = reflector[i][0]
            for k in range(0, len(dense_path_info)):
                if dense_path_info[k][0] == reflector[i][
                        1] and dense_path_info[k][1] == reflector[i][2]:
                    path = dense_path_info[k][4]
                    for ll in range(0, len(path)):

                        if path[ll][0] == event:
                            point = [
                                path[ll][1], dense_path_info[k][2],
                                dense_path_info[k][3]
                            ]
                            event_dense_path = path[ll][1]
                            new_horizon.append(point)
        regridded_horizons.append(new_horizon)

    return regridded_horizons, dense_path_info
Example #5
 def _petitjean_assignment(self, X, barycenter):
     n = X.shape[0]
     assign = ([[] for _ in range(self.barycenter_size)],
               [[] for _ in range(self.barycenter_size)])
     for i in range(n):
         path, _ = dtw_path(X[i], barycenter)
         for pair in path:
             assign[0][pair[1]].append(i)
             assign[1][pair[1]].append(pair[0])
     return assign
Example #6
    def eval_model(self,
                   net,
                   loader,
                   batch_size,
                   gamma,
                   verbose=1,
                   target_mean=0,
                   target_std=0):
        criterion = torch.nn.MSELoss()
        losses_mse = []
        losses_dtw = []
        losses_tdi = []

        for i, data in enumerate(loader, 0):
            loss_mse, loss_dtw, loss_tdi = torch.tensor(0), torch.tensor(
                0), torch.tensor(0)
            # get the inputs
            inputs, target = data

            # inputs, target, breakpoints = data

            inputs = torch.tensor(inputs, dtype=torch.float32).to(self.device)
            target = torch.tensor(target, dtype=torch.float32).to(self.device)
            # batch_size, N_output = target.shape[0:2]
            outputs = net(inputs)

            # MSE
            loss_mse = criterion(target, outputs)
            loss_dtw, loss_tdi = 0, 0
            # DTW and TDI
            for k in range(batch_size):
                target_k_cpu = target[k, :,
                                      0:1].view(-1).detach().cpu().numpy()
                output_k_cpu = outputs[k, :,
                                       0:1].view(-1).detach().cpu().numpy()

                loss_dtw += dtw(target_k_cpu, output_k_cpu)
                path, sim = dtw_path(target_k_cpu, output_k_cpu)

                Dist = 0
                for i, j in path:
                    Dist += (i - j) * (i - j)
                loss_tdi += Dist / (self.N_output * self.N_output)

            loss_dtw = loss_dtw / batch_size
            loss_tdi = loss_tdi / batch_size

            # print statistics
            losses_mse.append(loss_mse.item())
            losses_dtw.append(loss_dtw)
            losses_tdi.append(loss_tdi)
            ## TODO plotting eval

        print(' Eval mse= ',
              np.array(losses_mse).mean(), ' dtw= ',
              np.array(losses_dtw).mean(), ' tdi= ',
              np.array(losses_tdi).mean())
Example #7
def evaluate_iteration(model, criterion, X_test_left, X_test_right, y_test):
    model.eval()

    x_test_left = np.transpose(X_test_left, [1, 0, 2])
    x_test_right = np.transpose(X_test_right, [1, 0, 2])
    y_test = np.transpose(y_test, [1, 0, 2])

    x_test_left_tensor = numpy_to_tvar(x_test_left)
    x_test_right_tensor = numpy_to_tvar(x_test_right)

    y_test_tensor = numpy_to_tvar(y_test)

    output, atten = model(x_test_left_tensor,
                          x_test_right_tensor, y_test_tensor, 0)

    loss = criterion(output, y_test_tensor)
    loss_mse, loss_dtw, loss_tdi = 0, 0, 0
    loss_mae, loss_RMSLE, loss_RMSE = 0, 0, 0

    for k in range(BATCH_SIZE):
        target_k_cpu = y_test_tensor[:, k, 0:1].view(-1).detach().cpu().numpy()
        output_k_cpu = output[:, k, 0:1].view(-1).detach().cpu().numpy()

        loss_dtw += dtw(target_k_cpu, output_k_cpu)
        path, sim = dtw_path(target_k_cpu, output_k_cpu)

        Dist = 0
        for i, j in path:
            Dist += (i-j)*(i-j)
        loss_tdi += Dist / (N_output*N_output)

        loss_mae += mean_absolute_error(target_k_cpu, output_k_cpu)
        loss_RMSLE += np.sqrt(mean_squared_error(target_k_cpu, output_k_cpu))  # NB: computed as RMSE here, not a true RMSLE
        loss_RMSE += np.sqrt(mean_squared_error(target_k_cpu, output_k_cpu))

    loss_dtw = loss_dtw / BATCH_SIZE
    loss_tdi = loss_tdi / BATCH_SIZE
    loss_mae = loss_mae / BATCH_SIZE
    loss_RMSLE = loss_RMSLE / BATCH_SIZE
    loss_RMSE = loss_RMSE / BATCH_SIZE

    # # metric
    # output_numpy = output.cpu().data.numpy()
    # y_test_numpy = y_test_tensor.cpu().data.numpy()

    # loss_mae = mean_absolute_error(y_test_numpy,output_numpy)
    # loss_RMSLE = np.sqrt(mean_squared_error(y_test_numpy,output_numpy))
    # loss_RMSE = np.sqrt(mean_squared_error(y_test_numpy,output_numpy))

    # test_loss_meter.add(loss.item())

    # plot_result(output, y_test_tensor)
    # show_attention(x_test_left_tensor, x_test_right_tensor,output,atten)
    # plt.show()

    return loss.item(), loss_mae, loss_RMSLE, loss_RMSE, loss_dtw
Example #8
 def _get_similarity(self, seq1_features, seq2_features):
     path, dist = dtw_path(np.array(seq1_features), np.array(seq2_features))
     similarities_per_path = []
     for i in range(len(path)):
         cosine_sim = self.cosine_score(
             torch.Tensor(seq1_features[path[i][0]]),
             torch.Tensor(seq2_features[path[i][1]])).numpy()
         similarities_per_path.append(cosine_sim)
     total_path_similarity = sum(similarities_per_path) / len(path)
     return total_path_similarity
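A self-contained variant of the same idea, sketched with NumPy in place of the instance's cosine_score (dtw_path from tslearn.metrics, as above):

import numpy as np
from tslearn.metrics import dtw_path

def dtw_cosine_similarity(seq1, seq2):
    # Average cosine similarity of the feature pairs along the DTW path.
    path, _ = dtw_path(np.asarray(seq1), np.asarray(seq2))
    sims = []
    for i, j in path:
        a, b = np.asarray(seq1[i]), np.asarray(seq2[j])
        sims.append(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))
    return float(np.mean(sims))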
Example #9
def true_dilate(target, pred, alpha):  # target, pred [seq_length]
    N_output = target.shape[0]
    loss_dtw = dtw(target, pred)
    path, sim = dtw_path(target, pred)
    Dist = 0
    for ii, jj in path:
        Dist += (ii - jj) * (ii - jj)
    loss_tdi = Dist / (N_output * N_output)
    loss_dilate = alpha * loss_dtw + (1 - alpha) * loss_tdi
    return loss_dtw, loss_tdi, loss_dilate
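The loss_tdi term above is the temporal distortion index: the mean squared deviation of the warping path from the diagonal. A quick sanity check (a sketch, assuming dtw and dtw_path are imported from tslearn.metrics as in the surrounding examples):

import numpy as np

target = np.array([0., 1., 2., 3., 2., 1., 0.])
pred = np.concatenate(([target[0]], target[:-1]))  # same series, lagged one step
loss_dtw, loss_tdi, loss_dilate = true_dilate(target, pred, alpha=0.5)
# With a one-step lag most path pairs have |i - j| == 1, so loss_tdi is
# roughly len(path) / N_output ** 2, and loss_dilate blends both terms.
print(loss_dtw, loss_tdi, loss_dilate)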
Example #10
def _petitjean_assignment(X, barycenter, metric_params=None):
    if metric_params is None:
        metric_params = {}
    n = X.shape[0]
    barycenter_size = barycenter.shape[0]
    assign = ([[] for _ in range(barycenter_size)],
              [[] for _ in range(barycenter_size)])
    for i in range(n):
        path, _ = dtw_path(X[i], barycenter, **metric_params)
        for pair in path:
            assign[0][pair[1]].append(i)
            assign[1][pair[1]].append(pair[0])
    return assign
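In DTW Barycenter Averaging, the assignment computed above feeds an update step that replaces each barycenter point with the mean of the sample points aligned to it. A minimal sketch of that update (not tslearn's actual implementation; X and assign are as returned above):

import numpy as np

def _petitjean_update(X, assign, barycenter_size, d):
    barycenter = np.zeros((barycenter_size, d))
    for t in range(barycenter_size):
        # Mean of every sample point aligned to barycenter index t; each
        # index receives at least one point because DTW paths are complete.
        points = [X[i][k] for i, k in zip(assign[0][t], assign[1][t])]
        barycenter[t] = np.mean(points, axis=0)
    return barycenter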
Example #11
    def _applying_dtw_to_clusters(
        self,
        adata1,
        adata2,
        genename,
        min_percentile_outlier=0,
        max_percentile_outlier=100
    ):
        expression1 = adata1.raw.to_adata()[:, genename].X
        expression2 = adata2.raw.to_adata()[:, genename].X

        if not isinstance(expression1, np.ndarray):
            expression1 = expression1.toarray()
        if not isinstance(expression2, np.ndarray):
            expression2 = expression2.toarray()

        # When all or most gene expression values are 0, np.percentile raises
        # an error; caught with try/except for now, but this needs a proper fix.
        # Exclude cells whose gene expression is too low or too high.
        try:
            min_outlier, max_outlier = np.percentile(
                expression1, q=[min_percentile_outlier, max_percentile_outlier]
            )
            gene_expression1 = expression1[(expression1 <= max_outlier) & (
                expression1 >= min_outlier)]
            ordered_cells1 = np.array(adata1[(
                expression1 <= max_outlier) & (expression1 >= min_outlier)].obs_names.to_list())
        except IndexError as e:
            gene_expression1 = expression1
            ordered_cells1 = np.array(adata1.obs_names.to_list())

        # excluding cells that have too low or too high gene expression
        try:
            min_outlier, max_outlier = np.percentile(
                expression2, q=[min_percentile_outlier, max_percentile_outlier]
            )

            gene_expression2 = expression2[(
                expression2 <= max_outlier) & (expression2 >= min_outlier)]

            ordered_cells2 = np.array(adata2[(
                expression2 <= max_outlier) & (expression2 >= min_outlier)].obs_names.to_list())
        except IndexError as e:
            gene_expression2 = expression2
            ordered_cells2 = np.array(adata2.obs_names.to_list())

        path, dist = dtw_path(
            gene_expression1, gene_expression2)

        return ordered_cells1, ordered_cells2, path, dist
Example #12
def ntu_similarity(net,
                   config,
                   x,
                   f,
                   y,
                   window_size=16,
                   slide=2,
                   visibility=False,
                   dist='cosine',
                   use_all_joints_on_each_bp=False):
    if use_all_joints_on_each_bp:
        body_parts = config.body_parts_entire_body if not visibility else config.body_parts_invis_entire_body
    else:
        body_parts = config.body_parts if not visibility else config.body_parts_invis

    bps = list(body_parts._fields)

    cand_framesPerBP, metric, querys = ntu_similarity_prepart(
        net, x, f, y, body_parts, bps, dist, slide, window_size)

    finals = [[] for _ in range(4)]
    for query_idx, query in enumerate(querys):
        for bp in range(len(bps)):
            path, _ = dtw_path(query[bp], cand_framesPerBP[bp])

            similarities = []
            for i in range(len(path)):
                if dist == "cosine":
                    metric_sim = metric(
                        torch.Tensor(query[bp][path[i][0]]),
                        torch.Tensor(
                            cand_framesPerBP[bp][path[i][1]])).numpy()
                else:
                    metric_sim = metric(
                        torch.Tensor(query[bp][path[i][0]]).unsqueeze(0),
                        torch.Tensor(cand_framesPerBP[bp][
                            path[i][1]]).unsqueeze(0)).numpy()[0]
                similarities.append(metric_sim)

            final_sim = np.mean(similarities)
            finals[query_idx].append(final_sim)

    # origin, flipped, arm_flipped, leg_flipped
    final_sim_calculated = [np.mean(final_v) for final_v in finals]
    # paths have different lengths, so this cannot be vectorized with np.mean(_, axis=1)
    if dist == 'cosine':
        return tuple(final_sim_calculated)
    else:
        return tuple([1 / item for item in final_sim_calculated])
Example #13
def plot_DWT_matrix(comb_df: pd.DataFrame, targ: str, sample_idx: int):

    frag_df = comb_df[comb_df['ID'] == sample_idx]

    seq1 = frag_df[targ].values.reshape(1,-1,1)
    seq2 = frag_df['OEP'].values.reshape(1,-1,1)

    path, sim = metrics.dtw_path(seq1[0], seq2[0])

    sz = frag_df.shape[0]

    plt.figure(1, figsize=(7,6))

    left, bottom = 0.01, 0.1
    w_ts = h_ts = 0.2
    left_h = left + w_ts + 0.02
    width = height = 0.65
    bottom_h = bottom + height + 0.02

    rect_s_y = [left, bottom, w_ts, height]
    rect_gram = [left_h, bottom, width, height]
    rect_s_x = [left_h, bottom_h, width - 0.13, h_ts]

    ax_gram = plt.axes(rect_gram)
    ax_s_x = plt.axes(rect_s_x)
    ax_s_y = plt.axes(rect_s_y)

    mat = cdist(seq1[0], seq2[0])

    sns.heatmap(mat, cmap = 'jet', ax = ax_gram)
    ax_gram.axis("off")
    ax_gram.invert_yaxis()
    ax_gram.autoscale(False)
    ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w--",
                linewidth=2.)

    ax_s_x.plot(np.arange(sz), seq2[0], color = '#510385', linewidth=1.8, label = 'OEP')
    ax_s_x.set_xlim((0, sz - 1))
    ax_s_x.axis("off")
    ax_s_x.set(title = f"Mẫu số {sample_idx}")

    ax_s_y.plot(-seq1[0], np.arange(sz), color = '#850318', linewidth=1.8, label = targ)
    ax_s_y.set_ylim((0, sz - 1))
    ax_s_y.axis("off")

    plt.legend()
    plt.show()
Example #14
def get_dtw_distance_mse(query, reference):
    '''
    Compute the Sakoe-Chiba-constrained DTW distance between two 1-D series,
    plus the MSE between the query and the reference warped onto it.

    Parameters:
    query     : 1D array
    reference : 1D array
    '''
    if len(query) > len(reference):
        query, reference = reference, query
    try:
        path, distance = tsm.dtw_path(query,
                                      reference,
                                      global_constraint='sakoe_chiba')
        warped_reference = np.zeros(len(query))
        for pair in path:
            warped_reference[pair[0]] = reference[pair[1]]
        mse = rms_difference(query, warped_reference)
        return distance, mse
    except ValueError:
        # Sentinel values returned when the DTW computation fails.
        return (1000, 100)
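A hypothetical call (rms_difference and the tsm alias for tslearn.metrics are assumed to be defined elsewhere in the same module):

import numpy as np

query = np.sin(np.linspace(0, 6, 80))
reference = np.sin(np.linspace(0, 6, 120))  # longer series; the function swaps them if query is longer
distance, mse = get_dtw_distance_mse(query, reference)
print(f"DTW distance: {distance:.3f}, post-warp MSE: {mse:.3f}")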
Example #15
def _mm_assignment(X, barycenter, weights, metric_params=None):
    """Computes item assignement based on DTW alignments and return cost as a
    bonus.

    Parameters
    ----------
    X : numpy.array of shape (n, sz, d)
        Time-series to be averaged

    barycenter : numpy.array of shape (barycenter_size, d)
        Barycenter as computed at the current step of the algorithm.

    weights : array
        Weights of each X[i]. Must have the same length as X.

    metric_params : dict or None (default: None)
        DTW constraint parameters to be used.
        See :ref:`tslearn.metrics.dtw_path <fun-tslearn.metrics.dtw_path>` for
        a list of accepted parameters.
        If None, no constraint is used for DTW computations.

    Returns
    -------
    list of index pairs
        Warping paths

    float
        Current alignment cost
    """
    if metric_params is None:
        metric_params = {}
    n = X.shape[0]
    cost = 0.
    list_p_k = []
    for i in range(n):
        path, dist_i = dtw_path(barycenter, X[i], **metric_params)
        cost += dist_i**2 * weights[i]
        list_p_k.append(path)
    cost /= weights.sum()
    return list_p_k, cost
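A minimal usage sketch (synthetic data; dtw_path imported from tslearn.metrics as in the function above). The returned cost is the weighted mean of squared DTW distances:

import numpy as np

X = np.random.RandomState(0).randn(5, 30, 1)  # 5 univariate series of length 30
barycenter = X.mean(axis=0)                   # naive initial barycenter
weights = np.ones(X.shape[0])
paths, cost = _mm_assignment(X, barycenter, weights)
# cost == sum_i weights[i] * dtw(barycenter, X[i])**2 / weights.sum()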
Example #16
def get_column_wise_dtw_metrics(query, reference):
    '''
    Calculates DTW distances between columns.
    Transposes the input arrays (columns become rows), then computes the DTW
    distance under the Euclidean norm and the MSE between the aligned columns.

    Parameters:
    query     : 2D array with shape n * m (n time frames, m features)
    reference : 2D array with shape k * m (k time frames, m features)
    '''
    assert (len(query) == len(reference))
    if len(query[0]) > len(reference[0]):
        query, reference = reference, query
    query = query.T
    reference = reference.T
    path, distance = tsm.dtw_path(query,
                                  reference,
                                  global_constraint='sakoe_chiba')
    warped_reference = np.zeros(query.shape)
    for pair in path:
        warped_reference[pair[0]] = reference[pair[1]]
    mse = np.linalg.norm(query - warped_reference) / len(query)
    return distance, mse
Example #17
def DTW_similarity(comb_df: pd.DataFrame, sampl_df: pd.DataFrame):

    sim_df = pd.DataFrame(columns = ['frag','Evt_type','Tx_RIP', 'Th_Flow', 'Abd_RIP'])

    for i in range(sampl_df.shape[0]):
        evt_type = sampl_df.Resp_lab[i]
        
        frag_df = comb_df[comb_df['ID'] == i][['Tx_RIP', 'Th_Flow', 'Abd_RIP', 'OEP']]
        
        sims = [i, evt_type]

        for j in product(frag_df.drop(['OEP'], axis = 1).columns,['OEP']):
            targ = j[0]

            seq1 = frag_df[targ].values.reshape(1,-1,1)
            seq2 = frag_df['OEP'].values.reshape(1,-1,1)

            _, sim = metrics.dtw_path(seq1[0],seq2[0])
            
            sims.append(sim)
    
        sim_df.loc[i] = sims

    return sim_df
Example #18
import numpy
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt

from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn import metrics

numpy.random.seed(0)
n_ts, sz, d = 2, 100, 1
dataset = random_walks(n_ts=n_ts, sz=sz, d=d)
scaler = TimeSeriesScalerMeanVariance(mu=0., std=1.)  # Rescale time series
dataset_scaled = scaler.fit_transform(dataset)

path, sim = metrics.dtw_path(dataset_scaled[0], dataset_scaled[1])

matrix_path = numpy.zeros((sz, sz), dtype=int)  # numpy.int was removed in NumPy >= 1.24
for i, j in path:
    matrix_path[i, j] = 1

plt.figure()

plt.subplot2grid((1, 3), (0, 0), colspan=2)
plt.plot(numpy.arange(sz), dataset_scaled[0, :, 0])
plt.plot(numpy.arange(sz), dataset_scaled[1, :, 0])
plt.subplot(1, 3, 3)
plt.imshow(matrix_path, cmap="gray_r")

plt.tight_layout()
plt.show()
Example #19
import numpy
import matplotlib.pyplot as plt

from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn import metrics

numpy.random.seed(0)
n_ts, sz, d = 2, 100, 1
dataset = random_walks(n_ts=n_ts, sz=sz, d=d, random_state=5)
scaler = TimeSeriesScalerMeanVariance(mu=0., std=1.)  # Rescale time series
dataset_scaled = scaler.fit_transform(dataset)

lcss_path, sim_lcss = metrics.lcss_path(dataset_scaled[0, :, 0],
                                        dataset_scaled[1, :40, 0],
                                        eps=1.5)
dtw_path, sim_dtw = metrics.dtw_path(dataset_scaled[0, :, 0],
                                     dataset_scaled[1, :40, 0])

plt.figure(1, figsize=(8, 8))

plt.plot(dataset_scaled[0, :, 0], "b-", label='First time series')
plt.plot(dataset_scaled[1, :40, 0], "g-", label='Second time series')

for positions in lcss_path:
    plt.plot([positions[0], positions[1]], [
        dataset_scaled[0, positions[0], 0], dataset_scaled[1, positions[1], 0]
    ],
             color='orange')
plt.legend()
plt.title("Time series matching with LCSS")

plt.figure(2, figsize=(8, 8))
Example #20
     -0.600, -0.606, -0.607, -0.604, -0.598, -0.589, -0.577, -0.558, -0.531,
     -0.496, -0.454, -0.410, -0.364, -0.318, -0.276, -0.237, -0.203, -0.176,
     -0.157, -0.145, -0.142, -0.145, -0.154, -0.168, -0.185, -0.206, -0.230,
     -0.256, -0.286, -0.318, -0.351, -0.383, -0.414, -0.442, -0.467, -0.489,
     -0.508, -0.523, -0.535, -0.544, -0.552, -0.557, -0.560, -0.560, -0.557,
     -0.551, -0.542, -0.531, -0.519, -0.507, -0.494, -0.484, -0.476, -0.469,
     -0.463, -0.456, -0.449, -0.442, -0.435, -0.431, -0.429, -0.430, -0.435,
     -0.442, -0.452, -0.465, -0.479, -0.493, -0.506, -0.517, -0.526, -0.535,
     -0.548, -0.567, -0.592, -0.622, -0.655, -0.690, -0.728, -0.764, -0.795,
     -0.815, -0.823, -0.821])

s_y1 = numpy.concatenate((s_x, s_x)).reshape((-1, 1))
s_y2 = numpy.concatenate((s_x, s_x[::-1])).reshape((-1, 1))
sz = s_y1.shape[0]

path, sim = metrics.dtw_path(s_y1, s_y2)

plt.figure(1, figsize=(8, 8))

# definitions for the axes
left, bottom = 0.01, 0.1
w_ts = h_ts = 0.2
left_h = left + w_ts + 0.02
width = height = 0.65
bottom_h = bottom + height + 0.02

rect_s_y = [left, bottom, w_ts, height]
rect_gram = [left_h, bottom, width, height]
rect_s_x = [left_h, bottom_h, width, h_ts]

ax_gram = plt.axes(rect_gram)
Example #21
def ntu_similarity_global_dtw(net,
                              config,
                              x,
                              f,
                              y,
                              min_num=2,
                              window_size=16,
                              slide=2,
                              visibility=False,
                              dist='cosine',
                              use_all_joints_on_each_bp=False):
    if use_all_joints_on_each_bp:
        body_parts = config.body_parts_entire_body if not visibility else config.body_parts_invis_entire_body
    else:
        body_parts = config.body_parts if not visibility else config.body_parts_invis

    bps = list(body_parts._fields)

    cand_framesPerBP, metric, querys = ntu_similarity_prepart(
        net, x, f, y, body_parts, bps, dist, slide, window_size)

    querys_bp_flatten = []
    for query in querys:
        query_bp_flatten = [
            np.concatenate([bp[p_idx] for bp in query])
            for p_idx in range(len(query[0]))
        ]
        querys_bp_flatten.append(query_bp_flatten)
    c_bp_flatten = [
        np.concatenate([bp[p_idx] for bp in cand_framesPerBP])
        for p_idx in range(len(cand_framesPerBP[0]))
    ]
    paths = [
        dtw_path(np.array(query_flatten), np.array(c_bp_flatten))[0]
        for query_flatten in querys_bp_flatten
    ]

    finals = [[] for _ in range(4)]
    for idx, (query, path) in enumerate(zip(querys, paths)):
        for path_idx in range(len(path)):
            sims = []
            for bp in range(len(bps)):
                if dist == "cosine":
                    metric_sim = metric(
                        torch.Tensor(query[bp][path[path_idx][0]]),
                        torch.Tensor(
                            cand_framesPerBP[bp][path[path_idx][1]])).numpy()
                else:
                    metric_sim = metric(
                        torch.Tensor(
                            query[bp][path[path_idx][0]]).unsqueeze(0),
                        torch.Tensor(cand_framesPerBP[bp][
                            path[path_idx][1]]).unsqueeze(0)).numpy()[0]
                sims.append(metric_sim)
            sims = np.sort(np.array(sims), axis=None)
            sims = sims[:min_num]
            finals[idx].append(np.mean(sims))

    # origin, flipped, arm_flipped, leg_flipped
    final_sim_calculated = [np.mean(final_v) for final_v in finals]
    # paths have different lengths, so this cannot be vectorized with np.mean(_, axis=1)

    if dist == 'cosine':
        return tuple(final_sim_calculated)
    else:
        return tuple([1 / item for item in final_sim_calculated])
Example #22
    normer_tests = my_dtw_tests.plsm[j]['Time_pls'].values.astype('float64')
    
    
    #create normalization values
    normer_tests = dtw_wei(normer_tests,twind,b=5./((max_train-min_train)/2.)**2.,c=1.)
    normer_tests = np.outer(normer_tests,np.ones(X_tests.shape[1]))
    
    #include a time normalization factory
    X_tests *= normer_tests
    
    y_tests = my_dtw_tests.plsm[j].index.values
    
    
    
    #get multi-parameter dtw solution
    path, sim = metrics.dtw_path(X_train, X_tests)
    
    #convert path into a numpy array
    m = np.array(list(zip(*path)))  # list() is required under Python 3
    fig, nax = plt.subplots(nrows=2,ncols=2,sharex=True,figsize=(12,12))
    
    #x_vals = ['SPEED']+x_vals
    for i,ax in enumerate(nax.flatten()):
        parm = x_vals[i]
        ax.plot(y_train[m[0,:]],my_dtw_train.plsm['Wind'][parm].values[m[0,:]])
        ax.plot(y_train[m[0,:]],my_dtw_tests.plsm[j][parm].values[m[1,:]])
        
        ax.set_ylabel(parm)
        ax.set_xlabel('Time [UTC]')
        fancy_plot(ax)
#ax.plot(my_dtw_train.plsm['Wind'].index.values.astype('float64'),X_train[:,0])
Example #23
    def visualise_test(self, nets, nets_name, batch_size, means_input,
                       stds_input, means_target, stds_target):

        gen_test = iter(self.testloader)
        losses_mse = []
        losses_dtw = []
        losses_tdi = []

        for ind, (test_inputs, test_targets) in enumerate(gen_test):
            print(ind)
            test_inputs = torch.tensor(test_inputs,
                                       dtype=torch.float32).to(self.device)
            test_targets = torch.tensor(test_targets,
                                        dtype=torch.float32).to(self.device)
            criterion = torch.nn.MSELoss()
            print(f"test_inputs tensor shape: {test_inputs.size()}, "
                  f"test_targets shape: {test_targets.size()}, ")

            fig, axs = plt.subplots(1, 3, sharey='col', figsize=(15, 8))
            for net_i, net in enumerate(nets):
                test_preds = net(test_inputs).to(self.device)

                loss_mse = criterion(test_targets, test_preds)

                # DTW and TDI
                loss_dtw, loss_tdi = 0, 0
                for k in range(batch_size):
                    target_k_cpu = test_targets[k, :, 0:1].view(
                        -1).detach().cpu().numpy()
                    output_k_cpu = test_preds[k, :, 0:1].view(
                        -1).detach().cpu().numpy()

                    loss_dtw += dtw(target_k_cpu, output_k_cpu)
                    path, sim = dtw_path(target_k_cpu, output_k_cpu)

                    Dist = 0
                    for i, j in path:
                        Dist += (i - j) * (i - j)
                    loss_tdi += Dist / (self.N_output * self.N_output)

                loss_dtw = loss_dtw / batch_size
                loss_tdi = loss_tdi / batch_size

                # print statistics
                losses_mse.append(loss_mse.item())
                losses_dtw.append(loss_dtw)
                losses_tdi.append(loss_tdi)

                input = test_inputs.detach().cpu().numpy()[0, :, :]
                target = test_targets.detach().cpu().numpy()[0, :, :]
                preds = test_preds.detach().cpu().numpy()[0, :, :]

                print(
                    f"input np shape: {input.shape}, target np shape: {target.shape}, preds np shape: {preds.shape}"
                )
                print(
                    f"means_input: {means_input.shape}, stds_input: {stds_input.shape}"
                )

                ## select target column in input
                input = input[:, self.idx_tgt_col]

                ## Scaling back to original
                input = input * stds_input[
                    ind, :, self.idx_tgt_col] + means_input[ind, :,
                                                            self.idx_tgt_col]
                target = target * stds_target[
                    ind, self.idx_tgt_col] + means_target[ind,
                                                          self.idx_tgt_col]
                preds = preds * stds_target[
                    ind, self.idx_tgt_col] + means_target[ind,
                                                          self.idx_tgt_col]

                print(
                    f"input shape: {input.shape}, target shape: {target.shape}, preds shape: {preds.shape}"
                )

                print(
                    f"target plot shape: {np.concatenate([input[-1:], target.ravel()]).shape}"
                    f"preds plot shape: {np.concatenate([input[-1:], preds.ravel()]).shape}"
                )

                print(f"net_i: {net_i}")
                axs[net_i].plot(range(0, self.N_input),
                                input,
                                label='input',
                                linewidth=1)

                axs[net_i].plot(range(self.N_input - 1,
                                      self.N_input + self.N_output),
                                np.concatenate([input[-1:],
                                                target.ravel()]),
                                label='target',
                                linewidth=1)

                axs[net_i].plot(range(self.N_input - 1,
                                      self.N_input + self.N_output),
                                np.concatenate([input[-1:],
                                                preds.ravel()]),
                                label='prediction',
                                linewidth=1)
                # axs[i].xticks(range(0,40,2))
                axs[net_i].legend()
                axs[net_i].set_title(
                    f"{nets_name[net_i]} \n MSE: {round(loss_mse.item(), 3)}, DTW: {round(loss_dtw, 3)}. TDI: {round(loss_tdi, 3)}"
                )

            # plt.show()
            plt.tight_layout()
            plt.savefig(f"results/new_test_loader/{ind}.png")
            plt.close()

        print(' Test mse= ',
              np.array(losses_mse).mean(), ' dtw= ',
              np.array(losses_dtw).mean(), ' tdi= ',
              np.array(losses_tdi).mean())
Example #24
hashBucket9 = Lsh(seqID, m, 9)
resultlist = []
for i in range(len(hashBucket1)):  # iterate over the query sequences
    value = ""
    for j in range(len(hashbucket1)):  # iterate over the database sequences
        flag1 = (hashBucket1[i] == hashbucket1[j]).all()  # do the hash families match?
        flag2 = (hashBucket2[i] == hashbucket2[j]).all()
        flag3 = (hashBucket3[i] == hashbucket3[j]).all()
        flag4 = (hashBucket4[i] == hashbucket4[j]).all()
        flag5 = (hashBucket5[i] == hashbucket5[j]).all()
        flag6 = (hashBucket6[i] == hashbucket6[j]).all()
        flag7 = (hashBucket7[i] == hashbucket7[j]).all()
        flag8 = (hashBucket8[i] == hashbucket8[j]).all()
        flag9 = (hashBucket9[i] == hashbucket9[j]).all()
        if flag1 or flag2 or flag3 or flag4 or flag5 or flag6 or flag7 or flag8 or flag9:  # compute DTW only when some hash family matches
            optimal_path, dtw_score = dtw_path(query[i], database[j])
            if dtw_score <= threshold:
                value += str(j) + " "
    resultlist.append(value)
time_end = time.time()
time_avg = (time_end - time_start) / len(query)
print('LSH_DTW: ', time_avg)

time_start = time.time()
resultlist_true = []
for i in range(len(query)):
    value = ""
    for j in range(len(database)):
        optimal_path, dtw_score = dtw_path(query[i], database[j])
        if dtw_score <= threshold:
            value += str(j) + " "
Example #25
def evaluate(boardSeq, comment, bert_emb, intvals, cnn1, w_hidden2hidden,
             boardenc2skill1, boardenc2skill2, comment2skill, comment2skill2,
             decoderBoard1, decoderBoard2, w_hidden2board, decoderComment):
    #
    boardSeq = boardSeq.permute(1, 0, 2)  #batch, 500, 512-> 500, batch, 512
    boardSeq = boardSeq.to(dtype=torch.float32)
    seq_len = boardSeq.size()[0]
    batch_size = boardSeq.size()[1]

    board_seq_encoded_enlarged = torch.zeros(seq_len, batch_size, d_model)

    for i in range(seq_len):
        board_seq_encoded_enlarged[i] = w_hidden2hidden(boardSeq[i])

    board_seq_encoded_enlarged = board_seq_encoded_enlarged.permute(
        1, 0, 2)  #(input_len, bs, 768)-> (bs, input_len, 768)

    skills = boardenc2skill1(board_seq_encoded_enlarged)

    skills2 = boardenc2skill2(skills)

    skill_len = skills.size()[1]

    board_per_skill = 25  #max(1, int(10/skill_len))

    #################Comment Encoding ##############################

    max_length = comment.size()[1]

    comment_skill = comment2skill(
        bert_emb.view(batch_size, max_length,
                      d_model))  # expected n_skills * bs * 768
    comment_skill2 = comment2skill2(comment_skill)
    comm_skill_len = comment_skill.size()[1]

    ###################### SKILLS GENERATION 8#######################

    decoder_input = torch.ones(1, batch_size, 768)  # <SOS_index>
    decoder_input_c = torch.ones(1, batch_size, 768)  # <SOS_index>
    skills2 = skills2.to(dtype=torch.float32)
    comment_skill2 = comment_skill2.to(dtype=torch.float32)
    decoder_input = decoder_input.to(dtype=torch.float32)
    decoder_input_c = decoder_input_c.to(dtype=torch.float32)

    count = 0
    use_teacher_forcing = random.random() < t_ratio
    generated_skills = torch.ones(16, batch_size, 768)
    generated_skills_c = torch.ones(16, batch_size, 768)

    for j in range(skills2.size()[1]):
        decoder_hidden = skills2[:, j, :].view(
            1, batch_size, d_model).to(dtype=torch.float32).repeat(1, 1, 1)
        decoder_hidden_c = comment_skill2[:, j, :].view(
            1, batch_size, d_model).to(dtype=torch.float32).repeat(1, 1, 1)

        for i in range(4):
            output, decoder_hidden = decoderBoard1(decoder_input,
                                                   decoder_hidden)
            output_c, decoder_hidden_c = decoderBoard1(decoder_input_c,
                                                       decoder_hidden_c)

            decoder_input = output
            decoder_input_c = output_c
            generated_skills[count] = output.view(batch_size, 768)
            generated_skills_c[count] = output_c.view(batch_size, 768)
            count = count + 1

    generated_skills = generated_skills.permute(1, 0, 2)
    generated_skills_c = generated_skills_c.permute(1, 0, 2)
    #######################################################BOARD GENERATION FROM 8 ############################\
    #decoder_input = torch.ones(1, bs, 84) # <SOS_index>
    decoder_inputB = boardSeq.clone()[0, :, :].view(
        1, batch_size, 512)  #game start positions as start token

    count = 0
    use_teacher_forcing = random.random() < t_ratio
    generated_boards = torch.ones(board_per_skill * 16, batch_size, 512)

    for j in range(generated_skills.size()[1]):
        decoder_hiddenB = generated_skills.clone()[:, j, :].view(
            1, batch_size, d_model).to(dtype=torch.float32).repeat(1, 1, 1)

        for i in range(board_per_skill):
            outputB, decoder_hiddenB = decoderBoard2(decoder_inputB,
                                                     decoder_hiddenB)
            if use_teacher_forcing and j * board_per_skill + i + 1 < seq_len:
                decoder_inputB = boardSeq.clone()[i + 1 + j *
                                                  board_per_skill, :, :].view(
                                                      1, batch_size, 512)
            else:
                decoder_inputB = outputB
            #print(output.shape)
            generated_boards[count] = outputB.view(batch_size, 512)
            count = count + 1

    #######################################################BOARD GENERATION FROM 2 ############################\
    #decoder_input = torch.ones(1, bs, 84) # <SOS_index>
    decoder_inputB1 = boardSeq.clone()[0, :, :].view(
        1, batch_size, 512)  #game start positions as start token

    count = 0
    use_teacher_forcing = random.random() < t_ratio
    generated_boards1 = torch.ones(board_per_skill * 4, batch_size, 512)

    for j in range(skills2.size()[1]):
        decoder_hiddenB1 = skills2.clone()[:, j, :].view(
            1, batch_size, d_model).to(dtype=torch.float32).repeat(1, 1, 1)

        for i in range(board_per_skill):
            outputB1, decoder_hiddenB1 = decoderBoard2(decoder_inputB1,
                                                       decoder_hiddenB1)
            if use_teacher_forcing and j * board_per_skill + i + 1 < seq_len:
                decoder_inputB1 = boardSeq.clone()[i + 1 + j *
                                                   board_per_skill, :, :].view(
                                                       1, batch_size, 512)
            else:
                decoder_inputB1 = outputB1
            #print(output.shape)
            generated_boards1[count] = outputB1.view(batch_size, 512)
            count = count + 1

    criterion = SoftDTW(gamma=1.0, normalize=True)
    if torch.cuda.device_count() > 1:
        criterion = CriterionParallel(criterion)

    generated_boards = generated_boards.permute(1, 0, 2)
    generated_boards1 = generated_boards1.permute(1, 0, 2)

    tar = boardSeq.permute(1, 0, 2).clone()[:, 1:, :]

    generated_boards_cpu = generated_boards.detach().cpu().numpy()
    generated_boards1_cpu = generated_boards1.detach().cpu().numpy()
    tar_cpu = tar.detach().cpu().numpy()
    intvals = intvals.detach().cpu().numpy()
    intvals = intvals[0]
    prefix = np.zeros(len(intvals))
    # print(intvals)
    tot_time = sum(intvals)
    prefix[0] = intvals[0]
    for i in range(1, len(intvals)):
        prefix[i] = prefix[i - 1] + intvals[i]

    multi = tot_time / 200.0

    pred = []
    prev = 0
    path, dist = dtw_path(generated_boards_cpu[0], tar_cpu[0])
    for prs in (path):
        if (prs[0] + 1) % board_per_skill == 0 and prs[0] != 0:
            pred.append((prs[1] - prev) * multi)
            prev = prs[1]

    # print(path)
    pred1 = []
    prev = 0
    path1, dist1 = dtw_path(generated_boards1_cpu[0], tar_cpu[0])
    for prs in (path1):
        if (prs[0] + 1) % board_per_skill == 0 and prs[0] != 0:
            pred1.append((prs[1] - prev) * multi)
            prev = prs[1]
    for i in range(1, len(pred)):
        pred[i] += pred[i - 1]
    for i in range(1, len(pred1)):
        pred1[i] += pred1[i - 1]
    pred = np.array(pred)
    pred1 = np.array(pred1)

    prefix = np.unique(prefix)
    pred = np.unique(pred)
    pred1 = np.unique(pred1)

    # print(prefix)
    # print(pred)
    # print(pred1)

    paths1, dists1 = dtw_path(prefix, pred)
    paths2, dists2 = dtw_path(prefix, pred1)
    # print(paths1)
    # print(paths2)
    curr = 0
    tot_den = 0
    tot_num = 0
    prev_d = -1
    prev_n = -1
    track = 0
    for nums in paths1:
        fi = nums[0]
        se = nums[1]
        x2 = prefix[fi]
        y2 = pred[se]
        if fi == 0:
            x1 = 0.0
        else:
            x1 = prefix[fi - 1]
        if se == 0:
            y1 = 0.0
        else:
            y1 = pred[se - 1]
        tot_num += max(0.0, min(x2, y2) - max(x1, y1))
        if prev_d != x2 and prev_n != x1:
            tot_den += max(0.0, max(x2, y2) - min(x1, y1))
            prev_d = x2
            prev_n = x1

    curr = 0
    tot_den1 = 0
    tot_num1 = 0
    prev_d = -1
    prev_n = -1
    track = 0
    for nums in paths2:
        fi = nums[0]
        se = nums[1]
        x2 = prefix[fi]
        y2 = pred1[se]
        if fi == 0:
            x1 = 0.0
        else:
            x1 = prefix[fi - 1]
        if se == 0:
            y1 = 0.0
        else:
            y1 = pred1[se - 1]
        tot_num1 += max(0.0, min(x2, y2) - max(x1, y1))
        if prev_d != x2 and prev_n != x1:
            tot_den1 += max(0.0, max(x2, y2) - min(x1, y1))
            prev_d = x2
            prev_n = x1
        # print(tot_den1)
        # print(tot_num1)
    # print("Alignment Score 1:",dists1)
    # print("Alignment Score 2:",dists2)
    # print("IoU Score 1:", tot_num/tot_den)
    # print("IoU Score 2:", tot_num1/tot_den1)
    return dists1, dists2, tot_num, tot_den, tot_num1, tot_den1
Example #26
def evaluate(boardSeq, comment, bert_emb, intvals, cnn1, w_hidden2hidden,
             boardenc2skill, comment2skill, decoderBoard, w_hidden2board,
             decoderComment):
    #
    boardSeq = torch.transpose(boardSeq, 0,
                               1)  #batch, 500, 512-> 500, batch, 512
    #     boardSeq = boardSeq.to(dtype = torch.float64)
    seq_len = boardSeq.size()[0]
    batch_size = boardSeq.size()[1]

    board_seq_encoded_enlarged = torch.zeros(seq_len, batch_size, d_model)

    for i in range(seq_len):
        board_seq_encoded_enlarged[i] = w_hidden2hidden(boardSeq[i])

    board_seq_encoded_enlarged = torch.transpose(
        board_seq_encoded_enlarged, 0,
        1)  #(input_len, bs, 768)-> (bs, input_len, 768)

    skills = boardenc2skill(board_seq_encoded_enlarged)
    skill_len = skills.size()[1]
    #     print('skill', skills.shape)

    #     print(skill_len)
    board_per_skill = 100  #max(1, int(10/skill_len))
    #     print(skills.shape)

    #################Comment Encoding ##############################
    #comment = torch.randn(batch_size, 12)

    max_length = comment.size()[1]

    #######################################################BOARD GENERATION############################\
    #decoder_input = torch.ones(1, bs, 84) # <SOS_index>
    decoder_inputB = boardSeq.clone()[0, :, :].view(
        1, batch_size, 512)  #game start positions as start token

    count = 0
    use_teacher_forcing = random.random() < t_ratio
    generated_boards = torch.ones(board_per_skill * skill_len, batch_size, 512)

    for j in range(skills.size()[1]):
        decoder_hiddenB = skills.clone()[:, j, :].view(
            1, batch_size, d_model).to(dtype=torch.float32).repeat(1, 1, 1)
        #         if use_cuda:
        #             decoder_hiddenB = decoder_hiddenB.to(device)
        for i in range(board_per_skill):
            outputB, decoder_hiddenB = decoderBoard(decoder_inputB,
                                                    decoder_hiddenB)

            decoder_inputB = outputB
            #print(output.shape)
            generated_boards[count] = outputB.view(batch_size, 512)
            count = count + 1

    generated_boards = torch.transpose(generated_boards, 0, 1)

    boardSeq = torch.transpose(boardSeq, 0,
                               1)  #.view(batch_size, seq_len, 8*8*15)
    tar = boardSeq.clone()[:, 1:, :]
    # loss_dtw = criterion(generated_boards, tar)

    generated_boards_cpu = generated_boards.detach().cpu().numpy()
    tar_cpu = tar.detach().cpu().numpy()

    intvals = intvals.detach().cpu().numpy()
    intvals = intvals[0]
    prefix = np.zeros(len(intvals))
    # print(intvals)
    tot_time = sum(intvals)
    prefix[0] = intvals[0]
    for i in range(1, len(intvals)):
        prefix[i] = prefix[i - 1] + intvals[i]

    multi = tot_time / 200.0
    pred = []
    prev = 0
    path, dist = dtw_path(generated_boards_cpu[0], tar_cpu[0])
    for prs in (path):
        if (prs[0] + 1) % 25 == 0 and prs[0] != 0:
            pred.append((prs[1] - prev) * multi)
            prev = prs[1]

    pred = np.array(pred)

    prefix = np.unique(prefix)
    pred = np.unique(pred)

    paths1, dists1 = dtw_path(prefix, pred)

    curr = 0
    tot_den = 0
    tot_num = 0
    prev_d = -1
    prev_n = -1
    track = 0
    for nums in paths1:
        fi = nums[0]
        se = nums[1]
        x2 = prefix[fi]
        y2 = pred[se]
        if fi == 0:
            x1 = 0.0
        else:
            x1 = prefix[fi - 1]
        if se == 0:
            y1 = 0.0
        else:
            y1 = pred[se - 1]
        tot_num += max(0.0, min(x2, y2) - max(x1, y1))
        if prev_d != x2 and prev_n != x1:
            tot_den += max(0.0, max(x2, y2) - min(x1, y1))
            prev_d = x2
            prev_n = x1

    return dists1, dists1, tot_num, tot_den, tot_num, tot_den
Example #27
        IAP_path_new = np.zeros((t_rescaled.shape[-1], 3))
        IAP_path_new[:, 0] = PchipInterpolator(t, path[:, 0])(t_rescaled)
        IAP_path_new[:, 1] = PchipInterpolator(t, path[:, 1])(t_rescaled)
        IAP_path_new[:, 2] = PchipInterpolator(t, path[:, 2])(t_rescaled)
        IAP_path_rescaled.append(IAP_path_new.tolist())

    # 2-3) vector_deviations
    # vector_path for each vector_traj
    labels = []
    for i, traj in enumerate(vector_trajs_rescaled):
        traj = np.array(traj)
        dtw_dists = []
        for j, path in enumerate(vector_paths_rescaled):
            path = np.array(path)
            warp_path, dtw_dist = metrics.dtw_path(traj[:, 1:4], path[:, 0:3])  # renamed to avoid shadowing dtw_path
            dtw_dists.append(dtw_dist)
        labels.append(np.argmin(dtw_dists))

    # deviations
    vector_trajs_updated, vector_trajs_dist, vector_deviations = [], [], []
    for j, path in enumerate(vector_paths_rescaled):
        path = np.array(path)
        for i, traj in enumerate(vector_trajs_rescaled):
            if labels[i] == j:
                vector_trajs_updated.append(traj.tolist())

                deviations = traj[:, 1:4] - path[:, 0:3]
                vector_deviations.append(deviations.tolist())

                traj_dist = np.sum([
Example #28
    def compute_DTW_on_iter(dataset,
                            iter,
                            numOfDrones,
                            drones,
                            per_series=True):
        print('iter: ', iter)
        dtw_results_dict = {
            'iter': [],
            'update_step': [],
            'drone': [],
            'comparison_drone': [],
            'DTW_dist': []
        }
        # print('iter: ',iter )
        dataset_iter = dataset.loc[dataset['iter'] == iter, :]
        # cut the df by current update step-win size
        update_step_ls = dataset_iter.update_step.unique()
        # number of features (all columns minus the non-sensor columns and the label)
        num_of_features = dataset_iter.shape[1] - len(no_sensors_cols +
                                                      ['label'])
        # iterate over time steps
        for update_step in update_step_ls:
            current_seq = dataset_iter.loc[
                (dataset_iter['update_step'] <= update_step)
                & (dataset_iter['update_step'] > (update_step - win_size))]
            # iterate over drones
            for droneIidx in range(numOfDrones):
                currentDrone = drones[droneIidx]
                currentDroneDf = current_seq.loc[current_seq.drone ==
                                                 currentDrone, :]
                # drop irrelevant cols and convert to numpy
                currentDroneNp = currentDroneDf.drop(
                    no_sensors_cols + ['label'], axis=1).to_numpy()
                if use_scaler:
                    scaled_currentDroneNp = StandardScaler().fit_transform(
                        currentDroneNp)
                else:
                    scaled_currentDroneNp = currentDroneNp
                for droneJidx in range(numOfDrones):
                    # skip self-comparisons and pairs already computed
                    if droneIidx >= droneJidx:
                        continue
                    # print(droneIidx, droneJidx)
                    otherDrone = drones[droneJidx]
                    otherDroneDf = current_seq.loc[current_seq.drone ==
                                                   otherDrone, :]
                    otherDroneNp = otherDroneDf.drop(
                        no_sensors_cols + ['label'], axis=1).to_numpy()
                    if use_scaler:
                        scaled_otherDroneNp = StandardScaler().fit_transform(
                            otherDroneNp)
                    else:
                        scaled_otherDroneNp = otherDroneNp
                    """compute DTW"""

                    if per_series:  # compute between each pair of series, return list

                        dist = [
                            dtw_path(scaled_currentDroneNp[:, i],
                                     scaled_otherDroneNp[:, i])[1]
                            for i in range(num_of_features)
                        ]
                        dist = np.array(dist)
                    else:
                        # path, dist = dtw_path(scaled_currentDroneNp, scaled_otherDroneNp)
                        path = ''
                        dist = dtw(scaled_currentDroneNp,
                                   scaled_otherDroneNp,
                                   window_type="sakoechiba",
                                   window_args={
                                       'window_size': 60
                                   }).distance
                    # print('Iter {} updatestep {} DroneI {} DroneJ {} DTW {}'.format(iter,update_step,currentDrone, otherDrone, dist))
                    # save results of current drone
                    dtw_results_dict['iter'].append(iter)
                    dtw_results_dict['update_step'].append(update_step)
                    dtw_results_dict['drone'].append(currentDrone)
                    dtw_results_dict['comparison_drone'].append(otherDrone)
                    dtw_results_dict['DTW_dist'].append(
                        dist)  # ; dtw_results_dict['DTW_path'].append(path)
                    # save results of other drone
                    dtw_results_dict['iter'].append(iter)
                    dtw_results_dict['update_step'].append(update_step)
                    dtw_results_dict['drone'].append(otherDrone)
                    dtw_results_dict['comparison_drone'].append(currentDrone)
                    dtw_results_dict['DTW_dist'].append(
                        dist)  # ; dtw_results_dict['DTW_path'].append(path)

        print('iter done: ', iter)
        return dtw_results_dict
Example #29
def make_one_folium(sz, a=1., noise=.1, resample_fun=None):
    theta = np.linspace(0, 1, sz)
    if resample_fun is not None:
        theta = resample_fun(theta)
    theta -= .5
    theta *= .9 * np.pi
    theta = theta.reshape((-1, 1))
    r = a / 2 * (4 * np.cos(theta) - 1. / np.cos(theta))
    x = r * np.cos(theta) + np.random.rand(sz, 1) * noise
    y = r * np.sin(theta) + np.random.rand(sz, 1) * noise
    return np.array(np.hstack((x, y)))

trajectory = make_one_folium(sz=30).dot(get_rot2d(np.pi + np.pi / 3))
rotated_trajectory = trajectory.dot(get_rot2d(np.pi / 4)) + np.array([0., 3.])

path_dtw, _ = dtw_path(trajectory, rotated_trajectory)

path_ctw, cca, _ = ctw_path(trajectory, rotated_trajectory,
                            max_iter=100, n_components=2)

plt.figure(figsize=(8, 4))
ax = plt.subplot(1, 2, 1)
for (i, j) in path_dtw:
    ax.plot([trajectory[i, 0], rotated_trajectory[j, 0]],
            [trajectory[i, 1], rotated_trajectory[j, 1]],
            color='g' if i == j else 'r', alpha=.5)
plot_trajectory(trajectory, ax)
plot_trajectory(rotated_trajectory, ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("DTW")
Example #30
            DTW_matrix[i, j] = cost + last_min

    print("DTW Distance Matrix")
    return DTW_matrix


def plot_matrix(cdist_matrix, path):
    plt.plot([j for (i, j) in path], [i for (i, j) in path],
             "w-",
             linewidth=3.)
    plt.imshow(cdist_matrix)
    plt.show()


#ts1 = [1,3,1,2,1,1,1]
#ts2 = [1,1,3,1,2,1,1]

ts1 = [1, 3, 4, 9, 8, 2, 1, 5, 7, 3]
ts2 = [1, 6, 2, 3, 0, 9, 4, 3, 6, 3]
a = compute_DTW(ts1, ts2)

path, sim = metrics.dtw_path(ts1, ts2)
#arr = metrics.cdist_dtw(ts1, ts2)
print("Using tslearn library")
print(f"Path : {path}")
print(f"Similarity Score: {sim}")
#print(np.shape(arr))
#cdist_matrix = cdist([ts1], [ts2])
#print(cdist_matrix)
plot_matrix(a, path)