Example #1
    def load_data(self):
        """
        Prepare numpy array X with shape (num_instances, img_size, img_size, num_variables)
        and Y with shape (num_instances, num_variables).
        """

        X, Y = np.empty((self.num_instances, self.img_size, self.img_size, self.num_variables)), \
               np.empty((self.num_instances, self.num_variables))
        print(X.shape)

        # Initialize PAA transformer
        paa = PiecewiseAggregateApproximation(window_size=None,
                                              output_size=self.img_size,
                                              overlapping=False)
        rp = RecurrencePlot()

        # For all instances
        start = time.time()
        for idx, row in enumerate(self.data.iterrows()):
            for i in range(self.num_variables):
                # Get current variable's series
                # Apply linear interpolation on missing values
                s = row[1][i].interpolate(
                    limit_direction='both').to_numpy()[:self.ts_length]
                # Apply PAA and RP
                X[idx, :, :, i] = rp.transform(
                    paa.transform(np.expand_dims(s[:-1], axis=0)))[0]
                Y[idx, i] = s[-1]
        end = time.time()
        print(f"Data loaded in {end - start} seconds")

        return X, Y
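For reference, a minimal self-contained sketch of the same PAA-then-RecurrencePlot step on a single synthetic series (the sine input and the 64x64 size are illustrative assumptions, not taken from the class above):

import numpy as np
from pyts.approximation import PiecewiseAggregateApproximation
from pyts.image import RecurrencePlot

# One synthetic series of length 200, reduced to a 64x64 recurrence plot.
img_size = 64
s = np.sin(np.linspace(0, 8 * np.pi, 200))

paa = PiecewiseAggregateApproximation(window_size=None,
                                      output_size=img_size,
                                      overlapping=False)
rp = RecurrencePlot()

s_paa = paa.fit_transform(s.reshape(1, -1))   # shape (1, img_size)
image = rp.fit_transform(s_paa)[0]            # shape (img_size, img_size)
print(image.shape)                            # (64, 64)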
Example #2
def encode_dataset(dataset,
                   batch_size,
                   downscale_factor,
                   pooling_function,
                   dimension=1,
                   time_delay=1,
                   threshold=None):
    """ Computation of encodings has to be done in batches due to the large size of the dataset.
        Otherwise the kernel will die!
        
        For downscaling pick np.mean (average pooling) or np.max (max pooling) respectively.
        If downscaling is not required choose downscale_factor=1.
        Keep in mind the network expects an input image size of 64x64.
        
        The function returns a 3D matrix.
        The new 3D matrix contains several 2D matrices, which correspond to the time series encodings/images.
        The order of the objects does not change, which means for example that the 23rd slice of the 
        input dataset corresponds to the 23rd encoding in the 3D Matrix."""

    n, l = np.shape(dataset)
    f = downscale_factor
    n_batches = n // batch_size
    batches = np.linspace(1, n_batches, n_batches, dtype=int) * batch_size

    rp = RecurrencePlot(dimension=dimension,
                        time_delay=time_delay,
                        threshold=threshold)

    print('Encoding started...')
    for p in range(n_batches):
        if p == 0:
            X_rp = rp.transform(dataset[0:batches[p], :])
            sample = block_reduce(X_rp[0],
                                  block_size=(f, f),
                                  func=pooling_function)
            l_red = sample.shape[0]
            X_rp_red = np.zeros((n, l_red, l_red))
            print('output 3D Matrix shape: ', np.shape(X_rp_red))

            j = 0
            for i in range(0, batches[p]):
                X_rp_red[i] = block_reduce(X_rp[j],
                                           block_size=(f, f),
                                           func=pooling_function)
                j += 1

        else:
            X_rp = rp.transform(dataset[batches[p - 1]:batches[p], :])

            j = 0
            for i in range(batches[p - 1], batches[p]):
                X_rp_red[i] = block_reduce(X_rp[j],
                                           block_size=(f, f),
                                           func=pooling_function)
                j += 1

    print('Encoding successful!')
    print('#####################################')

    return X_rp_red
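A minimal sketch of the downscaling step in isolation, assuming skimage's block_reduce is available (the 128-point random series and factor 2 are illustrative):

import numpy as np
from pyts.image import RecurrencePlot
from skimage.measure import block_reduce

# One series of length 128 -> unthresholded 128x128 recurrence plot,
# then average pooling with factor 2 -> 64x64.
X_demo = np.random.randn(1, 128)
img = RecurrencePlot().fit_transform(X_demo)[0]
img_small = block_reduce(img, block_size=(2, 2), func=np.mean)
print(img.shape, img_small.shape)   # (128, 128) (64, 64)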
Example #3
def rri_test_recurrent(filelist=None):
    global shape_tmp
    for i in range(len(filelist)):

        # for i in range(10):
        with open(filelist[i], 'rb') as f:
            plk_tmp = pkl.load(f)
        ecg_re = ecg.ecg(signal=plk_tmp, sampling_rate=Fs, show=False)
        rpeaks_tmp = ecg_re['rpeaks'].tolist()
        nni = tools.nn_intervals(rpeaks=rpeaks_tmp)
        nni_tmp = nni.reshape((-1, int(nni.shape[0])))  # reshape to 2D (1, n_intervals) for pyts
        rp = RecurrencePlot(threshold='point', percentage=20)
        X_rp = rp.fit_transform(nni_tmp)
        dst = cv2.resize(X_rp[0],
                         dsize=(135, 135),
                         interpolation=cv2.INTER_AREA)
        shape_tmp.append(X_rp.shape)
        recurrence_tmp.append(X_rp)
        recur_resize.append(dst)
        # for pandas
        # shape_tmp = shape_tmp.append(pd.DataFrame(X_rp.shape))
        # plot check
        plt.imshow(X_rp[0], cmap='binary', origin='lower')
        plt.plot(nni)
        plt.title('Recurrence Plot', fontsize=16)
        plt.tight_layout()
        plt.show()
        # np_tmp = np.column_stack([np_tmp, X_rp])
    return shape_tmp, recurrence_tmp, np.asarray(recur_resize)
Example #4
def save_recurrencePlots(net, save_recurrencePlots_file):
    global save_recurrence_plots
    if save_recurrence_plots:
        for name, parameters in net.named_parameters():
            if "fc" in name and parameters.cpu().detach().numpy().ndim == 2:
                hiddenState = parameters.cpu().detach().numpy()
                rp = RecurrencePlot()
                X_rp = rp.fit_transform(hiddenState)
                plt.figure(figsize=(6, 6))
                plt.imshow(X_rp[0], cmap='binary', origin='lower')
                plt.savefig(save_recurrencePlots_file, dpi=600)
            else:
                continue
    else:
        pass
Example #5
    def ts_imaging(self, data):
        """
        Calculates the image representation for each batch.
        Args:
            data (tf.tensor(batch_size, sequence_length)): a batch of the aggregate power sequences

        Raises:
            ImagingMethodError: Error raised in case a wrong image transform is provided

        Returns:
            tensor(batch_size, img_size, img_size, 1): the image representation of the input data
        """
        if self.img_method == 'gasf':
            transformer = GramianAngularField(image_size=self.img_size,
                                              method='summation')
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'gadf':
            transformer = GramianAngularField(image_size=self.img_size,
                                              method='difference')
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'mtf':
            transformer = MarkovTransitionField(image_size=self.img_size)
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'rp':
            transformer = RecurrencePlot(threshold='point', percentage=20)
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'all':
            print("""

                To use the three images at once,
                the input layer needs to be adapted (line 197 in IM2Seq.py).
                The new input layer should be:
                model.add(Conv2D(filters=8, kernel_size=4, strides=2, activation='linear',
                         input_shape=(self.img_size, self.img_size, 3)))

            """)
            RP = RecurrencePlot(threshold='point',
                                percentage=20).fit_transform(data)
            GASF = GramianAngularField(image_size=self.img_size,
                                       method='summation').fit_transform(data)
            MTF = MarkovTransitionField(
                image_size=self.img_size).fit_transform(data)
            tsi = np.stack([RP, GASF, MTF], axis=3)

        else:
            raise ImagingMethodError()

        return tsi
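The 'all' branch stacks three pyts encodings into a 3-channel image. A standalone sketch, under the assumption that the sequence length equals img_size (RecurrencePlot has no image_size parameter, so the shapes only line up in that case; the batch below is illustrative):

import numpy as np
from pyts.image import RecurrencePlot, GramianAngularField, MarkovTransitionField

img_size = 64
data = np.random.rand(8, img_size)   # illustrative batch of 8 sequences of length img_size

rp = RecurrencePlot(threshold='point', percentage=20).fit_transform(data)
gasf = GramianAngularField(image_size=img_size, method='summation').fit_transform(data)
mtf = MarkovTransitionField(image_size=img_size).fit_transform(data)

tsi = np.stack([rp, gasf, mtf], axis=3)   # (8, img_size, img_size, 3)
print(tsi.shape)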
Example #6
    def evaluate_classifiers(dst):
        print("[%s] Processing dataset %s" % (datetime.now().strftime("%F %T"), dst))

        train_x, train_y = load_from_tsfile_to_dataframe(os.path.join(UCR_DATASET_PATH, dst, dst + "_TRAIN.ts"))
        test_x, test_y = load_from_tsfile_to_dataframe(os.path.join(UCR_DATASET_PATH, dst, dst + "_TEST.ts"))
        data_train = [train_x.iloc[i][0] for i in range(train_x.shape[0])]
        data_test = [test_x.iloc[i][0] for i in range(test_x.shape[0])]
        enc = LabelEncoder().fit(train_y)
        ohe = OneHotEncoder(sparse=False)
        labels_encoded = enc.transform(train_y)
        integer_encoded = labels_encoded.reshape(len(labels_encoded), 1)
        labels_train = ohe.fit_transform(integer_encoded)
        ts_plotters = [RecurrencePlot(threshold='point', percentage=20),
                       MarkovTransitionField(),
                       GramianAngularField()]

        def evaluate_classifier(plot_obj):
            try:
                classifier = classif_class(input_dim, num_classes=len(set(train_y)),
                                           batch_size=batch_size, series_plot_obj=plot_obj)
                classifier.train(data_train, labels_train, n_epochs=n_epochs)
                y_pred = [classifier.predict(series) for series in data_test]
                y_pred = enc.inverse_transform(y_pred)
                accuracy = accuracy_score(test_y, y_pred)
                f1 = f1_score(test_y, y_pred, average='macro')
                with open("tsplot_results.csv", "a") as f:
                    f.write("{};{};{};{};{}\n".format(classif_class.__class__.__name__, plot_obj.__class__.__name__, dst, accuracy, f1))
                return accuracy, f1
            except Exception as e:
                print("Exception while evaluating classifier:", e.__str__())
                return float('nan'), float('nan')

        return list(itertools.chain(*[evaluate_classifier(plot_obj) for plot_obj in ts_plotters]))
Example #7
class TSToRP(Transform):
    r"""Transforms a time series batch to a 4d TSImage (bs, n_vars, size, size) by applying Recurrence Plot.
    It requires input to be previously normalized between -1 and 1"""
    order = 98

    def __init__(self, size=224, cmap=None, **kwargs):
        self.size, self.cmap = size, cmap
        self.encoder = RecurrencePlot(**kwargs)

    def encodes(self, o: TSTensor):
        bs, *_, seq_len = o.shape
        size = ifnone(self.size, seq_len)
        if size != seq_len:
            o = F.interpolate(o.reshape(-1, 1, seq_len),
                              size=size,
                              mode='linear',
                              align_corners=False)[:, 0]
        else:
            o = o.reshape(-1, seq_len)
        output = self.encoder.fit_transform(o.cpu().numpy()) / 2
        output = output.reshape(bs, -1, size, size)
        if self.cmap and output.shape[1] == 1:
            output = TSImage(plt.get_cmap(
                self.cmap)(output)[..., :3]).squeeze(1).permute(0, 3, 1, 2)
        else:
            output = TSImage(output)
        return output.to(device=o.device)
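A minimal sketch of the same resize-then-encode idea with plain torch tensors and pyts, leaving out the tsai-specific wrappers (TSTensor, TSImage, ifnone); the batch shape and target size are illustrative:

import torch
import torch.nn.functional as F
from pyts.image import RecurrencePlot

bs, n_vars, seq_len, size = 4, 2, 300, 224
o = torch.randn(bs, n_vars, seq_len).clamp(-1, 1)   # assumed pre-normalized to [-1, 1]

# Linearly interpolate every channel to the target length, as encodes() does above.
o = F.interpolate(o.reshape(-1, 1, seq_len), size=size,
                  mode='linear', align_corners=False)[:, 0]

output = RecurrencePlot().fit_transform(o.cpu().numpy()) / 2
output = output.reshape(bs, n_vars, size, size)
print(output.shape)   # (4, 2, 224, 224)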
Example #8
 def __init__(self, input_dim, num_classes, batch_size=16, series_plot_obj=None):
     self.input_dim = input_dim
     self.batch_size = batch_size
     self.num_classes = num_classes
     self.series_plot_obj = series_plot_obj
     if self.series_plot_obj is None:
         self.series_plot_obj = RecurrencePlot(threshold='point', percentage=20)
     self.init_model()
Example #9
def test_actual_results_single_value(params):
    """Test that the actual results are the expected ones."""
    arr_actual = JointRecurrencePlot(**params).transform(X)
    arr_desired = []
    for i in range(n_features):
        arr_desired.append(RecurrencePlot(**params).transform(X[:, i]))
    arr_desired = np.prod(arr_desired, axis=0)
    np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
Example #10
def mrp_encode_3_to_4(arr_3d, percentage=60, swap=(2, 2)):

    transformer_multi = MultivariateTransformer(
        RecurrencePlot(threshold='point', percentage=percentage),
        flatten=False)
    recplot_isff_4d = (transformer_multi.fit_transform(
        array.swapaxes(swap[0], swap[1])) for array in arr_3d)
    return recplot_isff_4d
Example #11
def toRPdata(tsdatas,
             dimension=1,
             time_delay=1,
             threshold=None,
             percentage=10,
             flatten=False):
    X = []
    rp = RecurrencePlot(dimension=dimension,
                        time_delay=time_delay,
                        threshold=threshold,
                        percentage=percentage,
                        flatten=flatten)
    for data in tsdatas:
        data_rp = rp.fit_transform(data)
        X.append(data_rp[0])

    return np.array(X)
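A possible usage sketch, assuming toRPdata above is in scope; each element of tsdatas is expected to be a 2D array of shape (1, n_timestamps), since only the first transformed image is kept (the three sine series below are illustrative):

import numpy as np

series_list = [np.sin(np.linspace(0, 6 * np.pi, 120) + phase).reshape(1, -1)
               for phase in (0.0, 0.5, 1.0)]
images = toRPdata(series_list, threshold='point', percentage=10)
print(images.shape)   # (3, 120, 120)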
Example #12
def test_actual_results_without_flatten():
    """Test that the actual results are the expected ones."""
    params = {'estimator': RecurrencePlot(dimension=6), 'flatten': False}
    arr_actual = MultivariateTransformer(**params).fit_transform(X)
    arr_desired = []
    for i in range(n_features):
        arr_desired.append(params['estimator'].transform(X[:, i]))
    arr_desired = np.transpose(arr_desired, axes=(1, 0, 2, 3))
    np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
Example #13
 def __init__(self, series, labels, img_size=128, batch_size=32, series_plot_obj=None):
     self.series = series
     self.labels = labels
     self.img_dim = img_size
     self.batch_size = batch_size
     self.indexes = np.arange(len(self.series))
     self.rp = series_plot_obj
     if self.rp is None:
         self.rp = RecurrencePlot(threshold='point', percentage=20)
Example #14
def test_actual_results_with_flatten():
    """Test that the actual results are the expected ones."""
    params = {'estimator': RecurrencePlot(dimension=6), 'flatten': True}
    arr_actual = MultivariateTransformer(**params).fit_transform(X)
    arr_desired = []
    for i in range(n_features):
        arr_desired.append(params['estimator'].transform(X[:, i]).reshape(
            (n_samples, -1)))
    arr_desired = np.concatenate(arr_desired, axis=1)
    np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
Example #15
def RP_encoder(ts,
               size=None,
               dimension=1,
               time_delay=1,
               threshold=None,
               percentage=10,
               norm_output=True,
               **kwargs):
    ts = To2dArray(ts)
    assert ts.ndim == 2, 'ts ndim must be 2!'
    if size is None: size = ts.shape[-1]
    else: size = min(size, ts.shape[-1])
    ts = PAA(window_size=None, output_size=size).fit_transform(ts)
    encoder = RP(dimension=dimension,
                 time_delay=time_delay,
                 threshold=threshold,
                 percentage=percentage)
    output = np.squeeze(encoder.fit_transform(ts), 0)
    if norm_output: return norm(output)
    else: return output
Example #16
def rp_transform(data, image_size=500, show=False, img_index=0):
    # RP transformation
    transform = RecurrencePlot(dimension=1,
                               threshold='percentage_points',
                               percentage=30)
    return (pyts_transform(transform,
                           data,
                           image_size=image_size,
                           show=show,
                           cmap='binary',
                           img_index=img_index))
Example #17
def test_actual_results_lists(params):
    """Test that the actual results are the expected ones."""
    arr_actual = JointRecurrencePlot(**params).transform(X)
    arr_desired = []
    for i, (threshold, percentage) in enumerate(
            zip(params['threshold'], params['percentage'])):
        arr_desired.append(
            RecurrencePlot(threshold=threshold,
                           percentage=percentage).transform(X[:, i]))
    arr_desired = np.prod(arr_desired, axis=0)
    np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
Example #18
    def ts_imaging(self, data):
        """
        Calculates the image representation for each batch.
        Args:
            data (tf.tensor(batch_size, sequence_length)): a batch of the aggregate power sequences

        Raises:
            ImagingMethodError: Error raised in case a wrong image transform is provided

        Returns:
            tensor(batch_size, img_size, img_size, 1): the image representation of the input data
        """
        if self.img_method == 'gasf':
            transformer = GramianAngularField(image_size=self.img_size,
                                              method='summation')
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'gadf':
            transformer = GramianAngularField(image_size=self.img_size,
                                              method='difference')
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'mtf':
            transformer = MarkovTransitionField(image_size=self.img_size)
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'rp':
            transformer = RecurrencePlot(threshold='point', percentage=20)
            tsi = transformer.fit_transform(data)
        elif self.img_method == 'all':
            RP = RecurrencePlot(threshold='point',
                                percentage=20).fit_transform(data)
            GASF = GramianAngularField(image_size=self.img_size,
                                       method='summation').fit_transform(data)
            MTF = MarkovTransitionField(
                image_size=self.img_size).fit_transform(data)
            tsi = np.stack([RP, GASF, MTF], axis=3)

        else:
            raise ImagingMethodError()

        return tsi
Example #19
    def seq_to_rp(self, X):

        X_rp = np.empty((len(X), *self.input_shape))

        for i, x in enumerate(X):

            img = RecurrencePlot(**self.rp_params).fit_transform([x])[0]
            img = cv2.resize(img,
                             dsize=self.input_shape[:2],
                             interpolation=cv2.INTER_CUBIC).astype(
                                 self.data_type)

            if np.sum(img) > 0:
                # TODO: improve fit/predict statistics
                # Normalize
                if self.normalize:
                    # MinMax scaling to (0, 1)
                    img = (img - img.min()) / (img.max() - img.min())
                # img = (img - img.mean()) / np.max([img.std(), 1e-4])

                # # center
                # if centralizar:
                #     img -= img.mean()

                # Standardize
                elif self.standardize:
                    # tf.image.per_image_standardization(img).numpy()
                    img = (img - img.mean()) / img.std()

                elif self.rescale:
                    img = (img - img.min()) / (img.max() - img.min())

            # N channels
            img = np.stack([img for i in range(self.input_shape[-1])],
                           axis=-1).astype(self.data_type)

            X_rp[i, ] = img

        return X_rp
Example #20
def create_rp(segment,
              dimension=2, time_delay=1, percentage=1, use_clip=False, knn=None, imsize=None,
              images_dir='', base_name='Sample',
              suffix='jpg', # suffix='png'
              show_image=False, cmap=None, ##cmap='gray', cmap='binary'
             ):
    """Generate recurrence plot for specified signal segment and save to disk"""

    if base_name is None:
        base_name  = 'sample'
    fname = '{}_d{}_t{}_p{}{}.{}'.format(base_name, dimension, time_delay, percentage,
                                       '_clipped' if use_clip else '', suffix)

    segment = np.expand_dims(segment, 0)
    if knn is not None:
        rp = RecurrencePlot(dimension=dimension, time_delay=time_delay)
        X_dist = rp.fit_transform(segment)[0]
        X_rp = mask_knn(X_dist, k=knn, policy='cols')
    elif use_clip:
        rp = RecurrencePlot(dimension=dimension, time_delay=time_delay)
        X_dist = rp.fit_transform(segment)
        X_rp = rp_norm(X_dist, threshold='percentage_clipped', percentage=percentage)[0]
    else:
        rp = RecurrencePlot(dimension=dimension, time_delay=time_delay,
                            #threshold='percentage_points', percentage=percentage)
                            threshold='point', percentage=percentage)
        X_rp = rp.fit_transform(segment)[0]

    if imsize is not None:
        X_rp = resize_rp(X_rp, new_shape=imsize, use_max=True)

    imageio.imwrite(os.path.join(images_dir, fname), np_to_uint8(X_rp))
    if show_image:
        plt.figure(figsize=(3, 3))
        plt.imshow(X_rp, cmap=cmap, origin='lower')
        plt.title('Recurrence Plot for {}'.format(fname), fontsize=14)
        plt.show()
    return fname
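create_rp relies on project helpers that are not shown here (mask_knn, rp_norm, resize_rp, np_to_uint8). A self-contained sketch of the default 'point'-threshold branch plus the save step, with a hand-rolled uint8 conversion standing in for np_to_uint8 (the signal, parameters and file name are illustrative):

import os
import numpy as np
import imageio
from pyts.image import RecurrencePlot

segment = np.sin(np.linspace(0, 10 * np.pi, 500))
rp = RecurrencePlot(dimension=2, time_delay=1, threshold='point', percentage=1)
X_rp = rp.fit_transform(segment.reshape(1, -1))[0]

# Scale to uint8 before writing to disk (stand-in for np_to_uint8).
rng = max(X_rp.max() - X_rp.min(), 1e-12)
img_u8 = (255 * (X_rp - X_rp.min()) / rng).astype(np.uint8)
imageio.imwrite(os.path.join('.', 'sample_d2_t1_p1.jpg'), img_u8)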
Example #21
def generate_rp(frame, dimension, threshold, percentage, file_name, count):
    data_rp = []
    data_rp.append(frame.values.reshape(1, -1))
    data_rp.append(frame.values.reshape(1, -1))
    data_rp = np.asarray(data_rp)

    # Recurrence plot transformation

    #X_rp1 = RecurrencePlot(dimension=3, time_delay=1,threshold=None, percentage=0).fit_transform(data_rp[0])[0]
    X_rp1 = RecurrencePlot(dimension=dimension,
                           time_delay=1,
                           threshold=threshold,
                           percentage=percentage).fit_transform(data_rp[0])[0]

    imgplot = plt.imshow(X_rp1, cmap='binary', origin='lower')

    fig1 = plt.gcf()
    #plt.show()
    plt.draw()
    fig1.savefig('D:/Sundar/speech/Speaker/Threshold/0/Sachin/' + file_name +
                 '_RP' + str(count) + '.png')
Example #22
def serie_para_imagem(serie,
                      params_rp=PARAMETROS_RP,
                      tam_imagem=TAMANHO_IMAGEM_RP,
                      normalizar=False,
                      padronizar=False,
                      tipo_dados=TIPO_DADOS):
    """
    Funcao responsavel por gerar e tratar a imagem RP (baseado estudo #17).
    """
    # Gerando imagem RP/redimensiona_prndo
    imagem = RecurrencePlot(**params_rp).fit_transform([serie])[0]
    imagem = cv2.resize(imagem,
                        dsize=tam_imagem[:2],
                        interpolation=cv2.INTER_CUBIC).astype(tipo_dados)

    if np.sum(imagem) > 0:
        # Normalizar
        if normalizar:
            imagem = (imagem - imagem.min()) / (imagem.max() - imagem.min()
                                                )  # MinMax (0,1)
        #imagem = (imagem - imagem.mean()) / np.max([imagem.std(), 1e-4])

    #     # centralizar
    #     if centralizar:
    #         imagem -= imagem.mean()

    # Padronizar
        elif padronizar:
            imagem = (imagem - imagem.mean()) / imagem.std(
            )  #tf.image.per_image_standardization(imagem).numpy()

    # N canais
    imagem = np.stack([imagem for i in range(tam_imagem[-1])],
                      axis=-1).astype(tipo_dados)

    return imagem
Example #23
#print(len(frame[:f]))
#print(len(frame[:]))

# In[103]:

data_rp = []
data_rp.append(frame.values.reshape(1, -1))
data_rp.append(frame.values.reshape(1, -1))
data_rp = np.asarray(data_rp)

# Recurrence plot transformation

#X_rp1 = RecurrencePlot(dimension=3, time_delay=1,threshold=None, percentage=0).fit_transform(data_rp[0])[0]
X_rp1 = RecurrencePlot(dimension=3,
                       time_delay=1,
                       threshold='point',
                       percentage=5).fit_transform(data_rp[0])[0]

imgplot = plt.imshow(X_rp1, cmap='binary', origin='lower')

fig1 = plt.gcf()
plt.show()
plt.draw()
fig1.savefig('Sound/threshold/0/anil/' + 'sample' + '_RP' + '1' + '.png')

# ### Recurrence Plots for A, C & E at None Threshold

# In[ ]:

dir_names = ["/gdrive/My Drive/EEG/S"]
Example #24
def sig2img(signals):
    rp = RecurrencePlot(dimension=1, time_delay=1, threshold=None)
    signals = signals.reshape(1, -1)
    img = rp.fit_transform(signals)
    return img
Example #25
"""
Recurrence plots for the 50 time series in the 'GunPoint' dataset:
50 recurrence plots are plotted.
"""  # noqa:E501

# Author: Johann Faouzi <*****@*****.**>
# License: BSD-3-Clause

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import RecurrencePlot
from pyts.datasets import load_gunpoint

# Load the GunPoint dataset
X, _, _, _ = load_gunpoint(return_X_y=True)

# Get the recurrence plots for all the time series
rp = RecurrencePlot(threshold='point', percentage=20)
X_rp = rp.fit_transform(X)

# Plot the 50 recurrence plots
fig = plt.figure(figsize=(10, 5))

grid = ImageGrid(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True)
for i, ax in enumerate(grid):
    ax.imshow(X_rp[i], cmap='binary', origin='lower')
grid[0].get_yaxis().set_ticks([])
grid[0].get_xaxis().set_ticks([])

fig.suptitle(
    "Recurrence plots for the 50 time series in the 'GunPoint' dataset",
    y=0.92)
Example #26
def PSOGSA_ResNet(dataset, max_iters, num_particles, Epochs, NumSave, lr,
                  resume, savepath):
    np.seterr(divide='ignore', invalid='ignore')
    # %config InlineBackend.figure_format = 'retina'
    c1 = 2
    c2 = 2
    g0 = 1
    dim = 2
    w1 = 2
    wMax = 0.9
    wMin = 0.5
    current_fitness = np.zeros((num_particles, 1))
    gbest = np.zeros((1, dim))
    gbest_score = float('inf')
    OldBest = float('inf')

    convergence = np.zeros(max_iters)
    alpha = 20
    epsilon = 1

    class Particle:
        pass

    # all particles initialized
    particles = []
    Max = 1024
    for i in range(num_particles):
        p = Particle()
        p.params = [
            np.random.randint(Max * 0.5, Max),
            np.random.uniform(0.2, 0.9)
        ]

        p.fitness = rnd.rand()
        p.velocity = 0.3 * rnd.randn(dim)
        p.res_force = rnd.rand()
        p.acceleration = rnd.randn(dim)
        p.force = np.zeros(dim)
        p.id = i
        particles.append(p)

    #training
    print('training begins:', dataset)

    for i in range(max_iters):
        if i % 10 == 0:
            print('iteration number:', i)
        # gravitational constant
        g = g0 * np.exp((-alpha * i) / max_iters)
        # calculate mse
        cf = 0
        for p in particles:
            fitness = 0
            y_train = 0
            if p.params[0] < Max * 0.5 or p.params[0] > Max:
                p.params[0] = np.random.randint(Max * 0.5, Max)

            if p.params[1] < 0.2 or p.params[1] > 0.9:
                p.params[1] = np.random.uniform(0.2, 0.9)

            print('hidden size, and contraction coefficients are:',
                  p.params[0], p.params[1])
            [fitness, hidden0] = ResNetBasics.ResNet(dataset, p.params, Epochs,
                                                     1, lr, resume, savepath)
            hiddensize = int(p.params[0])

            #         fitness = fitness/X.shape[0]
            OldFitness = fitness
            current_fitness[cf] = fitness
            cf += 1
            if gbest_score > fitness and OldBest > fitness:
                hiddenState = hidden0.cpu().detach().numpy()
                rp = RecurrencePlot()
                X_rp = rp.fit_transform(hiddenState)
                plt.figure(figsize=(6, 6))
                plt.imshow(X_rp[0], cmap='binary', origin='lower')
                #         plt.title('Recurrence Plot', fontsize=14)
                plt.savefig(savepath + 'RecurrencePlots/' +
                            'RecurrencePlots_' + dataset +
                            str(round(fitness, NumSave)) + '_' +
                            str(hiddensize) + '_' + '.png',
                            dpi=600)
                plt.show()
                """weightsName='reservoir.weight_hh'
                for name, param in named_parameters:
        #             print(name,param)
                    if name.startswith(weightsName):
        #                 set_trace()
                        torch.save(param,savepath+'weights'+str(round(fitness,6))+'.pt')"""
                OldBest = gbest_score
                gbest_score = fitness
                gbest = p.params

        best_fit = min(current_fitness)
        worst_fit = max(current_fitness)

        for p in particles:
            p.mass = (current_fitness[particles.index(p)] -
                      0.99 * worst_fit) / (best_fit - worst_fit)

        for p in particles:
            p.mass = p.mass * 5 / sum([p.mass for p in particles])

        # gravitational force
        for p in particles:
            for x in particles[particles.index(p) + 1:]:
                p.force = (g * (x.mass * p.mass) *
                           (np.array(p.params) - np.array(x.params)).tolist()
                           ) / (euclid_dist(p.params, x.params))

        # resultant force
        for p in particles:
            p.res_force = p.res_force + rnd.rand() * p.force

        # acceleration
        for p in particles:
            p.acc = p.res_force / p.mass

        w1 = wMin - (i * (wMax - wMin) / max_iters)

        # velocity
        for p in particles:

            p.velocity = w1 * p.velocity + rnd.rand() * p.acceleration + (
                rnd.rand() * np.array(gbest) - np.array(p.params)).tolist()

        # position
        for p in particles:
            p.params = p.params + p.velocity

        convergence[i] = gbest_score

    plt.figure(figsize=(6, 6))
    plt.plot(convergence)
    plt.xlabel('Convergence')
    plt.ylabel('Error')
    plt.draw()
    plt.savefig(savepath + dataset + '_ConvergenceChanges.png', dpi=600)

    sys.stdout.write('\rMPSOGSA is training ResNet (Iteration = ' +
                     str(i + 1) + ', MSE = ' + str(gbest_score) + ')')
    sys.stdout.flush()
    # save results
    FileName = dataset + '_BestParameters.csv'
    newdata = [max_iters, num_particles, p.params, convergence]
    PathFileName = os.path.join(savepath, FileName)
    SV.SaveDataCsv(PathFileName, newdata)
Example #27
os.chdir(processed_data_dir)
plot_params()

# Parameters
n_samples, n_features = 100, 144

# dataset
X = np.load('filtered_calls/LblBla4548_130418-DC-46.npy')
X = X[2900:4000].reshape(1, -1)  #3200:3700

# Recurrence plot transformation
#X = np.load('filtered_calls/RedRas3600_110615-DC-10.npy')
#X = X[1500:3000].reshape(1, -1) #3200:3700
rp = RecurrencePlot(dimension=1,
                    time_delay=1,
                    threshold='distance',
                    percentage=5)
n = 250
X_rp = rp.fit_transform(X)
X_new = X_rp[0]
Y = np.zeros((len(X_new) - n * 2, n))
for i in range(len(X_new) - n * 2):
    for j in range(n):
        Y[i, j] = X_new[i + n, i + n + j]
fig = plt.figure(figsize=(5, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(Y.T,
          cmap='binary',
          origin='lower',
          extent=[0, len(X_new) - n * 2, 0, n])
color = sns.color_palette("Set1", n_colors=3, desat=0.7)[0]
Example #28
    parser.add_argument('--maxiter', default=1400, type=int)
    parser.add_argument('--gamma',
                        default=0.1,
                        type=float,
                        help='coefficient of clustering loss')
    parser.add_argument('--update_interval', default=140, type=int)
    parser.add_argument('--tol', default=0.001, type=float)
    parser.add_argument('--cae_weights',
                        default=None,
                        help='This argument must be given')
    args = parser.parse_args()

    # load dataset
    if args.dataset == 'simulated':
        x_vec = pd.read_pickle("data/simulated-data/simulated_timeseries.p")
        x = RecurrencePlot(percentage=20).fit_transform(x_vec)
        x = x.reshape(x.shape + (1, ))
        y = pd.read_pickle("data/simulated-data/simulated_target.p")
        n_clusters = len(np.unique(y))
    elif args.dataset == 'biological':
        x_vec = pd.read_pickle(
            "data/biological-data/preprocessed/real_data_timeseries_new.p")
        x = pd.read_pickle(
            "data/biological-data/preprocessed/real_image_data_new.p")
        x = x.reshape(x.shape + (1, ))
        x = x / 255.0
        y = None
        n_clusters = 5
        #x = RecurrencePlot(percentage=20).fit_transform(x_vec)
        #################################################################################
        # You can transform the biological time series dataset into images with the code below
Example #29
# Author: Johann Faouzi <*****@*****.**>
# License: BSD-3-Clause

import numpy as np
import matplotlib.pyplot as plt
from pyts.image import RecurrencePlot


# Create a toy time series using the sine function
time_points = np.linspace(0, 4 * np.pi, 1000)
x = np.sin(time_points)
X = np.array([x])

# Recurrence plot transformation
rp = RecurrencePlot(threshold=np.pi/18)
X_rp = rp.transform(X)

# Plot the time series and its recurrence plot
fig = plt.figure(figsize=(6, 6))

gs = fig.add_gridspec(2, 2,  width_ratios=(2, 7), height_ratios=(2, 7),
                      left=0.1, right=0.9, bottom=0.1, top=0.9,
                      wspace=0.05, hspace=0.05)

# Define the ticks and their labels for both axes
time_ticks = np.linspace(0, 4 * np.pi, 9)
time_ticklabels = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$',
                   r'$\frac{3\pi}{2}$', r'$2\pi$', r'$\frac{5\pi}{2}$',
                   r'$3\pi$', r'$\frac{7\pi}{2}$', r'$4\pi$']
value_ticks = [-1, 0, 1]
Example #30
import re

import numpy as np
import pytest
from scipy.sparse import csr_matrix
from pyts.classification import SAXVSM
from pyts.image import RecurrencePlot
from pyts.multivariate.transformation import MultivariateTransformer
from pyts.transformation import BOSS

n_samples, n_features, n_timestamps = 40, 3, 30
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features, n_timestamps)


@pytest.mark.parametrize(
    'params, error, err_msg',
    [({
        'estimator': [BOSS(), RecurrencePlot(),
                      SAXVSM()]
    }, ValueError, "Estimator 2 must be a transformer."),
     ({
         'estimator': [BOSS()]
     }, ValueError, "If 'estimator' is a list, its length must be equal to "
      "the number of features (1 != 3)"),
     ({
         'estimator': None
     }, TypeError, "'estimator' must be a transformer that inherits from "
      "sklearn.base.BaseEstimator or a list thereof.")])
def test_parameter_check(params, error, err_msg):
    """Test parameter validation."""
    transformer = MultivariateTransformer(**params)
    with pytest.raises(error, match=re.escape(err_msg)):
        transformer.fit_transform(X)