def initialize(self):
        """
        Create DTC model
        """
        # Create AE models
        self.autoencoder, self.encoder, self.decoder = temporal_autoencoder(
            input_dim=self.input_dim,
            timesteps=self.timesteps,
            n_filters=self.n_filters,
            kernel_size=self.kernel_size,
            strides=self.strides,
            pool_size=self.pool_size,
            n_units=self.n_units)
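
        # `temporal_autoencoder` returns the end-to-end autoencoder together
        # with separate encoder/decoder handles, so the latent space can be
        # fed to the clustering (and heatmap) branches below.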
        clustering_layer = TSClusteringLayer(self.n_clusters,
                                             alpha=self.alpha,
                                             dist_metric=self.dist_metric,
                                             name='TSClustering')(
                                                 self.encoder.output)
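
        # The clustering layer turns each latent sequence into soft cluster
        # assignments: similarities (shaped by `alpha` and `dist_metric`)
        # between the encoding and learnable centroids, normalized over the
        # `n_clusters` clusters, in the DEC family of models.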

        # Heatmap-generating network
        # Heatmap-generating network
        if self.heatmap:
            n_heatmap_filters = self.n_clusters  # one heatmap (class activation map) per cluster
            encoded = self.encoder.output
            heatmap_layer = Reshape((-1, 1, self.n_units[1]))(encoded)
            heatmap_layer = UpSampling2D((self.pool_size, 1))(heatmap_layer)
            heatmap_layer = Conv2DTranspose(n_heatmap_filters,
                                            (self.kernel_size, 1),
                                            padding='same')(heatmap_layer)
            # The next one is the heatmap layer we will visualize
            heatmap_layer = Reshape((-1, n_heatmap_filters),
                                    name='Heatmap')(heatmap_layer)
            heatmap_output_layer = GlobalAveragePooling1D()(heatmap_layer)
            # A dense layer must be added only if `n_heatmap_filters` differs from `n_clusters`
            # heatmap_output_layer = Dense(self.n_clusters, activation='relu')(heatmap_output_layer)
            # Normalize activations with softmax
            heatmap_output_layer = Softmax()(heatmap_output_layer)

            # Create DTC model with the heatmap as a third output
            self.model = Model(inputs=self.autoencoder.input,
                               outputs=[
                                   self.autoencoder.output, clustering_layer,
                                   heatmap_output_layer
                               ])
            # Create Heatmap model for visualization
            self.heatmap_model = Model(inputs=self.autoencoder.input,
                                       outputs=heatmap_layer)
        else:
            # Create DTC model
            self.model = Model(
                inputs=self.autoencoder.input,
                outputs=[self.autoencoder.output, clustering_layer])
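
# Sketch (assumption, not part of the original file): a typical way to train
# the multi-output model built above is one loss per output, e.g. a
# reconstruction loss plus a KL-divergence loss on the soft assignments
# (`dtc` and `gamma` are hypothetical names):
#
#   dtc.initialize()
#   dtc.model.compile(optimizer='adam',
#                     loss=['mse', 'kld'],        # one extra loss if heatmap=True
#                     loss_weights=[1.0, gamma])  # gamma balances the two terms
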
from datasets import load_data
import matplotlib.pyplot as plt
from hierarchical import n_clusters, get_labels
import numpy as np
import pandas as pd
from sklearn.metrics import silhouette_score
# Assumed location of `temporal_autoencoder` (the module name `TAE` follows
# the reference DTC implementation); the script uses it below.
from TAE import temporal_autoencoder

if __name__ == "__main__":
    dataset = load_data("../dataset/sjr_residencial.csv")

    # separate each account's hash from its time series
    hashes = dataset[:, 0].reshape(-1, 1)
    dataset = dataset[:, 1:].astype('float64')

    # Each row is a univariate series: reshape to (samples, timesteps, 1)
    # so the Conv1D-based autoencoder receives 3-D input, and pass
    # input_dim=1 with timesteps equal to the series length.
    dataset = dataset.reshape((-1, dataset.shape[1], 1))
    autoencoder, encoder, _ = temporal_autoencoder(input_dim=1,
                                                   timesteps=dataset.shape[1],
                                                   pool_size=7)
    autoencoder.compile('SGD',
                        loss='MSLE',
                        metrics=['MeanAbsoluteError', 'MeanSquaredError'])
    autoencoder.fit(x=dataset, y=dataset, epochs=20)

    new_x = autoencoder.predict(dataset)
    encoded_x = encoder.predict(dataset)
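
    # `new_x` holds the reconstructed series; `encoded_x` the latent codes
    # produced by the encoder (one compressed sequence per account)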

    # Flatten each sample's reconstruction and latent code into plain lists
    decoded_features = [x.ravel().tolist() for x in new_x]
    encoded_features = [x.ravel().tolist() for x in encoded_x]
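
    # Sketch (assumption, not in the original script): one way to score the
    # encoded representation, assuming `n_clusters` imported from
    # `hierarchical` is an int. scikit-learn's agglomerative clustering is
    # used here instead of the local `get_labels`, whose signature is
    # unknown.
    from sklearn.cluster import AgglomerativeClustering
    X = np.asarray(encoded_features)
    labels = AgglomerativeClustering(n_clusters=n_clusters).fit_predict(X)
    print("silhouette:", silhouette_score(X, labels))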