def save(self, signal, target_path, orig_path, lr, iterations, i, bandwidth):
    """Write the adversarial waveform under generated_stage2/ and return its path.

    The output file is named ``<orig_id>_to_<target_id>_final.wav``, where the
    ids are parsed from ``self.audio_path`` and ``target_path`` respectively.
    ``orig_path``, ``lr``, ``iterations``, ``i`` and ``bandwidth`` are accepted
    for interface compatibility but are not used in this method.
    """
    out_dir = "generated_stage2"
    os.makedirs(out_dir, exist_ok=True)

    # torchaudio expects a (channels, samples) tensor; add the channel dim.
    batched = signal.unsqueeze(0)

    # presumably e.g. ".../p225_001.wav" -> "p225" and "p226.wav" -> "p226"
    # — TODO confirm the path naming scheme against the callers.
    source_id = self.audio_path.split("/")[-1].split('_')[0]
    target_id = target_path.split("/")[-1].split('.')[0]
    save_path = os.path.join(out_dir, f"{source_id}_to_{target_id}_final.wav")

    torchaudio.save(save_path, batched, sample_rate=16000, precision=32)
    print(f"Adversarial audio has been saved to {save_path}!")
    return save_path
# --- Esempio n. 2 (separator carried over from the source paste site; the
# stray "0" line below it was the paste's vote counter) ---
        self.activations = activations

    def __lt__(self, other):
        """Order instances by their per-position activation sums (dim 0).

        Note: the comparison yields a tensor, not a plain bool, mirroring
        the original behaviour relied on by the heap code below.
        """
        my_total = torch.sum(self.activations, dim=0)
        their_total = torch.sum(other.activations, dim=0)
        return my_total < their_total


import heapq
from collections import defaultdict
### Maximum Activations ###
# Per-channel heaps of the input signals that activate each unit the most.
d = defaultdict(list)
# Captum layer-activation extractor on the "conv8" module; `model` and
# `modules` are defined elsewhere in this file.
attr_algo = LayerActivation(model, modules["conv8"])

# Collect, for every channel of conv8, the 5 signals with the largest
# summed activation (ordering defined by neuron.__lt__, which makes each
# d[i] a min-heap on activation magnitude: the weakest kept entry is d[i][0]).
for signal in tqdm(X):
    # Activation for this single example (batch of one) at conv8's output.
    attributions = attr_algo.attribute(signal.unsqueeze(0).cuda(),
                                       attribute_to_layer_input=False)
    # print(attributions.shape)
    for i, act in enumerate(attributions[0]):
        entry = neuron(signal, act)
        if len(d[i]) < 5:
            heapq.heappush(d[i], entry)
        else:
            # BUG FIX: the original did heappush followed by d[i].pop(-1),
            # which evicts an arbitrary (last) list element rather than the
            # smallest — so the heap did not actually keep the top 5.
            # heappushpop pushes the new entry and removes the minimum.
            heapq.heappushpop(d[i], entry)

# %%
from ipywidgets import interact_manual, widgets


def get_top_activations_spectogram_for_unit(channel):
    # NOTE(review): this function appears unfinished/truncated in this chunk —
    # the plotting statements are commented out and `place` is never used.
    # From the name, it was intended to plot spectrograms of the top-activating
    # signals (from the d[] heaps built above) for one channel. TODO: confirm.
    # for u in d[]:
    place = 1
    # plt.figure(place)
# --- Esempio n. 3 (separator carried over from the source paste site; the
# stray "0" line below it was the paste's vote counter) ---
def analyze_ood(signal, range):
    """Visualize where an out-of-distribution `signal` falls among the data.

    Forwards the signal through every layer in `modules`, flattens the
    features, projects them with the pre-fitted `pca`, and shows them as a
    new point (label 200) in the 3-D scatter built from `df`. Then plots the
    raw signal and a neighbouring training signal, each with a perturbation
    window (from `purturbate`) highlighted in red.

    NOTE(review): the second parameter shadows the builtin `range`; renaming
    it would change the keyword-argument interface, so it is only flagged.
    Relies on module-level globals: modules, device, pca, df, X, points,
    model, purturbate, closest_node.
    """
    # Manual forward pass through the ordered layer dict.
    output = signal.unsqueeze(0)
    for i in modules:
        layer = modules[i]
        layer.eval().to(device)
        output = layer(output)

    # Flatten everything except the batch dimension for PCA.
    features = torch.flatten(output, start_dim=1)
    # print(features.shape)

    # Project onto the components of the already-fitted `pca` transformer.
    coordinates = pca.transform(features.cpu().detach().numpy())

    plotly.offline.init_notebook_mode()

    # print(coordinates)

    # One-row frame for the new point; label 200 gives it a distinct colour.
    new_df = pd.DataFrame({
        'X': [coordinates[0][0]],
        'Y': [coordinates[0][1]],
        'Z': [coordinates[0][2]],
        'label': [200]
    })

    # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed
    # in 2.0 — modern pandas needs pd.concat([df, new_df]) here.
    new_df = df.append(new_df)

    # Configure the trace.
    trace = px.scatter_3d(
        new_df,
        x='X',
        y='Y',
        z='Z',
        color='label',
    )

    trace.show()

    fig = plt.figure(figsize=(40, 40))

    # Perturbation window for the OOD signal itself.
    x1, y1 = purturbate(signal, range)

    #print(x1)
    #print(y1)

    # Index of the training point nearest to the projected coordinates.
    close = closest_node(coordinates, points)

    x2, y2 = purturbate(X[close], range)

    # Drop the nearest point, then find the second-nearest.
    new_points = list(points.copy())

    new_points.pop(close)

    close_2 = closest_node(coordinates, new_points)

    x3, y3 = purturbate(X[close_2], range)

    ##

    print("Out of Distribution Signal Prediction: ",
          int(torch.round(torch.sigmoid(model(signal.unsqueeze(0)))).item()))

    ax = fig.add_subplot(411)
    # print(signal)
    # Collapse channels to a single magnitude trace for plotting.
    signal = torch.abs(torch.sum(signal, dim=0))
    # print((signal).shape)
    # print(signal)
    ax.plot(signal)

    ax.set_xlabel('Time')

    ax.set_ylabel('Motion')

    # Translucent red rectangle marking the perturbation window [x1, y1].
    rect = Rectangle((x1, 0),
                     y1 - x1,
                     20,
                     color='red',
                     fc=(1, 0, 0, 0.2),
                     ec=(0, 0, 0, 1))
    plt.ylim((0, 20))

    ax.add_patch(rect)

    plt.show()

    ##
    print("Closest Signal Prediction: ",
          int(torch.round(torch.sigmoid(model(X[close].unsqueeze(0)))).item()))

    fig = plt.figure(figsize=(40, 40))

    ax = fig.add_subplot(411)

    # NOTE(review): likely a bug — the print above scores X[close], but this
    # plots X[close_2] with the (x3, y3) window, while (x2, y2) computed for
    # X[close] are never used. Confirm which neighbour was intended.
    close_signal = torch.abs(torch.sum(X[close_2], dim=0))
    ax.plot(close_signal)

    ax.set_xlabel('Time')

    ax.set_ylabel('Motion')

    rect = Rectangle((x3, 0),
                     y3 - x3,
                     20,
                     color='red',
                     fc=(1, 0, 0, 0.2),
                     ec=(0, 0, 0, 1))
    plt.ylim((0, 20))

    ax.add_patch(rect)

    plt.show()