Example #1
 def update_data_representation(self):
     """ Update lines when changing representation options """
     smooth_amplitude = smoothing.smooth(self.amplitude,
                                         self.smoothing_octave,
                                         self.window_type)
     self.amplitude_repr = 20*np.log10(smooth_amplitude)
     self.amplitude_repr = self.amplitude_repr - np.mean(self.amplitude_repr)
     self.phase_repr = smoothing.smooth(self.phase, self.smoothing_octave,
                                        self.window_type)
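The smoothing.smooth helper used above is not shown. Given the (amplitude, smoothing_octave, window_type) signature and the dB conversion that follows, it presumably performs fractional-octave smoothing of a magnitude spectrum. A minimal sketch under that assumption, treating smoothing_octave as the N in "1/N octave" (the project's actual module may differ):

import numpy as np

def smooth(amplitude, smoothing_octave, window_type='rectangular'):
    """Approximate 1/N-octave smoothing: average each bin over a window
    whose width grows in proportion to the bin index (i.e. to frequency)."""
    amplitude = np.asarray(amplitude, dtype=float)
    out = np.empty_like(amplitude)
    for k in range(len(amplitude)):
        half_bw = max(1, int(k * (2 ** (1 / (2 * smoothing_octave)) - 1)))
        lo, hi = max(0, k - half_bw), min(len(amplitude), k + half_bw + 1)
        seg = amplitude[lo:hi]
        if window_type == 'hanning':
            w = np.hanning(len(seg)) + 1e-12   # avoid an all-zero weight vector
            out[k] = np.average(seg, weights=w)
        else:                                   # rectangular window
            out[k] = seg.mean()
    return out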
Example #2
def default_onset(y, fs, window_length=51, polyorder=3):
    # These parameters are taken directly from the paper
    n_fft = 1024
    hop_length = int(librosa.time_to_samples(1. / 200, sr=fs))
    n_mels = 138
    fmin = 27.5
    fmax = 16000.
    # The paper uses a log-frequency representation,
    # but for simplicity, we'll use a Mel spectrogram instead.
    # Note: S is not used by the default onset detector below.
    S = librosa.feature.melspectrogram(y=y,
                                       sr=fs,
                                       n_fft=n_fft,
                                       hop_length=hop_length,
                                       fmin=fmin,
                                       fmax=fmax,
                                       n_mels=n_mels)

    # Compute the onset strength envelope and detect onset events
    # using the librosa defaults.
    env_default = librosa.onset.onset_strength(y=y,
                                               sr=fs,
                                               hop_length=hop_length)
    env_default = smoothing.smooth(
        env_default, window_length,
        polyorder)  # window size 51, polynomial order 3
    onset_def = librosa.onset.onset_detect(onset_envelope=env_default,
                                           sr=fs,
                                           hop_length=hop_length,
                                           units='time')
    return onset_def
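The smoothing.smooth module itself is not part of this snippet. Since it is called with a window length of 51 and a polynomial order of 3, a Savitzky-Golay filter is a plausible stand-in; a minimal sketch under that assumption, built on scipy.signal.savgol_filter (the project's real implementation may differ):

import numpy as np
from scipy.signal import savgol_filter

def smooth(x, window_length=51, polyorder=3):
    """Smooth a 1-D onset-strength envelope with a Savitzky-Golay filter."""
    x = np.asarray(x, dtype=float)
    # savgol_filter needs an odd window no longer than the signal.
    window_length = min(window_length, len(x) if len(x) % 2 else len(x) - 1)
    if window_length <= polyorder:
        return x  # too short to smooth meaningfully
    return savgol_filter(x, window_length, polyorder)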
Example #3
def show_plot(reeb, epsilon):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.yaxis.set_visible(False)
    reeb = smoothing.smooth(reeb, epsilon)
    draw_reeb(reeb, ax)
    plt.show()
Example #4
def evaluate_file(sph_fname, txt_fname, classifier_model):
    probs = classifier.predict_pipeline(sph_fname, classifier_model, raw_prob=True)
    duration = probs.shape[0] * feature.FRAME_SIZE # number of frames * frame size
    y_hats = np.argmax(probs, axis=1)
    y_hats = smoothing.smooth(y_hats)
    ys = to_nparray(read_hub4_annotation(txt_fname), duration)
    return probs, y_hats, ys, p_r_f(ys, y_hats)
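Here smoothing.smooth is applied directly to the frame-level argmax labels, so its job is presumably to suppress isolated label flips. A minimal sketch of that idea for binary labels, using a sliding median filter (an assumption, not the project's actual implementation):

import numpy as np
from scipy.signal import medfilt

def smooth_labels(labels, kernel_size=11):
    """Remove isolated label flips by taking the median over a sliding window."""
    labels = np.asarray(labels, dtype=float)
    smoothed = medfilt(labels, kernel_size=kernel_size)  # kernel_size must be odd
    return smoothed.astype(int)

# e.g. smooth_labels([0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1], kernel_size=5)
# drops the isolated 1 but keeps the sustained run of 1s.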
Example #5
def show_plot(reeb, epsilon):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.yaxis.set_visible(False)
    reeb = smoothing.smooth(reeb, epsilon)
    draw_reeb(reeb, ax)
    plt.show()
Example #6
def main_direction(sob1, sob2):
    g = sob1+sob2*1j
    g1 = smoothing.smooth(g)
    g1 = np.abs(g1)
    g2 = smoothing.smooth(np.abs(g))
    r = g1/g2
    g = g/np.abs(g)
    g = g.ravel()
    x = g.real
    y = g.imag
    x1 = np.copy(x)
    y1 = np.copy(y)
    x1[x*y>0] = np.abs(x1[x*y>0])
    y1[x*y>0] = np.abs(y1[x*y>0])
    x1[x*y<0] = -np.abs(x1[x*y<0])
    y1[x*y<0] = np.abs(y1[x*y<0])
    g = x1+y1*1j
    # arctan2 avoids division by zero when sob1 == 0; the angle is folded
    # into [0, pi) since gradient orientation is defined modulo 180 degrees.
    theta = np.arctan2(sob2, sob1) % np.pi
    direction_theta = theta*180/np.pi
    value = {}
    for i in range(36):
        value[i] = []

    for t,v,rr in zip(direction_theta.ravel(), g, r.ravel()):
        value[int(np.around(t/5))%36].append(v*rr)
    sum_value = []
    for key in sorted(value.keys()):
        sum_value.append(np.sum(value[key]))
    sum_value = np.array(sum_value)
    x = sum_value.real
    y = sum_value.imag
    main_d = np.argmax(x**2+y**2)
    sob1 = np.ravel(sob1)
    sob2 = np.ravel(sob2)

    main_x, main_y = sob1[main_d], sob2[main_d]
    return x, y, main_x, main_y
Example #7
def show_multiplots(reeb):
    fig = plt.figure()
    num_plots = 12
    cols = int(math.sqrt(num_plots))
    rows = (num_plots - 1) // cols + 1
    crit_vals = smoothing.get_critical_vals(reeb)
    interval = (crit_vals[-1] - crit_vals[0]) / 2
    gs = gridspec.GridSpec(rows, cols)

    plots = []
    for i in range(num_plots):
        row = i // cols
        col = i % cols
        plots.append(fig.add_subplot(gs[row, col]))
        epsilon = interval * i / (num_plots - 1)
        new_reeb = smoothing.smooth(reeb, epsilon)
        plots[-1].set_title("epsilon = {:.2f}".format(epsilon))
        plots[-1].yaxis.set_visible(False)
        draw_reeb(new_reeb, plots[-1])
    fig.tight_layout()
    plt.show()
Example #8
def show_multiplots(reeb):
    fig = plt.figure()
    num_plots = 12
    cols = int(math.sqrt(num_plots))
    rows = (num_plots - 1) // cols + 1
    crit_vals = smoothing.get_critical_vals(reeb)
    interval = (crit_vals[-1] - crit_vals[0]) / 2
    gs = gridspec.GridSpec(rows, cols)

    plots = []
    for i in range(num_plots):
        row = i // cols
        col = i % cols
        plots.append(fig.add_subplot(gs[row, col]))
        epsilon = interval * i / (num_plots - 1)
        new_reeb = smoothing.smooth(reeb, epsilon)
        plots[-1].set_title("epsilon = {:.2f}".format(epsilon))
        plots[-1].yaxis.set_visible(False)
        draw_reeb(new_reeb, plots[-1])
    fig.tight_layout()
    plt.show()
Example #9
def imgDeymstify(inFolder, outFolder, model, name):
    print(name)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    torch.cuda.empty_cache()

    for idx, encoded in enumerate(os.listdir(inFolder)):
        print(idx)
        imgEncoded = torch.load(os.path.join(inFolder, encoded))
        imgEncoded = imgEncoded.to(device)
        print(f'the name is {encoded} and the type is {type(encoded)} and the output is initialized')
        out = torch.zeros(6, 10, 3, 128, 128)
        # Decode each encoded patch; no_grad avoids building an autograd graph
        # during inference.
        with torch.no_grad():
            for i in range(6):
                for j in range(10):
                    result = model.decode(imgEncoded[i, j, :, :, :].unsqueeze(0))
                    out[i, j] = result
        out1 = imgDetransformation(out)
        out1 = sm.smooth(out1, 6)
        norm_image = cv2.normalize(out1, None, alpha=0, beta=255,
                                   norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        norm_image = norm_image.astype(np.uint8)
        cv2.imwrite(os.path.join(outFolder, str(idx) + '.png'), norm_image)
        del out
        print('===============================================================================================')
Example #10
def superflux(y, fs, window_length=51, polyorder=3):
    # These parameters are taken directly from the paper
    n_fft = 1024
    hop_length = int(librosa.time_to_samples(1. / 200, sr=fs))
    lag = 2
    n_mels = 138
    fmin = 27.5
    fmax = 16000.
    max_size = 3
    # The paper uses a log-frequency representation,
    # but for simplicity, we'll use a Mel spectrogram instead.
    S = librosa.feature.melspectrogram(y=y,
                                       sr=fs,
                                       n_fft=n_fft,
                                       hop_length=hop_length,
                                       fmin=fmin,
                                       fmax=fmax,
                                       n_mels=n_mels)

    # Compute the onset strength envelope using the superflux method
    # (lag and max filtering on the dB-scaled mel spectrogram).
    envelope_sf = librosa.onset.onset_strength(S=librosa.power_to_db(
        S, ref=np.max),
                                               sr=fs,
                                               hop_length=hop_length,
                                               lag=lag,
                                               max_size=max_size)
    # smoothing the envelope
    # window size 51, polynomial order 3
    envelope_sf = smoothing.smooth(envelope_sf, window_length, polyorder)

    onset_sf = librosa.onset.onset_detect(onset_envelope=envelope_sf,
                                          sr=fs,
                                          hop_length=hop_length,
                                          units='time')
    return onset_sf
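A quick way to exercise superflux() end to end, assuming librosa >= 0.8 (for librosa.example) plus a smoothing.smooth implementation along the lines of the Savitzky-Golay sketch after Example #2:

import librosa

y, sr = librosa.load(librosa.example('trumpet'))
onset_times = superflux(y, sr)  # onset times in seconds
print(f"detected {len(onset_times)} onsets; first few: {onset_times[:5]}")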
Example #11
#     i += 1

# smoothing based upon filtering window (not gaussian)
# for f in ['hann','hamming','bartlett','blackman']:
#     smoothed = sm.smooth(spec.flux,window_len=5,window=f)
#     ax.step(spec.wave,smoothed/1e-18,where='mid',label=f'f={f}')

# #     smoothed_err = sm.smooth(spec.ferr,window_len=5,window=f)
#     smu,ssigma = norm.fit(smoothed/spec.ferr)
#     label = f'$\mu$={round(smu,2)}\n$\sigma$={round(ssigma,2)}'
#     ax2.plot(x,gaussian(x,smu,500,ssigma,0),label=label,color=f'C{i+1}')
#     i += 1

# smoothing based upon sigma (gaussian)
for s in np.arange(0.6, 1.2, 0.2):
    smoothed = sm.smooth(spec.flux, window_len=3, window='gaussian', sigma=s)
    ax.step(spec.wave, smoothed / 1e-18, where='mid', label=rf'$\sigma_G$={s:.1f}')

    #     smoothed_err = sm.smooth(spec.ferr, window_len=3, window='gaussian', sigma=s)
    smu, ssigma = norm.fit(smoothed / spec.ferr)
    # Escape \mu and \sigma so the backslashes reach matplotlib's mathtext intact.
    label = f'$\\mu$={round(smu, 2)}\n$\\sigma$={round(ssigma, 2)}'
    ax2.plot(x,
             gaussian(x, smu, 0.37, ssigma, 0),
             label=label,
             color=f'C{i+1}')
    i += 1

# -----------------------------------------------------------------------

ax.legend(ncol=3, handlelength=1)
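The sm.smooth calls above take a window length, a window name and, for the Gaussian case, a sigma. A minimal sketch of a compatible smoother that normalizes the kernel and keeps the output the same length as the input (an assumption about the sm module, not its actual code):

import numpy as np

def smooth(x, window_len=3, window='gaussian', sigma=1.0):
    """Convolve a 1-D spectrum with a normalized window (Gaussian by default)."""
    x = np.asarray(x, dtype=float)
    if window == 'gaussian':
        half = window_len // 2
        t = np.arange(-half, half + 1)
        w = np.exp(-0.5 * (t / sigma) ** 2)
    else:
        w = getattr(np, window)(window_len)  # e.g. np.hanning, np.blackman
    w = w / w.sum()
    return np.convolve(x, w, mode='same')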
Example #12
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from compressor import compress, decompress
from smoothing import smooth

if __name__ == '__main__':
    sample_rate = 48000
    silence_length = 0.4

    raw_data = np.fromfile('wtf.bin', dtype=np.int8)
    comp_data = compress(raw_data)

    smooth_data = smooth(comp_data, int(sample_rate * silence_length))

    # print(list(filter(lambda x: x > int(sample_rate * silence_length), zeros)))
    # print(list(filter(lambda x: x > int(sample_rate * silence_length), ones)))

    # plt.figure(figsize=(13, 8))
    #
    # ax = plt.subplot(1, 1, 1)
    # ax.set_title("Validation Data")
    #
    # ax.set_autoscaley_on(False)
    # ax.set_ylim([32, 43])
    # ax.set_autoscalex_on(False)
    # # ax.set_xlim([-126, -112])
    # plt.scatter(list(range(int(len(comp_data)))),
    #                  comp_data,
Example #13
    if args.cpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if args.train:
        start = time.perf_counter()
        X, Y = feature.extract_all(reader.read_wavs(args.train),
                                   train=True,
                                   binary_class=args.binary)
        model_path = classifier.train_pipeline(X, Y)
        print("============")
        print(f"model saved at {model_path}")
        print(f"time elapsed: {time.perf_counter()-start:0.4f} seconds")
        print("============")

    if args.segment:
        # include 'dat' file extension for Galaxy data files
        for wav in reader.read_wavs(args.segment[1],
                                    file_ext=['mp3', 'wav', 'mp4', 'dat']):
            start = time.perf_counter()
            model = classifier.load_model(args.segment[0])
            predicted = classifier.predict_pipeline(wav, model)
            smoothed = smoothing.smooth(predicted, int(args.threshold),
                                        args.binary)
            amp_segments = AmpSegment(wav[1], smoothed)

            if args.out:
                writer.save_json(amp_segments, wav, args.out)
            print(
                f"Finished {wav} in {time.perf_counter()-start:0.4f} seconds")
Example #14
 p = p**2
 # Deal with the cases of having even and odd numbers of data points.
 if n % 2 > 0:  # odd
     p[1:len(p)] = p[1:len(p)] * 2
 else:  # even
     p[1:len(p) - 1] = p[1:len(p) - 1] * 2
 # Create an array of frequencies, this will be our horizontal axis.
 freqArray = arange(0, nUniquePts, 1.0) * (framerate / n)
 # Define the power in decibels.
 audio_power = 10 * log10(p)
 # Here we clip the array data a bit, since human hearing (and hence
 # possible notes) lie in a subset of these original arrays.
 freqArray = freqArray[100:90000]
 audio_power = audio_power[100:90000]
 # Smooth the power data.
 smoothed_power = smooth(audio_power, window_len=101, window='blackman')
 # Now we can draw the power spectrum plot.
 ax2 = fig.add_subplot(3, 1, 2)
 ax2.plot(freqArray,
          audio_power,
          marker="",
          color="blue",
          linestyle="-",
          linewidth=1.0,
          alpha=1,
          label="Power")
 # The raw power spectrum data is pretty ratty, so we overplot with the
 # smoothed power spectrum, which forms a nicer line.
 ax2.plot(freqArray,
          smoothed_power,
          marker="",
Example #15
 p = p**2
 # Deal with the cases of having even and odd numbers of data points.
 if n % 2 > 0: # odd
     p[1:len(p)] = p[1:len(p)] * 2
 else: # even
     p[1:len(p) -1] = p[1:len(p) - 1] * 2
 # Create an array of frequencies, this will be our horizontal axis.
 freqArray = arange(0, nUniquePts, 1.0) * (framerate / n)
 # Define the power in decibels.
 audio_power = 10*log10(p)
 # Here we clip the array data a bit, since human hearing (and hence 
 # possible notes) lie in a subset of these original arrays.
 freqArray = freqArray[100:90000]
 audio_power = audio_power[100:90000]
 # Smooth the power data.
 smoothed_power = smooth(audio_power, window_len=101,window='blackman')
 # Now we can draw the power spectrum plot.
 ax2 = fig.add_subplot(3,1,2)
 ax2.plot(freqArray, audio_power, marker="", color="blue", linestyle="-", 
     linewidth=1.0, alpha=1, label="Power")
 # The raw power spectrum data is pretty ratty, so we overplot with the 
 # smoothed power spectrum, which forms a nicer line.
 ax2.plot(freqArray, smoothed_power, marker="", color="red", 
     linestyle="-", linewidth=1.5, alpha=1, label="Power")
 ax2.set_xlabel("Frequency [Hz]")
 ax2.set_ylabel("Power [dB]")
 # We enforce some reasonable limits. None of the sound files contain
 # fundamental notes beyond the 5th octave.
 ax2.set_xlim(10, 1000)
 ax2.set_ylim(median(audio_power) - 1*std(audio_power), 
     1.05*max(audio_power))
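The smooth(audio_power, window_len=101, window='blackman') call in the two power-spectrum examples matches the signature of the classic window-convolution smoother; a self-contained sketch in that style (an assumption about the helper these snippets actually import):

import numpy as np

def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D signal by convolving it with a normalized window."""
    x = np.asarray(x, dtype=float)
    if x.size < window_len:
        raise ValueError("Input must be at least as long as the window.")
    if window == 'flat':   # moving average
        w = np.ones(window_len)
    else:                  # 'hanning', 'hamming', 'bartlett', 'blackman'
        w = getattr(np, window)(window_len)
    # Reflect the signal at both ends to reduce edge artifacts.
    padded = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    y = np.convolve(padded, w / w.sum(), mode='valid')
    # Trim back to the original length.
    return y[(window_len // 2):(window_len // 2) + len(x)]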
Example #16
def animate_reeb(n, reeb, ax, delta):
    ax.clear()
    reeb = smoothing.smooth(reeb, delta * n)
    draw_reeb(reeb, ax)
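animate_reeb() has the (frame_number, *fargs) shape that matplotlib's FuncAnimation expects, so it is presumably driven by something like the following (reeb, delta, draw_reeb and smoothing are assumed to come from the surrounding project):

from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
# FuncAnimation calls animate_reeb(frame_index, reeb, ax, delta) for each frame.
anim = FuncAnimation(fig, animate_reeb, frames=30,
                     fargs=(reeb, ax, delta), interval=200)
plt.show()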
Example #17
       for item in snelheid_0_3:
           output = [(participant + 1), 1] + list(item)   # coded in the file as condition 1
           output_writer_lmer.writerow(output)

       for item in snelheid_3:
           output = [(participant + 1), 2] + list(item)   # coded in the file as condition 2
           output_writer_lmer.writerow(output)

       for item in snelheid_30:
           output = [(participant + 1), 3] + list(item)   # coded in the file as condition 3
           output_writer_lmer.writerow(output)
  
  ## smooth the data
  snelheid_30 = [sum(e)/len(e) for e in zip(*snelheid_30)]   
  snelheid_30_array = numpy.array(snelheid_30)
  snelheid_30 = smoothing.smooth(snelheid_30_array)
  
  snelheid_0_3 = [sum(e)/len(e) for e in zip(*snelheid_0_3)]
  snelheid_0_3array = numpy.array(snelheid_0_3)
  snelheid_0_3 = smoothing.smooth(snelheid_0_3array)
      
  snelheid_3 = [sum(e)/len(e) for e in zip(*snelheid_3)]
  snelheid_3array = numpy.array(snelheid_3)
  snelheid_3 = smoothing.smooth(snelheid_3array)
 
 ## merge the three conditions into one line per condition across all participants; these are used for plotting
  masssnelheid_3.append(snelheid_3)
  masssnelheid_0_3.append(snelheid_0_3)
  masssnelheid_30.append(snelheid_30)
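The per-condition averaging above combines trials element-wise with zip(*...). Assuming a list of equally long trial sequences (here called trials, a hypothetical name), the same result can be written as a column-wise mean:

import numpy
# Column-wise mean across a list of equally long trials, equivalent to
# [sum(e)/len(e) for e in zip(*trials)]
trials_mean = numpy.mean(numpy.array(trials), axis=0)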
  
  
Example #18
            x2, y2 = pygame.mouse.get_pos()
            pygame.display.flip()

        elif size < 5:
            size = 5

        if event.type == pygame.MOUSEBUTTONUP:
            drawing = False

            if len(fill) > 2:

                if abs(fill[0][0] - (fill[len(fill) - 1][0])) < size:
                    print("I'm working")
                    print(fill[0], fill[len(fill) - 1])
                    pygame.draw.polygon(w, color, fill)
                    smoothing.smooth(fill, 10)
                    pygame.display.flip()
                    fill = []
                    print("is it empty" + str(fill))

            fill = []
            if len(fill) < 2:
                pygame.draw.circle(w, color, (x1, y1), size)
                pygame.display.flip()

        if event.type == pygame.MOUSEMOTION and drawing:
            fill_point = pygame.mouse.get_pos()
            # print(str(fill_point))

            fill.append(fill_point)
Example #19
        if args.train.endswith('.npz'):
            import numpy
            npzarrays = numpy.load(args.train)
            X, Y = npzarrays['xs'], npzarrays['ys']
        else:
            X, Y = feature.extract_all(reader.read_audios(args.train),
                                       train=True,
                                       binary_class=True,
                                       persist=True)
        model_path = classifier.train_pipeline(X, Y)
        print("============")
        print("model saved at " + model_path)
        print("============")

    if args.segment:
        model = classifier.load_model(args.segment[0])
        for wav in reader.read_audios(args.segment[1],
                                      file_per_dir=args.numfiles):
            predicted = classifier.predict_pipeline(wav, model)
            smoothed = smoothing.smooth(predicted)
            speech_portions, total_frames = writer.index_frames(smoothed)
            audio_fname = os.path.join(*wav)
            writer.print_durations(speech_portions, audio_fname, total_frames)
            if args.out:
                print('writing files')
                writer.slice_speech(speech_portions, audio_fname)

    if args.evaluate:
        model = classifier.load_model(args.evaluate[0])
        evaluation.evaluate_files(args.evaluate[1], model, args.numfiles)
Example #20
def animate_reeb(n, reeb, ax, delta):
    ax.clear()
    reeb = smoothing.smooth(reeb, delta * n)
    draw_reeb(reeb, ax)
Example #21
    return x, y, main_x, main_y


if __name__ == '__main__':
    sigma_vv, sigma_vh, w_speed, w_direction, incidence = get_data(8)
#    plt.imshow(sigma_vv[0])
#    print(w_speed[0])
#    print(w_direction[0])
#    plt.show()
    #img = plt.imread('./sierra.jpg')
    #sigma_vv, sigma_vh, incidence = open_text('subset_0_of_S1B_IW_GRDH_1SDV_20170507T233247_20170507T233312_005499_009A2C_4206_Noise-Cor_Cal_Spk.csv.txt')
    #sigma_vv = np.reshape(sigma_vv, (1241, 1479))
    sigma_vv = sigma_vv[3]

    print(w_direction[3])
    sigma_vv = smoothing.smooth(sigma_vv)
    #sigma_vv = 10 * np.log10(sigma_vv)
    plt.figure(figsize=(20,15))
    plt.subplot(2, 3, 1)
    plt.imshow(sigma_vv, 'Greys')
    plt.title('origin')
    plt.subplot(2, 3, 2)
    sob1, sob2 = sobel(sigma_vv)
    sob1 = smoothing.smooth(sob1)
    sob2 = smoothing.smooth(sob2)
    norm = np.sqrt(sob1**2+sob2**2)
    plt.imshow(sob1, 'Greys')
    plt.title('sobel x')
    plt.subplot(2, 3, 3)
    plt.imshow(sob2, 'Greys')
    plt.title('sobel y')