Example #1
def F(t, θ):
    """Right-hand side of a Kuramoto-style model on an n x n lattice.

    Classic mean-field form: dθ_i/dt = ω_i + (K / N) * Σ_j sin(θ_j - θ_i).
    Relies on globals defined elsewhere: lattice size n, natural frequencies
    _ω (n x n), coupling strength K and Wiener filter window k_dim.
    """
    _θ = θ.reshape(n, n)
    dθ = np.zeros_like(_θ)
    f = lambda θ_i, θ_j: np.sin(θ_j - θ_i)
    for i in range(n):
        for j in range(n):
            # sine of the phase difference between oscillator (i, j) and every oscillator
            phase_difference = f(_θ[i, j], _θ)
            # smooth the coupling field with a Wiener filter before summing
            conv = wiener(phase_difference, k_dim)
            dθ[i, j] = _ω[i, j] + K * np.sum(conv)
    return dθ.flatten()
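A minimal driver for this right-hand side might look like the sketch below. The concrete values of the globals it relies on (n, K, k_dim, _ω) and the use of scipy.integrate.solve_ivp are illustrative assumptions, not part of the original example.

import numpy as np
from scipy.integrate import solve_ivp
from scipy.signal import wiener

n = 16                                   # assumed lattice side length
K = 1.0                                  # assumed coupling strength
k_dim = 3                                # assumed Wiener filter window
_ω = np.random.normal(0.0, 0.5, (n, n))  # assumed natural frequencies

θ0 = np.random.uniform(0, 2 * np.pi, n * n)              # random initial phases
sol = solve_ivp(F, (0.0, 10.0), θ0, t_eval=np.linspace(0, 10, 200))
phases = sol.y.T.reshape(-1, n, n)                        # (time, n, n) phase history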
Example #2
import scipy.signal as sig          # sig.wiener below
from skimage import filters         # threshold_sauvola, median


# im2gray, NS_transform, NS_alpha_mean and entropy are project helpers defined elsewhere.
def Proposed_method(im):
    gry_im = im2gray(im) / 255               # grayscale image scaled to [0, 1]
    filtered_im = sig.wiener(gry_im, Windo)  # Windo: filter window size, defined elsewhere
    T, I, F = NS_transform(filtered_im)      # neutrosophic (T, I, F) channels
    alpha = 0.001
    pre_entr = entropy(I)
    while True:
        T, I, F = NS_alpha_mean(T, I, F)
        entr = entropy(I)
        print(entr)
        # stop once the relative change in entropy drops below alpha
        if (entr - pre_entr) / pre_entr < alpha:
            break
        pre_entr = entr
    # binarize with Sauvola thresholding, then clean up with a median filter
    return filters.median(T > filters.threshold_sauvola(T))
Example #3
import numpy as np
import librosa
from scipy.signal import wiener


def convert_audio(path, audio_duration=3):
    """Given a path to an audio file, extract the log-scaled mel-spectrogram."""
    input_length = 44100 * audio_duration
    signal, sample_rate = librosa.load(path, sr=44100)
    signal, _ = librosa.effects.trim(signal, top_db=25)  # trim leading/trailing silence
    signal = wiener(signal)                              # Wiener-filter the waveform
    # force a fixed length: truncate long clips, zero-pad short ones
    if len(signal) > input_length:
        signal = signal[0:input_length]
    elif input_length > len(signal):
        pad_length = input_length - len(signal)
        signal = np.pad(signal, (0, pad_length), "constant")
    mel_spectrogram = librosa.feature.melspectrogram(
        y=signal, sr=sample_rate, n_fft=2048, hop_length=512, n_mels=128)
    lms = librosa.power_to_db(mel_spectrogram)
    lms = np.expand_dims(lms, axis=-1)  # add a trailing channel axis for CNN input
    return lms
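A hypothetical call might look like the following; the file name is an assumption, and the exact frame count depends on the librosa version.

lms = convert_audio("example.wav", audio_duration=3)  # "example.wav" is an assumed path
print(lms.shape)  # roughly (128, 259, 1): 128 mel bands, ~259 frames, one channel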
Example #4
import cv2
import matplotlib.pyplot as plt
from scipy.signal import wiener


def features_and_plot(lista_img, lista_mask, printing=False):
    """Equalize, Wiener-filter and extract Haralick features from each masked scan."""
    id_feat_list = []

    for cont in range(len(lista_img)):
        # Histogram-equalize the masked image
        dst = cv2.equalizeHist(lista_img[cont] * lista_mask[cont])
        # Wiener filter with a 3x3 window and a fixed noise-power estimate
        filtered_img = wiener(dst, (3, 3), noise=10)
        # Extract Haralick texture features (extract_haralick is defined elsewhere)
        feature_map, h_feature = extract_haralick(filtered_img)
        id_feat_list.append(h_feature)

        # Optionally show the equalized image next to its Haralick feature map
        if printing:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 10))
            fig.suptitle("Scans: " + str(cont + 1), y=0.8, fontsize=16)
            ax1.imshow(dst, cmap=plt.cm.gray)
            ax1.set_title("Equalized image (Masked)")
            ax2.imshow(feature_map, cmap=plt.cm.viridis)
            ax2.set_title("Haralick Features Map")
            plt.tight_layout()
    return id_feat_list
Example #5
# PRODUCE TEACHER SIGNAL
###########################

# generate a teaching signal orthogonal to the input
teach = np.loadtxt(
    '/home/federico/project/work/trunk/data/Berkeley/rad-auditory/wehr/Tools/'
    + datadir + '/1_.txt')
framepersec = len(teach) // 15  # frames per second, assuming the file spans 15 s
teach = teach[0:int(framepersec / (duration_rec / 1000))]  # duration_rec assumed in ms
# interpolate to match length
signal_ad = [np.linspace(0, duration_rec, len(teach)), teach]
ynew = np.linspace(0, duration_rec, nT + 1)
s = interpolate.interp1d(signal_ad[0], signal_ad[1], kind="linear")
teach_sig = s(ynew)
# get the envelope; alternatives tried: np.abs(sigtool.hilbert(teach_sig)),
# sigtool.detrend(teach_sig)
teach_sig = sigtool.wiener(teach_sig)
#teach_sig = sigtool.convolve(teach_sig,teach_sig)
teach_sig = smooth(teach_sig, window_len=smoothing_len, window='hanning')

#######################
# TEACH AND TEST
######################

for this_trial in range(1, num_trials_test + num_trials_teach):
    if this_trial <= num_trials_teach - 1:
        do_trial(what='teach', this_trial=this_trial, teacher=teach_sig)
    if this_trial > num_trials_teach:
        do_trial(what='test', this_trial=this_trial, teacher=teach_sig)

#imshow(np.reshape(res.ReadoutW['output'], (16,16)), interpolation='nearest')
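The smooth() helper used above is not part of this excerpt; a minimal stand-in, under the assumption that it is a normalized window moving average (the original implementation may differ), could be:

import numpy as np

def smooth(x, window_len=11, window='hanning'):
    # assumed stand-in, not the original helper: windowed moving average
    w = np.hanning(window_len) if window == 'hanning' else np.ones(window_len)
    return np.convolve(x, w / w.sum(), mode='same')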
Example #6
    output[x] = mags  # end of a loop (not shown in this excerpt) filling row x of output

#output[output<np.mean(output) + np.std(output)*9] = 0
#output[output>0] = 1


def rebin(arr, downSample0, downSample1):
    """Block-average arr by factors (downSample0, downSample1), cropping any remainder."""
    shape = (arr.shape[0] // downSample0, downSample0,
             arr.shape[1] // downSample1, downSample1)
    return arr[0:(arr.shape[0] // downSample0) * downSample0,
               0:(arr.shape[1] // downSample1) * downSample1].reshape(shape).mean(-1).mean(1)
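# Quick sanity check of rebin (illustrative values, not from the original script):
#   rebin(np.arange(16).reshape(4, 4), 2, 2)
#   -> array([[ 2.5,  4.5],
#             [10.5, 12.5]])    # each entry is the mean of one 2 x 2 block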


# Wiener-filter with a 25-sample window along axis 0 only
output = wiener(output, (25, 1))

# then block-average every binSize rows together
binSize = 4
output = rebin(output, binSize, 1)
#print(np.max(output))
#output[output<1] = 0
#output[output>8] = 1

# display fft waterfall type stuff
if display:
    offset = 1000
    img = output[offset:950 + offset, :]
    img = img / np.max(img)
    map_color = cv2.COLORMAP_HOT
    img = np.uint8(img * 255)
    img = cv2.applyColorMap(img, map_color)
Example #7
import cv2
from scipy.signal import wiener
from skimage.util import random_noise
from matplotlib import pyplot as plt

# load the image as grayscale and rescale it to the [0, 1] range
img = cv2.normalize(
    cv2.imread("Lenna.jpg", cv2.IMREAD_GRAYSCALE).astype("float"), None, 0.0, 1.0,
    cv2.NORM_MINMAX)
imgn = random_noise(img)        # add noise (Gaussian by default)
imgd1 = wiener(imgn, (3, 3))    # Wiener-filter with increasing window sizes
imgd2 = wiener(imgn, (5, 5))
imgd3 = wiener(imgn, (7, 7))

plt.subplot(131)
plt.imshow(imgd1, cmap='gray')
plt.subplot(132)
plt.imshow(imgd2, cmap='gray')
plt.subplot(133)
plt.imshow(imgd3, cmap='gray')
plt.show()
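As an optional follow-up (not part of the original snippet), the three window sizes can be compared numerically against the clean image, for example with scikit-image's PSNR metric:

from skimage.metrics import peak_signal_noise_ratio as psnr

for label, den in (("3x3", imgd1), ("5x5", imgd2), ("7x7", imgd3)):
    print(label, psnr(img, den, data_range=1.0))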
Example #8
from scipy.signal import wiener
from skimage.restoration import denoise_wavelet

def audioFilter(audio, samplingrate):
    filtered_audio = wiener(audio)  # Wiener-filter the raw audio signal
    denoise = denoise_wavelet(filtered_audio, method='BayesShrink', mode='soft',
                              wavelet_levels=3, wavelet='sym8', rescale_sigma=True)
    return list(denoise)
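A hypothetical call, assuming the audio is loaded as a floating-point signal (the file name is illustrative):

import librosa

audio, sr = librosa.load("speech.wav", sr=None)  # "speech.wav" is an assumed path
clean = audioFilter(audio, sr)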