Example #1
    def filt(self,dataEEG,channels_labels):
        index_ref = []
        for i in range(len(channels_labels)):
            label = channels_labels[i].upper()
            if any(ref in label for ref in ('FP1', 'FP2', 'F7', 'F8')):
                index_ref.append(i)
        signal_size = dataEEG.shape
        nchannels = signal_size[0]
        nsamples = signal_size[1]
        index_data = np.arange(nchannels)
        index_data = np.delete(index_data,index_ref)
        transpose_dataEEG = np.transpose(dataEEG)
        dataRef = np.take(transpose_dataEEG,index_ref,axis=1) 
        dataEEG = np.take(transpose_dataEEG,index_data,axis=1)
        dataRef = np.transpose(dataRef) 
        dataEEG = np.transpose(dataEEG)
        
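        # JADE estimates an unmixing matrix W for the non-reference channels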
        W = jadeR(dataEEG)
        W = np.array(W)
        Y = np.dot(W,dataEEG)

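        # A second unmixing matrix V is estimated from the frontal reference channels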
        V = jadeR(dataRef)
        V = np.array(V)
        #T = np.dot(V,dataRef)
        
        Xpp = self.filterAdaptative(Y,dataEEG, dataRef, W, V, index_ref,index_data,nsamples)

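        # Re-insert the unprocessed reference channels at their original row indices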
        for i in range(0,np.size(index_ref)):
            Xpp = np.insert( Xpp, index_ref[i], transpose_dataEEG[:,index_ref[i]], axis=0)
        return Xpp
Example #2
    def filt(self, dataEEG, channels_labels):
        # This try/except ensures that no error is raised at runtime when the matrix W fails to converge.
        # If ICA cannot be applied, this method returns 'dataEEG' unchanged, i.e. the signal from the original '.edf' file without any processing.
        try:
            index_ref = []
            for i in range(len(channels_labels)):
                label = channels_labels[i].upper()
                if any(ref in label for ref in ('FP1', 'FP2', 'F7', 'F8')):
                    index_ref.append(i)
            signal_size = dataEEG.shape
            nchannels = signal_size[0]
            nsamples = signal_size[1]
            index_data = np.arange(nchannels)
            index_data = np.delete(index_data, index_ref)
            transpose_dataEEG = np.transpose(dataEEG)
            dataRef = np.take(transpose_dataEEG, index_ref, axis=1)
            dataEEG = np.take(transpose_dataEEG, index_data, axis=1)
            dataRef = np.transpose(dataRef)
            dataEEG = np.transpose(dataEEG)

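            # JADE estimates an unmixing matrix W for the non-reference channels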
            W = jadeR(dataEEG)
            W = np.array(W)
            Y = np.dot(W, dataEEG)

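            # A second unmixing matrix V is estimated from the frontal reference channels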
            V = jadeR(dataRef)
            V = np.array(V)
            #T = np.dot(V,dataRef)

            Xpp = self.filterAdaptative(Y, dataEEG, dataRef, W, V, index_ref,
                                        index_data, nsamples)
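            # Re-insert the unprocessed reference channels at their original row indices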
            for i in range(0, np.size(index_ref)):
                Xpp = np.insert(Xpp,
                                index_ref[i],
                                transpose_dataEEG[:, index_ref[i]],
                                axis=0)

            return Xpp

        except Exception:
            return dataEEG
Example #3
# Defining parameters of the experiment
algorithm = 'jade'
experiment = 'audio'
verbose = False

# Running experiment
if experiment == 'ecg3':
    xlim = 1000
    data = ECG_data(verbose=verbose).load()
    channels_3 = np.asarray(data[:3])

    if algorithm == 'jade':
        unmixing_mat = np.asarray(jadeR(channels_3))
    elif algorithm == 'fastICA':
        unmixing_mat, _, _ = fastICA(channels_3)
    else:
        print(f"Algorithm '{algorithm}' is not implemented: using jade")
        unmixing_mat = np.asarray(jadeR(channels_3))
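    # The estimated mixing matrix is the inverse of the unmixing matrix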
    A_hat = np.linalg.inv(unmixing_mat)

    # Plotting results of ICA
    y = np.dot(unmixing_mat, channels_3)

    plt.figure(figsize=(15.0, 4.0))
    n_mixtures = 3
    for i in range(n_mixtures):
        plt.subplot(1, n_mixtures, i + 1)
        plt.plot(y[i, :], linewidth=2)
        plt.xlim([0, xlim])
Example #4
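# Normalize each row of the mixing matrix so that it sums to 1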
sum_rows = mixing_matrix.sum(axis=1)
mixing_matrix = mixing_matrix / sum_rows.reshape(mixing_matrix.shape[0], 1)

# Loading Data
audio = Audio(nb_tracks=n_sources).load_tracks()
mixtures, mixing = audio.mix_tracks(load=False,
                                    dimension=n_mixtures,
                                    verbose=True,
                                    mixing_matrix=mixing_matrix)
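# Center and whiten the mixtures; R_init (the whitening matrix) is reused below for the Amari index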
_, R_init, _ = whiten(center(mixtures), zca=False)

# Performing ICA
if method in ('mica', 'ica'):
    if algorithm == 'jade':
        unmixing_mat = np.asarray(jadeR(mixtures))
        print("\n\n amari_index Jade",
              amari_index(np.dot(np.dot(unmixing_mat, R_init), mixing_matrix), 2))

    elif algorithm == 'fastICA':
        unmixing_mat, _, _ = fastICA(mixtures,
                                     init=False,
                                     A_init=mixing,
                                     n_iter=50)
    A_hat = np.linalg.inv(unmixing_mat)
    y = np.dot(unmixing_mat, mixtures)

    plt.figure(figsize=(15.0, 4.0))
Example #5
    def run(self):
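        # Record the elapsed time of this frame relative to the start of capture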
        self.times.append(time.time() - self.t0)
        self.frame_out = self.frame_in

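        # Equalize the grayscale histogram to reduce the effect of uneven lighting on detection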
        self.gray = cv2.equalizeHist(
            cv2.cvtColor(self.frame_in, cv2.COLOR_BGR2GRAY))
        col = (100, 255, 100)
        if self.find_faces:

            # put texts on window
            cv2.putText(self.frame_out, "Press 'S' to lock face and begin",
                        (10, 50), cv2.FONT_HERSHEY_PLAIN, 1.25, col)
            cv2.putText(self.frame_out, "Press 'Esc' to quit", (10, 75),
                        cv2.FONT_HERSHEY_PLAIN, 1.25, col)
            # initiating data
            self.data_buffer, self.times, self.trained = [], [], False

            # Detect the faces
            faces = list(
                self.face_cascade.detectMultiScale(
                    self.gray,
                    scaleFactor=1.3,
                    minNeighbors=4,
                    minSize=(50, 50),
                    flags=cv2.CASCADE_SCALE_IMAGE))
            # Keep the largest detected face (sorted by area w * h)

            if len(faces) > 0:
                faces.sort(key=lambda a: a[-1] * a[-2])
                self.face_rect = faces[-1]
            # roi = self.face_rect
            roi = self.get_subface_coord(self.rect_size)
            self.draw_rect(self.face_rect, col=(255, 0, 0))
            self.draw_rect(roi)
            x, y, w, h = self.face_rect
            cv2.putText(self.frame_out, "Face", (x, y), cv2.FONT_HERSHEY_PLAIN,
                        1.5, col)
            return
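        # face_rect == [1, 1, 2, 2] is the initial placeholder, meaning no face has been locked yet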
        if set(self.face_rect) == set([1, 1, 2, 2]):
            return
        cv2.putText(self.frame_out, "Press 'S' to restart", (10, 50),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, col)
        cv2.putText(self.frame_out, "Press 'D' to toggle data plot", (10, 75),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, col)
        cv2.putText(self.frame_out, "Press 'F' to save data", (10, 100),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, col)
        cv2.putText(self.frame_out, "Press 'Esc' to quit", (10, 125),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, col)
        roi = self.get_subface_coord(self.rect_size)
        # roi = self.face_rect
        self.draw_rect(roi)
        pixel_vals = self.getPixelMean(roi)
        # print(pixel_vals)

        self.data_buffer.append(pixel_vals)
        L = len(self.data_buffer)
        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            L = self.buffer_size
        processed = np.array(self.data_buffer)
        self.samples = np.transpose(processed)
        # np.transpose(self.samples)
        # print(self.samples.shape)

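        # Begin estimating only once more than 10 samples have accumulated in the buffer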
        if L > 10:
            x1, y1, w1, h1 = self.face_rect
            self.slices = [np.copy(self.frame_out[y1:y1 + h1, x1:x1 + w1, 1])]
            # print(len(self.samples))

            # transformer = FastICA(n_components=3,
            #                       random_state=0, max_iter=1000, tol=1)
            # X_transformed = transformer.fit_transform(self.samples)

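            # jadeR returns an unmixing matrix; here its inverse is applied to the buffered samples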
            X_transformed = jadeR(self.samples, m=3, verbose=False)
            X_ = np.matmul(np.linalg.inv(X_transformed), self.samples)
            X_ = np.array(X_)

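            # For each channel: interpolate onto an even time grid, window, FFT,
            # then track the dominant frequency inside the plausible heart-rate band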
            for i in range(len(self.samples)):
                # color = X_[i]
                color = self.samples[i]
                # y = self.butter_bandpass_filter(color, 0.8, 6, 100, 4)
                even_times = np.linspace(self.times[0], self.times[-1], L)
                interpolated = np.interp(even_times, self.times, color)
                # Apply a Hamming window to taper the signal at both ends
                interpolated = np.hamming(L) * interpolated
                # Remove the DC offset so the FFT is not dominated by the mean
                interpolated = interpolated - np.mean(interpolated)
                raw = np.fft.rfft(interpolated)
                arg = np.abs(raw)
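                # Convert bin frequencies to beats per minute (the factor 10 assumes ~10 frames per second)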
                self.freqs = np.fft.rfftfreq(L) * 10 * 60
                idx = np.where((self.freqs > 50) & (self.freqs < 160))

                # select the one within (50, 160)
                self.freqs = self.freqs[idx]
                self.fft[i] = arg[idx]
                # find the argmax
                peak = np.argmax(self.fft[i])
                if not self.last_peak:
                    self.last_peak = peak
                if (self.freqs[peak] -
                        self.freqs[self.last_peak]) <= self.diff:
                    self.last_peak = peak
            self.bpms[i].append(self.freqs[self.last_peak])

            # gap = (self.buffer_size - L) / self.fps
            # if gap:
            #     text = "(estimate: %0.1f bpm, wait %0.0f s)" % (self.bpm, gap)
            # else:
            #     text = "(estimate: %0.1f bpm)" % (np.mean(np.transpose(self.bpm)[-1]))
            text = "(estimate: %0.1f bpm)" % np.mean(np.transpose(self.bpms)[-1])

            tsize = 1
            x, y, w, h = self.get_subface_coord(self.rect_size)
            cv2.putText(self.frame_out, text, (int(x - w / 2), int(y)),
                        cv2.FONT_HERSHEY_PLAIN, tsize, col)