示例#1
0
def calculate_metric(predictions, labels, signal='pulse', window_size=360, fs=30, bpFlag=True):
    """Windowed rate estimation from predicted vs. reference signals.

    Splits the series into fixed-size windows, optionally band-pass filters
    the prediction, estimates the dominant in-band frequency per window via
    periodogram, and aggregates the per-window errors.

    Returns (MAE, RMSE, meanSNR, HR0, HR) where HR0 are label-derived rates
    and HR the prediction-derived rates, one entry per analysis window.
    """
    # Physiological band: 0.75-2.5 Hz for pulse (2.5 -> 1.7 per the original
    # note), 0.08-0.5 Hz for respiration.
    lo, hi = (0.75, 2.5) if signal == 'pulse' else (0.08, 0.5)
    [b, a] = butter(1, [lo / fs * 2, hi / fs * 2], btype='bandpass')

    total = len(predictions)
    HR_pred, HR0_pred, mySNR = [], [], []
    for start in range(0, total, window_size):
        stop = start + window_size
        if start == 0 and stop > total:
            # Series shorter than a single window: use everything we have.
            pred_window = predictions
            label_window = labels
        elif stop > total:
            # Discard the incomplete trailing window.
            break
        else:
            pred_window = predictions[start:stop]
            label_window = labels[start:stop]

        # Integrate the rate-of-change signal; detrend pulse only.
        if signal == 'pulse':
            pred_window = detrend(np.cumsum(pred_window), 100)
        else:
            pred_window = np.cumsum(pred_window)

        label_window = np.squeeze(label_window)
        if bpFlag:
            pred_window = scipy.signal.filtfilt(b, a, np.double(pred_window))

        pred_window = np.expand_dims(pred_window, 0)
        label_window = np.expand_dims(label_window, 0)

        # Periodogram of the prediction; mask to the physiological band.
        f_prd, pxx_pred = scipy.signal.periodogram(pred_window, fs=fs, nfft=4 * window_size, detrend=False)
        fmask_pred = np.argwhere((f_prd >= lo) & (f_prd <= hi))
        pred_window = np.take(f_prd, fmask_pred)

        # Periodogram of the label with the same band mask.
        f_label, pxx_label = scipy.signal.periodogram(label_window, fs=fs, nfft=4 * window_size, detrend=False)
        fmask_label = np.argwhere((f_label >= lo) & (f_label <= hi))
        label_window = np.take(f_label, fmask_label)

        temp_HR, temp_HR_0 = calculate_HR(pxx_pred, pred_window, fmask_pred, pxx_label, label_window, fmask_label)
        temp_SNR = calculate_SNR(pxx_pred, f_prd, temp_HR_0, signal)
        HR_pred.append(temp_HR)
        HR0_pred.append(temp_HR_0)
        mySNR.append(temp_SNR)

    HR = np.array(HR_pred)
    HR0 = np.array(HR0_pred)
    mySNR = np.array(mySNR)

    MAE = np.mean(np.abs(HR - HR0))
    RMSE = np.sqrt(np.mean(np.square(HR - HR0)))
    meanSNR = np.nanmean(mySNR)
    return MAE, RMSE, meanSNR, HR0, HR
示例#2
0
文件: speechfft.py 项目: v3551G/AAI
def main():
    """Plot a smoothed speech segment above its Fourier power spectrum and
    save the figure to images/fft.png."""
    args = parse()
    file = args.file
    # Resolve companion files; the EGG signals are loaded (and normalized)
    # exactly as in the sibling scripts even though only speech is plotted.
    speech_path = os.path.join(args.speechfolder, file)
    true_egg_path = os.path.join(args.eggfolder, file)
    estimated_egg_path = os.path.join(args.geneggfolder, file)

    speech = smooth(minmaxnormalize(np.load(speech_path)), 21)

    true_egg = minmaxnormalize(np.load(true_egg_path))
    if args.detrend:
        _, true_egg = detrend(None, true_egg)

    estimated_egg = minmaxnormalize(np.load(estimated_egg_path))

    # srange = slice(0, speech.shape[0])
    srange = slice(10700, 15400)
    speech = speech[srange]

    plt.figure()
    fig = plt.gcf()
    plt.subplot(211)
    plt.title("Speech")
    plt.plot(speech, "k", label="Speech Waveform")
    plt.xlabel("Sample")
    plt.ylabel("Amplitude")

    # Real-input FFT; frequency axis assumes a 16 kHz sampling rate.
    spectrum = np.fft.rfft(speech)
    freqs = np.fft.rfftfreq(np.size(speech, 0), 1 / 16e3)

    assert freqs.shape == spectrum.shape

    plt.subplot(212)
    plt.title("Fourier Spectra")
    plt.gca().semilogx(freqs, np.abs(spectrum)**2, "b", label="DFT")
    plt.xlabel("Frequency")
    plt.ylabel("PSD")

    plt.subplots_adjust(top=0.926, bottom=0.117, left=0.078,
                        right=0.981, hspace=0.476, wspace=0.2)
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()

    plt.show()
    fig.savefig("images/fft.png")
示例#3
0
def detrend_data(averaged_data):
    """Detrend the time series, taking away long-term effects.

    Returns a copy of the input frame where temperature and humidity means
    of each mote (except the excluded mote ids) have been detrended.
    """
    detrended_data = averaged_data.copy()
    excluded = {5, 15, 28, 57}  # motes left untouched
    for moteid in range(1, 59):
        if moteid in excluded:
            continue
        for feature in ('temperature_mean', 'humidity_mean'):
            row_mask = detrended_data['moteid'] == moteid
            detrended_data.loc[row_mask, feature] = detrend(
                averaged_data, moteid, feature)
    return detrended_data
示例#4
0
def extract_gci_metrics(true_egg, estimated_egg, detrend_egg=False, fs=16e3):
    """Compute glottal-closure-instant (GCI) detection metrics.

    Parameters
    ----------
    true_egg : str or array
        Reference EGG signal, or path to a .npy file containing it.
    estimated_egg : str or array
        Estimated EGG signal, or path to a .npy file containing it.
    detrend_egg : bool
        If True (and ``true_egg`` was given as a path), detrend the
        reference signal after loading.
    fs : float
        Sampling rate in Hz, used to convert sample indices to seconds.

    Returns
    -------
    dict
        Naylor-style metrics; includes "true_file" when a path was given.
    """
    fnames = {}
    # isinstance() is the idiomatic type check (was `type(...) is str`).
    if isinstance(true_egg, str):
        fnames = {"true_file": true_egg}
        true_egg = np.load(true_egg)
        if detrend_egg:
            _, true_egg = detrend(None, true_egg)
    if isinstance(estimated_egg, str):
        estimated_egg = np.load(estimated_egg)

    # Trim the reference so both signals cover the same span.
    if len(true_egg) > len(estimated_egg):
        true_egg = true_egg[:len(estimated_egg)]

    true_egg, estimated_egg = detect_voiced_region(true_egg, estimated_egg)
    # Detected sample indices are converted to seconds before scoring.
    true_gci = detectgroundwaveletgci(true_egg) / fs
    estimated_gci = detectgenwaveletgci(estimated_egg) / fs

    metrics = corrected_naylor_metrics(true_gci, estimated_gci)
    metrics.update(fnames)
    return metrics
示例#5
0
def main():
    """Plot a speech waveform above its ground-truth and estimated EGG
    signals on a shared x-axis."""
    args = parse()
    fname = args.file

    speech = minmaxnormalize(np.load(os.path.join(args.speechfolder, fname)))

    true_egg = minmaxnormalize(np.load(os.path.join(args.eggfolder, fname)))
    if args.detrend:
        _, true_egg = detrend(None, true_egg)

    estimated_egg = minmaxnormalize(
        np.load(os.path.join(args.geneggfolder, fname)))

    # region = slice(11000, 12000)
    region = slice(0, len(speech))

    plt.figure()
    fig = plt.gcf()
    top_axes = plt.subplot(211)
    plt.plot(speech[region], "k", label="Speech Waveform")
    plt.ylabel("Amplitude")

    plt.subplot(212, sharex=top_axes)
    plt.plot(true_egg[region], "k", label="Ground Truth EGG")
    plt.plot(estimated_egg[region], "b", label="Estimated EGG")
    plt.xlabel("Sample Number")
    plt.ylabel("Amplitude")

    plt.subplots_adjust(
        top=0.962, bottom=0.087, left=0.057, right=0.981, hspace=0.212, wspace=0.2
    )

    window_manager = plt.get_current_fig_manager()
    window_manager.window.showMaximized()

    plt.show()
示例#6
0
def main():
    """Overlay a normalized ground-truth EGG with its power envelope."""
    args = parse()
    eground = np.load(os.path.join(args.groundpath, args.filename))
    if args.detrend:
        _, eground = detrend(None, eground)

    envelope = get_envelope(eground)

    fig = plt.figure()
    plt.title("Voicing Visualization")
    # Normalize the EGG to unit peak so both curves share a scale.
    plt.plot(eground / np.max(np.abs(eground)), "r", label="ground truth")
    plt.plot(envelope, "g", label="envelope")
    plt.xlabel("sample")
    plt.ylabel("amplitude")
    plt.legend()

    fig.tight_layout()
    manager = plt.get_current_fig_manager()
    manager.window.showMaximized()
    plt.show()
示例#7
0
def extract_quotient_metrics(true_egg,
                             estimated_egg,
                             detrend_egg=False,
                             fs=16e3):
    """Compute averaged glottal quotients (CQ, OQ, SQ) for a reference and
    an estimated EGG signal.

    Both signals may be arrays or paths to .npy files; when ``true_egg`` is
    a path it is recorded in the result under "true_file". Returns a dict
    with CQ/OQ/SQ averaged over all detected glottal cycles, for both the
    true and the estimated signal.
    """

    # Boolean mask of voiced samples, derived from the smoothed power of the
    # amplitude-normalised reference signal.
    def _detect_voiced_region_as_regions(true_egg,
                                         reconstructed_egg,
                                         power_threshold=0.05):
        # Smoothed instantaneous power: convolution with a unit-sum window.
        def _get_signal_power(x, window):
            power = np.convolve(x**2, window / window.sum(), mode="same")
            return power

        def _get_window(window_len=10, window="flat"):
            if window == "flat":  # average
                w = np.ones(window_len, "d")
            else:
                # Looks up the numpy window function by name (e.g. hanning).
                w = eval("np." + window + "(window_len)")

            return w

        # Robust amplitude scale: median of the 100 largest absolute values.
        true_scaler = pd.Series(np.abs(true_egg)).nlargest(100).median()
        reconstructed_scaler = (pd.Series(
            np.abs(reconstructed_egg)).nlargest(100).median())

        true_egg = true_egg / true_scaler
        reconstructed_egg = reconstructed_egg / reconstructed_scaler

        window = _get_window(window_len=501, window="hanning")
        power = _get_signal_power(true_egg, window)

        regions = power >= power_threshold
        # true_egg_voiced = true_egg[regions]
        # reconstructed_egg_voiced = reconstructed_egg[regions]

        return regions

    fnames = {}
    if type(true_egg) is str:
        fnames = {"true_file": true_egg}
        true_egg = np.load(true_egg)
        if detrend_egg:
            _, true_egg = detrend(None, true_egg)
    if type(estimated_egg) is str:
        estimated_egg = np.load(estimated_egg)

    # true_egg = lowpass(true_egg)
    # estimated_egg = lowpass(estimated_egg)

    # Trim the reference so both signals have the same length.
    if len(true_egg) > len(estimated_egg):
        true_egg = true_egg[:len(estimated_egg)]

    regions = _detect_voiced_region_as_regions(true_egg, estimated_egg)

    # Glottal closure instants (sample indices) for both signals.
    true_gci = detectgroundwaveletgci(true_egg)
    estimated_gci = detectgenwaveletgci(estimated_egg)

    true_egg, estimated_egg = groundeggfilter(true_egg), geneggfilter(
        estimated_egg)
    # DEGG: first derivative of the EGG.
    true_degg = np.gradient(true_egg, edge_order=2)
    estimated_degg = np.gradient(estimated_egg, edge_order=2)

    # Keep only GCIs that fall inside voiced regions.
    true_gci = positions2onehot(true_gci, regions.shape) * regions
    estimated_gci = positions2onehot(estimated_gci, regions.shape) * regions
    true_gci = np.nonzero(true_gci)[0]
    estimated_gci = np.nonzero(estimated_gci)[0]

    # Glottal opening instants: the DEGG maximum between consecutive GCIs.
    true_goi = []
    estimated_goi = []
    for i in range(true_gci.shape[0] - 1):
        true_goi.append(true_gci[i] +
                        np.argmax(true_degg[true_gci[i] + 1:true_gci[i + 1]]) +
                        1)
    for i in range(estimated_gci.shape[0] - 1):
        estimated_goi.append(estimated_gci[i] +
                             np.argmax(estimated_degg[estimated_gci[i] +
                                                      1:estimated_gci[i +
                                                                      1]]) + 1)

    true_goi = np.array(true_goi)
    estimated_goi = np.array(estimated_goi)

    # Label contiguous voiced regions so events can be grouped per region.
    labelregions = label(regions)[0]

    true_goi = positions2onehot(true_goi, regions.shape)
    estimated_goi = positions2onehot(estimated_goi, regions.shape)
    true_gci = positions2onehot(true_gci, regions.shape)
    estimated_gci = positions2onehot(estimated_gci, regions.shape)

    # Encode GCIs as -1 and GOIs as +1 on a shared timeline.
    true_peaks = -true_gci + true_goi
    estimated_peaks = -estimated_gci + estimated_goi

    # For each voiced region, trim the event sequence so it starts and ends
    # on a GCI; the assert checks the events then strictly alternate.
    tpeaks = []
    true_degg_list = []
    for r in find_objects(labelregions):
        tregion = true_peaks[r]
        true_degg_region = true_degg[r]
        tpos = np.nonzero(tregion)[0]

        if len(tpos) < 2:
            continue

        if tregion[tpos[0]] > 0:
            tpos = tpos[1:]
        if tregion[tpos[-1]] > 0:
            tpos = tpos[:-1]
        assert len(tpos) % 2 == 1
        tregion = tregion[tpos[0]:tpos[-1] + 1]
        tpeaks.append(tregion)
        true_degg_list.append(true_degg_region[tpos[0]:tpos[-1] + 1])

    # Same trimming for the estimated signal's event sequence.
    epeaks = []
    estimated_degg_list = []
    for r in find_objects(labelregions):
        eregion = estimated_peaks[r]
        estimated_degg_region = estimated_degg[r]
        epos = np.nonzero(eregion)[0]

        if len(epos) < 2:
            continue

        if eregion[epos[0]] > 0:
            epos = epos[1:]
        if eregion[epos[-1]] > 0:
            epos = epos[:-1]
        assert len(epos) % 2 == 1
        eregion = eregion[epos[0]:epos[-1] + 1]
        epeaks.append(eregion)
        estimated_degg_list.append(estimated_degg_region[epos[0]:epos[-1] + 1])

    true_peaks_list = [np.nonzero(t)[0] for t in tpeaks]
    estimated_peaks_list = [np.nonzero(e)[0] for e in epeaks]

    metrics = {
        "CQ_true": 0,
        "OQ_true": 0,
        "SQ_true": 0,
        "CQ_estimated": 0,
        "OQ_estimated": 0,
        "SQ_estimated": 0,
    }

    # Accumulate quotients over every GCI->GOI->GCI cycle, then average.
    count = 0
    sq_count = 0
    for tr, tr_degg in zip(true_peaks_list, true_degg_list):
        for i in range(1, tr.shape[0], 2):
            count += 1
            metrics["CQ_true"] += (tr[i + 1] - tr[i]) / (tr[i + 1] - tr[i - 1])
            metrics["OQ_true"] += (tr[i] - tr[i - 1]) / (tr[i + 1] - tr[i - 1])

        temp = extract_speed_quotient(tr_degg, tr)
        metrics["SQ_true"] += temp[0]
        sq_count += temp[1]

    metrics["SQ_true"] /= sq_count
    metrics["CQ_true"] /= count
    metrics["OQ_true"] /= count

    count = 0
    sq_count = 0
    for er, er_degg in zip(estimated_peaks_list, estimated_degg_list):
        for i in range(1, er.shape[0], 2):
            count += 1
            metrics["CQ_estimated"] += (er[i + 1] - er[i]) / (er[i + 1] -
                                                              er[i - 1])
            metrics["OQ_estimated"] += (er[i] - er[i - 1]) / (er[i + 1] -
                                                              er[i - 1])

        temp = extract_speed_quotient(er_degg, er)
        metrics["SQ_estimated"] += temp[0]
        sq_count += temp[1]
    metrics["SQ_estimated"] /= sq_count
    metrics["CQ_estimated"] /= count
    metrics["OQ_estimated"] /= count

    metrics.update(fnames)
    return metrics
示例#8
0
    env = PctChange(env)
    env = EWMA(env, alpha=alpha)
    return env


env_name = "buy_sell_hold_pct_ewma"
register_env(env_name, lambda config: buy_sell_hold_pct_ewma(config))

if __name__ == "__main__":
    ray.init()
    open_prices_detrended = (quandl.get(
        'WIKI/MSFT',
        start_date="2014-01-01",
        end_date="2017-01-01",
        api_key=my_secrets.quandl_api_key).assign(
            Open=lambda df: detrend(df['Open'])).assign(
                Open=lambda df: df['Open'] - df['Open'].min() + 1)['Open'])

    tune.run_experiments({
        "PPO_Detrended": {
            "run": "PPO",
            "stop": {
                "time_total_s": 60 * 10,
            },
            "checkpoint_at_end": True,
            "checkpoint_freq": 20,
            "config": {
                "env": env_name,
                "num_workers": 2,  # parallelism
                "lr": grid_search([5e-4, 5e-5]),  # try different lrs
                "train_batch_size": grid_search([4_000]),
示例#9
0
def pattern_detector(
    matrices,
    kernel,
    pattern_type="loops",
    precision=4.0,
    area=8,
    undetermined_percentage=1.,
    labels=None,
):
    """Detect patterns (loops or borders) in contact matrices by correlating
    each detrended matrix with a template kernel.

    Parameters
    ----------
    matrices : np.ndarray or iterable of np.ndarray
        A single matrix or a collection of matrices to scan.
    kernel : np.ndarray
        Template pattern correlated against each matrix.
    pattern_type : str
        "loops" keeps off-diagonal peaks with 2 < |i - j| < 5000;
        "borders" keeps only diagonal peaks.
    precision : float
        Detection threshold = median + precision * std of the correlation map.
    area : int
        Half-size of the window extracted around each detected pattern.
    undetermined_percentage : float
        Maximum percentage of undetermined (== 1.) bins tolerated per window.
    labels : str, iterable or None
        Names for the matrices; defaults to their indices.

    Returns
    -------
    (list, np.ndarray)
        Per-matrix detections [name, i, j, score] (or "NA" entries), and the
        element-wise median of all detected pattern windows.
    """
    if isinstance(matrices, np.ndarray):
        matrix_list = [matrices]
    else:
        matrix_list = matrices

    if labels is None:
        labels = range(len(matrix_list))
    elif isinstance(labels, str):
        labels = [labels]

    pattern_windows = []  # list containing all pannel of detected patterns
    pattern_sums = np.zeros(
        (area * 2 + 1, area * 2 + 1)
    )  # sum of all detected patterns
    agglomerated_pattern = np.zeros(
        (area * 2 + 1, area * 2 + 1)
    )  # median of all detected patterns
    detected_patterns = []
    n_patterns = 0

    for matrix, name in zip(matrix_list, labels):

        detrended, threshold_vector = utils.detrend(matrix)
        matrix_indices = np.where(matrix.sum(axis=0) > threshold_vector)
        n = matrix.shape[0]

        res2 = utils.corrcoef2d(
            detrended, kernel, centered_p=False
        )  # !!  Here the pattern match  !!
        res2[np.isnan(res2)] = 0.0
        n2 = res2.shape[0]
        # Embed the (smaller) correlation map back into matrix coordinates.
        res_rescaled = np.zeros(np.shape(matrix))
        res_rescaled[
            np.ix_(
                range(int(area), n2 + int(area)),
                range(int(area), n2 + int(area)),
            )
        ] = res2
        # Detection threshold from the distribution of correlation values.
        vect_values = np.reshape(res_rescaled, (1, n ** 2))[0]
        thr = np.median(vect_values) + precision * np.std(vect_values)
        res_rescaled = np.triu(res_rescaled)
        res_rescaled[(res_rescaled) < 0] = 0
        pattern_peak = utils.picker(res_rescaled, thr)

        if pattern_peak != "NA":
            if pattern_type == "loops":
                # Assume all loops are not found too far-off in the matrix
                mask = (
                    np.array(abs(pattern_peak[:, 0] - pattern_peak[:, 1]))
                    < 5000
                )
                pattern_peak = pattern_peak[mask, :]
                mask = (
                    np.array(abs(pattern_peak[:, 0] - pattern_peak[:, 1])) > 2
                )
                pattern_peak = pattern_peak[mask, :]
            elif pattern_type == "borders":
                # Borders are always on the diagonal
                mask = (
                    np.array(abs(pattern_peak[:, 0] - pattern_peak[:, 1])) == 0
                )
                pattern_peak = pattern_peak[mask, :]
            for peak in pattern_peak:
                if peak[0] in matrix_indices[0] and peak[1] in matrix_indices[0]:
                    # Order the coordinates so p1 <= p2 (upper triangle).
                    p1, p2 = int(peak[0]), int(peak[1])
                    if p1 > p2:
                        p1, p2 = p2, p1
                    if (
                        p1 - area >= 0
                        and p1 + area + 1 < n
                        and p2 - area >= 0
                        and p2 + area + 1 < n
                    ):
                        window = detrended[
                            np.ix_(
                                range(p1 - area, p1 + area + 1),
                                range(p2 - area, p2 + area + 1),
                            )
                        ]
                        if (
                            len(window[window == 1.])
                            < ((area * 2 + 1) ** 2)
                            * undetermined_percentage
                            / 100.
                        ):  # there should not be many indetermined bins
                            n_patterns += 1
                            score = res_rescaled[peak[0], peak[1]]
                            detected_patterns.append([name, peak[0], peak[1], score])
                            pattern_sums += window
                            pattern_windows.append(window)
                        else:
                            detected_patterns.append([name, "NA", "NA", "NA"])
        else:
            detected_patterns.append([name, "NA", "NA", "NA"])

    # Agglomerated pattern: element-wise median over ALL detected windows.
    # Bug fix: the original loop iterated range(1, len(pattern_windows)) and
    # always skipped the first detected window (yielding all-NaN when only
    # one pattern was found).
    if pattern_windows:
        agglomerated_pattern = np.median(np.stack(pattern_windows), axis=0)
    else:
        # No detections: keep the original NaN-filled result.
        agglomerated_pattern[:] = np.nan

    return detected_patterns, agglomerated_pattern
示例#10
0
文件: egg_cycle.py 项目: v3551G/AAI
def main():
    """Annotated walk-through of a single EGG cycle.

    Plots the (optionally detrended) ground-truth EGG and its derivative
    (DEGG), then marks glottal opening instants (GOI), glottal closure
    instants (GCI), EGG peaks and cycle ends for two consecutive cycles.

    NOTE(review): timepoints t0..t9 are hard-coded sample indices for one
    specific recording — they are only meaningful for that file; confirm
    before reusing with other data.
    """
    args = parse()
    fname = args.file
    true_egg_path = os.path.join(args.eggfolder, fname)

    true_egg = np.load(true_egg_path)
    true_egg = minmaxnormalize(true_egg)

    if args.detrend:
        _, true_egg = detrend(None, true_egg)

    # DEGG: first derivative of the EGG; its minimum in a cycle marks the GCI.
    degg = np.gradient(true_egg, edge_order=2)

    # region = slice(11000, 12000)
    region = slice(8000, len(true_egg))
    true_egg = true_egg[region]
    degg = degg[region]

    plt.figure()
    fig = plt.gcf()
    ax1 = plt.subplot(211)
    plt.plot(true_egg, "b", label="EGG Waveform")
    plt.ylabel("Amplitude")

    ax2 = plt.subplot(212, sharex=ax1)
    plt.plot(degg, "g", label="DEGG waveform")
    plt.xlabel("Sample Number")
    plt.ylabel("Amplitude")

    # Timepoints in region shifted coordinates
    t0 = 19750  # start of EGG cycle, GOI
    t4 = 19853  # end of EGG cycle
    t1 = t0 + np.argmax(true_egg[t0:t4])  # t0=19750 => t1=19760 EGG peak
    t2 = t1 + np.argmin(degg[t1:t4])  # t0=19750 => t1=19760 => t2=19834 GCI
    t5 = 19908  # start of new egg cycle
    t9 = 20010  # end of new egg cycle
    t6 = t5 + np.argmax(true_egg[t5:t9])  # EGG peak
    t8 = t6 + np.argmin(degg[t6:t9])  # GCI

    # NOTE(review): y1max / y2min are currently unused.
    y1max = 1.0
    y2min = -0.05

    # Epochs
    # (x, y) coordinate pairs for each marker, per axis.

    goi_ax1_1 = (t0, true_egg[t0])
    goi_ax1_2 = (t5, true_egg[t5])

    goi_ax2_1 = (t0, degg[t0])
    goi_ax2_2 = (t5, degg[t5])

    gci_ax1_1 = (t2, true_egg[t2])
    gci_ax1_2 = (t8, true_egg[t8])

    gci_ax2_1 = (t2, degg[t2])
    gci_ax2_2 = (t8, degg[t8])

    eggpeak_ax1_1 = (t1, true_egg[t1])
    eggpeak_ax1_2 = (t6, true_egg[t6])

    # eggstart_ax1_1 = (t0, true_egg[t0])
    # eggstart_ax1_2 = (t5, true_egg[t5])

    eggend_ax1_1 = (t4, true_egg[t4])
    eggend_ax1_2 = (t9, true_egg[t9])

    # Epoch Markers
    goi_ax1 = np.array([goi_ax1_1, goi_ax1_2])
    gci_ax1 = np.array([gci_ax1_1, gci_ax1_2])

    goi_ax2 = np.array([goi_ax2_1, goi_ax2_2])
    gci_ax2 = np.array([gci_ax2_1, gci_ax2_2])

    eggpeak_ax1 = np.array([eggpeak_ax1_1, eggpeak_ax1_2])

    # eggstart_ax1 = np.array([eggstart_ax1_1, eggstart_ax1_2])

    eggend_ax1 = np.array([eggend_ax1_1, eggend_ax1_2])

    # GOI scatter
    ax1.scatter(goi_ax1[:, 0],
                goi_ax1[:, 1],
                c="tab:pink",
                marker="x",
                s=100,
                label="GOI")
    ax2.scatter(goi_ax2[:, 0],
                goi_ax2[:, 1],
                c="tab:pink",
                marker="x",
                s=100,
                label="GOI")

    # GCI Scatter
    ax1.scatter(gci_ax1[:, 0],
                gci_ax1[:, 1],
                c="r",
                marker="*",
                s=100,
                label="GCI")
    ax2.scatter(gci_ax2[:, 0],
                gci_ax2[:, 1],
                c="r",
                marker="*",
                s=100,
                label="GCI")

    # EGG Peak Scatter
    ax1.scatter(
        eggpeak_ax1[:, 0],
        eggpeak_ax1[:, 1],
        # c="g",
        marker="s",
        s=100,
        label="EGG Peak",
        facecolors="none",
        edgecolors="g",
    )

    # EGG Start Scatter
    # ax1.scatter(
    #     eggstart_ax1[:, 0],
    #     eggstart_ax1[:, 1],
    #     # c="g",
    #     marker="^",
    #     s=100,
    #     label="EGG Cycle Start",
    #     facecolors="none",
    #     edgecolors="b",
    # )

    # EGG End Scatter
    ax1.scatter(
        eggend_ax1[:, 0],
        eggend_ax1[:, 1],
        # c="g",
        marker="v",
        s=100,
        label="EGG Cycle End",
        facecolors="none",
        edgecolors="tab:purple",
    )

    # Across axes vertical lines

    # Annotations
    # Label t0, t1, t2 and t4 slightly offset from their markers.
    yoffset = 0.1
    xoffset = 3
    ax1.text(
        t0 + xoffset,
        true_egg[t0] + yoffset,
        r"$t_0$",
        ha="center",
        va="center",
        transform=ax1.transData,
        fontsize=30,
    )
    ax1.text(
        t1 + xoffset,
        true_egg[t1] + yoffset,
        r"$t_1$",
        ha="center",
        va="center",
        transform=ax1.transData,
        fontsize=30,
    )
    ax1.text(
        t2 + xoffset,
        true_egg[t2] + yoffset,
        r"$t_2$",
        ha="center",
        va="center",
        transform=ax1.transData,
        fontsize=30,
    )
    ax1.text(
        t4 + xoffset,
        true_egg[t4] + yoffset,
        r"$t_4$",
        ha="center",
        va="center",
        transform=ax1.transData,
        fontsize=30,
    )

    plt.subplots_adjust(top=0.962,
                        bottom=0.08,
                        left=0.053,
                        right=0.981,
                        hspace=0.116,
                        wspace=0.2)

    # ax1.spines["bottom"].set_visible(False)
    # ax2.spines["top"].set_visible(False)
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_label_coords(-0.037, 0.5)
    ax2.get_yaxis().set_label_coords(-0.037, 0.5)
    ax1.set_xlim([19715, 20015])

    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()

    ax1.legend()
    ax2.legend()
    plt.show()
示例#11
0
def main():
    """Visualize GCI detection quality for one file: voiced EGG, processed
    EGG, negative DEGG, and ground-truth GCIs marked as hits, misses and
    false alarms."""
    args = parse()
    fname = args.filename
    eground = np.load(os.path.join(args.groundpath, fname))
    egen = np.load(os.path.join(args.generatedpath, fname))

    # Align lengths, then optionally detrend the ground truth.
    if len(eground) > len(egen):
        eground = eground[:len(egen)]
    if args.detrend:
        _, eground = detrend(None, eground)

    rawground, rawgen = eground, egen
    voicedground, voicedgen = detect_voiced_region(rawground, rawgen)

    # Normalize both voiced signals to unit peak for plotting.
    voicedground = voicedground / np.max(np.abs(voicedground))
    voicedgen = voicedgen / np.max(np.abs(voicedgen))

    eground, egen = detect_voiced_region(eground, egen)
    eground, egen = groundeggfilter(eground), geneggfilter(egen)

    assert len(eground) == len(voicedground)

    degground = groundegg_process(eground)
    deggen = genegg_process(egen)

    peaksposground = detectgroundwaveletgci(eground)
    peaksground = positions2onehot(peaksposground, eground.shape)

    peaksposgen = detectgenwaveletgci(egen)
    peaksgen = positions2onehot(peaksposgen, egen.shape)
    assert len(peaksgen) == len(peaksground)

    # Score GCI positions in seconds (16 kHz sampling rate).
    metrics = corrected_naylor_metrics(peaksposground / 16e3,
                                       peaksposgen / 16e3)

    idr = metrics["identification_rate"]
    msr = metrics["miss_rate"]
    far = metrics["false_alarm_rate"]
    ida = metrics["identification_accuracy"]
    nhits = metrics["nhits"]
    nmisses = metrics["nmisses"]
    nfars = metrics["nfars"]
    ncycles = metrics["ncycles"]

    hits = metrics["hits"]
    hits, hit_distances = zip(*hits)
    fs = 16e3
    # Convert event times (seconds) back to sample indices for plotting.
    hits = np.array(hits).squeeze() * fs
    misses = np.array(metrics["misses"]).squeeze() * fs
    fars = np.array(metrics["fars"]).squeeze() * fs
    # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the equivalent dtype spec.
    hits = hits.astype(int)
    misses = misses.astype(int)
    fars = fars.astype(int)

    ax = plt.subplot(411)
    plt.plot(voicedground, "r", label="ground truth")
    plt.plot(voicedgen, "g", label="generated egg")
    ax.set_xlim([-100, len(voicedground) + 100])
    plt.gca().set_ylabel("amplitude")
    plt.title("EGG")

    plt.legend(loc=1)

    plt.subplot(412, sharex=ax)
    plt.plot(eground, "r", label="ground truth")
    plt.plot(egen, "g", label="generated egg")
    plt.gca().set_ylabel("amplitude")
    plt.title("Proc EGG")

    plt.legend(loc=1)

    plt.subplot(413, sharex=ax)
    plt.plot(degground, "r", label="ground truth")
    plt.plot(deggen, "g", label="generated egg")
    plt.gca().set_ylabel("amplitude")
    plt.title("Neg DEGG")

    plt.legend(loc=1)

    plt.subplot(414, sharex=ax)
    x = np.arange(len(peaksground))
    lax = plt.gca()

    # NOTE(review): axhline's signature is (y, xmin, xmax); these arguments
    # look transposed but are preserved as-is — confirm intent.
    lax.axhline(x[0], x[-1], 0, color="k")
    lax.vlines(x, 0, peaksground, color="r", label="ground truth", linewidth=2)
    lax.vlines(x,
               0,
               2 * positions2onehot(hits, peaksground.shape),
               color="g",
               label="hits")
    lax.vlines(x,
               0,
               2 * positions2onehot(misses, peaksground.shape),
               color="b",
               label="misses")
    lax.vlines(x,
               0,
               2 * positions2onehot(fars, peaksground.shape),
               color="m",
               label="fars")
    # plt.plot(peaksground, "r", label="ground truth", linewidth=2)
    # plt.plot(2 * peaksgen, "g", label="generated egg")

    plt.gca().set_xlabel("sample")
    plt.gca().set_ylabel("amplitude")
    plt.title("GCI")
    plt.legend(loc=1)

    plt.subplots_adjust(top=0.91,
                        bottom=0.045,
                        left=0.035,
                        right=0.99,
                        hspace=0.2,
                        wspace=0.2)
    plt.suptitle(
        "{} IDR: {:2.2f} MR: {:2.2f} FAR: {:2.2f} IDA {:2.3f}\n H: {} M: {} F: {} C: {}"
        .format(
            fname,
            idr * 100,
            msr * 100,
            far * 100,
            ida * 1000,
            nhits,
            nmisses,
            nfars,
            ncycles,
        ))

    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
示例#12
0
def gsr_preprocessing(signals):
    ''' Preprocessing for GSR signals.

    Extracts statistical, rising-time, band-power and zero-crossing/peak
    features from a galvanic skin response signal sampled at 128 Hz and
    returns them as a flat list.
    '''
    der_signals = np.gradient(signals)
    # Skin conductance is the reciprocal of skin resistance; z-normalize it.
    con_signals = 1.0 / signals
    nor_con_signals = (con_signals -
                       np.mean(con_signals)) / np.std(con_signals)

    mean = np.mean(signals)
    der_mean = np.mean(der_signals)
    neg_der_mean = np.mean(der_signals[der_signals < 0])
    neg_der_pro = float(der_signals[der_signals < 0].size) / float(
        der_signals.size)

    # Count strict local minima of the raw signal
    # (range starts at 1 instead of the old `if i == 0: continue` guard).
    local_min = 0
    for i in range(1, signals.shape[0] - 1):
        if signals[i - 1] > signals[i] and signals[i] < signals[i + 1]:
            local_min += 1

    # Using SC calculates rising time
    det_nor_signals, trend = detrend(nor_con_signals)
    lp_det_nor_signals = butter_lowpass_filter(det_nor_signals, 0.5, 128.)
    der_lp_det_nor_signals = np.gradient(lp_det_nor_signals)

    rising_time = 0
    rising_cnt = 0
    for i in range(der_lp_det_nor_signals.size - 1):
        if der_lp_det_nor_signals[i] > 0:
            rising_time += 1
            if der_lp_det_nor_signals[i + 1] < 0:
                rising_cnt += 1

    # Robustness fix: avoid ZeroDivisionError when no rising segment ends.
    avg_rising_time = rising_time * (1. / 128.) / rising_cnt if rising_cnt != 0 else 0

    freqs, power = getfreqs_power(signals,
                                  fs=128.,
                                  nperseg=signals.size,
                                  scaling='spectrum')
    power_0_24 = []
    for i in range(21):
        power_0_24.append(
            getBand_Power(freqs,
                          power,
                          lower=0 + (i * 0.8 / 7),
                          upper=0.1 + (i * 0.8 / 7)))

    # Skin conductance slow response (SCSR) and very slow response (SCVSR).
    SCSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.2, 128.))
    SCVSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.08, 128.))

    zero_cross_SCSR = 0
    zero_cross_SCVSR = 0
    peaks_cnt_SCSR = 0
    peaks_cnt_SCVSR = 0
    peaks_value_SCSR = 0.
    peaks_value_SCVSR = 0.

    zc_idx_SCSR = np.array([], int)  # must be int, otherwise it will be float
    zc_idx_SCVSR = np.array([], int)
    # A sign change against the next non-zero sample marks a zero crossing.
    # NOTE(review): SCSR records index i + 1 while SCVSR records i — this
    # asymmetry is preserved as-is; confirm against the feature definition.
    for i in range(nor_con_signals.size - 1):
        if SCSR[i] * next((j for j in SCSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCSR += 1
            zc_idx_SCSR = np.append(zc_idx_SCSR, i + 1)
        if SCVSR[i] * next((j for j in SCVSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCVSR += 1
            zc_idx_SCVSR = np.append(zc_idx_SCVSR, i)

    # Accumulate the peak magnitude between consecutive zero crossings.
    for i in range(zc_idx_SCSR.size - 1):
        peaks_value_SCSR += np.absolute(
            SCSR[zc_idx_SCSR[i]:zc_idx_SCSR[i + 1]]).max()
        peaks_cnt_SCSR += 1
    for i in range(zc_idx_SCVSR.size - 1):
        peaks_value_SCVSR += np.absolute(
            SCVSR[zc_idx_SCVSR[i]:zc_idx_SCVSR[i + 1]]).max()
        peaks_cnt_SCVSR += 1

    zcr_SCSR = zero_cross_SCSR / (nor_con_signals.size / 128.)
    zcr_SCVSR = zero_cross_SCVSR / (nor_con_signals.size / 128.)

    mean_peak_SCSR = peaks_value_SCSR / peaks_cnt_SCSR if peaks_cnt_SCSR != 0 else 0
    # Fixed: guard on the peak *count* (was peaks_value_SCVSR), mirroring the
    # SCSR line above and preventing a division by zero.
    mean_peak_SCVSR = peaks_value_SCVSR / peaks_cnt_SCVSR if peaks_cnt_SCVSR != 0 else 0

    features = [mean, der_mean, neg_der_mean, neg_der_pro, local_min, avg_rising_time] + \
        power_0_24 + [zcr_SCSR, zcr_SCVSR, mean_peak_SCSR, mean_peak_SCVSR]

    return features