Example #1
def compute_psnr(diff_img):
    """Takes a diff Image as input and returns the PSNR and minimum SNR for each color channel."""
    # Cast to float so squaring cannot overflow uint8.
    diff = np.asarray(diff_img, dtype=np.float64)
    # (H, W, 3) -> (3, H*W): one row per color channel.
    diff = diff.reshape(-1, 3).T
    mse_rgb = np.mean(diff**2, axis = 1)
    psnr_rgb = 10 * np.log10(255**2 / mse_rgb)
    # The worst-case (maximum) squared error per channel gives the minimum SNR.
    max_sq_err_rgb = np.amax(diff**2, axis = 1)
    min_snr_rgb = 10 * np.log10(255**2 / max_sq_err_rgb)
    return  psnr_rgb[0], psnr_rgb[1], psnr_rgb[2], min_snr_rgb[0], min_snr_rgb[1], min_snr_rgb[2]
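A minimal usage sketch, assuming Pillow is installed; "a.png" and "b.png" are placeholder paths to two same-sized RGB images, and the numpy import is the one the snippet relies on:

import numpy as np
from PIL import Image, ImageChops

img_a = Image.open("a.png").convert("RGB")
img_b = Image.open("b.png").convert("RGB")
diff_img = ImageChops.difference(img_a, img_b)  # per-pixel absolute difference
psnr_r, psnr_g, psnr_b, snr_r, snr_g, snr_b = compute_psnr(diff_img)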
Example #2
    def __init__(self, Nx, Ny, gamma):
        """
        Parameters
        ----------
        Nx : int
            Number of segments along the x-axis
        Ny : int
            Number of segments along the y-axis
        gamma : ndarray
            Gain factor distribution
        """
        self.Nx = Nx
        self.Ny = Ny
        self.Lb = 3.5
        self.L = 0.1
        self.W = 0.1
        self.H = 0.1
        self.b = 0.4
        self.rho = 1.0
        self.dx = self.Lb/self.Nx
        self.x = cp.arange(0,self.Lb,self.dx, dtype=cp.float32)
        
        By = 1e-3
        # CuPy ufuncs need an array argument, so wrap the Python float
        Ay = -1/self.H*cp.log10(cp.asarray(By))
        m = cp.linspace(0,1,Ny,dtype=cp.float32)
        self.y = -1/Ay*cp.log10(1-m*(1-By))
        self.dy = self.y[1:]-self.y[0:-1]

        ch_damp = 2.8 * cp.exp(-0.2 * self.x)
        
        self.k1 = 2.2e8*cp.exp(-3*self.x)
        self.m1 = 3e-3
        self.c1 = 6 + 670*cp.exp(-1.5*self.x) * ch_damp
        self.k2 = 1.4e6*cp.exp(-3.3*self.x)
        self.c2 = 4.4*cp.exp(-1.65*self.x) * ch_damp
        self.m2 = 0.5e-3
        self.k3 = 2.0e6*cp.exp(-3*self.x)
        self.c3 = 0.8*cp.exp(-0.6*self.x) * ch_damp
        self.k4 = 1.15e8*cp.exp(-3*self.x)
        self.c4 = 440.0*cp.exp(-1.5*self.x) * ch_damp

        self.c1c3 = self.c1 + self.c3
        self.k1k3 = self.k1 + self.k3
        self.c2c3 = self.c2 + self.c3
        self.k2k3 = self.k2 + self.k3

        self.gamma = gamma

        self.dt = 10e-6

        self.beta = 50e-7
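The y grid above is deliberately non-uniform: because Ay = -(1/H)*log10(By), the mapping y = -(1/Ay)*log10(1 - m*(1 - By)) sends m = 0 to y = 0 and m = 1 to y = H, packing points exponentially toward y = 0. A quick endpoint check of that identity, assuming CuPy is installed:

import cupy as cp

H, By, Ny = 0.1, 1e-3, 64
Ay = -1 / H * cp.log10(cp.asarray(By))
m = cp.linspace(0, 1, Ny, dtype=cp.float32)
y = -1 / Ay * cp.log10(1 - m * (1 - By))
assert float(y[0]) == 0.0 and abs(float(y[-1]) - H) < 1e-4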
Example #3
def _gpu_snr(power, noise, block_size=200):
    # block_size is accepted for API compatibility but is not used here.
    power_array = cp.array(power)
    gpu_noise = cp.array(noise)
    # SNR in dB: 10*log10(signal power / noise power)
    power_array = 10 * cp.log10(power_array / gpu_noise)
    snr = power_array.get()  # move the result back to host memory
    return snr
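A quick sanity check (the arrays are made up; assumes the same import cupy as cp the snippet uses):

import numpy as np

power = np.array([1.0, 10.0, 100.0])
noise = np.ones(3)
print(_gpu_snr(power, noise))  # -> [ 0. 10. 20.]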
Example #4
def compute_gain(sound, fs, min_db=-80.0, mode='A_weighting'):
    if fs == 16000:
        n_fft = 2048
    elif fs == 44100:
        n_fft = 4096
    else:
        raise Exception('Invalid fs {}'.format(fs))
    stride = n_fft // 2

    gain = None
    for i in range(0, len(sound[0]) - n_fft + 1, stride):
        if mode == 'RMSE':
            # index the time axis, matching the A_weighting branch below
            g = cupy.mean(sound[:, i:i + n_fft]**2, axis=1)
        elif mode == 'A_weighting':
            spec = cupy.fft.rfft(
                cupy.hanning(n_fft + 1)[:-1] * sound[:, i:i + n_fft])
            power_spec = cupy.abs(spec)**2
            a_weighted_spec = power_spec * cupy.power(10,
                                                      a_weight(fs, n_fft) / 10)
            g = cupy.sum(a_weighted_spec, axis=1)
        else:
            raise Exception('Invalid mode {}'.format(mode))
        if i == 0:
            gain = g.reshape([-1, 1])
        else:
            gain = cupy.concatenate((gain, g.reshape([-1, 1])), axis=1)

    gain = cupy.maximum(gain, cupy.power(10, min_db / 10))
    gain_db = 10 * cupy.log10(gain)

    return gain_db
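A hedged usage sketch, assuming a_weight from Example #10 below is in scope and that sound is a batch of 16 kHz clips shaped (n_clips, n_samples); the random input is a placeholder:

import cupy

sound = cupy.random.uniform(-1, 1, (2, 32000)).astype(cupy.float32)
gain_db = compute_gain(sound, fs=16000, mode='A_weighting')
print(gain_db.shape)  # (2, n_windows)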
Example #5
def calc_antennas_vector(phase_diff):

    # Create the transmit-antenna instances.
    # tx holds every point it can take on the x, y grid (30x30).
    # Antenna height and orientation are fixed.
    # TM wave (horizontal polarization)
    tx_ant1 = rt.TX(const.x1_grid, const.y1_grid, const.tx_antenna_hight,
                    const.freq, const.tx_power, const.tx_orientation,
                    const.phase_of_origin)
    tx_ant2 = rt.TX(const.x2_grid, const.y2_grid, const.tx_antenna_hight,
                    const.freq, const.tx_power, const.tx_orientation,
                    (phase_diff / 180) * 2 * cp.pi)

    # Create the receive-antenna instances.
    # rx also holds every point it can take on the x, y grid
    # (here only 7 points: x = 0, 50, ..., y = 100).
    # Antenna height and orientation are fixed.
    rx_ants = rt.RX(const.rx_ants_x_position, const.rx_ants_y_position,
                    const.rx_antenna_hight, const.freq, const.gain,
                    const.rx_orientation, const.phase_of_origin)

    # Compute the power at the receive points using the two-ray model.
    # power_rxs.shape = (30, 30, 7)
    power_rx1 = rx_ants.receive_power(tx_ant1)
    power_rx2 = rx_ants.receive_power(tx_ant2)

    power_rxs = power_rx1[:, :, cp.newaxis, cp.newaxis, :] + \
        power_rx2[cp.newaxis, cp.newaxis, :, :, :]

    # unit[dBm]
    power_rxs = 10 * cp.log10(cp.abs(power_rxs**2))

    return power_rxs
Example #6
def show_qq_plot(df, x_axis, y_axis):

    x_values = cupy.fromDlpack(df[x_axis].to_dlpack())
    y_values = cupy.fromDlpack(df[y_axis].to_dlpack())

    # Transform first so the axis ranges match the plotted -log10 values
    # (Example #9 below does the same).
    x_values = -cupy.log10(x_values + 1e-10)
    y_values = -cupy.log10(y_values + 1e-10)

    x_max = cupy.max(x_values).tolist()
    y_max = cupy.max(y_values).tolist()

    qq_fig = figure(x_range=(0, x_max), y_range=(0, y_max))
    qq_fig.circle(x_values.get(), y_values.get())
    qq_fig.line([0, x_max], [0, y_max])

    qq_handle = show(qq_fig, notebook_handle=True)
    push_notebook(handle=qq_handle)
    return qq_fig
Example #7
def log10(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.

    See its docstring for more information.
    """
    if x.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in log10")
    return Array._new(np.log10(x._array))
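This wrapper comes from NumPy's experimental array-API namespace; a usage sketch (numpy.array_api shipped as experimental from NumPy 1.22 and warns on import):

import numpy.array_api as xp

x = xp.asarray([1.0, 10.0, 100.0])
print(xp.log10(x))  # Array([0., 1., 2.], dtype=float64)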
Example #8
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as im_test.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values).  By default, this is estimated from the image
        data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    """
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn(
                "Inputs have mismatched dtype.  Setting data_range based on "
                "im_true.",
                stacklevel=2,
            )
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = cp.min(image_true), cp.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "im_true has intensity values outside the range expected for "
                "its data type.  Please manually specify the data_range")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)

    err = mean_squared_error(image_true, image_test)
    return 10 * cp.log10((data_range * data_range) / err)
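A minimal usage sketch, assuming this is the CuPy (cucim) port so the inputs are CuPy arrays; the synthetic images are placeholders:

import cupy as cp

img = cp.random.rand(64, 64)
noisy = cp.clip(img + 0.05 * cp.random.randn(64, 64), 0, 1)
print(float(peak_signal_noise_ratio(img, noisy, data_range=1.0)))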
Example #9
def show_qq_plot(df,
                 x_axis,
                 y_axis,
                 title="QQ",
                 save_to=None,
                 x_max=None,
                 y_max=None):

    x_values = cupy.fromDlpack(df[x_axis].to_dlpack())
    y_values = cupy.fromDlpack(df[y_axis].to_dlpack())

    x_values = -cupy.log10(x_values)
    y_values = -cupy.log10(y_values)

    if x_max is None:
        x_max = cupy.max(x_values).tolist()
    if y_max is None:
        y_max = cupy.max(y_values).tolist()

    if y_max == cupy.inf:
        print("Please pass y_max. Input contains inf.")
        return
    if x_max == cupy.inf:
        print("Please pass x_max. Input contains inf.")
        return

    qq_fig = figure(x_range=(0, x_max), y_range=(0, y_max), title=title)
    qq_fig.circle(x_values.get(), y_values.get(), size=1)
    qq_fig.line([0, x_max], [0, y_max], line_color='orange', line_width=2)

    if save_to:
        export_png(qq_fig, filename=save_to)
    else:
        qq_handle = show(qq_fig, notebook_handle=True)
        push_notebook(handle=qq_handle)

    return qq_fig
Example #10
def a_weight(fs, n_fft, min_db=-80.0):
    freq = cupy.linspace(0, fs // 2, n_fft // 2 + 1)
    freq_sq = cupy.power(freq, 2)
    freq_sq[0] = 1.0
    weight = 2.0 + 20.0 * (
        2 * cupy.log10(12194) + 2 * cupy.log10(freq_sq) -
        cupy.log10(freq_sq + 12194**2) - cupy.log10(freq_sq + 20.6**2) - 0.5 *
        cupy.log10(freq_sq + 107.7**2) - 0.5 * cupy.log10(freq_sq + 737.9**2))
    weight = cupy.maximum(weight, min_db)

    return weight
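The expression above is the standard IEC 61672 A-weighting curve evaluated directly in decibels. In closed form,

$$A(f) = 20\log_{10}\frac{12194^2 f^4}{(f^2+20.6^2)\sqrt{(f^2+107.7^2)(f^2+737.9^2)}\,(f^2+12194^2)} + 2.0,$$

which matches the code once $f^2$ is substituted via freq_sq (so $4\log_{10} f$ becomes 2 * cupy.log10(freq_sq)).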
Example #11
def show_manhattan_plot(df,
                        group_by,
                        x_axis,
                        y_axis,
                        title='Manhattan Plot',
                        save_to=None):
    chroms = df[group_by].unique().to_array()

    plot_width = len(chroms) * 50

    manhattan_fig = figure(title=title, plot_width=plot_width)
    manhattan_fig.xaxis.axis_label = 'Chromosomes'
    manhattan_fig.yaxis.axis_label = '-log10(p)'

    manhattan_fig.xaxis.ticker = FixedTicker(ticks=[t for t in chroms])

    start_position = 0.5
    for chrom in chroms:
        query = '%s == %s' % (group_by, chrom)
        cdf = df.query(query)

        x_array = cupy.fromDlpack(cdf[x_axis].to_dlpack()) + start_position
        y_array = -cupy.log10(cupy.fromDlpack(cdf[y_axis].to_dlpack()))

        manhattan_fig.circle(x_array.get(),
                             y_array.get(),
                             size=2,
                             color='orange' if
                             (start_position - 0.5) % 2 == 0 else 'gray',
                             alpha=0.5)

        start_position += 1

    if save_to:
        export_png(manhattan_fig, filename=save_to)
    else:
        manhattan_handle = show(manhattan_fig, notebook_handle=True)
        push_notebook(handle=manhattan_handle)

    return manhattan_fig
Example #12
def mat_mwu_gpu(a_mat, b_mat, melt: bool, effect: str, use_continuity=True):
    """
    Compute rank-biserial correlations and Mann-Whitney statistics
    between every column-column pair of a_mat (continuous) and b_mat (binary).

    In the case that a_mat or b_mat has a single column, the results are
    re-formatted with the multiple hypothesis-adjusted q-value also returned.

    Parameters
    ----------
    a_mat: Pandas DataFrame
        Continuous set of observations, with rows as samples and columns
        as labels.
    b_mat: Pandas DataFrame
        Binary set of observations, with rows as samples and columns as labels.
        Required to be castable to boolean datatype.
    melt: boolean
        Whether or not to melt the outputs into columns.
    use_continuity: bool
        Whether or not to use a continuity correction. True by default.
    effect: "mean", "median", or "rank_biserial"
        The effect statistic.

    Returns
    -------
    effects: rank-biserial correlations
    pvals: -log10 p-values of correlations
    """

    if effect not in ["rank_biserial"]:

        raise ValueError("effect must be 'rank_biserial'")

    a_has_nan = a_mat.isna().sum().sum() > 0
    b_has_nan = b_mat.isna().sum().sum() > 0

    if a_has_nan or b_has_nan:

        raise ValueError("a_mat and b_mat cannot have missing values")

    a_mat, b_mat = precheck_align(a_mat, b_mat, np.float64, bool)

    a_names = a_mat.columns
    b_names = b_mat.columns

    a_ranks = a_mat.apply(rankdata)
    a_ties = a_ranks.apply(tiecorrect)

    a_ranks = cp.array(a_ranks)

    a_mat, b_mat = cp.array(a_mat), cp.array(b_mat)
    b_mat = b_mat.astype(cp.bool_)  # cp.bool is a removed alias; use cp.bool_

    a_num_cols = a_mat.shape[1]  # number of variables in A
    b_num_cols = b_mat.shape[1]  # number of variables in B

    a_mat = cp.array(a_mat).astype(cp.float64)
    b_pos = b_mat.astype(cp.float64)
    b_neg = (~b_mat).astype(cp.float64)

    pos_ns = b_pos.sum(axis=0)
    neg_ns = b_neg.sum(axis=0)

    pos_ns = cp.vstack([pos_ns] * a_num_cols)
    neg_ns = cp.vstack([neg_ns] * a_num_cols)

    pos_ranks = cp.dot(a_ranks.T, b_pos)

    u1 = pos_ns * neg_ns + (pos_ns * (pos_ns + 1)) / 2.0 - pos_ranks
    u2 = pos_ns * neg_ns - u1

    # temporarily mask zeros
    n_prod = pos_ns * neg_ns
    zero_prod = n_prod == 0
    n_prod[zero_prod] = 1

    effects = 2 * u2 / n_prod - 1  # use the zero-masked product computed above

    # zero out the entries where one group was empty
    effects[zero_prod] = 0

    a_ties = cp.vstack([cp.array(a_ties)] * b_num_cols).T

    #     if T == 0:
    #         raise ValueError('All numbers are identical in mannwhitneyu')

    sd = cp.sqrt(a_ties * pos_ns * neg_ns * (pos_ns + neg_ns + 1) / 12.0)

    meanrank = pos_ns * neg_ns / 2.0 + 0.5 * use_continuity
    bigu = cp.maximum(u1, u2)

    # temporarily mask zeros
    sd_0 = sd == 0
    sd[sd_0] = 1

    z = (bigu - meanrank) / sd

    z[sd_0] = 0

    # compute p values
    pvals = 2 * (1 - ndtr(cp.abs(z)))

    # account for small p-values rounding to 0
    pvals[pvals == 0] = cp.finfo(cp.float64).tiny

    pvals = -cp.log10(pvals)

    pvals = pd.DataFrame(pvals, columns=b_names, index=a_names)
    effects = pd.DataFrame(effects, columns=b_names, index=a_names)
    pos_ns = pd.DataFrame(pos_ns, columns=b_names, index=a_names)
    neg_ns = pd.DataFrame(neg_ns, columns=b_names, index=a_names)

    effects = effects.fillna(0)
    pvals = pvals.fillna(1)

    if melt:

        return melt_mwu(effects, pvals, pos_ns, neg_ns, effect)

    return effects, pvals
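A toy invocation sketch (assumes the snippet's own helpers, e.g. precheck_align, rankdata, tiecorrect, ndtr and melt_mwu, are importable):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
a_mat = pd.DataFrame(rng.normal(size=(40, 3)), columns=["g1", "g2", "g3"])
b_mat = pd.DataFrame(rng.integers(0, 2, size=(40, 2)).astype(bool),
                     columns=["mut1", "mut2"])
effects, pvals = mat_mwu_gpu(a_mat, b_mat, melt=False, effect="rank_biserial")
print(effects.shape, pvals.shape)  # (3, 2) (3, 2)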
Example #13
def run_gpu_spectrum_int(num_samp, nbins, gain, rate, fc, t_int):
    '''
    Inputs:
    num_samp: Number of elements to sample from the SDR IQ per call;
              use powers of 2
    nbins:    Number of frequency bins in the resulting power spectrum; powers
              of 2 are most efficient, and smaller numbers are faster on CPU.
    gain:     Requested SDR gain (dB)
    rate:     SDR sample rate, intrinsically tied to bandwidth in SDRs (Hz)
    fc:       Base center frequency (Hz)
    t_int:    Total effective integration time (s)

    Returns:
    freqs:       Frequencies of the resulting spectrum, centered at fc (Hz), 
                 numpy array
    p_avg_db_hz: Power spectral density (dB/Hz) numpy array
    '''
    import cupy as cp
    import cusignal

    # Force a choice of window to allow converting to PSD after averaging
    # power spectra
    WINDOW = 'hann'
    # Force a default nperseg for welch() because we need to get a window
    # of this size later. Use the scipy default 256, but enforce scipy
    # conditions on nbins vs. nperseg when nbins gets small.
    if nbins < 256:
        nperseg = nbins
    else:
        nperseg = 256

    print('Initializing rtl-sdr with pyrtlsdr:')
    sdr = RtlSdr()

    try:
        sdr.rs = rate  # Rate of Sampling (intrinsically tied to bandwidth with SDR dongles)
        sdr.fc = fc
        sdr.gain = gain
        print('  sample rate: %0.6f MHz' % (sdr.rs / 1e6))
        print('  center frequency %0.6f MHz' % (sdr.fc / 1e6))
        print('  gain: %d dB' % sdr.gain)
        print('  num samples per call: {}'.format(num_samp))
        print('  PSD binning: {} bins'.format(nbins))
        print('  requested integration time: {}s'.format(t_int))
        N = int(sdr.rs * t_int)
        num_loops = int(N / num_samp) + 1
        print('  => num samples to collect: {}'.format(N))
        print('  => est. num of calls: {}'.format(num_loops - 1))

        # Set up arrays to store power spectrum calculated from I-Q samples
        freqs = cp.zeros(nbins)
        p_xx_tot = cp.zeros(nbins, dtype=complex)
        # Create mapped, pinned memory for zero copy between CPU and GPU
        gpu_iq = cusignal.get_shared_mem(num_samp, dtype=np.complex128)
        cnt = 0

        # Set the baseline time
        start_time = time.time()
        print('Integration began at {}'.format(
            time.strftime('%a, %d %b %Y %H:%M:%S',
                          time.localtime(start_time))))

        # Time integration loop
        for cnt in range(num_loops):
            # Move USB-collected samples off CPU and onto GPU for calc
            gpu_iq[:] = sdr.read_samples(num_samp)

            freqs, p_xx = cusignal.welch(gpu_iq,
                                         fs=rate,
                                         nperseg=nperseg,
                                         nfft=nbins,
                                         noverlap=0,
                                         scaling='spectrum',
                                         window=WINDOW,
                                         detrend=False,
                                         return_onesided=False)
            p_xx_tot += p_xx

        end_time = time.time()
        print('Integration ended at {} after {} seconds.'.format(
            time.strftime('%a, %d %b %Y %H:%M:%S',
                          time.localtime(end_time)), end_time - start_time))
        print('{} spectra were measured at {}.'.format(num_loops, fc))
        print('for an effective integration time of {:.2f}s'.format(
            num_samp * num_loops / rate))

        # Shift the zero-frequency component to the center of the spectrum;
        # for even-length arrays this matches the manual half-array swap.
        freqs = cp.fft.fftshift(freqs)
        p_xx_tot = cp.fft.fftshift(p_xx_tot)

        # Compute the average power spectrum; the loop above accumulates
        # num_loops spectra (cnt stops at num_loops - 1).
        p_avg = p_xx_tot / num_loops

        # Convert to power spectral density
        # See the scipy docs for _spectral_helper().
        win = get_window(WINDOW, nperseg)
        p_avg_hz = p_avg * ((win.sum()**2) / (win * win).sum()) / rate

        p_avg_db_hz = 10. * cp.log10(p_avg_hz)

        # Shift frequency spectra back to the intended range
        freqs = freqs + fc


    except OSError as err:
        print("OS error: {0}".format(err))
        raise
    except:
        print('Unexpected error:', sys.exc_info()[0])
        raise
    finally:
        # nice and tidy: the finally clause always closes the SDR
        sdr.close()

    return cp.asnumpy(freqs), cp.asnumpy(p_avg_db_hz)
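An invocation sketch; this requires an RTL-SDR dongle and pyrtlsdr, and the parameter values below are only plausible choices (here tuned near the 1420.4 MHz hydrogen line):

freqs, p_avg_db_hz = run_gpu_spectrum_int(num_samp=2**18, nbins=2048,
                                          gain=49.6, rate=2.32e6,
                                          fc=1.4204e9, t_int=10)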
Example #14
    def __init__(self, bin_size=0.1, random_select=False):
        self.bins = cp.array([])
        self.weights = cp.array([], dtype=cp.int_)  # cp.int is a removed alias; use cp.int_
        self.bin_size = bin_size
        self._empty = True
        # cp.log10 needs an array argument, so wrap the Python float
        self._rd = int(cp.log10(cp.asarray(1. / bin_size)).item())
Example #15
    def directivity(self, theta):
        # Calculation assumes the dipole length is λ/2, so match the
        # frequency accordingly when measuring.
        # Unit: linear gain factor (not dB)
        deg_gain = const.gain + 10 * \
            cp.log10(cp.abs(cp.cos(2*cp.pi / 2*cp.cos(theta)) * cp.sin(theta)))
        return 10**(deg_gain/10)
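For reference, the textbook power pattern of a half-wave dipole (the length the comment above assumes) is

$$D(\theta) = D_0\left[\frac{\cos\!\left(\frac{\pi}{2}\cos\theta\right)}{\sin\theta}\right]^2, \qquad D_0 \approx 1.64 \ (\approx 2.15\ \text{dBi}).$$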
Example #16
def _safe_db_cupy(num, den):
    """Properly handle the potential +Inf dB SIR, instead of raising a
    RuntimeWarning. Only the denominator is guarded (with a small epsilon)
    because the numerator can never be 0.
    """
    return 10.0 * cp.log10(num / (den + 1e-12))
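A one-line check of the guard (the values are made up):

import cupy as cp

print(float(_safe_db_cupy(cp.asarray(1.0), cp.asarray(0.0))))  # 120.0: large but finite instead of +inf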
Example #17
def generate_spectrograms(
        NSAMP=1000,                     # Number of created spectrograms per class (vowel, non-vowel)
        N=512,                          # Window width in samples
        fbefore=250,                    # Number of windows before the center window
        fafter=250,                     # Number of windows after the center window
        ds=2,                           # Step between the windows in samples
        windows=None,                   # Matrix of window functions, 3 rows, N columns.
        dynRange=60,                    # Dynamic range
        signalLevel=10,                 # SNR
        sourceDir="",                   # TIMIT dataset audio folder
        sourceDirNoise="",              # Noise dataset audio folder
        targetDir="",                   # Output directory
        noiseType="MAVD",               # Type of the noise used {"MAVD", "ESC50"}
        batchSize=50,                   # Batch size when computing fft and creating spectrograms
        debug=False

):
    start_time = timer()
    if windows is None:
        windows = cp.ones(shape=(3, N))
    else:
        windows = cp.array(windows)

    eps = 1e-10
    phones = []
    wavfiles = []

    minstart = N // 2 + fbefore * ds

    for path, subdirs, files in os.walk(sourceDir):
        for name in files:
            basename, ext = os.path.splitext(name)
            fname = os.path.join(path, name)
            if ext != ".PHN":
                continue
            with open(fname, newline='') as phnfile:
                rdr = list(csv.reader(phnfile, delimiter=' '))
                wname = os.path.join(path, "{0}.WAV".format(basename))
                file_phones = []
                last_sampl = int(rdr[-1][1])
                maxstart = last_sampl - ds * fafter - N // 2 + 1
                for row in rdr:
                    start = int(row[0])
                    end = int(row[1])
                    if start < minstart:
                        continue
                    if end > maxstart:
                        continue
                    data = (wname, start, end, row[2].strip())
                    file_phones.append(data)

                phones += file_phones
                wavfiles.append(wname)

    print("Total {0} phones in {1} files".format(len(phones), len(wavfiles)))

    vowels = ["iy", "ih", "eh", "ey", "ae", "aa", "aw", "ay", "ah",
              "ao", "oy", "ow", "uh", "uw", "ux", "er", "ax", "ix", "axr",
              "ax-h"]

    # do not take samples from the very edges
    margin = int((fbefore + fafter) * ds * 0.1)
    phn_len = lambda x: max(x[2] - x[1] - margin, 1)

    class PhonePos:
        def __init__(self, phone, kind):
            self.file = phone[0]
            self.start = phone[1]
            self.end = phone[2]
            self.phone = phone[3]
            self.kind = kind

    def random_pos(kind="vowel"):
        if kind == "vowel":
            subset_iter = (filter(lambda x: x[3] in vowels, phones))
        else:
            subset_iter = (filter(lambda x: x[3] not in vowels, phones))
        subset_phones = list(subset_iter)
        subset_cs = np.cumsum(np.array(list(map(phn_len, subset_phones))))
        subset_max = subset_cs[-1]
        print(kind + " phonemes combined length: " + str(subset_max))

        subset_pos0 = np.sort(np.random.choice(subset_max, size=NSAMP, replace=False))
        subset_idx = np.searchsorted(subset_cs, subset_pos0, side='right')

        subset_pos = []
        for i in range(len(subset_idx)):
            j = subset_idx[i]
            assert (j >= 0)
            assert (j < len(subset_cs))
            phone = subset_phones[j]
            pp = PhonePos(phone, kind)

            # position within file
            file_pos = subset_pos0[i] + phone[1] + margin // 2

            if j > 0:
                file_pos -= subset_cs[j - 1]

            assert (file_pos <= phone[2] - margin // 2)
            assert (file_pos >= phone[1] + margin // 2)
            pp.file_pos = file_pos

            subset_pos.append(pp)

        # returns a list of PhonePos objects
        return subset_pos

    def list_noise_files(sound_ext=".wav"):
        noise_path_list = []
        for path, subdirs, files in os.walk(sourceDirNoise):
            for name in files:
                # print("Processing folder: {}".format(path))
                basename, ext = os.path.splitext(name)
                if ext != sound_ext:
                    continue
                fname = os.path.join(path, name)
                noise_path_list.append(fname)

        print(str(len(noise_path_list)) + " noise files found")
        return noise_path_list

    def load_noises(ns_pl, tar_rate):
        print("Loading noise")
        noises = []
        for ni, nois_p in enumerate(ns_pl):
            print("Loading noise " + str(int(ni / len(ns_pl) * 100)) + "%", end="\r")
            nois, rate = librosa.load(nois_p, sr=None, mono=True)
            nois_rs = librosa.resample(nois, rate, tar_rate)
            noises.append(nois_rs)

        return np.concatenate(noises)

    def tile_noise(noise, tar_len):
        multiple = tar_len / len(noise)
        repeat_c = int(np.floor(multiple))
        rest = tar_len - repeat_c * len(noise)
        # np.tile repeats the whole clip end-to-end; np.repeat would
        # duplicate each sample in place instead.
        return np.concatenate([np.tile(noise, repeat_c), noise[:rest]], 0)

    def combine_with_noise(sig, nois, snr):
        # snr = 10*log10(sum(s**2)/sum(n**2))
        if len(nois) != len(sig):
            nois = tile_noise(nois, len(sig))

        E_sig = sum(sig ** 2)
        E_nois = sum(nois ** 2)
        if E_nois == 0:
            print("Warning: zero energy noise")
            return sig

        if type(snr) == list:
            snr = np.random.uniform(snr[0], snr[1])

        coef = 10 ** ((snr - 10 * np.log10(E_sig / E_nois)) / (-20))
        return (sig + coef * nois) / (1 + coef)
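    # A sketch of the algebra behind coef: mixing sig + coef*nois gives
    #   SNR_out = 10*log10(E_sig / (coef**2 * E_nois))
    #           = 10*log10(E_sig / E_nois) - 20*log10(coef),
    # and solving SNR_out = snr for coef yields
    #   coef = 10 ** ((snr - 10*log10(E_sig / E_nois)) / (-20)),
    # exactly the expression used above. The final division by (1 + coef)
    # just rescales the mixture to limit clipping.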

    vp = random_pos("vowel")
    up = random_pos("nonvowel")

    v_tarDir = os.path.join(targetDir, "vowel")
    u_tarDir = os.path.join(targetDir, "nonvowel")

    if not os.path.exists(v_tarDir):
        os.makedirs(v_tarDir)
    if not os.path.exists(u_tarDir):
        os.makedirs(u_tarDir)

    # timit sample rate is 16 kHz
    t_rate = 16000

    if signalLevel is not None:
        if noiseType == "MAVD":
            noise_files = list_noise_files(".flac")
            noises = load_noises(noise_files, t_rate)
        elif noiseType == "ESC50":
            nf = list_noise_files()

    complete_count = 0

    for wavFile in wavfiles:
        curList = []
        while len(vp) > 0 and vp[0].file == wavFile:
            pp = vp.pop(0)
            curList.append(pp)

        while len(up) > 0 and up[0].file == wavFile:
            pp = up.pop(0)
            curList.append(pp)

        if len(curList) == 0:
            continue

        complete_count += len(curList)
        print("Creating spectrograms " + str(int(complete_count / (2 * NSAMP) * 100)) + "%", end="\r")

        with open(wavFile, "rb") as fp:
            fp.read(1024)  # need to jump over 1024 bytes
            wa = fp.read()
            snd = np.frombuffer(wa, dtype=np.int16)

        snd = snd.astype(np.float32) / 2 ** 15

        if signalLevel is not None:
            if noiseType == "MAVD":
                noise_start = np.random.randint(0, len(noises) - len(snd))
                nois = noises[noise_start:noise_start + len(snd)]

            elif noiseType == "ESC50":
                noise_ind = np.random.randint(len(nf))
                nois, nois_rate = librosa.load(nf[noise_ind], sr=None, mono=False)
                nois = librosa.resample(nois, nois_rate, t_rate, res_type='kaiser_fast')


            snd = combine_with_noise(snd, nois, signalLevel)

        ftotal = 1 + fbefore + fafter
        dat = np.zeros(shape=(len(curList), 3, ftotal, N), dtype=np.float32)

        # Output naming parts; needed by both the debug branch and the
        # batch loop below.
        fname = os.path.splitext(os.path.basename(wavFile))[0]
        reg, speaker = os.path.split(os.path.split(wavFile)[0])
        reg = os.path.split(reg)[1]

        if debug:
            sound_fn = os.path.join(targetDir, reg + "_" + speaker + "_" + fname + ".wav")
            sf.write(sound_fn, snd, samplerate=t_rate)

        for i, pp in enumerate(curList):
            p = pp.file_pos
            start = p - ds * fbefore - N // 2
            end = p + ds * fafter + N // 2
            stride_bytes = snd.strides[0]
            matrix = np.lib.stride_tricks.as_strided(snd[start:end],
                                                     shape=(ftotal, N),
                                                     strides=(stride_bytes * ds, stride_bytes),
                                                     writeable=False)
            dat[i, :, :, :] = matrix

        for bstart in range(0, dat.shape[0], batchSize):
            dat_batch = cp.transpose(cp.array(dat[bstart:bstart + batchSize]), (0, 2, 1, 3))
            spect_abs = cp.abs(cp.fft.rfft(cp.multiply(dat_batch, windows), axis=3))
            spect_abs[spect_abs == 0] = eps
            spectra = 20 * cp.log10(spect_abs)
            maxs = cp.max(spectra, axis=(1, 2, 3), keepdims=True)
            mins = cp.max(cp.concatenate((maxs - dynRange, cp.min(spectra, axis=(1, 2, 3), keepdims=True)), 3), 3,
                          keepdims=True)
            M_sp = spectra > mins
            spectra[~M_sp] = 0
            spectra[M_sp] = (((spectra - mins) / (maxs - mins)) * 255)[M_sp]

            spec_tr = cp.flip(cp.transpose(spectra, (0, 3, 1, 2)), 1).astype(dtype=cp.byte)


            for i, pp in enumerate(curList[bstart:bstart + batchSize]):
                pos = pp.file_pos
                img_name = reg + "_" + speaker + "_" + fname + "_" + str(pos) + "_" + str(pp.phone) + ".png"
                if pp.kind == "vowel":
                    img_path = os.path.join(v_tarDir, img_name)
                else:
                    img_path = os.path.join(u_tarDir, img_name)

                img = Image.fromarray(spec_tr[i].get(), mode="RGB")
                img.save(img_path)

    print("Finished in: {}s".format(timer() - start_time))
Example #18
    def General_n_Balance_n_Collision_Eff(self,
                                          _new_path,
                                          length_only=True,
                                          GPU_accelerating=False,
                                          GPU_accelerating_data=None,
                                          matrix_data=None):
        ITC = {}
        max_ITC = 1
        min_ITC = sys.maxsize
        total_cost = 0
        max_order = 0
        total_order = 0

        standard_index = self.tools.GetWidth()**2 + self.tools.GetHeight()**2

        # Parallelization
        if GPU_accelerating and length_only:
            n_AGV, population_size = GPU_accelerating_data

            T_matrix, S_matrix = matrix_data

            T_matrix = cp.array(T_matrix)
            S_matrix = cp.array(np.array(S_matrix).astype(float))

            ITC_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[1],[1]])), (population_size, n_AGV))
            O_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[0],[1]])), (population_size, n_AGV))
            TC_matrix = cp.reshape(cp.dot(ITC_matrix, cp.ones((n_AGV, 1))), (population_size))
            TO_matrix = cp.reshape(cp.dot(O_matrix, cp.ones((n_AGV, 1))), (population_size))

            max_ITC_matrix = cp.amax(ITC_matrix, axis=1)
            min_ITC_matrix = cp.amin(ITC_matrix, axis=1)
            max_order_matrix = cp.amax(O_matrix, axis=1)
            
            _, n_order_points, _  = S_matrix.shape
            
            t_m = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            x_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[1],[0],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            y_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[1],[0],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            l_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[1],[0]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            o_m = cp.reshape(cp.dot(S_matrix, cp.array([[[0],[0],[0],[0],[1]]]*n_order_points)),
                             (population_size, n_order_points, n_order_points))
            t_m_l = cp.reshape(cp.dot(S_matrix, cp.array([[[1],[0],[0],[0],[0]]])),
                               (population_size, n_order_points))

            t_m_diff = t_m - cp.transpose(t_m, (0, 2, 1))
            x_m_diff = x_m - cp.transpose(x_m, (0, 2, 1))
            y_m_diff = y_m - cp.transpose(y_m, (0, 2, 1))

            m_xy_diff = cp.absolute(x_m_diff) + cp.absolute(y_m_diff)

            m_diff = cp.absolute(t_m_diff) + m_xy_diff
            
            m_diff_l = m_diff - l_m * 2
            
            # True where m_diff_l < 0 (cp.sign(x) + 1 is zero only for negatives)
            m_diff_l_sign = (cp.logical_xor(cp.sign(m_diff_l) + 1, True))

            m_diff_l_eff = cp.multiply(m_diff, m_diff_l_sign)

            m_diff_l_sign = cp.sign(m_diff_l_eff)

            m_diff_l_H = cp.multiply(cp.multiply(cp.reciprocal(m_diff_l_eff + m_diff_l_sign - 1), m_diff_l_sign),
                                     cp.log10(m_diff_l_eff + cp.absolute(m_diff_l_sign - 1)))
            
            d_m = cp.reciprocal(cp.sum(m_diff_l_H,
                                       (1,2)))

            # Occupancy test
            """
            t_m_o = t_m + o_m - 1
            m_diff_o = cp.absolute(t_m_o - cp.transpose(t_m_o, (0, 2, 1))) - o_m - 1
            m_occupancy = (cp.logical_xor(cp.sign(m_diff_o) + 1, True))
            
            m_idn = cp.identity(n_order_points)
            OT = cp.prod(cp.logical_or(m_xy_diff,
                                       cp.logical_not(m_occupancy - m_idn)),
                         (1,2))
            """
            
            G1 = max_order_matrix/max_ITC_matrix
            G2 = TO_matrix/TC_matrix
            BU = min_ITC_matrix/max_ITC_matrix
            CI = cp.multiply(d_m, BU) # d_m * 0.1
            
            E_matrix = G1 + G2 + BU + CI
            
            cp.cuda.Stream.null.synchronize()

            return (list(E_matrix), (list(max_ITC_matrix), list(TC_matrix), list(BU), list(CI)))

        # Non-parallelized path
        else:
            print("[Scheduling] Must use GPU to calculate")
            
            for each_AGV_ID in _new_path.keys():
                each_AGV_len_schedule = 0
                each_AGV_num_orders = 0

                if length_only:
                    each_AGV_len_schedule, each_num_order, each_order_list = _new_path[each_AGV_ID]
                    each_AGV_num_orders = each_num_order
                else:
                    each_path = _new_path[each_AGV_ID]
                    for each_pos_path in each_path:
                        if len(each_pos_path) == 3:
                            each_AGV_num_orders += 1
                        each_AGV_len_schedule += 1
                    
                cost = each_AGV_len_schedule + each_AGV_num_orders
                ITC[each_AGV_ID] = cost
                
                if each_AGV_num_orders > max_order:
                    max_order = each_AGV_num_orders
                total_order += each_AGV_num_orders
        
            for _, each_value in ITC.items():
                
                if each_value > max_ITC:
                    max_ITC = each_value
                if each_value < min_ITC:
                    min_ITC = each_value
                total_cost += each_value

            TT = max_ITC
            TTC = total_cost
            BU = min_ITC / max_ITC
            CI = 0
            
            G1 = max_order/TT
            G2 = total_order/TTC
            
            value = G1 + G2 + BU + CI
            return (value, (TT, TTC, BU, CI))
Example #19
import cupy as cp
from skimage import color, io, transform

# np_img: an RGB input image loaded earlier (e.g. with io.imread)
np_img = transform.resize(np_img, (4096, 4096))  # resize to 4096x4096
np_img = color.rgb2gray(np_img)  # convert to grayscale
np_img = np_img.astype('f')

io.imshow(np_img)  # display
io.show()

# Fourier transform
cp_img = cp.asarray(np_img)  # numpy array -> cupy array
cp_fimg = cp.fft.fft2(cp_img)  # [Fourier transform]
cp_fimg = cp.fft.fftshift(cp_fimg)  # shift with fftshift

# Display as a power spectrum
cp_fabs = cp.absolute(cp_fimg)  # take the absolute value
cp_fabs[cp_fabs < 1] = 1  # clamp values below 1 to 1 for scaling
cp_fpow = cp.log10(cp_fabs)  # take log10 to get the power spectrum

np_fpow = cp.asnumpy(cp_fpow)  # cupy array -> numpy array

io.imshow(np_fpow)  # display
io.show()

# Inverse Fourier transform
cp_ffimg = cp.fft.ifftshift(cp_fimg)  # undo the shift
cp_ffimg = cp.fft.ifft2(cp_ffimg)  # [inverse Fourier transform]
cp_ffimg = cp.absolute(cp_ffimg)  # take the absolute value to drop the imaginary part
np_ffimg = cp.asnumpy(cp_ffimg)  # cupy array -> numpy array

io.imshow(np_ffimg)  # display (np_ffimg is already a NumPy array)
io.show()
Example #20
# Loop over all anisotropic Henyey-Greenstein parameters.
for hg in g:
    momentum_transfer = momentum_i * cp.ones_like(tau_atm)

    for atm_i, atm in enumerate(tau_atm):

        tau = cp.zeros(N_photons)
        mu = cp.zeros_like(tau)

        momentum_transfer = WalkLikeAPhoton(tau, mu, hg, atm,
                                            momentum_transfer)
    #     if flag:
    #         break
    # if flag:
    #         break
    plt.plot(cp.asnumpy(cp.log10(tau_atm)),
             cp.asnumpy(momentum_transfer) / N_photons,
             label=hg)

b = time.time()

print('time taken: ' + str(b - a))

plt.figure(1)
plt.xlabel(r'$log_{10}(\tau_{atm})$')
plt.ylabel('Momentum transferred per photon')
plt.title('CuPy with reflecting boundary')
plt.legend(title="anisotropic weighting")
plt.savefig('cupy_test.png', dpi=200)

plt.figure(2)