Example 1
def function_wrapper(x, b, axis=0, **kwargs):
    # add the padding to the array
    xsize = x.shape[axis]
    if 'pad' in kwargs and kwargs['pad']:
        npad = b.shape[axis] // 2
        padd = cp.take(x, cp.arange(npad), axis=axis) * 0
        if kwargs['pad'] == 'zeros':
            x = cp.concatenate((padd, x, padd), axis=axis)
        elif kwargs['pad'] == 'constant':
            x = cp.concatenate((padd + cp.mean(x[:npad]), x,
                                padd + cp.mean(x[-npad:])),
                               axis=axis)
        elif kwargs['pad'] == 'flip':
            pad_in = cp.flip(cp.take(x, cp.arange(1, npad + 1), axis=axis),
                             axis=axis)
            pad_out = cp.flip(cp.take(x,
                                      cp.arange(xsize - npad - 1,
                                                xsize - 1),
                                      axis=axis),
                              axis=axis)
            x = cp.concatenate((pad_in, x, pad_out), axis=axis)
    # run the convolution
    y = fcn_convolve(x, b, **kwargs)
    # remove the padding from both arrays (is it necessary for x?)
    if 'pad' in kwargs and kwargs['pad']:
        y = cp.take(y, cp.arange(npad, x.shape[axis] - npad), axis=axis)
        x = cp.take(x, cp.arange(npad, x.shape[axis] - npad), axis=axis)
        assert xsize == x.shape[axis]
        assert xsize == y.shape[axis]
    return y
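A quick look at what the three pad modes build, on a toy vector. This is a minimal sketch assuming a CUDA device; fcn_convolve is not shown in the example above, so only the padding logic is reproduced here:

import cupy as cp

x = cp.asarray([1., 2., 3., 4., 5.])
npad = 2
padd = cp.take(x, cp.arange(npad), axis=0) * 0            # zeros of the right shape
zeros_pad = cp.concatenate((padd, x, padd))               # [0 0 1 2 3 4 5 0 0]
const_pad = cp.concatenate((padd + cp.mean(x[:npad]), x,
                            padd + cp.mean(x[-npad:])))   # edge means 1.5 and 4.5
flip_pad = cp.concatenate((cp.flip(x[1:npad + 1], axis=0), x,
                           cp.flip(x[-npad - 1:-1], axis=0)))  # [3 2 1 2 3 4 5 4 3]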
Example 2
    def __get_streaked_spectra(self, streakspeed):
        if self.is_low_res:
            return None
        else:
            from cupy.fft import fft

            def fs_in_au(t):
                return 41.3414 * t  # from fs to a.u.

            # def eV_in_au(e): return 0.271106*np.sqrt(e)  # from eV to a.u.

            # in V/m; the shape of the vector potential sets the scale: 232000 V/m = 1 meV/fs max streak speed
            E0 = 232000 * streakspeed
            ff1 = cp.flip(
                fft(self.__temp * cp.exp(-1j * fs_in_au(self.__tAxis) *
                                         (1 / 2 *
                                          (self.p0 * E0 * p_times_A_vals_up +
                                           1 * E0**2 * A_square_vals_up)))))
            ff2 = cp.flip(
                fft(self.__temp * cp.exp(-1j * fs_in_au(self.__tAxis) *
                                         (1 / 2 *
                                          (self.p0 * E0 * p_times_A_vals_down +
                                           1 * E0**2 * A_square_vals_down)))))

            spectrum1 = cp.square(cp.abs(ff1))
            spectrum2 = cp.square(cp.abs(ff2))

            #         ff1=ff1/(cp.sum(cp.square(cp.abs(ff1))))
            #         ff1=ff2/(cp.sum(cp.square(cp.abs(ff2))))

            return spectrum1, spectrum2
Example 3
    def backward(self, w, grad):

        # for the derivation of this part, see cnn_bp.md under /resource
        if w != -1:
            grad = grad @ w

        self.dz = grad * self.activation.backward()
        self.dw = np.tensordot(
            self.dz, self.input_split, axes=[(0, 2, 3), (0, 2, 3)]) / self.m
        self.db = np.mean(self.dz, axis=(0, 2, 3))

        pad_diff = 2 * (self.shape[2] - self.dz.shape[2]) * self.strides
        self.padding_layer_bp = ZeroPadding2d(pad_diff // 2, pad_diff // 2)
        self.dz = self.padding_layer_bp.forward(self.dz)
        self.dz = self.split_by_strides(self.dz,
                                        kh=self.kernel_size[0],
                                        kw=self.kernel_size[1],
                                        s=self.strides)
        # rotate the kernel window by 180 degrees: flip both window axes
        # (flipping axis 4 twice would be a no-op)
        self.dz = np.flip(self.dz, axis=4)
        self.dz = np.flip(self.dz, axis=5)
        # showing off a little here: this is the same as the tensordot used
        # for dw, but cupy does not support einsum
        # grad = np.einsum('mcab,nmwhab->ncwh', self.w, self.dz)
        grad = np.tensordot(self.w, self.dz,
                            axes=[(0, 2, 3),
                                  (1, 4, 5)]).transpose([1, 0, 2, 3])
        if self.padding_layer is not None:
            return self.padding_layer.backward(w=-1, grad=grad)
        return -1, grad
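The commented-out einsum and the tensordot call above are meant to compute the same contraction; the comment reflects an older CuPy (recent releases do provide cupy.einsum). A quick self-check in plain NumPy, with hypothetical stand-in shapes for w (m, c, kh, kw) and dz (n, m, w, h, kh, kw):

import numpy as np

m, c, kh, kw = 4, 3, 2, 2
n, wo, ho = 5, 6, 6
w = np.random.randn(m, c, kh, kw)
dz = np.random.randn(n, m, wo, ho, kh, kw)

ref = np.einsum('mcab,nmwhab->ncwh', w, dz)
out = np.tensordot(w, dz, axes=[(0, 2, 3), (1, 4, 5)]).transpose([1, 0, 2, 3])
assert np.allclose(ref, out)  # (n, c, w, h) either way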
Example 4
def reverse(tensor, axis):
    """Reverses specific dimensions of a tensor.

    Args:
        tensor (tensor): tensor to reverse
        axis (int or tuple of ints): axis or axes to reverse
    """
    if isinstance(axis, int):
        return cp.flip(tensor, axis)
    else:
        for ax in axis:
            tensor = cp.flip(tensor, axis=ax)
        return tensor
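Recent CuPy versions mirror NumPy in accepting a tuple of axes (or None) directly in cp.flip, which this wrapper predates. A minimal usage sketch, assuming a CUDA device:

import cupy as cp

a = cp.arange(24).reshape(2, 3, 4)
out = reverse(a, (0, 2))  # same as flipping axis 0, then axis 2
assert bool((out == cp.flip(cp.flip(a, axis=0), axis=2)).all())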
Example 5
def compute_all_results(experiment):
    global seq_uid_map, frame_no_map, seq_uid_count_map
    cuda_embeddings = cp.asarray(experiment.embeddings)
    embeddings = experiment.embeddings
    chunk_size = 100
    results = []
    for i in tqdm(range(0, int(len(embeddings) / chunk_size) + 1)):
        start = i * chunk_size
        end = min(start + chunk_size, len(embeddings))
        if start >= end:
            break
        chunk_similarity = cp.dot(cuda_embeddings[start:end],
                                  cuda_embeddings.T)
        # zero out each embedding's similarity with itself; use the actual
        # chunk length so the last, possibly shorter, chunk indexes correctly
        rows = cp.arange(end - start)
        chunk_similarity[rows, rows + start] = 0
        chunk_matches = cp.argsort(chunk_similarity, axis=1)
        chunk_matches = cp.flip(chunk_matches, axis=1)
        chunk_matches = chunk_matches.get()
        chunk_results = get_results_for_chunk(chunk_matches, start, end)
        results.append(chunk_results)
    compiled_results = {}
    for result in results:
        compiled_results.update(result)
    df = pd.DataFrame.from_dict(compiled_results).T
    df = df.rename(columns={0: "reid", 1: "jitter", 2: "AUC", 3: "seq_uid"})
    return df
Example 6
def gpu_rot4D_flat(data4Df, rotangle, flip=True, return_numpy=False):
    warnings.filterwarnings('ignore')
    data4Df = cp.asarray(data4Df, dtype=data4Df.dtype)
    if flip:
        data4Df = cp.flip(data4Df, axis=-1)
    data4Df = csnd.rotate(data4Df, rotangle, axes=(1, 2), reshape=False)
    if return_numpy:
        data4Df = cp.asnumpy(data4Df)
    return data4Df
Example 7
def match_fuzzy(q, tms, tmt, opt):
    tms_score = cdist(q, tms)
    if not opt.include_perfect_match:
        tms_score[tms_score > 0.99] = -float('inf')

    tmt_score = cdist(q, tmt)
    if not opt.include_perfect_match:
        tmt_score[tms_score == -float('inf')] = -float('inf')
        tmt_score[tmt_score > 0.99] = -float('inf')

    tms_top_v, tms_top_i = topk(tms_score, opt.topk)
    tmt_top_v, tmt_top_i = topk(tmt_score, opt.topk)

    tms_top_i += opt.shard_i * opt.shard_max_len
    tmt_top_i += opt.shard_i * opt.shard_max_len

    top_v = cp.hstack([tms_top_v, tmt_top_v])
    top_i = cp.hstack([tms_top_i, tmt_top_i])

    arg_i = cp.flip(top_v.astype('float32').argsort(axis=1), axis=1)
    top_v = top_v[cp.arange(top_v.shape[0]).reshape(-1, 1),
                  arg_i][:, :opt.topk]
    top_i = top_i[cp.arange(top_i.shape[0]).reshape(-1, 1),
                  arg_i][:, :opt.topk]

    return top_v, top_i
Example 8
def topk(array, k):
    assert array.ndim == 2
    top_i = array.astype('float32').argpartition(
        -k
    )[:, -k:]  # cupy.argpartition does a full sort internally, so this slice is already ordered
    top_i = cp.flip(top_i, axis=1)
    top_v = cp.vstack([array[r, top_i[r]] for r in range(array.shape[0])])
    return top_v, top_i
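A small sketch of what topk returns: per-row values in descending order plus their column indices. Note that the final flip is only valid because of the full-sort behaviour mentioned in the comment above; with a true partial partition the top-k slice would be unordered. Assuming a CUDA device:

import cupy as cp

scores = cp.asarray([[0.1, 0.9, 0.4, 0.7],
                     [0.8, 0.2, 0.6, 0.3]])
vals, idx = topk(scores, 2)
# vals -> [[0.9, 0.7], [0.8, 0.6]], idx -> [[1, 3], [0, 2]]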
Example 9
def mass2_gpu(ts, query):
    """
    Compute the distance profile for the given query over the given time
    series. This requires cupy to be installed.

    Parameters
    ----------
    ts : array_like
        The array to create a rolling window on.
    query : array_like
        The query.

    Returns
    -------
    An array of distances.

    Raises
    ------
    ValueError
        If ts is not a list or np.array.
        If query is not a list or np.array.
        If ts or query is not one dimensional.
    """
    def moving_mean_std_gpu(a, w):
        s = cp.concatenate([cp.array([0]), cp.cumsum(a)])
        sSq = cp.concatenate([cp.array([0]), cp.cumsum(a**2)])
        segSum = s[w:] - s[:-w]
        segSumSq = sSq[w:] - sSq[:-w]

        movmean = segSum / w
        movstd = cp.sqrt(segSumSq / w - (segSum / w)**2)

        return (movmean, movstd)

    x = cp.asarray(ts)
    y = cp.asarray(query)
    n = x.size
    m = y.size

    meany = cp.mean(y)
    sigmay = cp.std(y)

    meanx, sigmax = moving_mean_std_gpu(x, m)
    meanx = cp.concatenate([cp.ones(n - meanx.size), meanx])
    sigmax = cp.concatenate([cp.zeros(n - sigmax.size), sigmax])

    y = cp.concatenate((cp.flip(y, axis=0), cp.zeros(n - m)))

    X = cp.fft.fft(x)
    Y = cp.fft.fft(y)
    Z = X * Y
    z = cp.fft.ifft(Z)

    dist = 2 * (m - (z[m - 1:n] - m * meanx[m - 1:n] * meany) /
                (sigmax[m - 1:n] * sigmay))
    dist = cp.sqrt(dist)

    return cp.asnumpy(dist)
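A minimal usage sketch with synthetic data (hypothetical sizes; a CUDA device is assumed). The profile has one entry per window position, and the smallest distance should land near the planted match:

import numpy as np

ts = np.random.randn(10000)
query = ts[1200:1300]
profile = mass2_gpu(ts, query)      # length len(ts) - len(query) + 1
best = int(np.nanargmin(profile))   # nanargmin guards against tiny negative roundoff under the sqrt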
Example 10
def acc_gate(t, ext):
    s = []
    for i in range(num_gates):
        s.append(F.sum(t[i]).data)
    # evaluate on a return basis, using the odds
    v = cp.argsort(cp.array(s, dtype=cp.float32))
    v = cp.flip(v, axis=0)
    tansho = 0
    if v[0] == 0:
        tansho = ext[0][0]  # win bet
    fukusho1 = 0
    if v[0] == 0:
        fukusho1 = ext[0][1]  # place bet (buying 1 ticket)
    if v[0] == 1:
        fukusho1 = ext[0][2]  # place bet (buying 1 ticket)
    if v[0] == 2:
        fukusho1 = ext[0][3]  # place bet (buying 1 ticket)
    fukusho2 = 0
    if v[0] == 0 or v[1] == 0:
        fukusho2 += ext[0][1]  # place bet (buying 2 tickets)
    if v[0] == 1 or v[1] == 1:
        fukusho2 += ext[0][2]  # place bet (buying 2 tickets)
    fukusho2 = fukusho2 / 2
    fukusho3 = 0
    if v[0] == 0 or v[1] == 0 or v[2] == 0:
        fukusho3 += ext[0][1]  # place bet (buying 3 tickets)
    if v[0] == 1 or v[1] == 1 or v[2] == 1:
        fukusho3 += ext[0][2]  # place bet (buying 3 tickets)
    if v[0] == 2 or v[1] == 2 or v[2] == 2:
        fukusho3 += ext[0][3]  # place bet (buying 3 tickets)
    fukusho3 = fukusho3 / 3
    umaren = 0
    if v[0] == 0 and v[1] == 1:
        umaren = ext[0][5]
    elif v[0] == 1 and v[1] == 0:
        umaren = ext[0][5]  # quinella
    wide = 0
    if (v[0] == 0 and v[1] == 1) or (v[0] == 1 and v[1] == 0):
        wide = ext[0][6]  # quinella place (wide)
    elif (v[0] == 0 and v[1] == 2) or (v[0] == 2 and v[1] == 0):
        wide = ext[0][7]  # quinella place (wide)
    elif (v[0] == 1 and v[1] == 2) or (v[0] == 2 and v[1] == 1):
        wide = ext[0][8]  # quinella place (wide)
    umatan = 0
    if v[0] == 0 and v[1] == 1:
        umatan = ext[0][9]  # exacta
    triren = 0
    if v[0] <= 2 and v[1] <= 2 and v[2] <= 2:
        triren = ext[0][10]  # trio
    tritan = 0
    if v[0] == 0 and v[1] == 1 and v[2] == 2:
        tritan = ext[0][11]  # trifecta
    return (tansho, fukusho1, fukusho2, fukusho3, umaren, wide, umatan,
            triren, tritan)
Example 11
def get_image_init_positions(image, shape: Tuple[int], n: int, flip=False):
    init_image = IMG.open(image).convert("L")
    init_image = init_image.resize(tuple(np.flip(shape)))
    init_image = np.array(init_image) / 255
    if flip:
        init_image = 1 - init_image
    linear_idx = np.random.choice(init_image.size,
                                  size=n,
                                  p=init_image.ravel() /
                                  float(init_image.sum()))
    x, y = np.unravel_index(linear_idx, shape)
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return np.hstack([x, y])
Example 12
def gpu_rot4D(data4D, rotangle, flip=True, return_numpy=False):
    warnings.filterwarnings('ignore')
    data4D = cp.asarray(data4D, dtype=data4D.dtype)
    if flip:
        data4D = cp.flip(data4D, axis=-1)
    data_shape = data4D.shape
    data4D = csnd.rotate(data4D.reshape(-1, data_shape[-2], data_shape[-1]),
                         rotangle,
                         axes=(1, 2),
                         reshape=False)
    data4D = cp.reshape(data4D, data_shape)
    if return_numpy:
        data4D = cp.asnumpy(data4D)
    return data4D
Example 13
def __init__(self):
    self.bias = config.fargs['bias']
    all_patches, idxs = utils.grab_patches(
        X_train,
        patch_size=config.fargs['patch_size'],
        max_threads=16,
        tot_patches=100000)
    all_patches = utils.normalize_patches(all_patches, zca_bias=1e-3)
    filters = all_patches[np.random.choice(all_patches.shape[0],
                                           config.fargs['num_filters'],
                                           replace=False)].astype(np.float32)
    self.filters = cp.asarray(filters)
    if config.flip:
        self.filters = cp.concatenate(
            (self.filters, cp.flip(self.filters, 3)), axis=0)
Example 14
    with mrcfile.open(obs_projection, permissive=True) as mrc:
        cp_obs_projection = cp.asarray(mrc.data, dtype="float32")

    with mrcfile.open(obs_projection_150_temp, permissive=True) as mrc:
        cp_obs_projection_150_temp = cp.asarray(mrc.data, dtype="float32")

    if (index_rot[n] == 0):
        cp_obs_projection = cp_obs_projection
    elif (index_rot[n] == 1):
        cp_obs_projection = cp.rot90(cp_obs_projection, axes=(1, 2))
        cp_obs_projection = cp.rot90(cp_obs_projection, axes=(1, 2))
    elif (index_rot[n] == 2):
        cp_obs_projection = cp.flip(cp_obs_projection, axis=1)
    elif (index_rot[n] == 3):
        cp_obs_projection = cp.rot90(cp_obs_projection, axes=(1, 2))
        cp_obs_projection = cp.rot90(cp_obs_projection, axes=(1, 2))
        cp_obs_projection = cp.flip(cp_obs_projection, axis=1)

    if (index_rot[n] == 0):
        cp_obs_projection_150_temp = cp_obs_projection_150_temp
    elif (index_rot[n] == 1):
        cp_obs_projection_150_temp = cp.rot90(cp_obs_projection_150_temp,
                                              axes=(1, 2))
        cp_obs_projection_150_temp = cp.rot90(cp_obs_projection_150_temp,
                                              axes=(1, 2))
    elif (index_rot[n] == 2):
        cp_obs_projection_150_temp = cp.flip(cp_obs_projection_150_temp,
                                             axis=1)
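The four index_rot cases enumerate the orientations tried during matching: identity, 180-degree rotation, mirror, and mirrored 180-degree rotation. A more compact, hypothetical helper expressing the same mapping (apply_orientation is not part of the original code):

import cupy as cp

def apply_orientation(vol, code):
    # code: 0 = identity, 1 = rotate 180 deg, 2 = flip, 3 = rotate 180 deg then flip
    if code in (1, 3):
        vol = cp.rot90(vol, 2, axes=(1, 2))  # two 90-degree turns
    if code in (2, 3):
        vol = cp.flip(vol, axis=1)
    return vol

# e.g. cp_obs_projection = apply_orientation(cp_obs_projection, int(index_rot[n]))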
Example 15
def precision_recall_curve(
        y_true, probs_pred) -> typing.Tuple[CumlArray, CumlArray, CumlArray]:
    """
    Compute precision-recall pairs for different probability thresholds

    .. note:: this implementation is restricted to the binary classification
        task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the
        number of true positives and ``fp`` the number of false positives. The
        precision is intuitively the ability of the classifier not to label as
        positive a sample that is negative.

        The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
        of true positives and ``fn`` the number of false negatives. The recall
        is intuitively the ability of the classifier to find all the positive
        samples. The last precision and recall values are 1. and 0.
        respectively and do not have a corresponding threshold. This ensures
        that the graph starts on the y axis.

        Read more in the scikit-learn's `User Guide
        <https://scikit-learn.org/stable/modules/model_evaluation.html#precision-recall-f-measure-metrics>`_.


    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels, {0, 1}.
    probs_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probs_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> from cuml.metrics import precision_recall_curve
        >>> y_true = cp.array([0, 0, 1, 1])
        >>> y_scores = cp.array([0.1, 0.4, 0.35, 0.8])
        >>> precision, recall, thresholds = precision_recall_curve(
        ...     y_true, y_scores)
        >>> print(precision)
        [0.666... 0.5  1.  1. ]
        >>> print(recall)
        [1. 0.5 0.5 0. ]
        >>> print(thresholds)
        [0.35 0.4 0.8 ]

    """
    y_true, n_rows, n_cols, ytype = \
        input_to_cupy_array(y_true, check_dtype=[np.int32, np.int64,
                                                 np.float32, np.float64])

    y_score, _, _, _ = \
        input_to_cupy_array(probs_pred, check_dtype=[np.int32, np.int64,
                            np.float32, np.float64],
                            check_rows=n_rows, check_cols=n_cols)

    if not cp.any(y_true):
        raise ValueError("precision_recall_curve cannot be used when "
                         "y_true is all zero.")

    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    precision = cp.flip(tps / (tps + fps), axis=0)
    recall = cp.flip(tps / tps[-1], axis=0)
    n = (recall == 1).sum()

    if n > 1:
        precision = precision[n - 1:]
        recall = recall[n - 1:]
        thresholds = thresholds[n - 1:]
    precision = cp.concatenate([precision, cp.ones(1)])
    recall = cp.concatenate([recall, cp.zeros(1)])

    return precision, recall, thresholds
Example 16
def generate_spectrograms(
        NSAMP=1000,                     # Number of created spectrograms per class (vowel, non-vowel)
        N=512,                          # Window width in samples
        fbefore=250,                    # Number of windows before the center window
        fafter=250,                     # Number of windows after the center window
        ds=2,                           # Step between the windows in samples
        windows=None,                   # Matrix of window functions, 3 rows, N columns.
        dynRange=60,                    # Dynamic range
        signalLevel=10,                 # SNR
        sourceDir="",                   # TIMIT dataset audio folder
        sourceDirNoise="",              # Noise dataset audio folder
        targetDir="",                   # Output directory
        noiseType="MAVD",               # Type of the noise used {"MAVD", "ESC50"}
        batchSize=50,                   # Batch size when computing fft and creating spectrograms
        debug=False

):
    start_time = timer()
    if windows is None:
        windows = cp.ones(shape=(3, N))
    else:
        windows = cp.array(windows)

    eps = 1e-10
    phones = []
    wavfiles = []

    minstart = N // 2 + fbefore * ds

    for path, subdirs, files in os.walk(sourceDir):
        for name in files:
            basename, ext = os.path.splitext(name)
            fname = os.path.join(path, name)
            if ext != ".PHN":
                continue
            with open(fname, newline='') as phnfile:
                rdr = list(csv.reader(phnfile, delimiter=' '))
                wname = os.path.join(path, "{0}.WAV".format(basename))
                file_phones = []
                last_sampl = int(rdr[-1][1])
                maxstart = last_sampl - ds * fafter - N // 2 + 1
                for row in rdr:
                    start = int(row[0])
                    end = int(row[1])
                    if start < minstart:
                        continue
                    if end > maxstart:
                        continue
                    data = (wname, start, end, row[2].strip())
                    file_phones.append(data)

                phones += file_phones
                wavfiles.append(wname)

    print("Total {0} phones in {1} files".format(len(phones), len(wavfiles)))

    vowels = ["iy", "ih", "eh", "ey", "ae", "aa", "aw", "ay", "ah",
              "ao", "oy", "ow", "uh", "uw", "ux", "er", "ax", "ix", "axr",
              "ax-h"]

    # do not take samples from the very edges
    margin = int((fbefore + fafter) * ds * 0.1)
    phn_len = lambda x: max(x[2] - x[1] - margin, 1)

    class PhonePos:
        def __init__(self, phone, kind):
            self.file = phone[0]
            self.start = phone[1]
            self.end = phone[2]
            self.phone = phone[3]
            self.kind = kind

    def random_pos(kind="vowel"):
        if kind == "vowel":
            subset_iter = (filter(lambda x: x[3] in vowels, phones))
        else:
            subset_iter = (filter(lambda x: x[3] not in vowels, phones))
        subset_phones = list(subset_iter)
        subset_cs = np.cumsum(np.array(list(map(phn_len, subset_phones))))
        subset_max = subset_cs[-1]
        print(kind + " phonemes combined length: " + str(subset_max))

        subset_pos0 = np.sort(np.random.choice(subset_max, size=NSAMP, replace=False))
        subset_idx = np.searchsorted(subset_cs, subset_pos0, side='right')

        subset_pos = []
        for i in range(len(subset_idx)):
            j = subset_idx[i]
            assert (j >= 0)
            assert (j < len(subset_cs))
            phone = subset_phones[j]
            pp = PhonePos(phone, kind)

            # position within file
            file_pos = subset_pos0[i] + phone[1] + margin // 2

            if j > 0:
                file_pos -= subset_cs[j - 1]

            assert (file_pos <= phone[2] - margin // 2)
            assert (file_pos >= phone[1] + margin // 2)
            pp.file_pos = file_pos

            subset_pos.append(pp)

        # returns a list of PhonePos objects
        return subset_pos

    def list_noise_files(sound_ext=".wav"):
        noise_path_list = []
        for path, subdirs, files in os.walk(sourceDirNoise):
            for name in files:
                # print("Processing folder: {}".format(path))
                basename, ext = os.path.splitext(name)
                if ext != sound_ext:
                    continue
                fname = os.path.join(path, name)
                noise_path_list.append(fname)

        print(str(len(noise_path_list)) + " noise files found")
        return noise_path_list

    def load_noises(ns_pl, tar_rate):
        print("Loading noise")
        noises = []
        for ni, nois_p in enumerate(ns_pl):
            print("Loading noise " + str(int(ni / len(ns_pl) * 100)) + "%", end="\r")
            nois, rate = librosa.load(nois_p, sr=None, mono=True)
            nois_rs = librosa.resample(nois, rate, tar_rate)
            noises.append(nois_rs)

        return np.concatenate(noises)

    def tile_noise(noise, tar_len):
        multiple = tar_len / len(noise)
        repeat_c = int(np.floor(multiple))
        rest = tar_len - repeat_c * len(noise)
        return np.concatenate([np.repeat(noise, repeat_c), noise[:rest]], 0)

    def combine_with_noise(sig, nois, snr):
        # snr = 10*log10(sum(s**2)/sum(n**2))
        if len(nois) != len(sig):
            nois = tile_noise(nois, len(sig))

        E_sig = sum(sig ** 2)
        E_nois = sum(nois ** 2)
        if E_nois == 0:
            print("Warning: zero energy noise")
            return sig

        if type(snr) == list:
            snr = np.random.uniform(snr[0], snr[1])

        coef = 10 ** ((snr - 10 * np.log10(E_sig / E_nois)) / (-20))
        return (sig + coef * nois) / (1 + coef)

    vp = random_pos("vowel")
    up = random_pos("nonvowel")

    v_tarDir = os.path.join(targetDir, "vowel")
    u_tarDir = os.path.join(targetDir, "nonvowel")

    if not os.path.exists(v_tarDir):
        os.makedirs(v_tarDir)
    if not os.path.exists(u_tarDir):
        os.makedirs(u_tarDir)

    # timit sample rate is 16 kHz
    t_rate = 16000

    if signalLevel is not None:
        if noiseType == "MAVD":
            noise_files = list_noise_files(".flac")
            noises = load_noises(noise_files, t_rate)
        elif noiseType == "ESC50":
            nf = list_noise_files()

    complete_count = 0

    for wavFile in wavfiles:
        curList = []
        while len(vp) > 0 and vp[0].file == wavFile:
            pp = vp.pop(0)
            curList.append(pp)

        while len(up) > 0 and up[0].file == wavFile:
            pp = up.pop(0)
            curList.append(pp)

        if len(curList) == 0:
            continue

        complete_count += len(curList)
        print("Creating spectrograms " + str(int(complete_count / (2 * NSAMP) * 100)) + "%", end="\r")

        with open(wavFile, "rb") as fp:
            fp.read(1024)  # need to jump over 1024 bytes
            wa = fp.read()
            snd = np.frombuffer(wa, dtype=np.int16)

        snd = snd.astype(np.float32) / 2 ** 15

        if signalLevel is not None:
            if noiseType == "MAVD":
                noise_start = np.random.randint(0, len(noises) - len(snd))
                nois = noises[noise_start:noise_start + len(snd)]

            elif noiseType == "ESC50":
                noise_ind = np.random.randint(len(nf))
                nois, nois_rate = librosa.load(nf[noise_ind], sr=None, mono=False)
                nois = librosa.resample(nois, nois_rate, t_rate, res_type='kaiser_fast')


            snd = combine_with_noise(snd, nois, signalLevel)

        ftotal = 1 + fbefore + fafter
        dat = np.zeros(shape=(len(curList), 3, ftotal, N), dtype=np.float32)

        # naming components, needed both for the debug dump and the output images
        fname = os.path.splitext(os.path.basename(wavFile))[0]
        reg, speaker = os.path.split(os.path.split(wavFile)[0])
        reg = os.path.split(reg)[1]

        if debug:
            sound_fn = os.path.join(targetDir, reg + "_" + speaker + "_" + fname + ".wav")
            sf.write(sound_fn, snd, samplerate=t_rate)

        for i, pp in enumerate(curList):
            p = pp.file_pos
            start = p - ds * fbefore - N // 2
            end = p + ds * fafter + N // 2
            stride_bytes = snd.strides[0]
            matrix = np.lib.stride_tricks.as_strided(snd[start:end],
                                                     shape=(ftotal, N),
                                                     strides=(stride_bytes * ds, stride_bytes),
                                                     writeable=False)
            dat[i, :, :, :] = matrix

        for bstart in range(0, dat.shape[0], batchSize):
            dat_batch = cp.transpose(cp.array(dat[bstart:bstart + batchSize]), (0, 2, 1, 3))
            spect_abs = cp.abs(cp.fft.rfft(cp.multiply(dat_batch, windows), axis=3))
            spect_abs[spect_abs == 0] = eps
            spectra = 20 * cp.log10(spect_abs)
            maxs = cp.max(spectra, axis=(1, 2, 3), keepdims=True)
            mins = cp.max(cp.concatenate((maxs - dynRange, cp.min(spectra, axis=(1, 2, 3), keepdims=True)), 3), 3,
                          keepdims=True)
            M_sp = spectra > mins
            spectra[~M_sp] = 0
            spectra[M_sp] = (((spectra - mins) / (maxs - mins)) * 255)[M_sp]

            # uint8 rather than (signed) byte: the values span 0..255
            spec_tr = cp.flip(cp.transpose(spectra, (0, 3, 1, 2)),
                              1).astype(cp.uint8)

            for i, pp in enumerate(curList[bstart:bstart + batchSize]):
                pos = pp.file_pos
                img_name = reg + "_" + speaker + "_" + fname + "_" + str(pos) + "_" + str(pp.phone) + ".png"
                if pp.kind == "vowel":
                    img_path = os.path.join(v_tarDir, img_name)
                else:
                    img_path = os.path.join(u_tarDir, img_name)

                img = Image.fromarray(spec_tr[i].get(), mode="RGB")
                img.save(img_path)

    print("Finished in: {}s".format(timer() - start_time))
                                                          x - i_shift,
                                                          axis=1)
                ave_sub_density_stack_shift = cp.roll(
                    ave_sub_density_stack_shift, y - i_shift, axis=2)
                ave_sub_density_stack_rot_shift = cp.roll(
                    ave_sub_density_stack_rot_shift, y - i_shift, axis=2)

            AB_presum = ave_sub_density_stack_shift[:, :, :] * ave_sub_calc_dens[:, :, :]
            AB = cp.sum(AB_presum, axis=(1, 2))
            correlation[0, :, x, y] = AB / C

            AB_presum = ave_sub_density_stack_rot_shift[:, :, :] * ave_sub_calc_dens[:, :, :]
            AB = cp.sum(AB_presum, axis=(1, 2))
            correlation[1, :, x, y] = AB / C

            ave_sub_density_stack_shift = cp.flip(ave_sub_density_stack_shift,
                                                  axis=1)
            ave_sub_density_stack_rot_shift = cp.flip(
                ave_sub_density_stack_rot_shift, axis=1)

            AB_presum = ave_sub_density_stack_shift[:, :, :] * ave_sub_calc_dens[:, :, :]
            AB = cp.sum(AB_presum, axis=(1, 2))
            correlation[2, :, x, y] = AB / C

            AB_presum = ave_sub_density_stack_rot_shift[:, :, :] * ave_sub_calc_dens[:, :, :]
            AB = cp.sum(AB_presum, axis=(1, 2))
            correlation[3, :, x, y] = AB / C

    # cp.argmax does not accept a tuple of axes; flatten (rot, x, y) for each
    # stack entry so a single argmax picks orientation and shift together
    corr_flat = cp.transpose(correlation, (1, 0, 2, 3)).reshape(
        correlation.shape[1], -1)
    index = cp.argmax(corr_flat, axis=1)

    index_rot = index / (2 * i_shift * 2 * i_shift)
    index_rot = index_rot.astype(cp.int32)
Example 18
            stable: bool = True) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.

    See its docstring for more information.
    """
    # Note: this keyword argument is different, and the default is different.
    kind = "stable" if stable else "quicksort"
    if not descending:
        res = np.argsort(x._array, axis=axis, kind=kind)
    else:
        # As NumPy has no native descending sort, we imitate it here. Note that
        # simply flipping the results of np.argsort(x._array, ...) would not
        # respect the relative order like it would in native descending sorts.
        res = np.flip(
            np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind),
            axis=axis,
        )
        # Rely on flip()/argsort() to validate axis
        normalised_axis = axis if axis >= 0 else x.ndim + axis
        max_i = x.shape[normalised_axis] - 1
        res = max_i - res
    return Array._new(res)


# Note: the descending keyword argument is new in this function
def sort(x: Array,
         /,
         *,
         axis: int = -1,
         descending: bool = False,
         stable: bool = True) -> Array:
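The comment in argsort above is the subtle part: simply flipping an ascending argsort reverses the order of tied elements, so it is not a stable descending sort. A plain-NumPy illustration of the flip/argsort/flip trick with the max_i - res remapping:

import numpy as np

x = np.array([3, 1, 3, 2])
naive = np.flip(np.argsort(x, kind="stable"))        # [2, 0, 3, 1] -- ties come out reversed
res = np.flip(np.argsort(np.flip(x), kind="stable"))
stable_desc = (len(x) - 1) - res                     # [0, 2, 3, 1] -- ties keep original order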
Example 19
def sort(tensor, axis, descending=False):
    if descending:
        return cp.flip(cp.sort(tensor, axis=axis), axis=axis)
    else:
        return cp.sort(tensor, axis=axis)
Example 20
#m_data=Image.open(multiply_2D_data)
#cp_m_data=cp.asarray(m_data,dtype="float32")

t1 = time.time()

multi_pattern = [cp_diff] * int(n_diff)

t2 = time.time()
print("make array : " + str(t2 - t1))

multi_pattern = cp.asarray(multi_pattern, dtype="float32")

t3 = time.time()
print("cupy convert : " + str(t3 - t2))

multi_pattern = cp.flip(multi_pattern, axis=1)

print("2D_multi shape = " + str(multi_pattern.shape))

#multi_pattern = multi_pattern * cp_m_data  # applied to every slice in the stack

multi_pattern = cp.asnumpy(multi_pattern)  # convert the cupy array back to a numpy array
foname = finame[finame.rfind("/") + 1:len(finame) -
                4] + "_n_" + n_diff + ".mrc"
with mrcfile.new(foname, overwrite=True) as mrc:
    mrc.set_data(multi_pattern)

t5 = time.time()
print("total time : " + str(t5 - t1))
Example 21
def display(a):
    surf = pygame.surfarray.make_surface(
        cp.asnumpy(
            upsample((cp.flip(cp.flip(cp.swapaxes(a, 0, 1), 1), 0) * 255))))
    disp.blit(surf, (0, 0))
Example 22
import cupy as np  # the NumPy array-API wrapper below is reused verbatim with CuPy bound to the np name


def argsort(
    x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.

    See its docstring for more information.
    """
    # Note: this keyword argument is different, and the default is different.
    kind = "stable" if stable else "quicksort"
    res = np.argsort(x._array, axis=axis, kind=kind)
    if descending:
        res = np.flip(res, axis=axis)
    return Array._new(res)


def sort(
    x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.

    See its docstring for more information.
    """
    # Note: this keyword argument is different, and the default is different.
    kind = "stable" if stable else "quicksort"
    res = np.sort(x._array, axis=axis, kind=kind)
    if descending:
Example 23
def flip(arr, axis=None):
    return cp.flip(arr, axis)
Example 24
def get_image_init_array(image, shape: Tuple[int]):
    init_image = IMG.open(image).convert("L")
    init_image = init_image.resize(tuple(np.flip(shape)))
    init_image = np.array(init_image) / 255
    return init_image
Example 25
ft1[0, 2*intv:3*intv] = tri_wave.reshape(-1,)
ft1[0, 6*intv:7*intv] = tri_wave.reshape(-1,)
ft1[0, 9*intv:10*intv] = tri_wave.reshape(-1,)
ft1[0, 14*intv:15*intv] = tri_wave.reshape(-1,)
ft1[0, 21*intv:22*intv] = tri_wave.reshape(-1,)

ft[0] = ft0
ft[1] = ft1[0]
ft = cp.hstack([ft, ft, ft, ft, ft])
inputs0 = (1 / flt - 1) / 9
inputs1 = (1 - inputs0) * 2
# inputs = cp.vstack([inputs0, inputs1])
inputs = cp.ones((1, 2000)) * 0.1
inputs = cp.hstack([inputs, inputs, inputs, inputs, inputs])
test_inputs = cp.flip(inputs, axis=1)

np.savez('./embed_data/longtime_train.npz', cp.asnumpy(ft), cp.asnumpy(inputs))
# np.savez('longtime_test.npz',ft,inpu)
# %%
while True:
    nt = 3
    N = 500
    alpha = 1 + float(np.random.randn(1))
    fb = 1.0 + float(np.random.rand(1))
    g = 1.6
    gin = 1.0 + float(np.random.rand(1))
    Pz = 0.2
    # Pgg = float(cp.random.rand(1))*0.75
    Pgg = 0.1
    nn = Force_multi_out_gpu.Reservoir(N=N,p=Pgg,g=g)
Example 26
    def detect_particles(self, kltpicker):
        """
        Construct the scoring matrix, then use the picking_from_scoring_mat
        function to pick particle and noise images.
        """
        eig_func_stat = self.eig_func[0:self.num_of_func, :]
        eig_val_stat = self.eig_val[0:self.num_of_func]
        for i in range(self.num_of_func):
            tmp_func = np.reshape(
                eig_func_stat[i, :],
                (kltpicker.patch_size_func, kltpicker.patch_size_func))
            tmp_func[kltpicker.rad_mat > np.floor(
                (kltpicker.patch_size_func - 1) / 2)] = 0
            eig_func_stat[i, :] = tmp_func.flatten()
        [q, r] = np.linalg.qr(eig_func_stat.transpose(), 'complete')
        r = r[0:self.num_of_func, 0:self.num_of_func]
        kappa = np.linalg.multi_dot([
            r, np.diag(eig_val_stat), r.transpose()
        ]) + (self.approx_noise_var * np.eye(self.num_of_func))
        kappa_inv = np.linalg.inv(kappa)
        t_mat = (1 / self.approx_noise_var) * np.eye(
            self.num_of_func) - kappa_inv

        [D, P] = np.linalg.eigh(t_mat)
        D = D[::-1]
        P = P[:, ::-1]
        p_full = np.zeros(q.shape)
        p_full[0:t_mat.shape[0], 0:t_mat.shape[1]] = P

        mu = np.linalg.slogdet((1 / self.approx_noise_var) * kappa)[1]
        num_of_patch_row = self.mc_size[0] - kltpicker.patch_size_func + 1
        num_of_patch_col = self.mc_size[1] - kltpicker.patch_size_func + 1

        qp = q @ p_full
        qp = qp.transpose().copy()

        if not kltpicker.no_gpu:
            qp = cp.asarray(qp)
            noise_mc = cp.asarray(self.noise_mc)
            log_test_mat = cp.zeros((num_of_patch_row, num_of_patch_col))
            D = cp.asarray(D)
            for i in range(self.num_of_func):
                qp_tmp = cp.reshape(qp[i, :],
                                    (kltpicker.patch_size_func,
                                     kltpicker.patch_size_func)).transpose()
                qp_tmp = cp.flip(cp.flip(qp_tmp, 0), 1)
                scoreTmp = fftconvolve2d_gpu(noise_mc, qp_tmp)
                log_test_mat = log_test_mat + D[i] * abs(scoreTmp**2)

            log_test_mat = log_test_mat.transpose() - mu
            neigh = cp.ones(
                (kltpicker.patch_size_func, kltpicker.patch_size_func))
            log_test_n = cp.asnumpy(fftconvolve2d_gpu(log_test_mat, neigh))
        else:
            log_test_mat = np.zeros((num_of_patch_row, num_of_patch_col))
            for i in range(self.num_of_func):
                qp_tmp = np.reshape(qp[i, :],
                                    (kltpicker.patch_size_func,
                                     kltpicker.patch_size_func)).transpose()

                qp_tmp = np.flip(np.flip(qp_tmp, 0), 1)

                scoreTmp = signal.fftconvolve(self.noise_mc, qp_tmp, 'valid')
                log_test_mat = log_test_mat + D[i] * abs(scoreTmp**2)

            log_test_mat = log_test_mat.transpose() - mu
            neigh = np.ones(
                (kltpicker.patch_size_func, kltpicker.patch_size_func))
            log_test_n = signal.fftconvolve(log_test_mat, neigh, 'valid')

        [num_picked_particles,
         num_picked_noise] = picking_from_scoring_mat(log_test_n.transpose(),
                                                      self.mrc_name, kltpicker,
                                                      self.mg_big_size)
        return num_picked_particles, num_picked_noise
Example 27
test_acc_array = []
train_acc_array = []

while train_iter.epoch < max_epoch:
    train_accuracies = []

    # ----------------- set up one cycle of the training loop ----------------------
    train_batch = train_iter.next()
    image_train, target_train = concat_examples(train_batch, gpu_id)

    #    image_train = (image_train - Train_mean)
    image_train = (image_train - Train_mean) / Train_std

    Iarray_pad = cp.pad(image_train, ((0, 0), (0, 0), (4, 4), (4, 4)),
                        mode='constant')
    Iarray_flip = cp.flip(Iarray_pad, 1)
    a = [0, 1]
    flip_flag = cp.random.choice(a, 128)
    b = [0, 1, 2, 3, 4, 5, 6, 7]
    c = [0, 1, 2, 3, 4, 5, 6, 7]
    height_rand = cp.random.choice(b, 128)
    width_rand = cp.random.choice(c, 128)
    image_train_dataAug = cp.zeros([Iarray_pad.shape[0], 3, 32, 32], 'float32')

    for i in range(image_train.shape[0]):
        height = height_rand[i]
        width = width_rand[i]
        if flip_flag[i] == 0:
            image_train_dataAug[i, :, :, :] = Iarray_pad[i, :,
                                                         height:height + 32,
                                                         width:width + 32]
Example 28
def _project_cupy(reference_sources, estimated_source, flen, nsrc):
    """Least-squares projection of estimated source on the subspace spanned by
    delayed versions of reference sources, with delays between 0 and flen-1
    """
    # nsrc = tf.shape(reference_sources)[0]
    nsampl = reference_sources.shape[1]
    typ = reference_sources.dtype

    # compute the coefficients of the least-squares problem via FFT:
    # zero-pad and FFT the input data
    reference_sources = cp.concatenate(
        (reference_sources, cp.zeros([nsrc, flen - 1], dtype=typ)), 1)

    estimated_source = cp.concatenate(
        (estimated_source, cp.zeros([flen - 1], dtype=typ)), 0)

    n_fft = cp.power(2., cp.ceil(cp.log2(nsampl + flen - 1))).astype('i')

    sf = cp.fft.fft(reference_sources, n=int(n_fft), axis=1)
    sef = cp.fft.fft(estimated_source, n=int(n_fft))

    # inner products between delayed versions of reference_sources
    G = cp.empty([nsrc * flen, nsrc * flen])
    for i in range(nsrc):
        for j in range(nsrc):
            ssf = sf[i] * cp.conj(sf[j])
            ssf = cp.real(cp.fft.ifft(ssf))
            ss = toeplitz_cupy(
                cp.concatenate((cp.reshape(ssf[0], [1]), ssf[-1:-flen:-1]), 0),
                ssf[:flen])
            G[i * flen:(i + 1) * flen, j * flen:(j + 1) * flen] = ss
            G[j * flen:(j + 1) * flen,
              i * flen:(i + 1) * flen] = cp.transpose(ss)

    # inner products between estimated_source and delayed versions of
    # reference_sources
    D = cp.empty([nsrc * flen])
    for i in range(nsrc):
        ssef = sf[i] * cp.conj(sef)
        ssef = cp.real(cp.fft.ifft(ssef))
        conc = cp.concatenate(
            [cp.reshape(ssef[0], [1]),
             cp.flip(ssef[-flen + 1:], 0)], 0)
        D[i * flen:(i + 1) * flen] = conc

    # Computing projection
    # Distortion filters

    s = cp.linalg.solve(G, cp.expand_dims(D, 1))
    if nsrc == 2:
        C = cp.concatenate((s[:flen], s[flen:]), 1)
    else:
        C = cp.reshape(s, (flen, nsrc))

    # Filtering
    sproj = cp.zeros([nsampl + flen - 1], dtype=cp.float64)

    for i in range(nsrc):
        fshape = C[:, i].shape[0] + reference_sources[i].shape[0] - 1
        fft1 = cp.fft.rfftn(C[:, i], (fshape, ))
        fft2 = cp.fft.rfftn(reference_sources[i], (fshape, ))
        ifft = cp.fft.irfftn(fft1 * fft2, (fshape, ))
        sproj += ifft[:nsampl + flen - 1]
    return sproj
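toeplitz_cupy is assumed above but not shown, and scipy.linalg.toeplitz does not accept GPU arrays. A minimal sketch of what such a helper could look like, following scipy's (c, r) convention (first column c, first row r, with r[0] overridden by c[0]):

import cupy as cp

def toeplitz_cupy(c, r):
    c = cp.asarray(c).ravel()
    r = cp.asarray(r).ravel()
    # walk from the bottom-left value up to the top-right value
    vals = cp.concatenate((c[::-1], r[1:]))
    i = cp.arange(c.size).reshape(-1, 1)
    j = cp.arange(r.size).reshape(1, -1)
    return vals[(c.size - 1) + (j - i)]  # T[i, j] = c[i-j] below the diagonal, r[j-i] above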
Example 29
else:
    print("OSS mode")
print("")

# open mrc file

with mrcfile.open(initial_dens, permissive=True) as mrc:
    cp_dens = cp.asarray(mrc.data, dtype="float32")

if (support.find("tif") != 0):
    sup = Image.open(support)
    np_sup = np.asarray(sup, dtype="float32")
    cp_sup = [np_sup] * int(cp_dens.shape[0])
    cp_sup = cp.asarray(cp_sup, dtype="float32")
    cp_sup = cp.flip(cp_sup, axis=1)
    print("np_sup dtype = " + str(np_sup.dtype))
    print("np_sup shape = " + str(np_sup.shape))

else:
    with mrcfile.open(support, permissive=True) as mrc:
        cp_sup = cp.asarray(mrc.data, dtype="float32")
    print("cp_sup dtype = " + str(cp_sup.dtype))
    print("cp_sup shape = " + str(cp_sup.shape))

print("cp_initial_dens dtype = " + str(cp_dens.dtype))
print("cp_initial_dens shape = " + str(cp_dens.shape))
print("")

if (cp_sup.shape != cp_dens.shape):
Example 30
def flip(im):
    # flip along one uniformly chosen random axis
    return np.flip(im, random.choice(range(im.ndim)))