示例#1
0
def analyze_channels(freq, filename, function):
    """
    Run *function* on each channel of the WAV file *filename*.

    Args:
        freq: Frequency argument forwarded unchanged to *function*.
        filename: Path of the WAV file to read.
        function: Callable invoked as ``function(freq, channel, sample_rate)``
            once per (distinct) channel.
    """
    sample_rate, signal = scipy.io.wavfile.read(filename)
    # Python 3 print() calls (the original used Python-2 print statements,
    # which are syntax errors here and inconsistent with the rest of the file).
    print('Analyzing "' + filename + '"...')

    if len(signal.shape) == 1:
        # Monaural: the data is a single 1-D array.
        function(freq, signal, sample_rate)
    elif signal.shape[1] == 2:
        # Stereo: skip duplicate work when both channels carry the same data.
        if np.array_equal(signal[:, 0], signal[:, 1]):
            print('-- Left and Right channels are identical --')
            function(freq, signal[:, 0], sample_rate)
        else:
            print('-- Left channel --')
            function(freq, signal[:, 0], sample_rate)
            print('-- Right channel --')
            function(freq, signal[:, 1], sample_rate)
    else:
        # Multi-channel: transpose so iteration goes channel-by-channel.
        for ch_no, channel in enumerate(signal.transpose()):
            print('-- Channel %d --' % (ch_no + 1))
            function(freq, channel, sample_rate)
示例#2
0
def transform_beat(sig, train=False):
    """Convert one beat signal to a float tensor, optionally augmenting it.

    Args:
        sig: Input signal as a numpy array.
        train: When True, each of scaling / verflip / shift is applied
            independently, gated by its own fresh ``np.random.randn()`` draw
            against 0.5.  NOTE(review): ``randn() > 0.5`` fires with
            probability ~0.31, not 0.5 — confirm ``rand()`` was not intended.

    Returns:
        ``torch.float`` tensor of the transposed signal.
    """
    if train:
        # Apply the augmentations in a fixed order, one random draw each.
        for augment in (scaling, verflip, shift):
            if np.random.randn() > 0.5:
                sig = augment(sig)
    # copy() makes the transposed view contiguous before tensor conversion.
    return torch.tensor(sig.transpose().copy(), dtype=torch.float)
示例#3
0
def transform(sig, train=True):
    """Convert a signal to a float tensor, with random train-time augmentation.

    Args:
        sig: Input signal as a numpy array.
        train: When True, each augmentation step is applied independently,
            gated by a fresh ``np.random.randn()`` draw against its threshold.

    Returns:
        ``torch.float`` tensor of the transposed signal.
    """
    if train:
        # (threshold, transform) pairs, applied in this order; a step runs
        # only when a fresh standard-normal draw exceeds its threshold.
        steps = (
            (0.5, scaling),
            (0.3, verflip),
            (0.5, shift),
            (0.3, lambda s: butter_bandpass_filter(s, 0.05, 46, 256)),
        )
        for threshold, augment in steps:
            if np.random.randn() > threshold:
                sig = augment(sig)
    # copy() makes the transposed view contiguous before tensor conversion.
    return torch.tensor(sig.transpose().copy(), dtype=torch.float)
示例#4
0
def calc_rxd(data, band_idx, size_column, size_row):
    """
    Calculate an abnormality map using the RXD (Reed-Xiaoli) algorithm.

    Args:
        data: Stack of image bands; ``data[b]`` must reshape to
            ``size_column * size_row`` pixels.
        band_idx: Integer array of band indices to analyze (needs ``.max()``).
        size_column: Image width in pixels.
        size_row: Image height in pixels.

    Returns:
        ``(size_row, size_column)`` array of per-pixel anomaly scores, or
        None when ``band_idx`` references a band ``data`` does not contain.
    """
    if data.shape[0] < band_idx.max():
        # Fixed: Python-3 print call, and "then" -> "than" typo in message.
        print("Error: Number of band is larger than actual")
        return None

    num_bands = len(band_idx)
    num_pixels = size_column * size_row

    rxd = np.zeros(num_pixels)

    if num_bands > 1:
        # Stack the selected bands as rows of a (num_bands, num_pixels) matrix.
        GG = np.zeros((num_bands, num_pixels))
        for i in range(num_bands):
            GG[i, ] = data[band_idx[i]].reshape(num_pixels)

        # Mahalanobis-style score: (x - mean)^T * inv(cov) * (x - mean).
        M_i = np.linalg.inv(np.cov(GG))
        avg = np.mean(GG, axis=1)

        for i in range(num_pixels):
            signal = GG[:, i] - avg
            rxd[i] = np.dot(np.dot(signal, M_i), signal.transpose())
    elif num_bands == 1:
        # Single band degenerates to the z-score magnitude per pixel.
        band = band_idx[0]
        avg = np.mean(data[band])
        std = np.std(data[band])
        GG = data[band].reshape(num_pixels)
        for i in range(num_pixels):
            rxd[i] = abs((GG[i] - avg) / std)

    return rxd.reshape((size_row, size_column))
示例#5
0
文件: rxd.py 项目: HungLV4/HOG
def calc_rxd(data, band_idx, size_column, size_row):
    """
    Calculate an abnormality map using the RXD (Reed-Xiaoli) algorithm.

    Args:
        data: Stack of image bands; ``data[b]`` must reshape to
            ``size_column * size_row`` pixels.
        band_idx: Integer array of band indices to analyze (needs ``.max()``).
        size_column: Image width in pixels.
        size_row: Image height in pixels.

    Returns:
        ``(size_row, size_column)`` array of per-pixel anomaly scores, or
        None when ``band_idx`` references a band ``data`` does not contain.
    """
    if data.shape[0] < band_idx.max():
        # Fixed: Python-3 print call, and "then" -> "than" typo in message.
        print("Error: Number of band is larger than actual")
        return None

    num_bands = len(band_idx)
    num_pixels = size_column * size_row

    rxd = np.zeros(num_pixels)

    if num_bands > 1:
        # Stack the selected bands as rows of a (num_bands, num_pixels) matrix.
        GG = np.zeros((num_bands, num_pixels))
        for i in range(num_bands):
            GG[i, ] = data[band_idx[i]].reshape(num_pixels)

        # Mahalanobis-style score: (x - mean)^T * inv(cov) * (x - mean).
        M_i = np.linalg.inv(np.cov(GG))
        avg = np.mean(GG, axis=1)

        for i in range(num_pixels):
            signal = GG[:, i] - avg
            rxd[i] = np.dot(np.dot(signal, M_i), signal.transpose())
    elif num_bands == 1:
        # Single band degenerates to the z-score magnitude per pixel.
        band = band_idx[0]
        avg = np.mean(data[band])
        std = np.std(data[band])
        GG = data[band].reshape(num_pixels)
        for i in range(num_pixels):
            rxd[i] = abs((GG[i] - avg) / std)

    return rxd.reshape((size_row, size_column))
示例#6
0
def arange_tx(signal, num_tx, vx_axis=2, axis=1):
    """Separate interleaved radar data from separate TX along a certain axis
    to account for TDM radars.

    Args:
        signal (ndarray): Received signal.
        num_tx (int): Number of transmit antennas.
        vx_axis (int): Axis in which to accumulate the separated data.
        axis (int): Axis in which the data is interleaved.

    Returns:
        ndarray: Separated received data, concatenated along ``vx_axis`` and
        returned in the original axis order.
    """
    # Swap the interleave axis to position 0; the swap is its own inverse,
    # so the same permutation restores the original order afterwards.
    perm = np.arange(signal.ndim)
    perm[[0, axis]] = perm[[axis, 0]]
    moved = signal.transpose(perm)

    # Every num_tx-th sample belongs to the same TX; gather each TX's slice
    # and concatenate them along the virtual-antenna axis.
    chunks = [moved[tx::num_tx, ...] for tx in range(num_tx)]
    stacked = np.concatenate(chunks, axis=vx_axis)

    return stacked.transpose(perm)
# NOTE(review): `figure`, `res`, `nsetup`, `duration_rec` and
# `go_reconstruct_signal_from_out` are defined elsewhere in this file/script.
figs = figure()
#################
## TEACH
#################
# Sweep over readout sizes (currently a single size) and run 3 trials each.
n_neu_scale = [256]
for this_readout in range(len(n_neu_scale)):
    res.Nn = n_neu_scale[this_readout]
    res.reset()
    for this_trial in range(3):
        # Two priming stimulations (reset, then no-reset), then the recorded run.
        out = nsetup.stimulate({},send_reset_event=True,duration=200)
        out = nsetup.stimulate({},send_reset_event=False,duration=300)
        out = nsetup.stimulate({},send_reset_event=True,duration=duration_rec)
        # Reconstruct the analog signal from the up/down spike channels.
        signal = go_reconstruct_signal_from_out(out,figs,upch=300,dnch=305,delta_up=0.1,delta_dn=0.1,do_detrend=False)
        # Zero-base the time column; keep the value column unchanged.
        signal = [signal[:,0]-np.min(signal[:,0]),signal[:,1]]
        signal = np.array(signal)
        signal = signal.transpose()
        #extract input and output
        raw_data = out[0].raw_data()
        # NOTE(review): dnch/upch here are swapped relative to the call above
        # (upch=300, dnch=305) -- confirm which mapping is intended.
        dnch = 300
        upch = 305
        index_dn = np.where(raw_data[:,1] == dnch)[0]
        index_up = np.where(raw_data[:,1] == upch)[0]
        # Keep only the up/down channel events, then re-label them as 1/0.
        raw_data_input = []
        raw_data_input.extend(raw_data[index_dn,:])
        raw_data_input.extend(raw_data[index_up,:])
        raw_data_input = np.reshape(raw_data_input,[len(index_dn)+len(index_up),2])
        index_up = np.where(raw_data_input[:,1] == upch)[0]
        index_dn = np.where(raw_data_input[:,1] == dnch)[0]
        raw_data_input[index_dn,1] = 1
        raw_data_input[index_up,1] = 0   
        raw_data = out[0].raw_data()
示例#8
0
def prepare_data(mode, train_or_test, min=None, max=None):
    """Generator yielding batches of mixed-speaker speech data (WSJ0 setup).

    Args:
        mode: 'global' (yield dataset metadata and id<->speaker dicts),
            'once' (yield one batch of features and wavs), or 'tasnet'
            (yield collated tensors via _collate_fn).
        train_or_test: 'train', 'valid' or 'test' -- selects the list files.
        min, max: optional overrides for config.MIN_MIX / config.MAX_MIX.
            NOTE(review): these parameter names shadow the builtins min/max
            inside this function.

    Yields:
        A dict or collated batch per `mode`, and False when data is exhausted.
    """
    if min:
        config.MIN_MIX = min
    if max:
        config.MAX_MIX = max

    # Per-batch accumulators, reset after each yielded batch.
    mix_speechs = []
    aim_fea = []
    aim_spkid = []
    aim_spkname = []
    query = []
    multi_spk_fea_list = []
    multi_spk_wav_list = []
    direction = []

    if config.MODE == 1:
        if config.DATASET == 'WSJ0':
            # Speaker-id lists for the train and test splits.
            spk_file_tr = open(
                '/mnt/lustre/xushuang2/lcx/data/amcc-data/2channel/wav_spk.txt',
                'r')
            all_spk_train = [i.replace("\n", "") for i in spk_file_tr]
            all_spk_train = sorted(all_spk_train)
            print(all_spk_train)

            spk_file_tt = open(
                '/mnt/lustre/xushuang2/lcx/data/amcc-data/2channel/test/wav_spk.txt',
                'r')
            all_spk_test = [i.replace("\n", "") for i in spk_file_tt]
            all_spk_test = sorted(all_spk_test)
            print(all_spk_test)
            all_spk = all_spk_train + all_spk_test
            print(all_spk)

            # Build the wav-file-name -> direction-label map.
            all_dir = [i for i in range(1, 20)]
            dicDirFile = open(
                '/mnt/lustre/xushuang2/lcx/data/amcc-data/2channel/wav_dirLabel2.txt',
                'r')  # open the direction-label data file
            dirDict = {}
            while True:
                line = dicDirFile.readline()
                if line == '':
                    break
                index = line.find(' ')
                key = line[:index]
                #print(key)
                value = line[index:]
                dirDict[key] = value.replace("\n", "").replace(" ", "")
            dicDirFile.close()

            spk_samples_list = {}
            batch_idx = 0
            list_path = '/mnt/lustre/xushuang2/lcx/data/create-speaker-mixtures/'
            all_samples_list = {}
            sample_idx = {}
            number_samples = {}
            batch_mix = {}
            # NOTE(review): this is a range object -- the .remove() call
            # further below fails on Python 3; should be list(range(...)).
            mix_number_list = range(config.MIN_MIX, config.MAX_MIX + 1)
            number_samples_all = 0
            for mix_k in mix_number_list:
                # Pick the sample-list file for this mixture size and split.
                if train_or_test == 'train':
                    aim_list_path = list_path + 'mix_{}_spk_tr.txt'.format(
                        mix_k)
                if train_or_test == 'valid':
                    aim_list_path = list_path + 'mix_{}_spk_cv.txt'.format(
                        mix_k)
                if train_or_test == 'test':
                    aim_list_path = list_path + 'mix_{}_spk_tt.txt'.format(
                        mix_k)
                    config.batch_size = 1

                all_samples_list[mix_k] = open(
                    aim_list_path).readlines()  # [:31]
                number_samples[mix_k] = len(all_samples_list[mix_k])
                # NOTE(review): true division on Python 3 -- batch_mix and
                # batch_total below become floats.
                batch_mix[mix_k] = len(
                    all_samples_list[mix_k]) / config.batch_size
                number_samples_all += len(all_samples_list[mix_k])

                sample_idx[mix_k] = 0

                if train_or_test == 'train' and config.SHUFFLE_BATCH:
                    random.shuffle(all_samples_list[mix_k])
                    print('shuffle success!', all_samples_list[mix_k][0])

            batch_total = number_samples_all / config.batch_size

            mix_k = random.sample(mix_number_list, 1)[0]
            # while True:
            for ___ in range(number_samples_all):
                if ___ == number_samples_all - 1:
                    print('ends here.___')
                    yield False
                mix_len = 0
                # This mixture size is exhausted: drop it, pick another, and
                # reset the in-progress batch.
                if sample_idx[mix_k] >= batch_mix[mix_k] * config.batch_size:
                    mix_number_list.remove(mix_k)
                    try:
                        mix_k = random.sample(mix_number_list, 1)[0]
                    except ValueError:
                        print('seems there gets all over.')
                        if len(mix_number_list) == 0:
                            print('all mix number is over~!')
                        yield False

                    batch_idx = 0
                    mix_speechs = np.zeros((config.batch_size, config.MAX_LEN))
                    mix_feas = []
                    mix_phase = []
                    aim_fea = []
                    aim_spkid = []
                    aim_spkname = []
                    query = []
                    multi_spk_fea_list = []
                    multi_spk_order_list = []
                    multi_spk_wav_list = []
                    continue

                # Detect when every mixture size has been fully consumed.
                # NOTE(review): the `if all_over:` check sits inside the loop
                # body, so it only fires on the first non-breaking iteration
                # -- confirm this is intended.
                all_over = 1
                for kkkkk in mix_number_list:
                    if not sample_idx[
                            kkkkk] >= batch_mix[mix_k] * config.batch_size:
                        all_over = 0
                        break
                    if all_over:
                        print('all mix number is over~!')
                        yield False

                # mix_k=random.sample(mix_number_list,1)[0]
                # NOTE(review): aim_spk_k sampled here is immediately
                # overwritten by the regex parse below -- dead code.
                if train_or_test == 'train':
                    aim_spk_k = random.sample(all_spk_train, mix_k)
                elif train_or_test == 'test':
                    aim_spk_k = random.sample(all_spk_test, mix_k)

                # Parse speaker ids, gains (dB), and sample names out of the
                # current mixture-list line.
                aim_spk_k = re.findall(
                    '/([0-9][0-9].)/',
                    all_samples_list[mix_k][sample_idx[mix_k]])
                aim_spk_db_k = [
                    float(dd) for dd in re.findall(
                        ' (.*?) ', all_samples_list[mix_k][sample_idx[mix_k]])
                ]
                aim_spk_samplename_k = re.findall(
                    '/(.{8})\.wav ',
                    all_samples_list[mix_k][sample_idx[mix_k]])
                assert len(aim_spk_k) == mix_k == len(aim_spk_db_k) == len(
                    aim_spk_samplename_k)

                multi_fea_dict_this_sample = {}
                multi_wav_dict_this_sample = {}
                multi_name_list_this_sample = []
                multi_db_dict_this_sample = {}
                direction_sample = {}
                for k, spk in enumerate(aim_spk_k):

                    sample_name = aim_spk_samplename_k[k]
                    # A gain of 0.0 dB appears as "0" in the file names.
                    if aim_spk_db_k[k] == 0:
                        aim_spk_db_k[k] = int(aim_spk_db_k[k])
                    if train_or_test != 'test':
                        spk_speech_path = data_path + '/' + 'train' + '/' + sample_name + '_' + str(
                            aim_spk_db_k[k]) + '_simu_nore.wav'
                    else:
                        spk_speech_path = data_path + '/' + 'test' + '/' + sample_name + '_' + str(
                            aim_spk_db_k[k]) + '_simu_nore.wav'

                    signal, rate = sf.read(spk_speech_path)

                    wav_name = sample_name + '_' + str(
                        aim_spk_db_k[k]) + '_simu_nore.wav'
                    direction_sample[spk] = dirDict[wav_name]
                    # Resample to the configured frame rate when needed
                    # (resampy works along the last axis, hence the transposes).
                    if rate != config.FRAME_RATE:
                        print("config.FRAME_RATE", config.FRAME_RATE)
                        signal = signal.transpose()
                        signal = resampy.resample(signal,
                                                  rate,
                                                  config.FRAME_RATE,
                                                  filter='kaiser_best')
                        signal = signal.transpose()

                    # Truncate over-long signals; track the batch's longest.
                    if signal.shape[0] > config.MAX_LEN:
                        signal = signal[:config.MAX_LEN, :]

                    if signal.shape[0] > mix_len:
                        mix_len = signal.shape[0]

                    # Remove DC offset and peak-normalize in place.
                    signal -= np.mean(signal)
                    signal /= np.max(np.abs(signal))

                    # Zero-pad every signal up to MAX_LEN samples.
                    if signal.shape[0] < config.MAX_LEN:
                        signal = np.r_[signal,
                                       np.zeros(
                                           (config.MAX_LEN - signal.shape[0],
                                            signal.shape[1]))]

                    if k == 0:
                        # First (target) speaker: start the mixture and keep
                        # its clean STFT magnitude as the aim feature.
                        ratio = 10**(aim_spk_db_k[k] / 20.0)
                        signal = ratio * signal
                        aim_spkname.append(aim_spk_k[0])
                        aim_spk_speech = signal
                        # NOTE(review): this appends the aim_spkname *list*
                        # itself, not the current name -- confirm intended.
                        aim_spkid.append(aim_spkname)
                        wav_mix = signal
                        signal_c0 = signal[:, 0]
                        a, b, frq = scipy.signal.stft(
                            signal_c0,
                            fs=8000,
                            nfft=config.FRAME_LENGTH,
                            noverlap=config.FRAME_SHIFT)
                        aim_fea_clean = np.transpose(np.abs(frq))
                        aim_fea.append(aim_fea_clean)
                        multi_fea_dict_this_sample[spk] = aim_fea_clean
                        multi_wav_dict_this_sample[spk] = signal[:, 0]

                    else:
                        # Later speakers: scale by their dB gain and add into
                        # the mixture.
                        ratio = 10**(aim_spk_db_k[k] / 20.0)
                        signal = ratio * signal
                        wav_mix = wav_mix + signal
                        a, b, frq = scipy.signal.stft(
                            signal[:, 0],
                            fs=8000,
                            nfft=config.FRAME_LENGTH,
                            noverlap=config.FRAME_SHIFT)
                        some_fea_clean = np.transpose(np.abs(frq))
                        multi_fea_dict_this_sample[spk] = some_fea_clean
                        multi_wav_dict_this_sample[spk] = signal[:, 0]

                multi_spk_fea_list.append(multi_fea_dict_this_sample)
                multi_spk_wav_list.append(multi_wav_dict_this_sample)

                mix_speechs.append(wav_mix)
                direction.append(direction_sample)
                batch_idx += 1

                # A full batch is ready: yield it in the requested format.
                if batch_idx == config.batch_size:
                    mix_k = random.sample(mix_number_list, 1)[0]
                    aim_fea = np.array(aim_fea)
                    query = np.array(query)
                    print('spk_list_from_this_gen:{}'.format(aim_spkname))
                    print('aim spk list:',
                          [one.keys() for one in multi_spk_fea_list])
                    batch_ordre = get_energy_order(multi_spk_wav_list)
                    direction = get_spk_order(direction, batch_ordre)
                    if mode == 'global':
                        all_spk = sorted(all_spk)
                        all_spk = sorted(all_spk_train)
                        all_spk.insert(0, '<BOS>')  # add two structural tokens to mark start and end.
                        all_spk.append('<EOS>')
                        all_dir = sorted(all_dir)
                        all_dir.insert(0, '<BOS>')
                        all_dir.append('<EOS>')
                        all_spk_test = sorted(all_spk_test)
                        dict_spk_to_idx = {
                            spk: idx
                            for idx, spk in enumerate(all_spk)
                        }
                        dict_idx_to_spk = {
                            idx: spk
                            for idx, spk in enumerate(all_spk)
                        }
                        dict_dir_to_idx = {
                            dire: idx
                            for idx, dire in enumerate(all_dir)
                        }
                        dict_idx_to_dir = {
                            idx: dire
                            for idx, dire in enumerate(all_dir)
                        }
                        yield {
                            'all_spk': all_spk,
                            'dict_spk_to_idx': dict_spk_to_idx,
                            'dict_idx_to_spk': dict_idx_to_spk,
                            'all_dir': all_dir,
                            'dict_dir_to_idx': dict_dir_to_idx,
                            'dict_idx_to_dir': dict_idx_to_dir,
                            'num_fre': aim_fea.shape[2],
                            'num_frames': aim_fea.shape[1],
                            'total_spk_num': len(all_spk),
                            'total_batch_num': batch_total
                        }
                    elif mode == 'once':
                        yield {
                            'mix_wav':
                            mix_speechs,
                            'aim_fea':
                            aim_fea,
                            'aim_spkname':
                            aim_spkname,
                            'direction':
                            direction,
                            'query':
                            query,
                            'num_all_spk':
                            len(all_spk),
                            'multi_spk_fea_list':
                            multi_spk_fea_list,
                            'multi_spk_wav_list':
                            multi_spk_wav_list,
                            'batch_order':
                            batch_ordre,
                            'batch_total':
                            batch_total,
                            'tas_zip':
                            _collate_fn(mix_speechs, multi_spk_wav_list,
                                        batch_ordre)
                        }
                    elif mode == 'tasnet':
                        yield _collate_fn(mix_speechs, multi_spk_wav_list)

                    # Reset the per-batch accumulators for the next batch.
                    batch_idx = 0
                    mix_speechs = []
                    aim_fea = []
                    aim_spkid = []
                    aim_spkname = []
                    query = []
                    multi_spk_fea_list = []
                    multi_spk_wav_list = []
                    direction = []
                sample_idx[mix_k] += 1

        else:
            raise ValueError('No such dataset:{} for Speech.'.format(
                config.DATASET))
        pass

    else:
        raise ValueError('No such Model:{}'.format(config.MODE))
 # NOTE(review): this fragment is indented one level -- it appears to be the
 # body of an enclosing scope whose header is outside this view; nsetup,
 # figs, duration_rec and go_reconstruct_signal_from_out come from that scope.
 for this_trial in range(3):
     # Two priming stimulations (reset, then no-reset), then the recorded run.
     out = nsetup.stimulate({}, send_reset_event=True, duration=200)
     out = nsetup.stimulate({}, send_reset_event=False, duration=300)
     out = nsetup.stimulate({},
                            send_reset_event=True,
                            duration=duration_rec)
     # Reconstruct the analog signal from the up/down spike channels.
     signal = go_reconstruct_signal_from_out(out,
                                             figs,
                                             upch=300,
                                             dnch=305,
                                             delta_up=0.1,
                                             delta_dn=0.1,
                                             do_detrend=False)
     # Zero-base the time column; keep the value column unchanged.
     signal = [signal[:, 0] - np.min(signal[:, 0]), signal[:, 1]]
     signal = np.array(signal)
     signal = signal.transpose()
     #extract input and output
     raw_data = out[0].raw_data()
     # NOTE(review): dnch/upch here are swapped relative to the call above
     # (upch=300, dnch=305) -- confirm which mapping is intended.
     dnch = 300
     upch = 305
     index_dn = np.where(raw_data[:, 1] == dnch)[0]
     index_up = np.where(raw_data[:, 1] == upch)[0]
     # Keep only the up/down channel events, then re-label them as 1/0.
     raw_data_input = []
     raw_data_input.extend(raw_data[index_dn, :])
     raw_data_input.extend(raw_data[index_up, :])
     raw_data_input = np.reshape(raw_data_input,
                                 [len(index_dn) + len(index_up), 2])
     index_up = np.where(raw_data_input[:, 1] == upch)[0]
     index_dn = np.where(raw_data_input[:, 1] == dnch)[0]
     raw_data_input[index_dn, 1] = 1
     raw_data_input[index_up, 1] = 0
示例#10
0
def fourier_filter(tod,
                   filt_function,
                   detrend='linear',
                   resize='zero_pad',
                   axis_name='samps',
                   signal_name='signal',
                   time_name='timestamps',
                   **kwargs):
    """Return a filtered copy of ``tod.<signal_name>`` along ``axis_name``.

    Does not change the data stored in the axis manager.

    Arguments:

        tod: axis manager

        filt_function: function(freqs, tod, **kwargs) that takes a set of
            frequencies and the axis manager and returns the filter in
            Fourier space

        detrend: method of detrending applied before the FFT; one of
            'linear', 'mean', or None

        resize: how to resize the axis to increase FFT speed. 'zero_pad'
            pads up to the next power of two. 'trim' cuts the axis so the
            factorization of N contains only low primes; the returned signal
            is then shorter than the input. None keeps the axis length and
            may be quite slow.

        axis_name: name of the axis to FFT along

        signal_name: name of the variable in tod to filter

        time_name: name of the time-of-data (seconds) variable in tod

    Returns:

        signal: the filtered tod.<signal_name>
    """
    # Only 1-D (samples,) or 2-D (detectors x samples) signals are supported.
    if len(tod._assignments[signal_name]) > 2:
        raise ValueError('fouier_filter only works for 1D or 2D data streams')

    axis = getattr(tod, axis_name)
    times = getattr(tod, time_name)
    # Mean sample spacing, used to build the rfft frequency grid below.
    delta_t = (times[-1] - times[0]) / axis.count

    if len(tod._assignments[signal_name]) == 1:
        n_det = 1
        ## signal will be at least 2D
        main_idx = 1
        other_idx = None

    elif len(tod._assignments[signal_name]) == 2:
        # Determine which axis of the 2-D signal is the sample axis and
        # which is the "other" (detector) axis.
        checks = np.array(
            [x == axis_name for x in tod._assignments[signal_name]],
            dtype='bool')
        main_idx = np.where(checks)[0][0]
        other_idx = np.where(~checks)[0][0]
        other_axis = getattr(tod, tod._assignments[signal_name][other_idx])
        n_det = other_axis.count

    if detrend is None:
        signal = np.atleast_2d(getattr(tod, signal_name))
    else:
        signal = detrend_data(tod,
                              detrend,
                              axis_name=axis_name,
                              signal_name=signal_name)

    if other_idx is not None and other_idx != 0:
        ## so that code can be written always along axis 1
        signal = signal.transpose()

    # Choose the FFT length n according to the resize policy.
    if resize == 'zero_pad':
        k = int(np.ceil(np.log(axis.count) / np.log(2)))
        n = 2**k
    elif resize == 'trim':
        n = fft.find_inferior_integer(axis.count)
    elif resize is None:
        n = axis.count
    else:
        raise ValueError('resize must be "zero_pad", "trim", or None')

    # a/b are the pre-allocated real/complex FFT buffers; t_1/t_2 are the
    # forward and inverse transform callables (see build_rfft_object).
    a, b, t_1, t_2 = build_rfft_object(n_det, n, 'BOTH')
    if resize == 'zero_pad':
        a[:, :axis.count] = signal
        a[:, axis.count:] = 0
    elif resize == 'trim':
        a[:] = signal[:, :n]
    else:
        a[:] = signal[:]

    ## FFT Signal
    t_1()

    ## Get Filter
    freqs = np.fft.rfftfreq(n, delta_t)
    filt = filt_function(freqs, tod, **kwargs)
    b[:] *= filt

    ## FFT Back
    t_2()

    # NOTE(review): assumes the inverse transform from build_rfft_object
    # includes 1/n normalization -- confirm against its implementation.
    if resize == 'zero_pad':
        signal = a[:, :axis.count]
    else:
        signal = a[:]

    # Undo the earlier transpose so the output matches the input layout.
    if other_idx is not None and other_idx != 0:
        return signal.transpose()

    return signal