Example #1
0
File: intro2.py  Project: Voder/PBE
def move2(dir, n):
    """Relocate `char` on `track2` by `n` steps in direction `dir`, clamped to the track."""
    global loc2
    track2[loc2].remove(char)

    # Compute the target index and clamp it to [0, last index of track2] in one step.
    loc2 = envelope(loc2 + dir * n, 0, lastind(track2))
    track2[loc2].append(char)
Example #2
0
def move2(dir, n):
    """Move `char` along `track2`: drop it from the current cell, step, re-add it."""
    global loc2
    track2[loc2].remove(char)

    target = loc2 + dir * n
    # envelope() keeps the new position within the bounds of track2.
    loc2 = envelope(target, 0, lastind(track2))
    track2[loc2].append(char)
Example #3
0
File: intro2.py  Project: Voder/PBE
def move(dir, n):
    """Move `n` times in `dir` direction."""
    global loc
    # Blank out the cell we are leaving.
    track[loc] = blank

    new_loc = loc + dir * n
    # Clamp the destination to the valid range of the track.
    loc = envelope(new_loc, 0, lastind(track))
    track[loc] = char
Example #4
0
def move(dir, n):
    """Move `n` times in `dir` direction."""
    global loc
    track[loc] = blank

    # Step dir*n from the current cell; envelope() clamps to the track bounds.
    loc = envelope(dir * n + loc, 0, lastind(track))
    track[loc] = char
def process_signal(filename):
    """Load `filename` from DATA_DIR, drop low-amplitude (noise) samples, and
    write the surviving samples to the same filename under DEST_DIR."""
    src_path = p.join(DATA_DIR, filename)
    signal, sample_rate = librosa.load(src_path,
                                       sr=DOWNSAMPLED_SAMPLE_RATE)

    # Smooth the signal, then keep only samples whose smoothed amplitude
    # clears the noise threshold.
    smoothed = envelope(signal, sample_rate, DATA_CHUNKS_PER_SECOND)
    keep = smoothed > NOISE_AMPLITUDE_THRESHOLD

    wavfile.write(filename=p.join(DEST_DIR, filename),
                  rate=DOWNSAMPLED_SAMPLE_RATE,
                  data=signal[keep])
def convert_wav2npy(arg_tuple):
    """Load a wav file, optionally denoise it, and save its samples as .npy.

    arg_tuple is (path, save_dir, sample_rate, denoise):
      path        -- pathlib.Path of the source wav file
      save_dir    -- destination root directory
      sample_rate -- target sample rate passed to librosa.load
      denoise     -- when truthy, apply noise reduction before saving

    The output path is <save_dir>/<ebird_code>/<stem>.npy, where the ebird
    code is the name of the wav file's parent directory.
    """
    # Unpack instead of positional indexing; slice keeps behavior identical
    # even if a caller passes extra trailing elements.
    path, save_dir, sample_rate, denoise = arg_tuple[:4]

    x = librosa.load(path, sr=sample_rate, mono=True)[0]
    if denoise:
        try:
            # mask marks signal regions; its complement is treated as noise.
            mask, env = utils.envelope(x, sample_rate, threshold=0.25)
            x = nr.reduce_noise(audio_clip=x,
                                noise_clip=x[np.logical_not(mask)],
                                verbose=False)
        except ValueError:
            # Best-effort: report the offending file and keep the raw signal.
            print("=" * 10)
            print(path)
            print(x)
    ebird_code = path.parent.name
    write_path = Path(save_dir) / f"{ebird_code}/{path.stem}.npy"
    print(f"write to {write_path}")
    np.save(write_path, x)
Example #7
0
def execute_flow(flow):
    """Handle an incoming request for `flow`: validate it, extract the
    configured fields from its text, and append them to a spreadsheet.

    Every exit path returns an envelope() response describing the outcome.
    """
    if flow not in get_flows():
        return envelope('unknown flow {}'.format(flow))

    settings = merge_settings(flow)

    if request.method not in settings.get('http_methods'):
        return envelope('invalid http method', success=False)

    # POST carries its arguments in the form body, anything else in the query string.
    params = request.form if request.method == 'POST' else request.args

    if params.get('token') != settings.get('token'):
        return envelope('incorrect token', success=False)

    text = params.get('text')
    if not text:
        return envelope('empty text', success=False)

    # Ignore messages that do not contain the flow's activation phrase.
    if not settings.get('activate_flow_re').search(text):
        return envelope('no activation phrase found - ignored')

    # Pull each configured field's value out of the message text.
    selectors = settings['selectors']
    fields = {
        name: get_field_value(name, selectors[name], text)
        for name in selectors
    }

    values = format_fields(fields, settings['fields'])

    try:
        update_spreadsheet(values, settings)
    except Exception as e:
        message = 'error updating spreadsheet - {} {}'.format(type(e).__name__, str(e))
        return envelope(message, success=False)

    return envelope('data successfully saved')
Example #8
0
# Starting from the final position in the burn-in chain, sample for nsteps.
sampler.run_mcmc(pos, nsteps, rstate0=state)

###############################################################################
# statistical inference from sampling the loglikelihood parameter space
###############################################################################

# Save the result of sampling:
# - the coordinates of the sampled points in the chain
# - the corresponding logprobability
np.save('results/{0}/{1}/samples_{0}_{1}'.format(ker, dwarf), sampler.chain)
np.save('results/{0}/{1}/lnprobs_{0}_{1}'.format(ker, dwarf),
        sampler.lnprobability)

# Flatten the chain so the results can be enveloped along the desired
# direction.
flatsamples = sampler.flatchain
flatlnprobs = sampler.flatlnprobability

# Envelope the lowermost -lnlikelihood values over the sampled J range;
# to obtain the profile of another variable, change the 'param' argument.
Penv, Senv, Lenv = envelope(flatsamples, flatlnprobs, param=2)

# Print results of the envelope on screen.
# NOTE: Python 2 print statements converted to print(); print(P, S, L)
# produces the same space-separated output as `print P, S, L` did.
print('results')
for P, S, L in zip(Penv, Senv, Lenv):
    print(P, S, L)
Example #9
0
def get_data(save_locally=False,
             sub_segments=True, num_segments=5,
             cleaning=True, threshold=0.005):
    """Build a feature DataFrame from the genre-labelled audio dataset.

    Adjust parameters :
    # save_locally : when True, also write the dataframe to ../dataset/clean_df.csv
    # sub_segments : split each audio file into segments before extracting features
    # num_segments : number of segments per file (used when sub_segments is True)
    # cleaning : whether to apply an envelope mask to drop quiet samples
    # threshold : amplitude threshold of the envelope mask

    Returns a pandas DataFrame with one row per file (or per sub-segment).
    """

    arr_features = []

    # Data lives one level up from the working directory, in ../data/<genre>/.
    path = os.path.join(os.getcwd(), '..', 'data')

    # Each sub-directory is one genre; its enumeration index becomes the label.
    genres = os.listdir(path)

    for idx, genre in tqdm(enumerate(genres), total=len(genres)):
        genre_path = os.path.join(path, genre)

        for fname in os.listdir(genre_path):
            file_path = os.path.join(genre_path, fname)
            y, sr = librosa.load(file_path, duration=30)

            # Cleaning option: drop samples below the envelope threshold.
            if cleaning:
                mask = envelope(y, sr, threshold)
                y = y[mask]

            if sub_segments:
                # Split the (possibly cleaned) signal into num_segments
                # equal-length chunks and extract features per chunk.
                track_duration = round(len(y) / sr)
                samples_per_track = sr * track_duration
                samples_per_segment = int(samples_per_track / num_segments)

                for d in range(num_segments):
                    start = samples_per_segment * d
                    finish = start + samples_per_segment
                    new_y = y[start:finish]
                    dict_features = extract_features(y=new_y, sr=sr)
                    dict_features['label'] = idx
                    # Keep the source file name plus the segment index as the id.
                    dict_features['id'] = f"{fname}_{d}"
                    dict_features['total_duration'] = track_duration
                    dict_features['sub_duration'] = round((finish - start) / sr)
                    arr_features.append(dict_features)

            # Don't split songs into subsegments
            else:
                dict_features = extract_features(y=y, sr=sr)
                dict_features['label'] = idx
                dict_features['id'] = f"{fname}"
                arr_features.append(dict_features)

    # Create a dataframe with the features
    df = pd.DataFrame(data=arr_features)

    # ----------------------------------------------------------
    # Print final output details
    # ----------------------------------------------------------

    if cleaning:
        print('# Envelope used for data cleaning')

    if sub_segments:
        print(f'# Split each sound into {num_segments} sub segments')
    else:
        print('# Sounds not split into sub segments')

    if not save_locally:
        print('# Dataset generated but not saved')
        print('# Shape of the dataset', df.shape)
    else:
        print('# Dataset generated')
        print('# Shape of the dataset', df.shape)
        df.to_csv(os.path.join(os.getcwd(), '..', 'dataset', 'clean_df.csv'), index=False)
        print('# Dataset saved')

    return df