Example #1
                     (i, snrTrack[i]),
                     xytext=(0, 13),
                     va="center",
                     ha="center",
                     textcoords='offset points')
    plt.show()


def main(pklFile):
    '''
    Load saved test results from a pickle file, then plot the SNR track and
    fit a logistic function to the word-recognition data.
    '''
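    # plotSNR() and fitLogistic() read these module-level globals rather than
    # taking the data as arguments.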
    global snrTrack
    global trialN
    global wordsCorrect

    with open(pklFile, 'rb') as f:
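        # The pickle is assumed to hold the adaptive-track results under the
        # keys 'snrTrack', 'trialN' and 'wordsCorrect'.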
        data = dill.load(f)
        snrTrack = data['snrTrack']
        trialN = data['trialN']
        wordsCorrect = data['wordsCorrect']
        plotSNR()
        fitLogistic()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('pklFile', type=PathType(exists=True),
                        help='Pickle file containing the saved test results')
    args = parser.parse_args()
    pklFile = args.pklFile
    main(pklFile)
Example #2
    #     sumsqrd += np.sum(y_temp**2)
    #     n += y_temp.size
    # rms = np.sqrt(sumsqrd/n)
    #np.save(os.path.join(rmsDir, 'overall_speech_rms.npy'), rms)
    return rms


if __name__ == "__main__":
    from pathtype import PathType
    # Create commandline interface
    parser = argparse.ArgumentParser(description='Generate stimulus for '
                                     'training TRF decoder by concatenating '
                                     'matrix test materials')
    parser.add_argument('--MatrixDir',
                        type=PathType(exists=True, type='dir'),
                        default='../speech_components',
                        help='Matrix test speech data location')
    parser.add_argument('--OutDir',
                        type=PathType(exists=None, type='dir'),
                        default='./stimulus',
                        help='Output directory')
    parser.add_argument('--CalcRMS', action='store_true',
                        help='Calculate the RMS of the speech material')
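    # Parse the arguments, keeping only those that received a value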
    args = {
        k: v
        for k, v in vars(parser.parse_args()).items() if v is not None
    }

    rmsDir = os.path.join(args['OutDir'], "rms")
    if args['CalcRMS']:
        indexes = gen_indexes()
Example #3
            # NOTE: the RMS correction factor is currently commented out of the
            # append; the print below only reports each chunk's corrected RMS.
            out = np.append(out, data[chunk['start']:chunk['stop']])  # *rmsCorFactor
            print(np.sqrt(np.mean((data[chunk['start']:chunk['stop']]*rmsCorFactor)**2)))

    sndio.write('./out.wav', out, rate=fs, format=fmtStr, enc=encStr)
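    # The output path './out.wav' is hard-coded; fs, fmtStr and encStr are
    # assumed to carry the input file's sample rate, format and encoding.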



if __name__ == "__main__":
    # Create commandline interface
    parser = argparse.ArgumentParser(description='Flatten the RMS level of '
                                     'the matrix test speech material using '
                                     'its annotation file')
    parser.add_argument('AudioFile', type=PathType(exists=True),
                        default='./speech.wav',
                        help='Speech wave file')
    parser.add_argument('AnnotationFile', type=PathType(exists=True),
                        default='./speech.csv', help='Speech annotation csv')
    args = {
        k: v
        for k, v in vars(parser.parse_args()).items() if v is not None
    }


    # Flatten the RMS of the speech using the arguments provided on the command line
    flattenRMS(**args)
Example #4
import argparse
import os

import dill
import numpy as np
from pathtype import PathType
def main(args):
    file = args.data_file
    with open(file, 'rb') as pkl:
        a = dill.load(pkl)
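    # Drop the BPLabs-specific participant object so the remaining data can be
    # loaded without BPLabs installed (assumed intent, per the script description).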
    del a['participant']
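    # np.save stores the dict as a 0-d object array; reload it with
    # np.load(..., allow_pickle=True).item()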
    np.save(os.path.basename(file) + '-new.npy', a)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=
        'Script for removing BPLabs-specific objects from participant data')
    parser.add_argument(dest='data_file',
                        type=PathType(),
                        help='Participant data pickle file to clean',
                        metavar='DATAFILE')
    parser.add_argument(
        '--verbose',
        '-v',
        action='count',
        help='Specifies level of verbosity in output. For example: \'-vvvvv\' '
        'will output all information. \'-v\' will output minimal information. '
    )
    args = parser.parse_args()

    # Set verbosity of logger output based on argument
    if not args.verbose:
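        # No -v flag was given: fall back to 10 (the value of logging.DEBUG),
        # assuming the count is mapped onto logger levels.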
        args.verbose = 10
    else: