Example #1
            nscanlines = int(input("\tnscanlines (usually 127) "))
            npoints = int(input("\tnpoints (usually 1020) "))
            junk = int(input("\tjunk (usually 36, or 1020 - 984) "))

        # TODO use metadata instead of hard-coded values
        header = Header()
        header.w = nscanlines  # input image width
        header.h = npoints - junk  # input image height, trimmed
        header.sf = 4000000  # magic number, sorry!
        probe = Probe()
        probe.radius = 10000  # based on '10' in transducer model number
        probe.numElements = 128  # based on '128' in transducer model number
        probe.pitch = 185  # based on Ultrasonix C9-5/10 transducer
        conv = Converter(header, probe)

    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)

    # define "support" file names based on .raw
    wav = os.path.join(parent, basename + ".ch1.wav")
    sync = os.path.join(parent, basename + ".sync.txt")
    idx_txt = os.path.join(parent, basename + ".idx.txt")

    # make destination and copy "support" files for parent file
    copy_dir = os.path.join(output_dir, basename)

    os.mkdir(copy_dir)
    shutil.copy(wav, copy_dir)
    shutil.copy(idx_txt, copy_dir)
    shutil.copy(stimfile, copy_dir)

    # get frame indices
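
The Header/Probe/Converter block above reappears nearly verbatim in Examples #2 and #5. The sketch below factors it into one helper; it is a minimal, hypothetical refactoring (the function name make_converter is not from the original scripts), and it assumes Header, Probe, and Converter are imported from the same modules the examples already use (the imports are not shown in these snippets).

def make_converter(nscanlines, npoints, junk):
    """Build a scan Converter with the hard-coded probe values used above.

    Hypothetical helper; Header, Probe, and Converter are assumed to be
    imported as in the surrounding scripts.
    """
    header = Header()
    header.w = nscanlines        # input image width
    header.h = npoints - junk    # input image height, trimmed
    header.sf = 4000000          # "vec-freq" value for Ultrasonix (see Example #2)
    probe = Probe()
    probe.radius = 10000         # based on '10' in transducer model number
    probe.numElements = 128      # based on '128' in transducer model number
    probe.pitch = 185            # based on Ultrasonix C9-5/10 transducer
    return Converter(header, probe)
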
Example #2
            pass

    header = Header()
    print(header)
    header.w = 127          # input image width
    header.h = 1020         # input image height (alternative: 255)
    header.sf = 4000000     # for ultrasonix this is the vec-freq value
    probe = Probe()
    probe.radius = 10000    # based on '10' in transducer model number
    probe.numElements = 128 # based on '128' in transducer model number
    probe.pitch = 185       # guess based on Ultrasonix C9-5/10 transducer (alternative: 205)
    c = Converter(header, probe)

    image_shape = (1020, 127)   # alternative: (255, 127)
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)
    print(rf)

    # visualize components on (approximately) converted fan, if desired
    for f in np.arange(0, last_idx + 1):
        d = rdr.get_frame(f).reshape(image_shape)
        mag = np.max(d) - np.min(d)
        d = (d - np.min(d)) / mag * 255         # min-max normalize to 0-255
        pcn = np.flipud(c.as_bmp(np.flipud(d)))
        # pcn = c.as_bmp(d)
        plt.title("Frame {}, {}".format(f + 1, stim))
        plt.imshow(pcn, cmap="Greys_r")
        file_ending = "subj{}-{}-{}.png".format(subject, stim, f)
        ultradir = os.path.join(os.path.basename(barename), file_ending)
        savepath = os.path.join(expdir, ultradir)
        print(savepath)
        plt.savefig(savepath)
        plt.close()  # release the figure before drawing the next frame
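
The loop body above performs one repeated step per frame: reshape the raw vector, min-max scale it to 0-255, and scan-convert it with the Converter. A minimal sketch of that step as a standalone function, under the same assumptions as the example (rdr is a RawReader and conv a Converter built as shown; the function name convert_frame is illustrative, while the default image_shape is the hard-coded shape from the example):

import numpy as np

def convert_frame(rdr, conv, frame_idx, image_shape=(1020, 127)):
    """Return one scan-converted frame, scaled to 0-255.

    Hypothetical wrapper around the per-frame step in Example #2.
    """
    d = rdr.get_frame(frame_idx).reshape(image_shape)
    mag = np.max(d) - np.min(d)
    d = (d - np.min(d)) / mag * 255              # min-max normalize to 0-255
    return np.flipud(conv.as_bmp(np.flipud(d)))  # flip, scan-convert, flip back
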
         
Example #3
        postbuf = 4

        frameperc = int(last_idx * midperc)
        print(frameperc)
        frame_start = frameperc - prebuf
        frame_end = frameperc + postbuf
        print(frame_start)
        print(frame_end)

    consec_diffs = []
    # build the reader once; it does not depend on the frame index
    rdr = RawReader(
        rf, nscanlines=nscanlines, npoints=npoints
    )  # or whatever the appropriate parameters actually are
    for f in np.arange(frame_start, frame_end):
        # old method of getting frames
        #        minuend = get_frame(rf,f+1, myframesize, med_filter = True)
        #        subtrahend = get_frame(rf, f, myframesize, med_filter = True)
        minuend = rdr.get_frame(f + 1)
        subtrahend = rdr.get_frame(f)
        cdiff = minuend - subtrahend
        cdiffnorm = np.linalg.norm(cdiff)
        consec_diffs.append(cdiffnorm)
    #    print(consec_diffs)
    mindiff, mindiff_idx = min(
        (val, idx) for (idx, val) in enumerate(consec_diffs))
    #    print(mindiff)
    #    print(mindiff_idx)
    rl_frame_idx = frame_start + mindiff_idx
    print(rl_frame_idx)
    # get frame, and check for NaN frames
    change = 0
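
Example #3 chooses a representative frame by minimizing the L2 norm of consecutive frame differences inside the [frame_start, frame_end) window, i.e. the frame least different from its successor. A compact, hypothetical restatement of that selection step (steadiest_frame is not a name from the original; the cast to float is an added precaution against unsigned wrap-around and is an assumption about the raw dtype):

import numpy as np

def steadiest_frame(rdr, frame_start, frame_end):
    """Index of the frame whose difference from its successor has the smallest norm.

    Hypothetical restatement of the selection logic in Example #3; rdr is a
    RawReader as in the example.
    """
    diffs = []
    for f in range(frame_start, frame_end):
        cdiff = rdr.get_frame(f + 1).astype(float) - rdr.get_frame(f).astype(float)
        diffs.append(np.linalg.norm(cdiff))
    return frame_start + int(np.argmin(diffs))
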
Example #4
    sync_tg = os.path.join(parent, acq + ".sync.TextGrid")
    idx_txt = os.path.join(parent, acq + ".idx.txt")

    # set up RawReader and frame dimensions
    if data is None:
        try:
            nscanlines, npoints, junk = read_echob_metadata(rf)
        except ValueError:
            print("WARNING: no data in {}.img.txt, please input:".format(acq))
            nscanlines = int(input("\tnscanlines (usually 127) "))
            npoints = int(input("\tnpoints (usually 1020) "))
            junk = int(input("\tjunk (usually 36, or 1020 - 984) "))
        #frame_dim_1 = nscanlines
        #frame_dim_2 = npoints - junk

    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)

    # instantiate LabelManagers
    pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
    sync_pm = audiolabel.LabelManager(from_file=sync_tg, from_type="praat")

    # extract ndarray representations of frames from .raw file
    for v, m in pm.tier('phone').search(vre, return_match=True):
        pron = pm.tier('word').label_at(v.center).text

        # skip any tokens from non-target words
        if pron not in target_list:
            continue

        # get phone label, disambiguating IY and IH based on pronunciation
        # skip some IY (diphthongized variants in some words)
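
The try/except fallback around read_echob_metadata, with manual prompts for the frame dimensions, is the same pattern seen in Examples #1, #4, and #6 (the suggested defaults in the prompts differ by recording setup). A minimal hedged wrapper for that pattern, with the prompt text copied from Example #4 (the function name metadata_or_prompt is not from the original scripts; read_echob_metadata is assumed to be imported as in those scripts):

def metadata_or_prompt(rf, acq):
    """Read frame dimensions from the .img.txt metadata, or prompt for them.

    Hypothetical wrapper around the fallback pattern in Examples #1, #4, and #6.
    """
    try:
        return read_echob_metadata(rf)
    except ValueError:
        print("WARNING: no data in {}.img.txt, please input:".format(acq))
        nscanlines = int(input("\tnscanlines (usually 127) "))
        npoints = int(input("\tnpoints (usually 1020) "))
        junk = int(input("\tjunk (usually 36, or 1020 - 984) "))
        return nscanlines, npoints, junk
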
Example #5
    if conv is None:
        print("Making converter...")
        nscanlines, npoints, junk = read_echob_metadata(rf)
        header = Header()
        header.w = nscanlines  # input image width
        header.h = npoints - junk  # input image height, trimmed
        header.sf = 4000000  # magic number, sorry!
        probe = Probe()
        probe.radius = 10000  # based on '10' in transducer model number
        probe.numElements = 128  # based on '128' in transducer model number
        probe.pitch = 185  # based on Ultrasonix C9-5/10 transducer
        conv = Converter(header, probe)

    print("Now working on {}".format(parent))
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)

    wav = os.path.join(parent, basename + ".ch1.wav")
    tg = os.path.join(parent, basename + ".ch1.TextGrid")
    sync_tg = os.path.join(parent, basename + ".sync.TextGrid")
    sync = os.path.join(parent, basename + ".sync.txt")
    idx_txt = os.path.join(parent, basename + ".idx.txt")

    # instantiate LabelManager objects for FA transcript and sync pulses
    try:
        pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
    except FileNotFoundError:
        print("No alignment TG in {}; skipping".format(basename))
        continue

    try:
Example #6
    sync = os.path.join(parent, acq + ".sync.txt")
    sync_tg = os.path.join(parent, acq + ".sync.TextGrid")
    idx_txt = os.path.join(parent, acq + ".idx.txt")

    # instantiate RawReader, which extracts ultrasound data from .raw files
    if data is None:
        try:
            nscanlines, npoints, junk = read_echob_metadata(rf)
        except ValueError:
            print("WARNING: no data in {}.img.txt".format(acq))
            nscanlines = int(
                input("\tnscanlines (usually 64) "))  # TODO update values
            npoints = int(input("\tnpoints (usually 1024) "))
            junk = int(input("\tjunk (usually 78) "))

    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)

    # instantiate LabelManager objects for FA transcript and sync pulses
    try:
        pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
    except FileNotFoundError:
        print("No alignment TG in {}; skipping".format(acq))
        continue

    try:
        sync_pm = audiolabel.LabelManager(from_file=sync_tg, from_type="praat")
    except FileNotFoundError:
        print("No sync TG in {}; skipping".format(acq))
        continue

    for seg, match in pm.tier('phones').search(seg_regexp, return_match=True):
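
Examples #5 and #6 guard each audiolabel.LabelManager load with a try/except FileNotFoundError so that acquisitions missing a TextGrid are skipped. A minimal sketch of that guard as a helper that returns None instead of using continue (the name load_praat_tg is hypothetical; the audiolabel call matches the ones in the examples):

import audiolabel

def load_praat_tg(path):
    """Return a LabelManager for a Praat TextGrid, or None if the file is missing.

    Hypothetical helper mirroring the guard used in Examples #5 and #6.
    """
    try:
        return audiolabel.LabelManager(from_file=path, from_type="praat")
    except FileNotFoundError:
        print("No TextGrid at {}; skipping".format(path))
        return None
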