Example #1
import time

import numpy as np
from mountainlab_pytools import mdaio  # assumed import path; may differ in the source project


def mask_chunk(num, use_it):
    # g_opts and g_shared_data are module-level globals set up by the driver
    opts = g_opts

    in_fname = opts['timeseries']  # The entire (large) input file
    out_fname = opts['timeseries_out']  # The entire (large) output file
    chunk_size = opts['chunk_size']
    num_write_chunks = opts['num_write_chunks']
    write_chunk_size = opts['write_chunk_size']

    X = mdaio.DiskReadMda(in_fname)

    t1 = int(num * write_chunk_size)  # first timepoint of the chunk
    t2 = int(np.minimum(X.N2(), t1 + write_chunk_size))  # last timepoint of the chunk (+1)

    # Read the chunk as float32
    chunk = X.readChunk(i1=0, N1=X.N1(), i2=t1, N2=t2 - t1).astype(np.float32)

    # Zero out the masked timepoints unless every sub-chunk is marked usable
    if sum(use_it) != len(use_it):
        masked = get_masked_indices(use_it, write_chunk_size, chunk_size, num_write_chunks)
        chunk[:, masked] = 0

    ###########################################################################################
    # Now we wait until we are ready to append to the output file
    # Note that we need to append in order, thus the shared_data object
    ###########################################################################################
    # Report that we have completed this chunk
    g_shared_data.reportChunkCompleted(num)

    # Wait until the previous chunk has been appended
    while num != g_shared_data.lastAppendedChunk() + 1:
        time.sleep(0.005)  # so we don't saturate the CPU unnecessarily

    # Append the masked chunk to the output file
    mdaio.appendmda(chunk, out_fname)

    # Report that we have appended so the next chunk can proceed
    g_shared_data.reportChunkAppended(num)

    # Print status if it has been long enough
    if g_shared_data.elapsedTime() > 4:
        g_shared_data.printStatus()
        g_shared_data.resetTimer()
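
All three examples coordinate through a module-level g_shared_data object whose class is not shown in the excerpts. Below is a minimal sketch of what such an object could look like, assuming the workers share state through a multiprocessing Manager; the method names match the calls above, but the class name SharedChunkInfo and all of its internals are assumptions, not the original implementation.

import time
import multiprocessing


class SharedChunkInfo:
    # Hypothetical sketch of the coordination object used as g_shared_data.
    # It serializes appends: each worker spins until its chunk is next in line.
    def __init__(self, num_chunks):
        manager = multiprocessing.Manager()
        self._state = manager.dict(num_completed=0, last_appended=-1,
                                   timer=time.time())
        self._lock = manager.Lock()
        self._num_chunks = num_chunks

    def reportChunkCompleted(self, num):
        with self._lock:
            self._state['num_completed'] += 1

    def reportChunkAppended(self, num):
        with self._lock:
            self._state['last_appended'] = num

    def lastAppendedChunk(self):
        return self._state['last_appended']

    def elapsedTime(self):
        return time.time() - self._state['timer']

    def resetTimer(self):
        with self._lock:
            self._state['timer'] = time.time()

    def printStatus(self):
        print('Processed {} of {} chunks'.format(
            self._state['num_completed'], self._num_chunks))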
Example #2
import time

import numpy as np
from mountainlab_pytools import mdaio  # assumed import path; may differ in the source project


def whiten_chunk(num, W):
    # g_opts and g_shared_data are module-level globals set up by the driver
    opts = g_opts
    in_fname = opts['timeseries']  # The entire (large) input file
    out_fname = opts['timeseries_out']  # The entire (large) output file
    chunk_size = opts['chunk_size']

    X = mdaio.DiskReadMda(in_fname)

    t1 = int(num * chunk_size)  # first timepoint of the chunk
    t2 = int(np.minimum(X.N2(), t1 + chunk_size))  # last timepoint of the chunk (+1)

    # Read the chunk
    chunk = X.readChunk(i1=0, N1=X.N1(), i2=t1, N2=t2 - t1)

    # Apply the whitening matrix
    chunk = W @ chunk
    
    ###########################################################################################
    # Now we wait until we are ready to append to the output file
    # Note that we need to append in order, thus the shared_data object
    ###########################################################################################
    # Report that we have completed this chunk
    g_shared_data.reportChunkCompleted(num)

    # Wait until the previous chunk has been appended
    while num != g_shared_data.lastAppendedChunk() + 1:
        time.sleep(0.005)  # so we don't saturate the CPU unnecessarily
    
    # Append the whitened chunk to the output file
    mdaio.appendmda(chunk, out_fname)
    
    # Report that we have appended so the next chunk can proceed
    g_shared_data.reportChunkAppended(num)

    # Print status if it has been long enough
    if g_shared_data.elapsedTime() > 4:
        g_shared_data.printStatus()
        g_shared_data.resetTimer()
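
The whitening matrix W is passed in by the caller and its construction is not part of the excerpt. A common choice for multichannel timeseries is ZCA whitening from the channel covariance; the helper below is a hypothetical sketch under that assumption (the name compute_whitening_matrix and the ridge term are not from the original source).

import numpy as np


def compute_whitening_matrix(X_sample):
    # Hypothetical helper: ZCA whitening matrix computed from a
    # channels x timepoints sample (e.g. one chunk read via X.readChunk(...))
    X_sample = X_sample - np.mean(X_sample, axis=1, keepdims=True)
    cov = X_sample @ X_sample.T / X_sample.shape[1]
    evals, evecs = np.linalg.eigh(cov)
    # A small ridge keeps near-zero eigenvalues from exploding the inverse
    W = evecs @ np.diag(1.0 / np.sqrt(evals + 1e-12)) @ evecs.T
    return W

With such a W, the channel covariance of W @ chunk is approximately the identity, which is what the whitening step above relies on.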
Example #3
import time

import numpy as np
from mountainlab_pytools import mdaio  # assumed import path; may differ in the source project


def filter_chunk(num):
    # g_opts and g_shared_data are module-level globals set up by the driver
    opts = g_opts
    in_fname = opts['timeseries']  # The entire (large) input file
    out_fname = opts['timeseries_out']  # The entire (large) output file
    samplerate = opts['samplerate']
    freq_min = opts['freq_min']
    freq_max = opts['freq_max']
    freq_wid = opts['freq_wid']
    chunk_size = opts['chunk_size']
    padding = opts['padding']

    X = mdaio.DiskReadMda(in_fname)

    chunk_size_with_padding = chunk_size + 2 * padding
    padded_chunk = np.zeros((X.N1(), chunk_size_with_padding), dtype='float32')

    t1 = int(num * chunk_size)  # first timepoint of the chunk
    t2 = int(np.minimum(X.N2(), t1 + chunk_size))  # last timepoint of the chunk (+1)
    s1 = int(np.maximum(0, t1 - padding))  # first timepoint including the padding
    s2 = int(np.minimum(X.N2(), t2 + padding))  # last timepoint (+1) including the padding

    # Choose aa so that t1 - s1 + aa = padding; the unpadded data then
    # always starts at column `padding` of padded_chunk
    aa = padding - (t1 - s1)

    # Read the padded chunk
    padded_chunk[:, aa:aa + (s2 - s1)] = X.readChunk(i1=0, N1=X.N1(), i2=s1, N2=s2 - s1)

    # Do the actual filtering with a DFT with real input
    padded_chunk = np.fft.rfft(padded_chunk)

    # Subtract off the mean of each channel unless we are doing only a low-pass filter
    if freq_min != 0:
        for m in range(padded_chunk.shape[0]):
            padded_chunk[m, :] = padded_chunk[m, :] - np.mean(padded_chunk[m, :])

    kernel = create_filter_kernel(chunk_size_with_padding, samplerate,
                                  freq_min, freq_max, freq_wid)
    kernel = kernel[0:padded_chunk.shape[1]]  # because this is the DFT of real data
    padded_chunk = padded_chunk * kernel  # broadcasting; np.tile is unnecessary
    # Pass n explicitly so odd-length chunks invert to their original length
    padded_chunk = np.fft.irfft(padded_chunk, n=chunk_size_with_padding)

    ###########################################################################################
    # Now we wait until we are ready to append to the output file
    # Note that we need to append in order, thus the shared_data object
    ###########################################################################################
    # Report that we have completed this chunk
    g_shared_data.reportChunkCompleted(num)

    # Wait until the previous chunk has been appended
    # (Alex: maybe there should be a timeout here in case ...)
    while num != g_shared_data.lastAppendedChunk() + 1:
        time.sleep(0.005)  # so we don't saturate the CPU unnecessarily

    # Append the filtered chunk (excluding the padding) to the output file
    mdaio.appendmda(padded_chunk[:, padding:padding + (t2 - t1)], out_fname)

    # Report that we have appended so the next chunk can proceed
    g_shared_data.reportChunkAppended(num)

    # Print status if it has been long enough
    if g_shared_data.elapsedTime() > 4:
        g_shared_data.printStatus()
        g_shared_data.resetTimer()
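
create_filter_kernel is not included in the excerpt. From the call site it must return a length-N frequency-domain gain in np.fft.fftfreq order, since the caller keeps only the first N//2 + 1 entries to match the rfft output. The sketch below is a hypothetical reconstruction using tanh rolloffs of width freq_wid; the original project's rolloff shape may well differ.

import numpy as np


def create_filter_kernel(N, samplerate, freq_min, freq_max, freq_wid):
    # Hypothetical sketch: a smooth band-pass gain evaluated on the
    # full-length FFT frequency grid (fftfreq order)
    freqs = np.abs(np.fft.fftfreq(N, d=1.0 / samplerate))
    kernel = np.ones(N)
    if freq_min > 0:  # high-pass rolloff around freq_min
        kernel *= 0.5 * (1 + np.tanh((freqs - freq_min) / freq_wid))
    if freq_max > 0:  # low-pass rolloff around freq_max
        kernel *= 0.5 * (1 - np.tanh((freqs - freq_max) / freq_wid))
    return kernel

Because freqs follows the fftfreq layout, indices 0 through N//2 cover the non-negative frequencies, which is exactly the kernel[0:padded_chunk.shape[1]] slice taken above.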