Example #1
def test_HDF_Datastore_Build_with_fiducialtracks():
    """The datastore build is performed successfully.
    
    """
    dbName = testDataRoot / Path('database_test_files/myDB_Build_Avg.h5')
    if dbName.exists():
        remove(str(dbName))
    parser = parsers.PositionParser(positionIDs={
        1: 'prefix',
        3: 'channelID',
        4: 'acqID'
    })
    readerDict = {
        'FiducialTracks': readers.CSVReader(),
        'AverageFiducial': readers.CSVReader()
    }

    # Directory to traverse for acquisition files
    searchDirectory = testDataRoot / Path('test_experiment_2')

    # Build datastore
    with db.HDFDatastore(dbName) as myDB:
        myDB.build(parser,
                   searchDirectory,
                   filenameStrings={
                       'FiducialTracks': '_Fids.dat',
                       'AverageFiducial': '_AvgFid.dat'
                   },
                   readers=readerDict,
                   dryRun=False)

    # Test for existence of the data
    with h5py.File(str(dbName), mode='r') as hdf:
        key1 = 'Control/Control_1/'
        name1 = 'FiducialTracks_ChannelA647'
        name2 = 'AverageFiducial_ChannelA647'
        ok_(key1 + name1 in hdf)
        ok_(key1 + name2 in hdf)
        ok_('SMLM_prefix' in hdf[key1 + name1].attrs)
        ok_('SMLM_acqID' in hdf[key1 + name1].attrs)
        ok_('SMLM_datasetType' in hdf[key1 + name1].attrs)
        ok_('SMLM_channelID' in hdf[key1 + name1].attrs)
        ok_('SMLM_dateID' in hdf[key1 + name1].attrs)
        ok_('SMLM_posID' in hdf[key1 + name1].attrs)
        ok_('SMLM_sliceID' in hdf[key1 + name1].attrs)

        key2 = 'Control/Control_2/'
        ok_(key2 + name1 in hdf)
        ok_(key2 + name2 in hdf)

        key3 = 'shTRF2/shTRF2_1/'
        ok_(key3 + name1 in hdf)
        ok_(key3 + name2 in hdf)

        key4 = 'shTRF2/shTRF2_2/'
        ok_(key4 + name1 in hdf)
        ok_(key4 + name2 in hdf)

    # Remove test datastore file
    remove(str(dbName))
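The excerpts in this collection are test functions lifted out of a larger test module, so their module-level imports and fixtures are omitted. The sketch below shows one plausible setup under which they would run; the B-Store module paths (bstore.config, bstore.database, bstore.parsers, bstore.readers) and the testDataRoot definition are assumptions inferred from the identifiers used in the tests, not taken from the original module.

# Assumed module-level setup for these test excerpts (not part of the original examples).
from pathlib import Path
from os import remove

import h5py
from nose.tools import ok_, assert_equal

# The module aliases below are guesses based on the names used in the tests;
# Example #1 refers to the database module as `db`, Example #6 as `database`.
import bstore.config as config
import bstore.database as database
import bstore.database as db
import bstore.parsers as parsers
import bstore.readers as readers

# Root folder holding the test data; point this at your local copy (hypothetical path).
testDataRoot = Path('/path/to/bstore_test_files')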
Example #2
def test_CSVReader_Comma_Delimited_Data():
    """CSVReader reads comma delimited files.
    
    """
    filePath = testDataRoot / Path('readers_test_files/csv/comma_delimited/')
    filename = filePath / Path('HeLaL_Control_1.csv')
    reader = readers.CSVReader()
    
    # Read the data from file
    data = reader(filename)
    
    # Verify data was read correctly
    assert_equal(len(data.columns), 9)
    assert_equal(len(data), 11)
Example #3
def test_CSVReader_Kwargs():
    """CSVReader passes keyword arguments to Pandas read_csv() function.
    
    """
    filePath = testDataRoot / Path('readers_test_files/csv/comma_delimited/')
    filename = filePath / Path('HeLaL_Control_1.csv')
    reader = readers.CSVReader()
    
    # Read the data from file
    # 'usecols' and 'nrows' are keywords of the Pandas read_csv() function
    data = reader(filename, usecols=['x', 'y'], nrows=5)
    
    # Verify data was read correctly
    assert_equal(len(data.columns), 2)
    assert_equal(len(data), 5)
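Example #3 relies on CSVReader forwarding its keyword arguments to pandas.read_csv(). A minimal sketch of a reader with that pass-through behaviour is shown below; MinimalCSVReader is a hypothetical illustration, not the actual B-Store implementation.

import pandas as pd

class MinimalCSVReader:
    """Hypothetical callable reader that forwards keyword arguments to pandas.read_csv()."""
    def __call__(self, filename, **kwargs):
        # sep, usecols, nrows, skiprows, etc. pass straight through to read_csv,
        # which is the behaviour the assertions above depend on.
        return pd.read_csv(str(filename), **kwargs)

# Usage mirroring the test: read only the x and y columns of the first five rows.
# reader = MinimalCSVReader()
# data = reader(filename, usecols=['x', 'y'], nrows=5)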
Example #4
def test_CSVReader_Tab_Delimited_Data():
    """CSVReader reads tab-delimited files.
    
    """
    filePath = testDataRoot / Path('readers_test_files/csv/tab_delimited/')
    filename = filePath / Path('HeLaL_Control_1.csv')
    reader = readers.CSVReader()
    
    # Read the data from file
    # sep is a keyword argument to Pandas read_csv()
    data = reader(filename, sep='\t')
    
    # Verify data was read correctly
    assert_equal(len(data.columns), 9)
    assert_equal(len(data), 11)
Example #5
def test_CSVReader_Works_With_Parser():
    """CSVReader is correctly passed to readFromFile() from SimpleParser.
    
    """
    filePath = testDataRoot / Path('readers_test_files/csv/tab_delimited/')
    filename = filePath / Path('HeLaL_Control_1.csv')
    
    # Initialize the Parser and Reader                        
    parser = parsers.SimpleParser()
    reader = readers.CSVReader()
    
    # reader keyword argument passes the CSVReader instance;
    # all other keyword arguments are passed to CSVReader's __call__ function.
    parser.parseFilename(
        filename, datasetType='Localizations', reader=reader, sep='\t')
    
    # Test a couple of the localization results
    assert_equal(parser.dataset.data['x'].iloc[0], 6770)
    assert_equal(parser.dataset.data['intensity'].iloc[0], 4386.6)
    assert_equal(parser.dataset.data['x'].iloc[1], 7958.1)
    assert_equal(len(parser.dataset.data.columns), 9)
    assert_equal(len(parser.dataset.data), 11)
Example #6
def test_HDFDatastore_Build_With_Reader():
    """HDFDatastore.build() works when Reader objects are specified.
    
    """
    dsName = testDataRoot / Path(('parsers_test_files/SimpleParser/'
                                  'test_id_collection_temp.h5'))
    if dsName.exists():
        remove(str(dsName))

    temp = config.__Registered_DatasetTypes__.copy()
    config.__Registered_DatasetTypes__ = [
        'Localizations', 'LocMetadata', 'WidefieldImage'
    ]

    parser = parsers.SimpleParser()
    filenameStrings = {
        'Localizations': '.csv',
        'LocMetadata': '.txt',
        'WidefieldImage': '.tif'
    }
    readersDict = {'Localizations': readers.CSVReader()}

    # Note: sep and skiprows are keyword arguments of CSVReader; readTiffTags is
    # a keyword argument of the WidefieldImage readFromFile() method
    with database.HDFDatastore(dsName) as myDS:
        res = myDS.build(parser,
                         dsName.parent,
                         filenameStrings,
                         readers=readersDict,
                         sep=',',
                         skiprows=2,
                         readTiffTags=False)

    config.__Registered_DatasetTypes__ = temp
    if dsName.exists():
        remove(str(dsName))

    assert_equal(len(res), 6)