Esempio n. 1
0
def test_apply_filters(s, t, c, list, min_value, max_value):
    """Applying a [min_value, max_value] range filter keeps only in-range values.

    Builds a one-table, one-column dataset from ``list``, applies the same
    range filter twice (idempotence), then checks the filtered schema:
    surviving values lie inside the range, or the column is empty, or the
    range is inverted (min > max) and nothing was filtered out.

    NOTE(review): the ``list`` parameter shadows the builtin of the same
    name; it is kept unchanged to preserve the public signature.
    """
    dataset1 = DataSet(s)
    dataset1.add_table(t, [c])
    for v in list:
        # Value and error are set to the same number for simplicity.
        dataset1.tables[t].columns[c].add_value(v, v)

    # Renamed from 'filter' to avoid shadowing the builtin.
    range_filter = dict()
    range_filter["table"] = t
    range_filter["column"] = c
    range_filter["from"] = min_value
    range_filter["to"] = max_value

    # Applying the same filter twice must not change the result.
    filtered_dataset = dataset1.apply_filters([range_filter, range_filter])
    schema = filtered_dataset.get_schema()

    assert t in schema
    assert schema[t]
    assert c in schema[t]
    assert "count" in schema[t][c]

    # snake_case per PEP 8 (was 'filteredItemsCount').
    filtered_items_count = schema[t][c]["count"]
    if filtered_items_count > 0 and min_value <= max_value:
        # Surviving values must lie inside the requested range.
        assert schema[t][c]["min_value"] >= min_value
        assert schema[t][c]["max_value"] <= max_value

    elif filtered_items_count == 0:
        assert schema[t][c]["count"] == 0

    else:
        # Inverted range (min > max): the filter is a no-op.
        assert schema[t][c]["count"] == len(list)
Esempio n. 2
0
def test_apply_filters(s, t, c, list, min_value, max_value):
    """Applying a [min_value, max_value] range filter keeps only in-range values.

    Variant that stores values without an explicit error term and expresses
    the post-conditions as named booleans.

    NOTE(review): the ``list`` parameter shadows the builtin of the same
    name; it is kept unchanged to preserve the public signature.
    """
    dataset1 = DataSet(s)
    dataset1.add_table(t, [c])
    for v in list:
        dataset1.tables[t].columns[c].add_value(v)

    # Renamed from 'filter' to avoid shadowing the builtin.
    range_filter = dict()
    range_filter["table"] = t
    range_filter["column"] = c
    range_filter["from"] = min_value
    range_filter["to"] = max_value

    filtered_dataset = dataset1.apply_filters([range_filter, range_filter])
    schema = filtered_dataset.get_schema()

    # Check the structure FIRST: the original read schema[t][c] while
    # computing the conditions, so a missing table/column raised KeyError
    # instead of failing as an assertion.
    assert t in schema and schema[t] and c in schema[t]

    column_has_values_inside_range = (
        schema[t][c]["count"] > 0 and schema[t][c]["min_value"] >= min_value
        and schema[t][c]["max_value"] <= max_value)
    column_is_empty = schema[t][c]["count"] == 0
    wrong_filter_range = min_value > max_value

    assert (column_has_values_inside_range
            or column_is_empty
            or wrong_filter_range)
Esempio n. 3
0
def get_fits_dataset(destination, dsId, table_ids):
    """Load every binary-table HDU of a FITS file into a DataSet.

    Args:
        destination: path of the FITS file to read.
        dsId: identifier for the created DataSet.
        table_ids: table ids to assign, indexed by HDU position.

    Returns:
        The populated DataSet; non-table HDUs are skipped with a debug log.
    """
    hdulist = fits.open(destination)
    try:
        dataset = DataSet(dsId)

        for hdu_index, hdu in enumerate(hdulist):

            if isinstance(hdu, fits.hdu.table.BinTableHDU):
                table_id = table_ids[hdu_index]
                header_names = hdu.columns.names
                tbdata = hdu.data
                dataset.add_table(table_id, header_names)

                for col_index, header_name in enumerate(header_names):
                    # np.append([], ...) copies the column out of the FITS
                    # buffer as a flat array, so closing the file is safe.
                    dataset.tables[table_id].columns[
                        header_name].values = np.append(
                            [], tbdata.field(col_index))

            else:
                logging.debug("No valid data on: %s" % hdu_index)
                logging.debug("Type of Data: %s" % type(hdu))
    finally:
        # try/finally: the original leaked the open file on any exception.
        hdulist.close()

    logging.debug("Read fits file successfully: %s" % destination)

    return dataset
Esempio n. 4
0
def test_apply_filters(s, t, c, list, min_value, max_value):
    """Applying a [min_value, max_value] range filter keeps only in-range values.

    Builds a one-table, one-column dataset from ``list`` (value == error),
    applies the same range filter twice, and verifies the filtered schema.

    NOTE(review): the ``list`` parameter shadows the builtin of the same
    name; it is kept unchanged to preserve the public signature.
    """
    dataset1 = DataSet(s)
    dataset1.add_table(t, [c])
    for v in list:
        dataset1.tables[t].columns[c].add_value(v, v)

    # Renamed from 'filter' to avoid shadowing the builtin.
    range_filter = dict()
    range_filter["table"] = t
    range_filter["column"] = c
    range_filter["from"] = min_value
    range_filter["to"] = max_value

    filtered_dataset = dataset1.apply_filters([range_filter, range_filter])
    schema = filtered_dataset.get_schema()

    assert t in schema
    assert schema[t]
    assert c in schema[t]
    assert "count" in schema[t][c]

    # snake_case per PEP 8 (was 'filteredItemsCount').
    filtered_items_count = schema[t][c]["count"]
    if filtered_items_count > 0 and min_value <= max_value:
        assert schema[t][c]["min_value"] >= min_value
        assert schema[t][c]["max_value"] <= max_value

    elif filtered_items_count == 0:
        assert schema[t][c]["count"] == 0

    else:
        # Inverted range (min > max): the filter is a no-op.
        assert schema[t][c]["count"] == len(list)
Esempio n. 5
0
def test_get_shema(s, t, c, v):
    """Adding one value yields a schema exposing the column id and count 1.

    NOTE(review): 'shema' is a typo in the original name, kept to preserve
    the public interface.
    """
    dataset = DataSet(s)
    dataset.add_table(t, [c])
    dataset.tables[t].columns[c].add_value(v)
    schema = dataset.get_schema()

    # Same conditions as the original single compound assert, split for
    # clearer failure reporting; evaluation order is unchanged.
    assert t in schema
    assert schema[t]
    assert c in schema[t]
    assert schema[t][c]["id"] == c
    assert "count" in schema[t][c]
    assert schema[t][c]["count"] == 1
Esempio n. 6
0
def test_clone(s, t, c, v, e):
    """A cloned dataset must expose the same schema as its source."""
    source = DataSet(s)
    source.add_table(t, [c])
    source.tables[t].columns[c].add_value(v, e)
    schema_before = source.get_schema()
    cloned = source.clone()
    assert schema_before == cloned.get_schema()
Esempio n. 7
0
def test_clone(s, t, c, v, e):
    """clone() preserves the full schema of a one-value dataset."""
    original = DataSet(s)
    original.add_table(t, [c])
    original.tables[t].columns[c].add_value(v, e)
    original_schema = original.get_schema()
    duplicate = original.clone()
    duplicate_schema = duplicate.get_schema()
    assert original_schema == duplicate_schema
Esempio n. 8
0
def test_get_schema(s, t, c, v, e):
    """Schema of a one-value dataset reports the column id and count == 1."""
    ds = DataSet(s)
    ds.add_table(t, [c])
    ds.tables[t].columns[c].add_value(v, e)
    result = ds.get_schema()

    assert t in result
    assert result[t]
    assert c in result[t]
    # Factor out the column entry instead of repeating the lookup.
    column_schema = result[t][c]
    assert column_schema["id"] == c
    assert "count" in column_schema
    assert column_schema["count"] == 1
Esempio n. 9
0
def get_txt_dataset(destination, table_id, header_names):
    """Load a whitespace-delimited text file into a DataSet.

    Each column of the file is stored, in order, under the matching entry
    of ``header_names``.

    Args:
        destination: path of the text file, readable by np.loadtxt.
        table_id: id used for both the DataSet and its single table.
        header_names: column names, one per file column.

    Returns:
        The populated DataSet.
    """
    data = np.loadtxt(destination)

    # PEP 8 spacing fixed (was 'DataSet( table_id )').
    dataset = DataSet(table_id)
    dataset.add_table(table_id, header_names)

    for i, header_name in enumerate(header_names):
        # data[:, i] is the idiomatic full-column slice
        # (was data[0:len(data), i]).
        dataset.tables[table_id].columns[header_name].values = data[:, i]

    logging.debug("Read txt file successfully: %s" % destination)

    return dataset
Esempio n. 10
0
def get_txt_dataset(destination, table_id, header_names):
    """Load a whitespace-delimited text file into a DataSet.

    Args:
        destination: path of the text file, readable by np.loadtxt.
        table_id: id used for both the DataSet and its single table.
        header_names: column names, one per file column.

    Returns:
        The populated DataSet.
    """
    data = np.loadtxt(destination)

    dataset = DataSet(table_id)
    dataset.add_table(table_id, header_names)

    for i, header_name in enumerate(header_names):
        # data[:, i] is the idiomatic full-column slice
        # (was data[0:len(data), i]).
        dataset.tables[table_id].columns[header_name].values = data[:, i]

    logging.debug("Read txt file successfully: %s" % destination)

    return dataset
Esempio n. 11
0
def get_fits_dataset(destination, table_id):
    """Load the table in extension HDU 1 of a FITS file into a DataSet.

    Args:
        destination: path of the FITS (light-curve) file.
        table_id: id used for both the DataSet and its single table.

    Returns:
        The populated DataSet.

    NOTE(review): the HDUList is deliberately left open — column values
    are assigned directly from tbdata.field(i), which may be a view into
    the (possibly memory-mapped) file; closing here could invalidate them.
    """
    hdulist = fits.open(destination)
    tbdata = hdulist[1].data

    header_names = hdulist[1].columns.names
    dataset = DataSet(table_id)
    dataset.add_table(table_id, header_names)

    for i, header_name in enumerate(header_names):
        dataset.tables[table_id].columns[header_name].values = tbdata.field(i)

    logging.debug("Read lc file successfully: %s" % destination)

    return dataset
Esempio n. 12
0
def get_fits_dataset(destination, table_id):
    """Load the table in extension HDU 1 of a FITS file into a DataSet.

    Args:
        destination: path of the FITS (light-curve) file.
        table_id: id used for both the DataSet and its single table.

    Returns:
        The populated DataSet.

    NOTE(review): the HDUList is deliberately left open — column values
    are assigned directly from tbdata.field(i), which may be a view into
    the (possibly memory-mapped) file; closing here could invalidate them.
    """
    hdulist = fits.open(destination)
    tbdata = hdulist[1].data

    header_names = hdulist[1].columns.names
    # PEP 8 spacing fixed (was 'DataSet( table_id )').
    dataset = DataSet(table_id)
    dataset.add_table(table_id, header_names)

    for i, header_name in enumerate(header_names):
        dataset.tables[table_id].columns[header_name].values = tbdata.field(i)

    logging.debug("Read lc file successfully: %s" % destination)

    return dataset
Esempio n. 13
0
def test_join(s, t, c, v0, e0, v1, e1):
    """Joining two one-value datasets yields a column with count == 2."""
    dataset1 = DataSet(s)
    dataset1.add_table(t, [c])
    dataset1.tables[t].columns[c].add_value(v0, e0)
    dataset2 = DataSet(s)
    dataset2.add_table(t, [c])
    # Fix: the second dataset must use its own error term e1; the original
    # passed e0 here, leaving the e1 parameter unused (copy-paste typo).
    dataset2.tables[t].columns[c].add_value(v1, e1)

    dataset1 = dataset1.join(dataset2)
    schema = dataset1.get_schema()

    assert t in schema
    assert schema[t]
    assert c in schema[t]
    assert "count" in schema[t][c]
    assert schema[t][c]["count"] == 2
Esempio n. 14
0
def test_join(s, t, c, v0, e0, v1, e1):
    """Joining two one-value datasets yields a column with count == 2."""
    dataset1 = DataSet(s)
    dataset1.add_table(t, [c])
    dataset1.tables[t].columns[c].add_value(v0, e0)
    dataset2 = DataSet(s)
    dataset2.add_table(t, [c])
    # Fix: the second dataset must use its own error term e1; the original
    # passed e0 here, leaving the e1 parameter unused (copy-paste typo).
    dataset2.tables[t].columns[c].add_value(v1, e1)

    dataset1 = dataset1.join(dataset2)
    schema = dataset1.get_schema()

    assert t in schema
    assert schema[t]
    assert c in schema[t]
    assert "count" in schema[t][c]
    assert schema[t][c]["count"] == 2
Esempio n. 15
0
def get_txt_dataset(destination, table_id, header_names):
    """Load a text file whose columns are value/error pairs into a DataSet.

    The file layout is Column1, Column1Err, Column2, Column2Err, ... so
    header i maps to file columns 2*i (values) and 2*i + 1 (errors).

    Args:
        destination: path of the text file, readable by np.loadtxt.
        table_id: id used for both the DataSet and its single table.
        header_names: one name per value/error column pair.

    Returns:
        The populated DataSet.
    """
    data = np.loadtxt(destination)

    dataset = DataSet(table_id)
    dataset.add_table(table_id, header_names)

    # Column1, Column1Err, Column2, Column2Err .. header order expected
    for i, header_name in enumerate(header_names):
        column = dataset.tables[table_id].columns[header_name]
        # data[:, k] is the idiomatic full-column slice
        # (was data[0:len(data), k]).
        column.values = data[:, i * 2]
        column.error_values = data[:, (i * 2) + 1]

    logging.debug("Read txt file successfully: %s" % destination)

    return dataset
Esempio n. 16
0
def get_fits_dataset_with_stingray(destination,
                                   dsId='FITS',
                                   hduname='EVENTS',
                                   column='TIME'):
    """Build a DataSet from a FITS events file using Stingray helpers.

    Args:
        destination: path of the FITS file.
        dsId: id for the created DataSet.
        hduname: name of the events HDU to read.
        column: primary (time) column; every other column of the HDU is
            loaded as an additional column.

    Returns:
        A DataSet with the events table plus a "GTI" table holding the
        good-time-interval START/STOP values.
    """
    # Gets columns from fits hdu table
    columns = get_fits_table_column_names(destination, hduname)

    # Every column except the primary one is requested as extra data
    # (list comprehension replaces the manual np.append accumulation).
    additional_columns = [
        column_name for column_name in columns if column_name != column
    ]

    # Reads fits data
    fits_data = load_events_and_gtis(destination,
                                     additional_columns=additional_columns)

    # Creates the dataset
    dataset = DataSet(dsId)

    # Fills Hdu table
    dataset.add_table(hduname, columns)
    dataset.tables[hduname].columns[column].add_values(fits_data.ev_list)
    # Distinct loop variable: the original reassigned the 'column'
    # parameter inside this loop.
    for extra_column in additional_columns:
        dataset.tables[hduname].columns[extra_column].add_values(
            fits_data.additional_data[extra_column])

    # Fills Gtis table
    gti_columns = ["START", "STOP"]
    gti_start = fits_data.gti_list[:, 0]
    gti_end = fits_data.gti_list[:, 1]
    dataset.add_table("GTI", gti_columns)
    dataset.tables["GTI"].columns[gti_columns[0]].add_values(gti_start)
    dataset.tables["GTI"].columns[gti_columns[1]].add_values(gti_end)

    logging.debug("Read fits with stingray file successfully: %s" %
                  destination)

    return dataset
Esempio n. 17
0
def test_add_table(s, t, c):
    """After adding a single table, the dataset holds exactly one table."""
    ds = DataSet(s)
    ds.add_table(t, [c])
    assert len(ds.tables) == 1
Esempio n. 18
0
def test_add_table(s, t, c):
    """add_table registers exactly one table in a fresh dataset."""
    fresh_dataset = DataSet(s)
    fresh_dataset.add_table(t, [c])
    table_count = len(fresh_dataset.tables)
    assert table_count == 1