Example #1
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.
    """

    country = p.primary_header['Country code'] 
    cruise = p.cruise()
    originator_cruise = p.originator_cruise()
    uid = p.uid()

    # don't bother if this has already been analyzed
    command = 'SELECT en_track_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(uid) + ';'
    en_track_result = main.dbinteract(command)
    if en_track_result[0][0] is not None:
        en_track_result = main.unpack_row(en_track_result[0])[0]
        result = np.zeros(1, dtype=bool)
        result[0] = np.any(en_track_result)
        return result

    # make sure this profile makes sense in the track check
    if not assess_usability(p):
        return np.zeros(1, dtype=bool)

    # fetch all profiles on track, sorted chronologically, earliest first (None sorted as highest), then by uid
    command = 'SELECT uid, year, month, day, time, lat, long, probe, raw FROM ' + parameters["table"] + ' WHERE cruise = ' + str(cruise) + ' and country = "' + str(country) + '" and ocruise = "' + str(originator_cruise) + '" and year is not null and month is not null and day is not null and time is not null ORDER BY year, month, day, time, uid ASC;'
    track_rows = main.dbinteract(command)

    # avoid inappropriate profiles
    track_rows = [tr for tr in track_rows if assess_usability_raw(tr[8][1:-1])]

    # start all as passing by default
    EN_track_results = {}
    for i in range(len(track_rows)):
        EN_track_results[track_rows[i][0]] = np.zeros(1, dtype=bool)

    # copy the list of headers;
    # remove entries as they are flagged.
    passed_rows = copy.deepcopy(track_rows)
    rejects = findOutlier(passed_rows, EN_track_results)

    while rejects != []:
        passed_index = [x for x in range(len(passed_rows)) if x not in rejects ]
        passed_rows = [passed_rows[index] for index in passed_index ]
        rejects = findOutlier(passed_rows, EN_track_results)

    # if more than half got rejected, reject everyone
    if len(passed_rows) < len(track_rows) / 2:
        for i in range(len(track_rows)):
            EN_track_results[track_rows[i][0]][0] = True

    # write all to db
    result = []
    for i in range(len(track_rows)):
        result.append((main.pack_array(EN_track_results[track_rows[i][0]]), track_rows[i][0]))

    query = "UPDATE " + sys.argv[1] + " SET en_track_check=? WHERE uid=?"
    main.interact_many(query, result)
    return EN_track_results[uid]
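For reference, the SELECT statements above splice values straight into the SQL text, while the closing UPDATE goes through ? placeholders and main.interact_many. Below is a minimal, self-contained sketch of the placeholder pattern with the standard sqlite3 module; the table and values are illustrative, and main.interact_many is assumed to wrap something like executemany.

import sqlite3

# In-memory stand-in for the QC results table used above (illustrative).
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE demo (uid INTEGER PRIMARY KEY, en_track_check BLOB)")
conn.executemany("INSERT INTO demo VALUES(?,?)", [(1, None), (2, None)])

# Placeholder-style bulk update, presumably what main.interact_many performs;
# placeholders also sidestep the quoting pitfalls of string-built SQL.
rows = [(b"packed-flags-1", 1), (b"packed-flags-2", 2)]
conn.executemany("UPDATE demo SET en_track_check=? WHERE uid=?", rows)
print(conn.execute("SELECT uid, en_track_check FROM demo").fetchall())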
Example #2
def test(p, parameters):
    """ 
    Runs the quality control check on profile p and returns a numpy array 
    of quality control decisions with False where the data value has 
    passed the check and True where it failed. 
    """
    
    cruise = p.cruise()
    uid = p.uid()
    
    # don't bother if cruise == 0 or None, or if timestamp is corrupt
    if (cruise in [0, None]) or (None in [p.year(), p.month(), p.day(), p.time()]):
        return np.zeros(1, dtype=bool)
    
    # don't bother if this has already been analyzed
    command = 'SELECT en_track_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(uid) + ';'
    en_track_result = main.dbinteract(command)
    if en_track_result[0][0] is not None:
        en_track_result = main.unpack_row(en_track_result[0])[0]
        result = np.zeros(1, dtype=bool)
        result[0] = np.any(en_track_result)
        return result
    
    # some detector types cannot be assessed by this test; do not raise flag.
    if p.probe_type() in [None]:
        return np.zeros(1, dtype=bool)
    
    # fetch all profiles on track, sorted chronologically, earliest first (None sorted as highest)
    command = 'SELECT uid, year, month, day, time, lat, long, probe FROM ' + parameters["table"] + ' WHERE cruise = ' + str(cruise) + ' and year is not null and month is not null and day is not null and time is not null ORDER BY year, month, day, time, uid ASC;'
    track_rows = main.dbinteract(command)

    # start all as passing by default:
    EN_track_results = {}
    for i in range(len(track_rows)):
        EN_track_results[track_rows[i][0]] = np.zeros(1, dtype=bool)
    
    # copy the list of headers;
    # remove entries as they are flagged.
    passed_rows = copy.deepcopy(track_rows)
    rejects = findOutlier(passed_rows, EN_track_results)
    
    while rejects != []:
        passed_index = [x for x in range(len(passed_rows)) if x not in rejects ]
        passed_rows = [passed_rows[index] for index in passed_index ]
        rejects = findOutlier(passed_rows, EN_track_results)
    
    # if more than half got rejected, reject everyone
    if len(passed_rows) < len(track_rows) / 2:
        for i in range(len(track_rows)):
            EN_track_results[track_rows[i][0]][0] = True
   
    # write all to db
    result = []
    for i in range(len(track_rows)):
        result.append((main.pack_array(EN_track_results[track_rows[i][0]]), track_rows[i][0]))

    query = "UPDATE " + sys.argv[1] + " SET en_track_check=? WHERE uid=?"
    main.interact_many(query, result)

    return EN_track_results[uid]
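The rejection loop above reruns findOutlier on the surviving rows until a pass flags nothing, i.e. it iterates to a fixed point. findOutlier itself is project code; the toy version below reproduces only the loop shape, with a median/MAD outlier rule invented purely for illustration.

import numpy as np

def find_outlier(values, thresh=3.5):
    # Toy stand-in for findOutlier: flag indices far from the median,
    # measured in units of the median absolute deviation.
    arr = np.asarray(values, dtype=float)
    med = np.median(arr)
    mad = np.median(np.abs(arr - med))
    if mad == 0:
        return []
    return [i for i in range(len(arr)) if abs(arr[i] - med) / mad > thresh]

values = [10.1, 10.3, 9.9, 55.0, 10.2, -40.0]
rejects = find_outlier(values)
while rejects != []:
    values = [v for i, v in enumerate(values) if i not in rejects]
    rejects = find_outlier(values)
print(values)  # [10.1, 10.3, 9.9, 10.2]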
Example #4
def process_row(uid, logdir):
    '''run all tests on the indicated database row'''

    # reroute stdout, stderr to separate files for each profile to preserve logs
    sys.stdout = open(logdir + "/" + str(uid) + ".stdout", "w")
    sys.stderr = open(logdir + "/" + str(uid) + ".stderr", "w")

    # extract profile
    profile = main.get_profile_from_db(uid)

    # mask out error codes in temperature data
    main.catchFlags(profile)

    # run tests
    for itest, test in enumerate(testNames):
        try:
            result = run(test, [profile], parameterStore)[0]
        except:
            print(test, 'exception', sys.exc_info())
            result = np.zeros(1, dtype=bool)

        try:
            query = "UPDATE " + sys.argv[
                1] + " SET " + test + "=? WHERE uid=" + str(
                    profile.uid()) + ";"
            main.dbinteract(query, [main.pack_array(result)])
        except:
            print('db exception', sys.exc_info())
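Reassigning sys.stdout and sys.stderr as above works, but the original streams are never restored. Below is a sketch of the same per-profile log capture with context managers, which restores the streams even if the work raises; the work callable stands in for the test loop above.

import contextlib
import os

def process_with_logs(uid, logdir, work):
    # Route prints and tracebacks for this profile into dedicated log files;
    # both streams revert automatically when the with-block exits.
    out_path = os.path.join(logdir, str(uid) + ".stdout")
    err_path = os.path.join(logdir, str(uid) + ".stderr")
    with open(out_path, "w") as out, open(err_path, "w") as err, \
         contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
        work(uid)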
Example #5
def level_order(p, parameters):
    '''Reorders data into depth order and rejects levels with 
       negative depth.
    '''

    # check if the relevant info is already in the db
    query = 'SELECT nlevels, origlevels, zr, tr, qc FROM icdclevelorder WHERE uid = ' + str(
        p.uid())
    precomputed = main.dbinteract(query, targetdb=parameters["db"])
    if len(precomputed) > 0:
        nlevels = precomputed[0][0]
        origlevels = pickle.load(io.BytesIO(precomputed[0][1]))
        zr = pickle.load(io.BytesIO(precomputed[0][2]))
        tr = pickle.load(io.BytesIO(precomputed[0][3]))
        qc = pickle.load(io.BytesIO(precomputed[0][4]))
        return p.uid(), nlevels, origlevels, zr, tr, qc

    # Extract data and define the index for each level.
    z = p.z()
    t = p.t()
    origlevels = np.arange(p.n_levels())

    # Implement the QC. For this test we only reject negative depths.
    qc = z < 0

    # Remove occurrences of no data at a level and rejected obs.
    use = (z.mask == False) & (t.mask == False) & (qc == False)
    z = z[use]
    t = t[use]
    origlevels = origlevels[use]
    nlevels = np.count_nonzero(use)

    if nlevels > 1:
        # Sort the data. Using mergesort keeps levels with the same depth
        # in the same order.
        isort = np.argsort(z, kind='mergesort')
        zr = z[isort]
        tr = t[isort]
        origlevels = origlevels[isort]
    else:
        zr = z
        tr = t

    # register pre-computed arrays in db for reuse
    origlevels_p = pickle.dumps(origlevels, -1)
    zr_p = pickle.dumps(zr, -1)
    tr_p = pickle.dumps(tr, -1)
    qc_p = pickle.dumps(qc, -1)

    query = "REPLACE INTO icdclevelorder VALUES(?,?,?,?,?,?)"
    main.dbinteract(query, [
        p.uid(), nlevels,
        sqlite3.Binary(origlevels_p),
        sqlite3.Binary(zr_p),
        sqlite3.Binary(tr_p),
        sqlite3.Binary(qc_p)
    ],
                    targetdb=parameters["db"])

    return p.uid(), nlevels, origlevels, zr, tr, qc
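The pickle-to-BLOB caching used here round-trips cleanly on its own; the sketch below mirrors the REPLACE and SELECT pair above against an in-memory database (the table name is illustrative).

import io
import pickle
import sqlite3

import numpy as np

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE demo (uid INTEGER PRIMARY KEY, zr BLOB)")

# Serialise the array with the highest pickle protocol, as above.
zr = np.array([5.0, 10.0, 20.0])
conn.execute("REPLACE INTO demo VALUES(?,?)",
             [1, sqlite3.Binary(pickle.dumps(zr, -1))])

# Read the BLOB back and unpickle it from a bytes buffer.
blob = conn.execute("SELECT zr FROM demo WHERE uid = 1").fetchone()[0]
print(np.array_equal(zr, pickle.load(io.BytesIO(blob))))  # True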
Example #6
def process_row(uid, logdir):
  '''run all tests on the indicated database row'''
  
  # reroute stdout, stderr to separate files for each profile to preserve logs
  sys.stdout = open(logdir + "/" + str(uid) + ".stdout", "w")
  sys.stderr = open(logdir + "/" + str(uid) + ".stderr", "w")

  # extract profile
  profile = main.get_profile_from_db(uid)
  
  # mask out error codes in temperature data
  main.catchFlags(profile)

  # run tests
  for itest, test in enumerate(testNames):
    try:
      result = run(test, [profile], parameterStore)[0]
    except:
      print(test, 'exception', sys.exc_info())
      result = np.zeros(1, dtype=bool)

    try:
      query = "UPDATE " + sys.argv[1] + " SET " + test + "=? WHERE uid=" + str(profile.uid()) + ";"
      main.dbinteract(query, [main.pack_array(result)])
    except:
      print('db exception', sys.exc_info())
Example #7
def loadParameters(parameterStore):

    main.dbinteract("DROP TABLE IF EXISTS enbackground")
    main.dbinteract(
        "CREATE TABLE IF NOT EXISTS enbackground (uid INTEGER PRIMARY KEY, bgstdlevels BLOB, bgevstdlevels BLOB, origlevels BLOB, ptlevels BLOB, bglevels BLOB)"
    )
    parameterStore['enbackground'] = readENBackgroundCheckAux()
Example #8
def loadParameters(parameterStore):

    main.dbinteract("DROP TABLE IF EXISTS enspikeandstep",
                    targetdb=parameterStore["db"])
    main.dbinteract(
        "CREATE TABLE IF NOT EXISTS enspikeandstep (uid INTEGER PRIMARY KEY, suspect BLOB)",
        targetdb=parameterStore["db"])
Example #9
def loadParameters(parameterStore):

    main.dbinteract("DROP TABLE IF EXISTS icdclevelorder",
                    targetdb=parameterStore["db"])
    main.dbinteract(
        "CREATE TABLE IF NOT EXISTS icdclevelorder (uid INTEGER PRIMARY KEY, nlevels INTEGER, origlevels BLOB, zr BLOB, tr BLOB, qc BLOB)",
        targetdb=parameterStore["db"])
Example #10
def record_parameters(profile, bgStdLevels, bgevStdLevels, origLevels,
                      ptLevels, bgLevels):
    # pack the parameter arrays into the enbackground table
    # for consumption by the buddy check

    bgstdlevels = main.pack_array(bgStdLevels)
    bgevstdlevels = main.pack_array(bgevStdLevels)
    origlevels = main.pack_array(origLevels)
    ptlevels = main.pack_array(ptLevels)
    bglevels = main.pack_array(bgLevels)
    query = "REPLACE INTO enbackground VALUES(?,?,?,?,?,?);"
    main.dbinteract(query, [
        profile.uid(), bgstdlevels, bgevstdlevels, origlevels, ptlevels,
        bglevels
    ])
Example #11
def stdLevelData(p, parameters):
    """
    Combines data that have passed other QC checks to create a 
    set of observation minus background data on standard levels.
    """

    # Combine other QC results.
    preQC = (EN_background_check.test(p, parameters) | 
             EN_constant_value_check.test(p, parameters) | 
             EN_increasing_depth_check.test(p, parameters) | 
             EN_range_check.test(p, parameters) |
             EN_spike_and_step_check.test(p, parameters) | 
             EN_stability_check.test(p, parameters))

    # Get the data stored by the EN background check.
    # As it was run above we know that the data is available in the db.
    query = 'SELECT origlevels, ptlevels, bglevels FROM enbackground WHERE uid = ' + str(p.uid())
    enbackground_pars = main.dbinteract(query)
    enbackground_pars = main.unpack_row(enbackground_pars[0])
    origlevels = enbackground_pars[0]
    ptlevels = enbackground_pars[1]
    bglevels = enbackground_pars[2]
    origLevels = np.array(origlevels)
    diffLevels = (np.array(ptlevels) - np.array(bglevels))
    nLevels    = len(origLevels)
    if nLevels == 0: return None # Nothing more to do.

    # Remove any levels that failed previous QC.
    nLevels, origLevels, diffLevels = filterLevels(preQC, origLevels, diffLevels)
    if nLevels == 0: return None

    levels, assocLevs = meanDifferencesAtStandardLevels(origLevels, diffLevels, p.z(), parameters)

    return levels, origLevels, assocLevs
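The preQC expression above ORs the per-level flag arrays from the individual checks, so a level is excluded if any single check rejected it. A two-check illustration of that combination (the flag values are made up):

import numpy as np

# Per-level flags from two hypothetical checks (True means the level failed).
background_flags = np.array([False, True, False, False])
range_flags = np.array([False, False, False, True])

pre_qc = background_flags | range_flags
print(pre_qc)   # [False  True False  True]
print(~pre_qc)  # levels that survive every check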
Example #12
    def test_determine_pge(self):
        '''
        totally ridiculous differences between observation and background should give pge == 1
        '''

        p = util.testingProfile.fakeProfile([1.8, 1.8, 1.8, 7.1],
                                            [0.0, 2.5, 5.0, 7.5],
                                            latitude=55.6,
                                            longitude=12.9,
                                            date=[1900, 1, 15, 0],
                                            probe_type=7,
                                            uid=8888)
        levels = numpy.ma.array([1000, 1000, 1000, 1000])
        levels.mask = False

        #bgev = qctests.EN_background_check.bgevStdLevels
        qctests.EN_background_check.test(
            p, self.parameters
        )  #need to populate the enbackground db with profile specific info
        query = 'SELECT bgevstdlevels FROM enbackground WHERE uid = 8888'
        enbackground_pars = main.dbinteract(query)
        bgev = pickle.load(io.BytesIO(enbackground_pars[0][0]))

        obev = self.parameters['enbackground']['obev']
        expected = [1.0, 1.0, 1.0, 1.0]
        assert numpy.array_equal(
            qctests.EN_std_lev_bkg_and_buddy_check.determine_pge(
                levels, bgev, obev, p), expected
        ), 'PGE of extreme departures from background not flagged as 1.0'
Example #13
def test(p, parameters):
    """ 
    Runs the quality control check on profile p and returns a numpy array 
    of quality control decisions with False where the data value has 
    passed the check and True where it failed. 
    """

    # Check if the QC of this profile was already done and if not
    # run the QC.
    query = 'SELECT en_constant_value_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(p.uid()) + ';'
    qc_log = main.dbinteract(query)
    qc_log = main.unpack_row(qc_log[0])
    if qc_log[0] is not None:
        return qc_log[0]
        
    return run_qc(p, parameters)
Example #14
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.
    """

    # Check if the QC of this profile was already done and if not
    # run the QC.
    query = 'SELECT en_increasing_depth_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(p.uid()) + ';'
    qc_log = main.dbinteract(query)
    qc_log = main.unpack_row(qc_log[0])
    if qc_log[0] is not None:
        return qc_log[0]

    return run_qc(p, parameters)
Example #15
    def test_determine_pge(self):
        '''
        totally ridiculous differences between observation and background should give pge == 1
        '''

        p = util.testingProfile.fakeProfile([1.8, 1.8, 1.8, 7.1], [0.0, 2.5, 5.0, 7.5], latitude=55.6, longitude=12.9, date=[1900, 1, 15, 0], probe_type=7, uid=8888)
        levels = numpy.ma.array([1000,1000,1000,1000])
        levels.mask = False

        #bgev = qctests.EN_background_check.bgevStdLevels
        qctests.EN_background_check.test(p, self.parameters) #need to populate the enbackground db with profile specific info
        query = 'SELECT bgevstdlevels FROM enbackground WHERE uid = 8888'
        enbackground_pars = main.dbinteract(query) 
        bgev = pickle.load(io.BytesIO(enbackground_pars[0][0]))

        obev = self.parameters['enbackground']['obev']
        expected = [1.0, 1.0, 1.0, 1.0]
        assert numpy.array_equal(qctests.EN_std_lev_bkg_and_buddy_check.determine_pge(levels, bgev, obev, p), expected), 'PGE of extreme departures from background not flagged as 1.0'
Example #16
def stdLevelData(p, parameters):
    """
    Combines data that have passed other QC checks to create a 
    set of observation minus background data on standard levels.
    """

    # Combine other QC results.
    preQC = (EN_background_check.test(p, parameters)
             | EN_constant_value_check.test(p, parameters)
             | EN_increasing_depth_check.test(p, parameters)
             | EN_range_check.test(p, parameters)
             | EN_spike_and_step_check.test(p, parameters)
             | EN_stability_check.test(p, parameters))

    # Get the data stored by the EN background check.
    # As it was run above we know that the data is available in the db.
    query = 'SELECT origlevels, ptlevels, bglevels FROM enbackground WHERE uid = ' + str(
        p.uid())
    enbackground_pars = main.dbinteract(query)
    enbackground_pars = main.unpack_row(enbackground_pars[0])
    origlevels = enbackground_pars[0]
    ptlevels = enbackground_pars[1]
    bglevels = enbackground_pars[2]
    origLevels = np.array(origlevels)
    diffLevels = (np.array(ptlevels) - np.array(bglevels))
    nLevels = len(origLevels)
    if nLevels == 0: return None  # Nothing more to do.

    # Remove any levels that failed previous QC.
    nLevels, origLevels, diffLevels = filterLevels(preQC, origLevels,
                                                   diffLevels)
    if nLevels == 0: return None

    levels, assocLevs = meanDifferencesAtStandardLevels(
        origLevels, diffLevels, p.z(), parameters)

    return levels, origLevels, assocLevs
Example #17
def find_roc(table, 
             costratio=[2.5, 1.0],
             filter_on_wire_break_test=False,
             filter_from_file_spec=True,
             enforce_types_of_check=True,
             n_profiles_to_analyse=np.iinfo(np.int32).max,
             n_combination_iterations=1, 
             with_reverses=False,
             effectiveness_ratio=2.0,
             improve_threshold=1.0, 
             verbose=True, 
             plot_roc=True,
             write_roc=True):
    '''
    Generates a ROC curve from the database data in table by maximising the gradient
    of the ROC curve. It will combine different tests together and invert the results
    of tests if requested.

    costratio - two-element iterable that defines how the ROC curve is developed. Higher
                numbers give a ROC curve with lower false positive rates; the two elements allow
                control over the shape of the ROC curve near the start and end, e.g. [2.5, 1.0].
    filter_on_wire_break_test - filter out the impact of XBT wire breaks from results.
    filter_from_file_spec - use specification from file to choose filtering.
    enforce_types_of_check - use specification from file on particular types of checks to use.
    n_profiles_to_analyse - restrict the number of profiles extracted from the database.
    n_combination_iterations - AND tests together; restricted to a max of 2 as otherwise
                               the number of tests gets very large.
    with_reverses - if True, a copy of each test with inverted results is made.
    effectiveness_ratio - will give a warning if TPR / FPR is less than this value.
    improve_threshold - ignores tests if they do not result in a change in true positive
                        rate (in %) of at least this amount.
    verbose - if True, will print a lot of messages to screen.
    plot_roc - if True, will save an image of the ROC to roc.png.
    write_roc - if True, will save the ROC data to roc.json.
    '''

    # Read QC test specifications if required.
    groupdefinition = {}
    if filter_from_file_spec or enforce_types_of_check:
        groupdefinition = read_qc_groups()

    # Read data from database into a pandas data frame.
    df = dbutils.db_to_df(sys.argv[1],
                          filter_on_wire_break_test = filter_on_wire_break_test,
                          filter_on_tests = groupdefinition,
                          n_to_extract = n_profiles_to_analyse)

    # Drop nondiscriminating tests
    nondiscrim = []
    cols = list(df.columns)
    for c in cols:
        if len(pandas.unique(df[c])) == 1:
            nondiscrim.append(c)
            if verbose: print(c + ' is nondiscriminating and will be removed')
    cols = [t for t in cols if t not in nondiscrim]
    df = df[cols]
    print(list(df))
    testNames = df.columns[2:].values.tolist()

    if verbose:
        print('Number of profiles is: ', len(df.index))
        print('Number of quality checks to process is: ', len(testNames))

    # mark chosen profiles as part of the training set 
    all_uids = main.dbinteract('SELECT uid from ' + sys.argv[1] + ';')
    for uid in all_uids:
        uid = uid[0]
        is_training = int(uid in df['uid'].astype(int).as_matrix())
        query = "UPDATE " + sys.argv[1] + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
        main.dbinteract(query)

    # Convert to numpy structures and make inverse versions of tests if required.
    # Any test with true positive rate of zero is discarded.
    truth = df['Truth'].as_matrix()
    tests = []
    names = []
    tprs  = []
    fprs  = []
    if with_reverses:
        reverselist = [False, True]
    else:
        reverselist = [False]
    for i, testname in enumerate(testNames):
        for reversal in reverselist:
            results = df[testname].as_matrix() != reversal
            tpr, fpr, fnr, tnr = main.calcRates(results, truth)
            if tpr > 0.0:
                tests.append(results)
                if reversal:
                    addtext = 'r'
                else:
                    addtext = ''
                names.append(addtext + testname)
                tprs.append(tpr)
                fprs.append(fpr)
    del df # No further need for the data frame.
    if verbose: print('Number of quality checks after adding reverses and removing zero TPR was: ', len(names))

    # Create storage to hold the roc curve.
    cumulative = truth.copy()
    cumulative[:] = False
    currenttpr    = 0.0
    currentfpr    = 0.0
    r_fprs        = [] # The false positive rate for each ROC point.
    r_tprs        = [] # True positive rate for each ROC point.
    testcomb      = [] # The QC test that was added at each ROC point.
    groupsel      = [] # Set to True if the ROC point was from an enforced group.

    # Pre-select some tests if required.
    if enforce_types_of_check:
        if verbose: print('Enforcing types of checks')
        while len(groupdefinition['At least one from group']) > 0:
            bestchoice = ''
            bestgroup  = ''
            bestdist   = np.sqrt(100.0**2 + 100.0**2)
            besti      = -1
            for key in groupdefinition['At least one from group']:
                for testname in groupdefinition['At least one from group'][key]:
                    # Need to check that the test exists - it may have been removed
                    # if it was non-discriminating.
                    if testname in names:
                        for itest, name in enumerate(names):
                            if name == testname: 
                                cumulativenew = np.logical_or(cumulative, tests[itest])
                                tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
                                newdist = return_cost(costratio, tpr, fpr)
                                print('    ', tpr, fpr, newdist, bestdist, testname)
                                if newdist == bestdist:
                                    if verbose:
                                        print('  ' + bestchoice + ' and ' + testname + ' have the same results and the first is kept')
                                elif newdist < bestdist:
                                    bestchoice = testname
                                    bestdist   = newdist
                                    besti      = itest
                                    bestgroup  = key
                    else:
                        if verbose: print('    ' + testname + ' not found and so was skipped')
            #assert bestchoice != '', '    Error, did not make a choice in group ' + key
            if verbose: print('  ' + bestchoice + ' was selected from group ' + bestgroup)
            if fprs[besti] > 0:
                if tprs[besti] / fprs[besti] < effectiveness_ratio:
                    print('WARNING - ' + bestchoice + ' TPR / FPR is below the effectiveness ratio limit: ', tprs[besti] / fprs[besti], effectiveness_ratio)
            cumulative = np.logical_or(cumulative, tests[besti])
            currenttpr, currentfpr, fnr, tnr = main.calcRates(cumulative, truth)
            testcomb.append(names[besti])
            r_fprs.append(currentfpr)
            r_tprs.append(currenttpr)
            groupsel.append(True)
            # Once a test has been added, it can be deleted so that it is not considered again.
            del names[besti]
            del tests[besti]
            del fprs[besti]
            del tprs[besti]
            del groupdefinition['At least one from group'][bestgroup]
            print('ROC point from enforced group: ', currenttpr, currentfpr, testcomb[-1], bestgroup)

    # Make combinations of the single checks and store.
    assert n_combination_iterations <= 2, 'Setting n_combination_iterations > 2 results in a very large number of combinations'
    if verbose: print('Starting construction of combinations with number of iterations: ', n_combination_iterations)
    for its in range(n_combination_iterations):
        ntests = len(names)
        for i in range(ntests - 1):
            if verbose: print('Processing iteration ', its + 1, ' out of ', n_combination_iterations, ' step ', i + 1, ' out of ', ntests - 1, ' with number of tests now ', len(names))
            for j in range(i + 1, ntests):
                # Create the name for this combination.
                newname = ('&').join(sorted((names[i] + '&' + names[j]).split('&')))
                if newname in names: continue # Do not keep multiple copies of the same combination.

                results = np.logical_and(tests[i], tests[j])
                tpr, fpr, fnr, tnr = main.calcRates(results, truth)
                if tpr > 0.0:
                    tests.append(results)
                    tprs.append(tpr)
                    fprs.append(fpr)
                    names.append(newname)
    if verbose: print('Completed generation of tests, now constructing roc from number of tests: ', len(names))

    # Create roc.
    used      = np.zeros(len(names), dtype=bool)
    overallbest = return_cost(costratio, tpr, fpr)
    keepgoing = True
    while keepgoing:
        keepgoing = False
        besti     = -1
        bestcost  = overallbest
        bestncomb = 100000
        bestdtpr  = 0
        bestdfpr  = 100000
        for i in range(len(names)):
            if used[i]: continue
            cumulativenew = np.logical_or(cumulative, tests[i])
            tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
            dtpr               = tpr - currenttpr
            dfpr               = fpr - currentfpr
            newcost            = return_cost(costratio, tpr, fpr) 
            newbest            = False
            if newcost <= bestcost and dtpr >= improve_threshold and dtpr > 0.0:
                # If cost is better than found previously, use it else if it is
                # the same then decide if to use it or not.
                if newcost < bestcost:
                    newbest = True
                elif dtpr >= bestdtpr:
                    if dtpr > bestdtpr:
                        newbest = True
                    elif len(names[i].split('&')) < bestncomb:
                        newbest = True
                if newbest:
                    besti     = i
                    bestcost  = newcost
                    bestncomb = len(names[i].split('&'))
                    bestdtpr  = dtpr
                    bestdfpr  = dfpr
        if besti >= 0:
            keepgoing   = True
            used[besti] = True
            overallbest = bestcost
            cumulative  = np.logical_or(cumulative, tests[besti])
            currenttpr, currentfpr, fnr, tnr = main.calcRates(cumulative, truth)
            testcomb.append(names[besti])
            r_fprs.append(currentfpr)
            r_tprs.append(currenttpr)
            groupsel.append(False)
            print('ROC point: ', currenttpr, currentfpr, names[besti], overallbest)

    if plot_roc:
        plt.plot(r_fprs, r_tprs, 'k')
        for i in range(len(r_fprs)):
            if groupsel[i]:
                colour = 'r'
            else:
                colour = 'b'
            plt.plot(r_fprs[i], r_tprs[i], colour + 'o')
        plt.xlim(0, 100)
        plt.ylim(0, 100)
        plt.xlabel('False positive rate (%)')
        plt.ylabel('True positive rate (%)')
        plt.savefig('roc.png')
        plt.close()

    if write_roc:
        f = open('roc.json', 'w')
        r = {}
        r['tpr'] = r_tprs
        r['fpr'] = r_fprs
        r['tests'] = testcomb
        r['groupsel'] = groupsel
        json.dump(r, f)
        f.close()
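main.calcRates is project code; from its use here (values compared against percentage thresholds and plotted on 0 to 100 axes) it presumably returns true/false positive/negative rates in percent. A self-contained guess at its behaviour, for orientation only:

import numpy as np

def calc_rates(flags, truth):
    # Hypothetical reimplementation: rates in percent, to match the
    # 0-100 axis limits used for the ROC plot above.
    flags = np.asarray(flags, dtype=bool)
    truth = np.asarray(truth, dtype=bool)
    tpr = 100.0 * np.count_nonzero(flags & truth) / max(np.count_nonzero(truth), 1)
    fpr = 100.0 * np.count_nonzero(flags & ~truth) / max(np.count_nonzero(~truth), 1)
    return tpr, fpr, 100.0 - tpr, 100.0 - fpr

print(calc_rates([True, False, True], [True, True, False]))  # (50.0, 100.0, 50.0, 0.0)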
Example #18
def run_qc(p, suspect):

    # check for pre-registered suspect tabulation, if that's what we want:
    if suspect:
        query = 'SELECT suspect FROM enspikeandstep WHERE uid = ' + str(p.uid()) + ';'
        susp = main.dbinteract(query)
        if len(susp) > 0:
            return main.unpack_row(susp[0])[0]
            
    # Define tolerances used.
    tolD     = np.array([0, 200, 300, 500, 600])
    tolDTrop = np.array([0, 300, 400, 500, 600])
    tolT     = np.array([5.0, 5.0, 2.5, 2.0, 1.5])  

    # Define an array to hold results.
    qc    = np.zeros(p.n_levels(), dtype=bool)

    # Get depth and temperature values from the profile.
    z = p.z()
    t = p.t()

    # Find which levels have data.
    isTemperature = (t.mask==False)
    isDepth = (z.mask==False)
    isData = isTemperature & isDepth

    # Array to hold temperature differences between levels and gradients.
    dt, gt = composeDT(t, z, p.n_levels())
        
    # Spikes and steps detection.
    for i in range(1, p.n_levels()):
        if i >= 2:
            if (isData[i-2] and isData[i-1] and isData[i]) == False:
                continue
            if z[i] - z[i-2] >= 5.0:
                wt1 = (z[i-1] - z[i-2]) / (z[i] - z[i-2])
            else:
                wt1 = 0.5
        else:
            if (isData[i-1] and isData[i]) == False:
                continue
            wt1 = 0.5
        
        dTTol = determineDepthTolerance(z[i-1], np.abs(p.latitude()))
        gTTol = 0.05

        # Check for low temperatures in the Tropics.
        # This might be more appropriate to appear in a separate EN regional
        # range check but is included here for now for consistency with the
        # original code.
        if (np.abs(p.latitude()) < 20.0 and z[i-1] < 1000.0 and
            t[i-1] < 1.0):
               dt[i] = np.ma.masked 
               if suspect == True: qc[i-1] = True
               continue
               
        qc, dt = conditionA(dt, dTTol, qc, wt1, i, suspect)                
        qc, dt = conditionB(dt, dTTol, gTTol, qc, gt, i, suspect)
        qc = conditionC(dt, dTTol, z, qc, t, i, suspect)
    
    # End of loop over levels.
    
    # Step or 0.0 at the bottom of a profile.
    if isData[-1] and dt.mask[-1] == False:
        dTTol = determineDepthTolerance(z[-1], np.abs(p.latitude()))
        if np.abs(dt[-1]) > dTTol:
            if suspect == True: qc[-1] = True
    if isTemperature[-1]:
        if t[-1] == 0.0:
            if suspect == True: qc[-1] = True
        
    # If 4 levels or more than half the profile is rejected then reject all.
    if suspect == False:
        nRejects = np.count_nonzero(qc)
        if nRejects >= 4 or nRejects > p.n_levels()/2:
            qc[:] = True

    # register suspects, if computed, to db
    if suspect:
        query = "REPLACE INTO enspikeandstep VALUES(?,?);"
        main.dbinteract(query, [p.uid(), main.pack_array(qc)] )

    return qc
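conditionA, conditionB and conditionC carry the actual EN spike-and-step criteria, with depth-weighted differences and the depth-dependent tolerances from determineDepthTolerance. A stripped-down spike rule in the same spirit, with no depth weighting and a fixed tolerance, purely for intuition:

import numpy as np

def simple_spike(t, tol=2.0):
    # A point is a spike if it departs sharply from both neighbours
    # and the two departures have opposite signs.
    qc = np.zeros(len(t), dtype=bool)
    for i in range(1, len(t) - 1):
        d_prev = t[i] - t[i - 1]
        d_next = t[i + 1] - t[i]
        if abs(d_prev) > tol and abs(d_next) > tol and np.sign(d_prev) != np.sign(d_next):
            qc[i] = True
    return qc

print(simple_spike(np.array([10.0, 10.1, 20.0, 10.2, 10.3])))  # [False False  True False False]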
Example #19
def test(p, parameters, allow_level_reinstating=True):
    """ 
    Runs the quality control check on profile p and returns a numpy array 
    of quality control decisions with False where the data value has 
    passed the check and True where it failed. 

    If allow_level_reinstating is set to True then rejected levels can be
    reprieved by comparing with levels above and below. NB this is done by
    default in EN processing.
    """

    # Define an array to hold results.
    qc = np.zeros(p.n_levels(), dtype=bool)

    # Obtain the obs minus background differences on standard levels.
    result = stdLevelData(p, parameters)
    if result is None:
        return qc

    # Unpack the results.
    levels, origLevels, assocLevels = result
    # Retrieve the background and observation error variances and
    # the background values.
    query = 'SELECT bgstdlevels, bgevstdlevels FROM enbackground WHERE uid = ' + str(
        p.uid())
    enbackground_pars = main.dbinteract(query)
    enbackground_pars = main.unpack_row(enbackground_pars[0])
    bgsl = enbackground_pars[0]
    slev = parameters['enbackground']['depth']
    bgev = enbackground_pars[1]
    obev = parameters['enbackground']['obev']

    #find initial pge
    pgeData = determine_pge(levels, bgev, obev, p)

    # Find buddy.
    profiles = get_profile_info(parameters)
    minDist = 1000000000.0
    iMinDist = None
    for iProfile, profile in enumerate(profiles):
        pDist = assessBuddyDistance(p, profile)
        if pDist is not None and pDist < minDist:
            minDist = pDist
            iMinDist = iProfile

    # Check if we have found a buddy and process if so.
    if minDist <= 400000:
        pBuddy = main.get_profile_from_db(profiles[iMinDist][0])

        # buddy vetoes
        Fail = False
        if pBuddy.var_index() is None:
            Fail = True
        if Fail == False:
            main.catchFlags(pBuddy)
            if np.sum(pBuddy.t().mask == False) == 0:
                Fail = True

        if Fail == False:

            result = stdLevelData(pBuddy, parameters)

            query = 'SELECT bgevstdlevels FROM enbackground WHERE uid = ' + str(
                pBuddy.uid())
            buddy_pars = main.dbinteract(query)

            buddy_pars = main.unpack_row(buddy_pars[0])

            if result is not None:
                levelsBuddy, origLevelsBuddy, assocLevelsBuddy = result
                bgevBuddy = buddy_pars[0]
                pgeBuddy = determine_pge(levels, bgevBuddy, obev, pBuddy)
                pgeData = update_pgeData(pgeData, pgeBuddy, levels,
                                         levelsBuddy, minDist, p, pBuddy, obev,
                                         bgev, bgevBuddy)

    # Check if levels should be reinstated.
    if allow_level_reinstating:
        if np.abs(p.latitude()) < 20.0:
            depthTol = 300.0
        else:
            depthTol = 200.0
        stdLevelFlags = pgeData >= 0.5
        for i, slflag in enumerate(stdLevelFlags):
            if slflag:
                # Check for non rejected surrounding levels.
                okbelow = False
                if i > 0:
                    if stdLevelFlags[i - 1] == False and levels.mask[
                            i - 1] == False and bgsl.mask[i - 1] == False:
                        okbelow = True
                okabove = False
                nsl = len(stdLevelFlags)
                if i < nsl - 1:
                    if stdLevelFlags[i + 1] == False and levels.mask[
                            i + 1] == False and bgsl.mask[i + 1] == False:
                        okabove = True
                # Work out tolerances.
                if slev[i] > depthTol + 100:
                    tolFactor = 0.5
                elif slev[i] > depthTol:
                    tolFactor = 1.0 - 0.005 * (slev[i] - depthTol)
                else:
                    tolFactor = 1.0
                ttol = 0.5 * tolFactor
                if okbelow == True and okabove == True:
                    xmax = levels[i - 1] + bgsl[i - 1] + ttol
                    xmin = levels[i + 1] + bgsl[i + 1] - ttol
                elif okbelow == True:
                    xmax = levels[i - 1] + bgsl[i - 1] + ttol
                    xmin = levels[i - 1] + bgsl[i - 1] - ttol
                elif okabove == True:
                    xmax = levels[i + 1] + bgsl[i + 1] + ttol
                    xmin = levels[i + 1] + bgsl[i + 1] - ttol
                else:
                    continue
                # Reassign PGE if level is within the tolerances.
                if levels[i] + bgsl[i] >= xmin and levels[i] + bgsl[i] <= xmax:
                    pgeData[i] = 0.49

    # Assign the QC flags to original levels.
    for i, pge in enumerate(pgeData):
        if pgeData.mask[i]: continue
        if pge < 0.5: continue
        for j, assocLevel in enumerate(assocLevels):
            if assocLevel == i:
                origLevel = origLevels[j]
                qc[origLevel] = True

    return qc
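assessBuddyDistance is project code, and the 400000 cutoff reads as metres, so it presumably computes a great-circle separation between profile positions. Below is a generic haversine distance for intuition; this is not claimed to be the project's implementation.

import math

def haversine_m(lat1, lon1, lat2, lon2, radius=6371000.0):
    # Great-circle distance in metres between two (lat, lon) points in degrees.
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = p2 - p1
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * radius * math.asin(math.sqrt(a))

print(haversine_m(55.6, 12.9, 55.7, 13.0) <= 400000)  # True: well within buddy range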
Example #20
def run_qc(p, suspect, parameters):

    # check for pre-registered suspect tabulation, if that's what we want:
    if suspect:
        query = 'SELECT suspect FROM enspikeandstep WHERE uid = ' + str(
            p.uid()) + ';'
        susp = main.dbinteract(query, targetdb=parameters["db"])
        if len(susp) > 0:
            return main.unpack_row(susp[0])[0]

    # Define tolerances used.
    tolD = np.array([0, 200, 300, 500, 600])
    tolDTrop = np.array([0, 300, 400, 500, 600])
    tolT = np.array([5.0, 5.0, 2.5, 2.0, 1.5])

    # Define an array to hold results.
    qc = np.zeros(p.n_levels(), dtype=bool)

    # Get depth and temperature values from the profile.
    z = p.z()
    t = p.t()

    # Find which levels have data.
    isTemperature = (t.mask == False)
    isDepth = (z.mask == False)
    isData = isTemperature & isDepth

    # Array to hold temperature differences between levels and gradients.
    dt, gt = composeDT(t, z, p.n_levels())

    # Spikes and steps detection.
    for i in range(1, p.n_levels()):
        if i >= 2:
            if (isData[i - 2] and isData[i - 1] and isData[i]) == False:
                continue
            if z[i] - z[i - 2] >= 5.0:
                wt1 = (z[i - 1] - z[i - 2]) / (z[i] - z[i - 2])
            else:
                wt1 = 0.5
        else:
            if (isData[i - 1] and isData[i]) == False:
                continue
            wt1 = 0.5

        dTTol = determineDepthTolerance(z[i - 1], np.abs(p.latitude()))
        gTTol = 0.05

        # Check for low temperatures in the Tropics.
        # This might be more appropriate to appear in a separate EN regional
        # range check but is included here for now for consistency with the
        # original code.
        if (np.abs(p.latitude()) < 20.0 and z[i - 1] < 1000.0
                and t[i - 1] < 1.0):
            dt[i] = np.ma.masked
            if suspect == True: qc[i - 1] = True
            continue

        qc, dt = conditionA(dt, dTTol, qc, wt1, i, suspect)
        qc, dt = conditionB(dt, dTTol, gTTol, qc, gt, i, suspect)
        qc = conditionC(dt, dTTol, z, qc, t, i, suspect)

    # End of loop over levels.

    # Step or 0.0 at the bottom of a profile.
    if isData[-1] and dt.mask[-1] == False:
        dTTol = determineDepthTolerance(z[-1], np.abs(p.latitude()))
        if np.abs(dt[-1]) > dTTol:
            if suspect == True: qc[-1] = True
    if isTemperature[-1]:
        if t[-1] == 0.0:
            if suspect == True: qc[-1] = True

    # If 4 levels or more than half the profile is rejected then reject all.
    if suspect == False:
        nRejects = np.count_nonzero(qc)
        if nRejects >= 4 or nRejects > p.n_levels() / 2:
            qc[:] = True

    # register suspects, if computed, to db
    if suspect:
        query = "REPLACE INTO enspikeandstep VALUES(?,?);"
        main.dbinteract(query, [p.uid(), main.pack_array(qc)],
                        targetdb=parameters["db"])

    return qc
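The t.mask == False and z.mask == False tests above are the usual masked-array idiom for locating usable levels; a small demonstration:

import numpy.ma as ma

# A profile-like masked array: the third temperature is masked (no data).
t = ma.array([10.0, 11.0, 99.9], mask=[False, False, True])
is_temperature = (t.mask == False)
print(is_temperature)          # [ True  True False]
print(t[is_temperature].data)  # [10. 11.]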
Example #21
def loadParameters(parameterStore):

    main.dbinteract("DROP TABLE IF EXISTS enspikeandstep")
    main.dbinteract("CREATE TABLE IF NOT EXISTS enspikeandstep (uid INTEGER PRIMARY KEY, suspect BLOB)")
Example #22
# Read data from database into a pandas data frame.
df = dbutils.db_to_df(sys.argv[1],
                      filter_on_wire_break_test = False,
                      filter_on_tests = groupdefinition,
                      n_to_extract = sys.argv[2])
testNames = df.columns[2:].values.tolist()

# declare some downstream constructs
accepted = []
unflagged = []
fprs = []
bad = df.loc[df['Truth']]
bad.reset_index(inplace=True, drop=True)

# mark chosen profiles as part of the training set
all_uids = main.dbinteract('SELECT uid from ' + sys.argv[1] + ';')
for uid in all_uids:
    uid = uid[0]
    is_training = int(uid in df['uid'].astype(int).as_matrix())
    query = "UPDATE " + sys.argv[1] + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
    main.dbinteract(query)

# algo. step 0:
# demand individual QC tests have TPR/FPR > some threshold
perf_thresh = 2
drop_tests = []
for test in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[test].tolist(), df['Truth'].tolist())
    if fpr > 0 and tpr / fpr < perf_thresh:
        print('dropping', test, '; tpr/fpr = ', tpr/fpr)
        df = df.drop([test], axis=1)  # drop() returns a new frame; reassign or the drop is a no-op
Example #23
    def tearDown(self):
        main.dbinteract('DROP TABLE icdclevelorder;')
Example #24
def get_profile_info(parameters):
    # Gets information about the profiles from the database.

    query = 'SELECT uid,year,month,cruise,lat,long FROM ' + parameters['table']
    return main.dbinteract(query)
Example #25
def find_roc(table,
             costratio=[2.5, 1.0],
             filter_on_wire_break_test=False,
             filter_from_file_spec=True,
             enforce_types_of_check=True,
             n_profiles_to_analyse=np.iinfo(np.int32).max,
             n_combination_iterations=1,
             with_reverses=False,
             effectiveness_ratio=2.0,
             improve_threshold=1.0,
             verbose=True,
             plot_roc=True,
             write_roc=True):
    '''
    Generates a ROC curve from the database data in table by maximising the gradient
    of the ROC curve. It will combine different tests together and invert the results
    of tests if requested.

    costratio - two-element iterable that defines how the ROC curve is developed. Higher
                numbers give a ROC curve with lower false positive rates; the two elements allow
                control over the shape of the ROC curve near the start and end, e.g. [2.5, 1.0].
    filter_on_wire_break_test - filter out the impact of XBT wire breaks from results.
    filter_from_file_spec - use specification from file to choose filtering.
    enforce_types_of_check - use specification from file on particular types of checks to use.
    n_profiles_to_analyse - restrict the number of profiles extracted from the database.
    n_combination_iterations - AND tests together; restricted to a max of 2 as otherwise
                               the number of tests gets very large.
    with_reverses - if True, a copy of each test with inverted results is made.
    effectiveness_ratio - will give a warning if TPR / FPR is less than this value.
    improve_threshold - ignores tests if they do not result in a change in true positive
                        rate (in %) of at least this amount.
    verbose - if True, will print a lot of messages to screen.
    plot_roc - if True, will save an image of the ROC to roc.png.
    write_roc - if True, will save the ROC data to roc.json.
    '''

    # Read QC test specifications if required.
    groupdefinition = {}
    if filter_from_file_spec or enforce_types_of_check:
        groupdefinition = read_qc_groups()

    # Read data from database into a pandas data frame.
    df = dbutils.db_to_df(sys.argv[1],
                          filter_on_wire_break_test=filter_on_wire_break_test,
                          filter_on_tests=groupdefinition,
                          n_to_extract=n_profiles_to_analyse)

    # Drop nondiscriminating tests
    nondiscrim = []
    cols = list(df.columns)
    for c in cols:
        if len(pandas.unique(df[c])) == 1:
            nondiscrim.append(c)
            if verbose: print(c + ' is nondiscriminating and will be removed')
    cols = [t for t in cols if t not in nondiscrim]
    df = df[cols]
    print(list(df))
    testNames = df.columns[2:].values.tolist()

    if verbose:
        print('Number of profiles is: ', len(df.index))
        print('Number of quality checks to process is: ', len(testNames))

    # mark chosen profiles as part of the training set
    all_uids = main.dbinteract('SELECT uid from ' + sys.argv[1] + ';')
    for uid in all_uids:
        uid = uid[0]
        is_training = int(uid in df['uid'].astype(int).as_matrix())
        query = "UPDATE " + sys.argv[1] + " SET training=" + str(
            is_training) + " WHERE uid=" + str(uid) + ";"
        main.dbinteract(query)

    # Convert to numpy structures and make inverse versions of tests if required.
    # Any test with true positive rate of zero is discarded.
    truth = df['Truth'].as_matrix()
    tests = []
    names = []
    tprs = []
    fprs = []
    if with_reverses:
        reverselist = [False, True]
    else:
        reverselist = [False]
    for i, testname in enumerate(testNames):
        for reversal in reverselist:
            results = df[testname].as_matrix() != reversal
            tpr, fpr, fnr, tnr = main.calcRates(results, truth)
            if tpr > 0.0:
                tests.append(results)
                if reversal:
                    addtext = 'r'
                else:
                    addtext = ''
                names.append(addtext + testname)
                tprs.append(tpr)
                fprs.append(fpr)
    del df  # No further need for the data frame.
    if verbose:
        print(
            'Number of quality checks after adding reverses and removing zero TPR was: ',
            len(names))

    # Create storage to hold the roc curve.
    cumulative = truth.copy()
    cumulative[:] = False
    currenttpr = 0.0
    currentfpr = 0.0
    r_fprs = []  # The false positive rate for each ROC point.
    r_tprs = []  # True positive rate for each ROC point.
    testcomb = []  # The QC test that was added at each ROC point.
    groupsel = []  # Set to True if the ROC point was from an enforced group.

    # Pre-select some tests if required.
    if enforce_types_of_check:
        if verbose: print('Enforcing types of checks')
        while len(groupdefinition['At least one from group']) > 0:
            bestchoice = ''
            bestgroup = ''
            bestdist = np.sqrt(100.0**2 + 100.0**2)
            besti = -1
            for key in groupdefinition['At least one from group']:
                for testname in groupdefinition['At least one from group'][
                        key]:
                    # Need to check that the test exists - it may have been removed
                    # if it was non-discriminating.
                    if testname in names:
                        for itest, name in enumerate(names):
                            if name == testname:
                                cumulativenew = np.logical_or(
                                    cumulative, tests[itest])
                                tpr, fpr, fnr, tnr = main.calcRates(
                                    cumulativenew, truth)
                                newdist = return_cost(costratio, tpr, fpr)
                                print('    ', tpr, fpr, newdist, bestdist,
                                      testname)
                                if newdist == bestdist:
                                    if verbose:
                                        print(
                                            '  ' + bestchoice + ' and ' +
                                            testname +
                                            ' have the same results and the first is kept'
                                        )
                                elif newdist < bestdist:
                                    bestchoice = testname
                                    bestdist = newdist
                                    besti = itest
                                    bestgroup = key
                    else:
                        if verbose:
                            print('    ' + testname +
                                  ' not found and so was skipped')
            #assert bestchoice != '', '    Error, did not make a choice in group ' + key
            if verbose:
                print('  ' + bestchoice + ' was selected from group ' +
                      bestgroup)
            if fprs[besti] > 0:
                if tprs[besti] / fprs[besti] < effectiveness_ratio:
                    print(
                        'WARNING - ' + bestchoice +
                        ' TPR / FPR is below the effectiveness ratio limit: ',
                        tprs[besti] / fprs[besti], effectiveness_ratio)
            cumulative = np.logical_or(cumulative, tests[besti])
            currenttpr, currentfpr, fnr, tnr = main.calcRates(
                cumulative, truth)
            testcomb.append(names[besti])
            r_fprs.append(currentfpr)
            r_tprs.append(currenttpr)
            groupsel.append(True)
            # Once a test has been added, it can be deleted so that it is not considered again.
            del names[besti]
            del tests[besti]
            del fprs[besti]
            del tprs[besti]
            del groupdefinition['At least one from group'][bestgroup]
            print('ROC point from enforced group: ', currenttpr, currentfpr,
                  testcomb[-1], bestgroup)

    # Make combinations of the single checks and store.
    assert n_combination_iterations <= 2, 'Setting n_combination_iterations > 2 results in a very large number of combinations'
    if verbose:
        print(
            'Starting construction of combinations with number of iterations: ',
            n_combination_iterations)
    for its in range(n_combination_iterations):
        ntests = len(names)
        for i in range(ntests - 1):
            if verbose:
                print('Processing iteration ', its + 1, ' out of ',
                      n_combination_iterations, ' step ', i + 1, ' out of ',
                      ntests - 1, ' with number of tests now ', len(names))
            for j in range(i + 1, ntests):
                # Create the name for this combination.
                newname = ('&').join(
                    sorted((names[i] + '&' + names[j]).split('&')))
                if newname in names:
                    continue  # Do not keep multiple copies of the same combination.

                results = np.logical_and(tests[i], tests[j])
                tpr, fpr, fnr, tnr = main.calcRates(results, truth)
                if tpr > 0.0:
                    tests.append(results)
                    tprs.append(tpr)
                    fprs.append(fpr)
                    names.append(newname)
    if verbose:
        print(
            'Completed generation of tests, now constructing roc from number of tests: ',
            len(names))

    # Create roc.
    used = np.zeros(len(names), dtype=bool)
    overallbest = return_cost(costratio, tpr, fpr)
    keepgoing = True
    while keepgoing:
        keepgoing = False
        besti = -1
        bestcost = overallbest
        bestncomb = 100000
        bestdtpr = 0
        bestdfpr = 100000
        for i in range(len(names)):
            if used[i]: continue
            cumulativenew = np.logical_or(cumulative, tests[i])
            tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
            dtpr = tpr - currenttpr
            dfpr = fpr - currentfpr
            newcost = return_cost(costratio, tpr, fpr)
            newbest = False
            if newcost <= bestcost and dtpr >= improve_threshold and dtpr > 0.0:
                # If cost is better than found previously, use it else if it is
                # the same then decide if to use it or not.
                if newcost < bestcost:
                    newbest = True
                elif dtpr >= bestdtpr:
                    if dtpr > bestdtpr:
                        newbest = True
                    elif len(names[i].split('&')) < bestncomb:
                        newbest = True
                if newbest:
                    besti = i
                    bestcost = newcost
                    bestncomb = len(names[i].split('&'))
                    bestdtpr = dtpr
                    bestdfpr = dfpr
        if besti >= 0:
            keepgoing = True
            used[besti] = True
            overallbest = bestcost
            cumulative = np.logical_or(cumulative, tests[besti])
            currenttpr, currentfpr, fnr, tnr = main.calcRates(
                cumulative, truth)
            testcomb.append(names[besti])
            r_fprs.append(currentfpr)
            r_tprs.append(currenttpr)
            groupsel.append(False)
            print('ROC point: ', currenttpr, currentfpr, names[besti],
                  overallbest)

    if plot_roc:
        plt.plot(r_fprs, r_tprs, 'k')
        for i in range(len(r_fprs)):
            if groupsel[i]:
                colour = 'r'
            else:
                colour = 'b'
            plt.plot(r_fprs[i], r_tprs[i], colour + 'o')
        plt.xlim(0, 100)
        plt.ylim(0, 100)
        plt.xlabel('False positive rate (%)')
        plt.ylabel('True positive rate (%)')
        plt.savefig('roc.png')
        plt.close()

    if write_roc:
        f = open('roc.json', 'w')
        r = {}
        r['tpr'] = r_tprs
        r['fpr'] = r_fprs
        r['tests'] = testcomb
        r['groupsel'] = groupsel
        json.dump(r, f)
        f.close()
Example #26
def test(p, parameters, allow_level_reinstating=True):
    """ 
    Runs the quality control check on profile p and returns a numpy array 
    of quality control decisions with False where the data value has 
    passed the check and True where it failed. 

    If allow_level_reinstating is set to True then rejected levels can be
    reprieved by comparing with levels above and below. NB this is done by
    default in EN processing.
    """

    # Define an array to hold results.
    qc = np.zeros(p.n_levels(), dtype=bool)

    # Obtain the obs minus background differences on standard levels.
    result = stdLevelData(p, parameters)
    if result is None:
        return qc
    
    # Unpack the results.
    levels, origLevels, assocLevels = result
    # Retrieve the background and observation error variances and
    # the background values.
    query = 'SELECT bgstdlevels, bgevstdlevels FROM enbackground WHERE uid = ' + str(p.uid())
    enbackground_pars = main.dbinteract(query)
    enbackground_pars = main.unpack_row(enbackground_pars[0])
    bgsl = enbackground_pars[0]
    slev = parameters['enbackground']['depth']
    bgev = enbackground_pars[1]
    obev = parameters['enbackground']['obev']

    # Find the initial probability of gross error (PGE) for each level.
    pgeData = determine_pge(levels, bgev, obev, p)

    # Find buddy.
    profiles = get_profile_info(parameters)
    minDist  = 1000000000.0
    iMinDist = None
    for iProfile, profile in enumerate(profiles):
        pDist = assessBuddyDistance(p, profile)
        if pDist is not None and pDist < minDist:
            minDist  = pDist
            iMinDist = iProfile

    # Check if we have found a buddy and process if so.
    if minDist <= 400000:  # nearest profile within 400 km
        pBuddy = main.get_profile_from_db(profiles[iMinDist][0])

        # buddy vetoes
        Fail = False
        if pBuddy.var_index() is None:
            Fail = True
        if not Fail:
            main.catchFlags(pBuddy)
            if np.sum(pBuddy.t().mask == False) == 0:
                Fail = True

        if not Fail:

            result = stdLevelData(pBuddy, parameters)

            query = 'SELECT bgevstdlevels FROM enbackground WHERE uid = ' + str(pBuddy.uid())
            buddy_pars = main.dbinteract(query)
            buddy_pars = main.unpack_row(buddy_pars[0])

            if result is not None:
                levelsBuddy, origLevelsBuddy, assocLevelsBuddy = result
                bgevBuddy = buddy_pars[0]
                pgeBuddy  = determine_pge(levelsBuddy, bgevBuddy, obev, pBuddy)
                pgeData   = update_pgeData(pgeData, pgeBuddy, levels, levelsBuddy, minDist, p, pBuddy, obev, bgev, bgevBuddy)

    # Check if levels should be reinstated.
    if allow_level_reinstating:
        if np.abs(p.latitude()) < 20.0:
            depthTol = 300.0
        else:
            depthTol = 200.0
        stdLevelFlags = pgeData >= 0.5
        for i, slflag in enumerate(stdLevelFlags):
            if slflag:
                # Check for non rejected surrounding levels.
                okbelow = False
                if i > 0:
                    if not stdLevelFlags[i - 1] and not levels.mask[i - 1] and not bgsl.mask[i - 1]:
                        okbelow = True
                okabove = False
                nsl = len(stdLevelFlags)
                if i < nsl - 1:
                    if not stdLevelFlags[i + 1] and not levels.mask[i + 1] and not bgsl.mask[i + 1]:
                        okabove = True
                # Work out tolerances.
                if slev[i] > depthTol + 100: 
                    tolFactor = 0.5
                elif slev[i] > depthTol:
                    tolFactor = 1.0 - 0.005 * (slev[i] - depthTol)
                else:
                    tolFactor = 1.0
                ttol = 0.5 * tolFactor 
                if okbelow and okabove:
                    xmax = levels[i - 1] + bgsl[i - 1] + ttol
                    xmin = levels[i + 1] + bgsl[i + 1] - ttol
                elif okbelow:
                    xmax = levels[i - 1] + bgsl[i - 1] + ttol
                    xmin = levels[i - 1] + bgsl[i - 1] - ttol
                elif okabove:
                    xmax = levels[i + 1] + bgsl[i + 1] + ttol
                    xmin = levels[i + 1] + bgsl[i + 1] - ttol
                else:
                    continue
                # Reassign PGE if level is within the tolerances.
                if xmin <= levels[i] + bgsl[i] <= xmax:
                    pgeData[i] = 0.49      

    # Assign the QC flags to original levels.
    for i, pge in enumerate(pgeData):
        if pgeData.mask[i]: continue
        if pge < 0.5: continue
        for j, assocLevel in enumerate(assocLevels):
            if assocLevel == i:
                origLevel = origLevels[j]        
                qc[origLevel] = True

    return qc
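A hedged usage sketch for the check above; p and parameters are stand-ins for a wodpy-style profile object and the parameter store used throughout these examples:

qc = test(p, parameters, allow_level_reinstating=True)
print('levels flagged:', int(qc.sum()), 'of', len(qc))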
def get_profile_info(parameters):
    # Gets information about the profiles from the database.
    
    query = 'SELECT uid,year,month,cruise,lat,long FROM ' + parameters['table']
    return main.dbinteract(query)
Exemple #29
0
# Read data from database into a pandas data frame.
df = dbutils.db_to_df(sys.argv[1],
                      filter_on_wire_break_test=False,
                      filter_on_tests=groupdefinition,
                      n_to_extract=int(sys.argv[2]))
testNames = df.columns[2:].values.tolist()

# declare some downstream constructs
accepted = []
unflagged = []
fprs = []
bad = df.loc[df['Truth']]
bad.reset_index(inplace=True, drop=True)

# mark chosen profiles as part of the training set
all_uids = main.dbinteract('SELECT uid from ' + sys.argv[1] + ';')
for uid in all_uids:
    uid = uid[0]
    is_training = int(uid in df['uid'].astype(int).to_numpy())
    query = "UPDATE " + sys.argv[1] + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
    main.dbinteract(query)

# algo. step 0:
# demand individual QC tests have TPR/FPR > some threshold
perf_thresh = 2
drop_tests = []
for test in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[test].tolist(), df['Truth'].tolist())
    if fpr > 0 and tpr / fpr < perf_thresh:
        print('dropping', test, '; tpr/fpr =', tpr / fpr)
        df = df.drop([test], axis=1)
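main.calcRates itself is not shown in these snippets. A minimal re-implementation consistent with how it is called here (boolean QC results against boolean truth, all four rates returned as percentages) might look like:

import numpy as np

def calc_rates_sketch(results, truth):
    # Hypothetical stand-in for main.calcRates, for illustration only.
    results = np.asarray(results, dtype=bool)
    truth = np.asarray(truth, dtype=bool)
    tpr = 100.0 * np.sum(results & truth) / max(np.sum(truth), 1)
    fpr = 100.0 * np.sum(results & ~truth) / max(np.sum(~truth), 1)
    return tpr, fpr, 100.0 - tpr, 100.0 - fpr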
Exemple #30
0
        # keep tabs on how many good and how many bad profiles have been added to db
        # nowire == index of first wire break level
        wireqc = qctests.CSIRO_wire_break.test(profile, {})
        try:
            nowire = list(wireqc).index(True)
        except ValueError:
            nowire = len(truth)
        # flag only counts if it's before the wire break:
        flagged = dbutils.summarize_truth(truth[0:nowire])
        if flagged:
            bad += 1
        else:
            good += 1

        query = "INSERT INTO " + sys.argv[2] + " (raw, truth, uid, year, month, day, time, lat, long, cruise, probe) values (?,?,?,?,?,?,?,?,?,?,?);"
        values = (p['raw'], p['truth'], p['uid'], p['year'], p['month'], p['day'], p['time'], p['latitude'], p['longitude'], p['cruise'], p['probe_type'])
        main.dbinteract(query, values)
        if profile.is_last_profile_in_file(fid) == True:
            break

    conn.commit()
    print('number of clean profiles written:', good)
    print('number of flagged profiles written:', bad)
else:

    print('Usage: python build-db.py inputdatafile databasetable')
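The except ValueError fallback in the wire-break bookkeeping above matters because list.index raises when no wire break was flagged, in which case the whole truth array should count. A toy run (values assumed):

truth = [False, False, True, False]
wireqc = [False, False, False, True]   # wire break detected at level 3
try:
    nowire = wireqc.index(True)
except ValueError:
    nowire = len(truth)
print(any(truth[:nowire]))   # True: the flag at level 2 precedes the break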



Exemple #32
0
# set up global parameter store
parameterStore = {
  "table": dbtable,
  "db": targetdb
}
for test in testNames:
  exec('from qctests import ' + test)
  try:
    exec(test + '.loadParameters(parameterStore)')
  except Exception:
    print('No parameters to load for', test)

# connect to database & fetch list of all uids
query = 'SELECT uid FROM ' + dbtable + ' ORDER BY uid;'
uids = main.dbinteract(query, targetdb=targetdb)

# launch async processes
if batchnumber is not None and nperbatch is not None:
  batchnumber = int(batchnumber)
  nperbatch   = int(nperbatch)
  startindex  = batchnumber*nperbatch
  endindex    = min((batchnumber+1)*nperbatch,len(uids))
else:
  startindex  = 0
  endindex    = len(uids)
pool = Pool(processes=int(cores))
for i in range(startindex, endindex):
  pool.apply_async(process_row, (uids[i][0], logdir, dbtable, targetdb))
pool.close()
pool.join()
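The exec-based import loop above works, but an importlib variant avoids building code strings; a sketch, assuming each test module lives at qctests.<test> exactly as the exec form implies:

import importlib

for test in testNames:
  module = importlib.import_module('qctests.' + test)
  if hasattr(module, 'loadParameters'):
    module.loadParameters(parameterStore)
  else:
    print('No parameters to load for', test)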
Exemple #34
0
def find_roc(table, 
             filter_on_wire_break_test=True,
             n_profiles_to_analyse=np.iinfo(np.int32).max,
             n_combination_iterations=2, 
             with_reverses=False,
             improve_threshold=1.0, 
             verbose=True, 
             plot_roc=True,
             write_roc=True):
    '''
    Generates a ROC curve from the database data in table by maximising the gradient
    of the ROC curve. It will combine different tests together and invert the results
    of tests if requested.

    filter_on_wire_break_test - filter out the impact of XBT wire breaks from results.
    n_profiles_to_analyse - restrict the number of profiles extracted from the database.
    n_combination_iterations - AND tests together; restricted to max of 2 as otherwise
                               number of tests gets very large.
    with_reverses - if True, a copy of each test with inverted results is made.
    improve_threshold - ignores tests if they do not result in a change in true positive
                        rate (in %) of at least this amount.
    verbose - if True, will print a lot of messages to screen.
    plot_roc - if True, will save an image of the ROC to roc.png.
    write_roc - if True, will save the ROC data to roc.json.
    '''

    # Read data from database into a pandas data frame.
    df        = dbutils.db_to_df(table,
                                 filter_on_wire_break_test=filter_on_wire_break_test,
                                 n_to_extract=n_profiles_to_analyse)

    # mark chosen profiles as part of the training set
    all_uids = main.dbinteract('SELECT uid from ' + table + ';')
    for uid in all_uids:
        uid = uid[0]
        is_training = int(uid in df['uid'].astype(int).to_numpy())
        query = "UPDATE " + table + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
        main.dbinteract(query)

    # drop nondiscriminating tests
    nondiscrim = []
    cols = list(df.columns)
    for c in cols:
        if len(pandas.unique(df[c])) == 1:
            nondiscrim.append(c)
    cols = [t for t in cols if t not in nondiscrim]
    df = df[cols]
    print(list(df))

    testNames = df.columns[2:].values.tolist()

    if verbose:
        print('Number of profiles from database was:', len(df.index))
        print('Number of quality checks from database was:', len(testNames))

    # Convert to numpy structures and make inverse versions of tests if required.
    # Any test with true positive rate of zero is discarded.
    truth = df['Truth'].to_numpy()
    tests = []
    names = []
    tprs  = []
    fprs  = []
    if with_reverses:
        reverselist = [False, True]
    else:
        reverselist = [False]
    for i, testname in enumerate(testNames):
        for reversal in reverselist:
            results = df[testname].to_numpy() != reversal
            tpr, fpr, fnr, tnr = main.calcRates(results, truth)
            if tpr > 0.0:
                tests.append(results)
                if reversal:
                    addtext = 'r'
                else:
                    addtext = ''
                names.append(addtext + testname)
                tprs.append(tpr)
                fprs.append(fpr)
    del df # No further need for the data frame.
    if verbose: print('Number of quality checks after reverses and removing zero TPR was:', len(names))

    # Make combinations of the single checks and store.
    assert n_combination_iterations <= 2, 'Setting n_combination_iterations > 2 results in a very large number of combinations'
    if verbose: print('Starting construction of combinations with number of iterations:', n_combination_iterations)
    for its in range(n_combination_iterations):
        ntests = len(names)
        for i in range(ntests - 1):
            if verbose: print('Processing iteration', its + 1, 'out of', n_combination_iterations, 'step', i + 1, 'out of', ntests - 1, 'with number of tests now', len(names))
            for j in range(i + 1, ntests):
                # Create the name for this combination.
                newname = '&'.join(sorted((names[i] + '&' + names[j]).split('&')))
                if newname in names: continue # Do not keep multiple copies of the same combination.

                results = np.logical_and(tests[i], tests[j])
                tpr, fpr, fnr, tnr = main.calcRates(results, truth)
                if tpr > 0.0:
                    tests.append(results)
                    tprs.append(tpr)
                    fprs.append(fpr)
                    names.append(newname)
    if verbose: print('Completed generation of tests, now constructing roc from number of tests:', len(names))

    # Create storage to hold the roc curve.
    cumulative = truth.copy()
    cumulative[:] = False
    currenttpr    = 0.0
    currentfpr    = 0.0
    used          = np.zeros(len(names), dtype=bool) 
    r_fprs    = []
    r_tprs    = []

    # Create roc by keep adding tests in order of ratio of tpr/fpr change to get the highest
    # gradient in the roc curve.
    keepgoing = True
    testcomb  = []
    while keepgoing:
        keepgoing = False
        besti     = -1
        bestratio = 0.0
        bestncomb = 100000
        bestdtpr  = 0
        bestdfpr  = 100000
        for i in range(len(names)):
            if used[i]: continue
            cumulativenew = np.logical_or(cumulative, tests[i])
            tpr, fpr, fnr, tnr = main.calcRates(cumulativenew, truth)
            dtpr = tpr - currenttpr
            dfpr = max(fpr - currentfpr, 0.1 / len(cumulative)) # In case of 0 change in fpr.
            ratio = dtpr / dfpr
            newbest = False
            if ratio >= bestratio and dtpr >= improve_threshold and dtpr > 0.0:
                # If the ratio is better than found previously, use it; else
                # if it is the same, decide whether to use it.
                if ratio > bestratio:
                    newbest = True
                elif dtpr >= bestdtpr:
                    if dtpr > bestdtpr:
                        newbest = True
                    elif len(names[i].split('&')) < bestncomb:
                        newbest = True
            if newbest:
                besti     = i
                bestratio = ratio
                bestncomb = len(names[i].split('&'))
                bestdtpr  = dtpr
                bestdfpr  = dfpr
        if besti >= 0:
            keepgoing = True
            used[besti] = True
            cumulative = np.logical_or(cumulative, tests[besti])
            currenttpr, currentfpr, fnr, tnr = main.calcRates(cumulative, truth)
            testcomb.append(names[besti])
            r_fprs.append(currentfpr)
            r_tprs.append(currenttpr)
            print('ROC point:', currenttpr, currentfpr, names[besti])

    if plot_roc:
        plt.plot(r_fprs, r_tprs)
        plt.xlim(0, 100)
        plt.ylim(0, 100)
        plt.xlabel('False positive rate (%)')
        plt.ylabel('True positive rate (%)')
        plt.savefig('roc.png')
        plt.close()

    if write_roc:
        with open('roc.json', 'w') as f:
            r = {}
            r['tpr'] = r_tprs
            r['fpr'] = r_fprs
            r['tests'] = testcomb
            json.dump(r, f)
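A hedged invocation sketch for find_roc, taking the results table from the command line as the surrounding scripts do; the argument values are illustrative, not from the source:

find_roc(sys.argv[1],
         n_profiles_to_analyse=10000,
         n_combination_iterations=2,
         with_reverses=False,
         improve_threshold=1.0,
         plot_roc=True,
         write_roc=True)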
Exemple #35
0
def builddb(infile,
            check_originator_flag_type=True,
            months_to_use=range(1, 13),
            outfile='iquod.db',
            dbtable='iquod'):

    conn = sqlite3.connect(outfile, isolation_level=None)
    cur = conn.cursor()

    # Identify tests
    testNames = main.importQC('qctests')
    testNames.sort()

    # set up our table
    query = "CREATE TABLE IF NOT EXISTS " + dbtable + """(
                raw text,
                truth BLOB,
                uid integer PRIMARY KEY,
                year integer,
                month integer,
                day integer,
                time real,
                lat real,
                long real,
                country text,
                cruise integer,
                ocruise text,
                probe integer,
                training integer,
                flagged integer,
                """
    for i in range(len(testNames)):
        query += testNames[i].lower() + ' BLOB'
        if i < len(testNames) - 1:
            query += ','
        else:
            query += ');'

    cur.execute(query)

    # populate table from wod-ascii data
    fid = open(infile)
    uids = []
    good = 0
    bad = 0

    while True:
        # extract profile as wodpy object and raw text
        start = fid.tell()
        profile = wod.WodProfile(fid)
        end = fid.tell()
        fid.seek(start)
        raw = fid.read(end - start)
        fid.seek(end)
        # set up dictionary for populating query string
        p = profile.npdict()
        p['raw'] = "'" + raw + "'"

        # check for duplicate profiles in raw data
        if p['uid'] in uids:
            if profile.is_last_profile_in_file(fid) == True:
                break
            else:
                continue
        uids.append(p['uid'])

        # skip pathological profiles
        isgood = assessProfile(profile, check_originator_flag_type,
                               months_to_use)
        if not isgood and profile.is_last_profile_in_file(fid) == True:
            break
        elif not isgood:
            continue

        # encode temperature error codes into truth array
        truth = encodeTruth(profile)
        p['truth'] = main.pack_array(truth)

        # extract country code
        country = profile.primary_header['Country code']

        # originator cruise
        orig_cruise = profile.originator_cruise()

        # keep tabs on how many good and how many bad profiles have been added to db
        # nowire == index of first wire break level
        wireqc = qctests.CSIRO_wire_break.test(profile, {})
        try:
            nowire = list(wireqc).index(True)
        except ValueError:
            nowire = len(truth)
        # flag only counts if it's before the wire break:
        flagged = dbutils.summarize_truth(truth[0:nowire])
        if flagged:
            bad += 1
        else:
            good += 1

        query = "INSERT INTO " + dbtable + " (raw, truth, uid, year, month, day, time, lat, long, country, cruise, ocruise, probe, flagged) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?);"
        values = (p['raw'], p['truth'], p['uid'], p['year'], p['month'],
                  p['day'], p['time'], p['latitude'], p['longitude'], country,
                  p['cruise'], orig_cruise, p['probe_type'], int(flagged))
        main.dbinteract(query, values, targetdb=outfile)
        if profile.is_last_profile_in_file(fid) == True:
            break

    conn.commit()
    print('number of clean profiles written:', good)
    print('number of flagged profiles written:', bad)
    print('total number of profiles written:', good + bad)
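A hedged call sketch for builddb; the input path is a placeholder, and the keyword defaults mirror the signature above:

builddb('wod_profiles.dat', outfile='iquod.db', dbtable='iquod')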
Exemple #37
0
    # Parallel processing.
    print('\nPlease wait while QC is performed\n')

    # set up global parameter store
    parameterStore = {"table": sys.argv[1]}
    for test in testNames:
        exec('from qctests import ' + test)
        try:
            exec(test + '.loadParameters(parameterStore)')
        except Exception:
            print('No parameters to load for', test)

    # connect to database & fetch list of all uids
    query = 'SELECT uid FROM ' + sys.argv[1] + ' ORDER BY uid;'
    uids = main.dbinteract(query)

    # launch async processes
    if len(sys.argv) > 4:
        batchnumber = int(sys.argv[3])
        nperbatch = int(sys.argv[4])
        startindex = batchnumber * nperbatch
        endindex = min((batchnumber + 1) * nperbatch, len(uids))
    else:
        startindex = 0
        endindex = len(uids)
    pool = Pool(processes=int(sys.argv[2]))
    for i in range(startindex, endindex):
        pool.apply_async(process_row, (uids[i][0], logdir))
    pool.close()
    pool.join()
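The batch windowing above slices the uid list so that independent invocations cover disjoint ranges. A toy check of the arithmetic (values assumed):

batchnumber, nperbatch, n_uids = 3, 1000, 3500
startindex = batchnumber * nperbatch                     # 3000
endindex = min((batchnumber + 1) * nperbatch, n_uids)    # 3500, clipped to the list length
print(startindex, endindex)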
Exemple #38
0
df = dbutils.db_to_df(table=dbtable,
                      targetdb=targetdb,
                      filter_on_wire_break_test=False,
                      filter_on_tests=groupdefinition,
                      n_to_extract=samplesize)
testNames = df.columns[2:].values.tolist()

# declare some downstream constructs
accepted = []
unflagged = []
fprs = []
bad = df.loc[df['Truth']]
bad.reset_index(inplace=True, drop=True)

# mark chosen profiles as part of the training set
all_uids = main.dbinteract('SELECT uid from ' + dbtable + ';',
                           targetdb=targetdb)
for uid in all_uids:
    uid = uid[0]
    is_training = int(uid in df['uid'].astype(int).to_numpy())
    query = "UPDATE " + dbtable + " SET training=" + str(
        is_training) + " WHERE uid=" + str(uid) + ";"
    main.dbinteract(query, targetdb=targetdb)

# algo. step 0:
# demand individual QC tests have TPR/FPR > some threshold
perf_thresh = 2
drop_tests = []
for test in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[test].tolist(),
                                        df['Truth'].tolist())
    if fpr > 0 and tpr / fpr < perf_thresh:
        print('dropping', test, '; tpr/fpr =', tpr / fpr)
        df = df.drop([test], axis=1)
Exemple #40
0
  # Parallel processing.
  print('\nPlease wait while QC is performed\n')

  # set up global parameter store
  parameterStore = {
    "table": sys.argv[1]
  }
  for test in testNames:
    exec('from qctests import ' + test)
    try:
      exec(test + '.loadParameters(parameterStore)')
  except Exception:
    print('No parameters to load for', test)
      
  # connect to database & fetch list of all uids
  query = 'SELECT uid FROM ' + sys.argv[1] + ' ORDER BY uid;' 
  uids = main.dbinteract(query)
  
  # launch async processes
  pool = Pool(processes=int(sys.argv[2]))
  for i in range(len(uids)):
    pool.apply_async(process_row, (uids[i][0], logdir))
  pool.close()
  pool.join()
    
else:
  print('Please add command line arguments to name your output file and set parallelization:')
  print('python AutoQC <database results table> <number of processes>')
  print('will use <database results table> to log QC results in the database, and run the calculation parallelized over <number of processes>.')