def sortHeaders_test(self):
    ''' check basic behavior of header sorting '''

    p = []
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2004, 3, 17, 11.3], cruise=1))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2004, 3, 17, 22.6], cruise=1))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2004, 3, 17, 1.1], cruise=1))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2004, 3, 17, 15.9], cruise=1))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2005, 1, 11, 11.5], cruise=2))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2005, 1, 11, 22.2], cruise=2))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2005, 1, 11, 1.0], cruise=2))
    p.append(util.testingProfile.fakeProfile([0], [0], date=[2005, 1, 11, 16.0], cruise=2))

    sortedProfiles = main.sort_headers(p)

    # profiles grouped by cruise, sorted chronologically within each cruise
    truth = {
        1: [p[2], p[0], p[3], p[1]],
        2: [p[6], p[4], p[7], p[5]]
    }

    assert sortedProfiles == truth, 'incorrectly sorted profiles'
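# A minimal sketch of the grouping and sorting the test above expects from
# main.sort_headers: profiles grouped by cruise number, ordered chronologically
# within each cruise. This is an illustrative reimplementation only, assuming
# the profile objects expose the cruise()/year()/month()/day()/time() accessors
# that the fake profiles in these tests provide.

from collections import defaultdict

def sort_headers_sketch(profiles):
    grouped = defaultdict(list)
    for prof in profiles:
        grouped[prof.cruise()].append(prof)
    for cruise in grouped:
        # chronological order: year, month, day, then decimal hour
        grouped[cruise].sort(key=lambda prof: (prof.year(), prof.month(), prof.day(), prof.time()))
    return dict(grouped)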
def test(p):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has passed
    the check and True where it failed.
    """

    global EN_track_headers
    global EN_track_results
    global threadFile

    cruise = p.cruise()
    uid = p.uid()

    # don't bother if cruise == 0 or None
    if cruise in [0, None]:
        return np.zeros(1, dtype=bool)

    # The headers from an entire cruise must be analyzed all at once;
    # we'll write the results to the global data store, in a dictionary
    # with tuple keys (cruise, uid) and single-element numpy arrays as
    # values, containing either True or False (per all the other qc
    # return objects).

    # check if this profile has been examined already
    if (cruise, uid) in EN_track_results.keys():
        return EN_track_results[(cruise, uid)]

    # some detector types cannot be assessed by this test; do not raise flag.
    if p.probe_type() in [None]:
        return np.zeros(1, dtype=bool)

    # the first time this test is run, sort ds.threadProfiles into a cruise-keyed dictionary:
    if ds.threadFile != threadFile:
        EN_track_headers = main.sort_headers(ds.threadProfiles)
        threadFile = ds.threadFile

    # since we didn't find an answer already calculated,
    # we still need to do the calculation for this cruise;
    # all the relevant headers are sitting in the EN_track_headers dictionary.
    headers = EN_track_headers[cruise]

    # start all as passing by default:
    for i in range(len(headers)):
        EN_track_results[(headers[i].cruise(), headers[i].uid())] = np.zeros(1, dtype=bool)

    # copy the list of headers;
    # remove entries as they are flagged.
    passedHeaders = copy.deepcopy(headers)
    rejects = findOutlier(passedHeaders)

    while rejects != []:
        passedIndex = [x for x in range(len(passedHeaders)) if x not in rejects]
        passedHeaders = [passedHeaders[index] for index in passedIndex]
        rejects = findOutlier(passedHeaders)

    # if more than half got rejected, reject everyone
    if len(passedHeaders) < len(headers) / 2:
        for i in range(len(headers)):
            EN_track_results[(headers[i].cruise(), headers[i].uid())][0] = True

    return EN_track_results[(cruise, uid)]
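# The while loop above re-runs the outlier detector on the surviving headers
# until a pass produces no new rejections. A minimal, self-contained sketch of
# that convergence pattern follows; find_outlier_sketch is a hypothetical
# stand-in for this module's findOutlier, assumed only to take the current
# list and return the index positions it wants dropped.

def find_outlier_sketch(values, threshold=35):
    # hypothetical detector: flag anything more than `threshold` from the mean
    if len(values) == 0:
        return []
    mean = sum(values) / len(values)
    return [i for i, v in enumerate(values) if abs(v - mean) > threshold]

def iterate_rejections_sketch(values):
    # keep removing flagged entries until the detector reports nothing new
    passed = list(values)
    rejects = find_outlier_sketch(passed)
    while rejects != []:
        passed = [v for i, v in enumerate(passed) if i not in rejects]
        rejects = find_outlier_sketch(passed)
    return passed

# e.g. iterate_rejections_sketch([1, 2, 3, 50, 100]) -> [1, 2, 3]
# (100 is dropped on the first pass, 50 only on the second)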