def _binary_diff(path_test, path_ref, min_cdd):
    '''
    Compares results in two SWMM binary files element-wise and prints any
    entries whose log relative error falls below min_cdd (correct decimal
    digits threshold).

    Arguments:
        path_test - path to result file being tested
        path_ref  - path to reference result file
        min_cdd   - minimum number of correct decimal digits required

    Raises:
        ValueError() when paired records have different lengths
    '''
    # Use the module's _zip alias for consistency with the other
    # comparison routines in this file (see swmm_allclose_compare).
    for (test, ref) in _zip(ordr.output_generator(path_test),
                            ordr.output_generator(path_ref)):

        if len(test[0]) != len(ref[0]):
            raise ValueError('Inconsistent lengths')

        # Skip over arrays that are equal
        if np.array_equal(test[0], ref[0]):
            continue
        else:
            lre = _log_relative_error(test[0], ref[0])
            # Location of the worst (smallest) log relative error
            idx = np.unravel_index(np.argmin(lre), lre.shape)

            if lre[idx] < min_cdd:
                _print_diff(idx, lre, test, ref)

    return
def swmm_allclose_compare(path_test, path_ref, rtol, atol):
    '''
    Compares results in two SWMM binary files. Using the comparison criteria
    described in the numpy assert_allclose documentation.

        (test_value - ref_value) <= atol + rtol * abs(ref_value)

    Returns true if all of the results in the two binary files meet the
    comparison criteria; otherwise, an AssertionError is thrown.

    Numpy allclose is quite expensive to evaluate. Test and reference results
    are checked to see if they are equal before being compared using the
    allclose criteria. This reduces comparison times significantly.

    Arguments:
        path_test - path to result file being tested
        path_ref  - path to reference result file
        rtol - relative tolerance
        atol - absolute tolerance

    Returns:
        True or raises an error

    Raises:
        ValueError()
        AssertionError()
        ...
    '''
    test_stream = ordr.output_generator(path_test)
    ref_stream = ordr.output_generator(path_ref)

    for test, ref in _zip(test_stream, ref_stream):
        if len(test) != len(ref):
            raise ValueError('Inconsistent lengths')

        # Fast path: exact equality needs no tolerance check
        if np.array_equal(test, ref):
            continue

        # Fall back to the (expensive) tolerance-based comparison
        np.testing.assert_allclose(test, ref, rtol, atol)

    return True
# Example #3 (artifact of the source this file was extracted from)
def result_compare(path_test, path_ref, comp_args):
    '''
    Compares results in two SWMM binary files using numpy assert_allclose,
    printing a summary of equal/close/notclose counts and the timing.

    Arguments:
        path_test - path to result file being tested
        path_ref  - path to reference result file
        comp_args - sequence (rtol, atol) passed to assert_allclose

    Returns:
        True if every paired record is equal or close; False otherwise

    Raises:
        ValueError() when paired records have different lengths
    '''
    isclose = True
    close = 0
    notclose = 0
    equal = 0
    total = 0
    output = cStringIO.StringIO()

    start = time.time()

    test_reader = ordr.output_generator(path_test)
    ref_reader = ordr.output_generator(path_ref)

    for test, ref in _zip(test_reader, ref_reader):
        total += 1
        # Progress indicator for very long result files
        # (fix: this print was previously not indented under the if,
        # which was a syntax error)
        if total % 100000 == 0:
            print(total)

        if len(test) != len(ref):
            raise ValueError('Inconsistent lengths')

        # Skip results if they are zero or equal
        if np.array_equal(test, ref):
            equal += 1
            continue
        else:
            try:
                np.testing.assert_allclose(test, ref, comp_args[0], comp_args[1])
                close += 1

            except AssertionError as ae:
                # Collect the failure details; keep comparing remaining records
                notclose += 1
                output.write(str(ae))
                output.write('\n\n')
                continue

    stop = time.time()

    print(output.getvalue())
    output.close()

    print('equal: %d  close: %d  notclose: %d  total: %d  in %f (sec)\n' %
          (equal, close, notclose, total, (stop - start)))

    if notclose > 0:
        print('%d differences found\n' % notclose)
        isclose = False

    return isclose

def array_zero(test, ref):
    '''
    Returns True when neither array contains a nonzero element.
    '''
    return not (test.any() or ref.any())

def report_compare(path_test, path_ref, comp_args):
    '''
    Compares results in two report files ignoring contents of header and footer.

    Arguments:
        path_test - path to report file being tested
        path_ref  - path to reference report file
        comp_args - accepted for interface symmetry with result_compare;
                    not used by the line-by-line comparison

    Returns:
        True when all compared lines match; False on the first mismatch
    '''
    # fix: the parameter was written as (comp_args), a Python 2 leftover
    # that is a syntax error under Python 3; the parens are dropped with
    # no change in behavior.
    with open(path_test, 'r') as ftest, open(path_ref, 'r') as fref:
        # hdf.parse(..., 4, 4)[1] skips 4 header and 4 footer lines;
        # NOTE(review): exact parse contract lives in the hdf module — confirm
        for (test_line, ref_line) in zip(hdf.parse(ftest, 4, 4)[1],
                                         hdf.parse(fref, 4, 4)[1]):
            if test_line != ref_line:
                return False

    return True