def validateCorrectness( self, outfile ):
    """Validate vips outputs, optionally scoring image error per variant.

    First runs the parent's correctness check, then strips the
    "#im_vips2ppm" comment line that vips writes into each output file
    (done in place via a temp file).  When error-based fitness is enabled
    (self.options.error) the cleaned outputs are compared to the golden
    image with the external diff-img tool and the inverted error totals
    are stored in self.error; otherwise the parent's verdict is returned.
    """
    correctness = ParallelTest.validateCorrectness( self, outfile )
    # Strip the vips-generated header comment in place: sed writes to a
    # temp file, which then replaces the original.  Note this happens
    # AFTER the base correctness check above.
    with Multitmp( len( outfile ) ) as tmp:
        Multitmp.check_call( [ "sed", "-e", "/^#im_vips2ppm/d", outfile ], stdout = tmp )
        Multitmp.check_call( [ "mv", tmp, outfile ] )
    if self.options.error:
        with Multitmp( len( outfile ) ) as result:
            with open( "/dev/null", 'w' ) as null:
                golden = self.getGolden()
                # NOTE(review): sibling classes in this file invoke
                # "diff-img.sh"; confirm the extension-less "diff-img"
                # is the intended tool here.
                diffimg = os.path.join( root, "bin", "diff-img")
                Multitmp.check_call(
                    [ diffimg, golden, outfile ],
                    stdout = result, stderr = null,
                    verbose = self.options.verbose,
                )
            errors = list()
            # Each result file holds diff-img output; sum the third field
            # of every "total ..." line as the error magnitude.
            for fname in result:
                with open( fname ) as fh:
                    error = 0
                    for line in fh:
                        if line.startswith( "total" ):
                            error += float( line.split()[ 2 ] )
                    # Map error onto (0, 1]: 1.0 means a perfect match.
                    errors.append( 1 / ( error + 1 ) )
            self.error = errors
        return True
    else:
        return correctness
def getParallelFitness( self, *args ):
    """Delegate fitness evaluation, appending error scores when enabled.

    When error-based fitness is on, a total failure ([[0]]) is mirrored
    into the error dimension as [[0], [0]]; otherwise the per-variant
    scores stored in self.error are appended as an extra objective.
    """
    fitness = ParallelTest.getParallelFitness( self, *args )
    if not self.options.error:
        return fitness
    if fitness == [ [ 0 ] ]:
        return [ [ 0 ], [ 0 ] ]
    fitness.append( self.error )
    return fitness
def validateCorrectness( self, outfile ):
    """Validate video outputs; when scoring errors, compare decoded frames.

    Runs the parent's correctness check first.  With error-based fitness
    enabled, each output video is decoded into per-frame PNGs with avconv,
    the frames are re-validated by the parent, and diff-img.sh scores them
    against the golden frames; inverted totals land in self.error.
    """
    correctness = ParallelTest.validateCorrectness( self, outfile )
    if self.options.error:
        with Multitmp( len( outfile ) ) as outdir:
            # Start from a clean frame directory for each variant.
            Multitmp.check_call( [ "rm", "-rf", outdir ], verbose = self.options.verbose )
            Multitmp.check_call( [ "mkdir", outdir ], verbose = self.options.verbose )
            # Decode at 25 fps; frames are emitted as 001.png, 002.png, ...
            Multitmp.check_call(
                [ "avconv", "-i", outfile, "-r", "25", "-loglevel", "panic", "%03d.png" ],
                verbose = self.options.verbose, cwd = outdir
            )
            # The decoded frames themselves must also pass validation.
            if not ParallelTest.validateCorrectness( self, outdir ):
                return False
            with Multitmp( len( outfile ) ) as result:
                with open( "/dev/null", 'w' ) as null:
                    golden = self.getGolden()
                    diffimg = os.path.join( root, "bin", "diff-img.sh")
                    Multitmp.check_call(
                        [ diffimg, golden, outdir ],
                        stdout = result, stderr = null,
                        verbose = self.options.verbose,
                    )
                errors = list()
                # Sum the third field of each "total ..." line that
                # diff-img.sh printed for this variant.
                for fname in result:
                    with open( fname ) as fh:
                        error = 0
                        for line in fh:
                            if line.startswith( "total" ):
                                error += float( line.split()[ 2 ] )
                        # Map error onto (0, 1]: 1.0 means a perfect match.
                        errors.append( 1 / ( error + 1 ) )
                self.error = errors
        return True
    else:
        return correctness
def diff( self, golden, actual ):
    """Score pose outputs by inverted RMSE against the golden poses.

    Falls back to the parent's exact diff when error-based fitness is
    disabled.  Each variant receives a score in (0, 1]; unreadable or
    wrongly shaped output scores 0.  Always returns True when scoring
    so evaluation can proceed on the scores alone.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    reference = self._getPoses( golden )
    self.error = list()
    for path in actual:
        poses = self._getPoses( path )
        if poses is None or poses.shape != reference.shape:
            self.error.append( 0 )
            continue
        rmse = np.sqrt( np.mean( ( reference - poses ) ** 2 ) )
        self.error.append( 1.0 / ( 1 + rmse ) )
    return True
def validateCorrectness( self, outfile ):
    """Validate output; when scoring errors, first explode videos to frames.

    With error-based fitness enabled, each output video is moved aside and
    its path becomes a directory of per-frame PNGs (decoded at 25 fps), so
    the parent class validates individual frames instead of the raw video.
    """
    if self.options.error:
        chatty = self.options.verbose
        with Multitmp( len( outfile ) ) as scratch:
            # Move the video out of the way, then reuse its path as the
            # destination directory for the decoded frames.
            Multitmp.check_call( [ "mv", outfile, scratch ], verbose = chatty )
            Multitmp.check_call( [ "mkdir", outfile ], verbose = chatty )
            Multitmp.check_call(
                [ "avconv", "-i", scratch, "-r", "25", "-loglevel", "panic", "%03d.png" ],
                verbose = chatty, cwd = outfile
            )
    return ParallelTest.validateCorrectness( self, outfile )
def diff( self, golden, actual ):
    """Score numeric outputs by inverted RMSE against the golden values.

    When error-based fitness is disabled, defer to the parent's exact
    comparison.  A variant whose output is unreadable or of the wrong
    length scores 0; a perfect match scores 1.  Returns True so that
    evaluation proceeds on the stored scores.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    reference = self._readOutput( golden )
    scores = list()
    for path in actual:
        data = self._readOutput( path )
        if data is None or len( data ) != len( reference ):
            scores.append( 0 )
        else:
            rmse = np.sqrt( np.mean( ( reference - data ) ** 2 ) )
            scores.append( 1.0 / ( 1 + rmse ) )
    self.error = scores
    return True
def diff( self, golden, actual ):
    """Score outputs by Hamming distance from the golden file.

    When error-based fitness is disabled, defer to the parent's exact
    diff.  Otherwise run the external "hamming" tool over every actual
    output and record 1 / (1 + distance) per variant in self.error.

    Returns:
        True when scoring, so evaluation proceeds on the stored scores.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    with Multitmp( len( actual ) ) as count:
        Multitmp.check_call( [ os.path.join( root, "bin", "hamming" ), golden, actual ],
                             stdout = count, verbose = self.options.verbose )
        self.error = list()
        for fname in count:
            with open( fname ) as fh:
                # Only the first line of the tool's output is the distance.
                for line in fh:
                    self.error.append( 1.0 / ( 1 + float( line.strip() ) ) )
                    break
                else:
                    # Bug fix: an empty output file previously produced NO
                    # entry, shifting every subsequent score onto the wrong
                    # variant.  Treat a missing distance as maximal error,
                    # matching the freqdiff-based sibling implementation.
                    self.error.append( 0 )
    return True
def diff( self, golden, actual ):
    """Score outputs with the external freqdiff tool.

    When error-based fitness is disabled, defer to the parent's exact
    diff.  Otherwise each variant scores 1 / (1 + freqdiff value); an
    empty result file counts as maximal error (score 0).
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    with Multitmp( len( actual ) ) as result:
        Multitmp.check_call( [ "./freqdiff", golden, actual ],
                             stdout = result, verbose = self.options.verbose )
        self.error = list()
        for fname in result:
            with open( fname ) as fh:
                first = fh.readline()
            if first:
                self.error.append( 1 / ( 1 + float( first.strip() ) ) )
            else:
                self.error.append( 0 )
    return True
def getParallelFitness( self, *args ):
    """Evaluate fitness against a private copy of the executable.

    A temporary copy of self.exe is used during evaluation so parallel
    runs cannot clobber the shared binary; self.exe is restored even on
    failure.  With error-based fitness enabled, the stored self.error
    scores are appended as an extra objective and a total failure
    ([[0]]) is mirrored as [[0], [0]].
    """
    with mktemp( prefix = "vips" ) as tmpexe:
        original_exe = self.exe
        shutil.copyfile( original_exe, tmpexe )
        shutil.copymode( original_exe, tmpexe )
        self.exe = tmpexe
        try:
            fitness = ParallelTest.getParallelFitness( self, *args )
            if not self.options.error:
                return fitness
            if fitness == [ [ 0 ] ]:
                return [ [ 0 ], [ 0 ] ]
            fitness.append( self.error )
            return fitness
        finally:
            # Always point back at the real executable.
            self.exe = original_exe
def diff( self, golden, actual ):
    """Score price outputs by merging golden and actual records.

    Records come from self._getPoses-style reader _getPrices as rows of
    (key, rank-value, price); rows are merge-joined on the key field
    (assumes both lists are sorted by it -- TODO confirm against
    _getPrices).  Matched rows contribute a relative price error in
    [0, 1]; unmatched rows on either side cost 1 each.  A Kendall-tau
    term over the matched rank-values penalizes ordering differences.
    The inverted total lands in self.error per variant; returns True.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    g_prices = self._getPrices( golden )
    self.error = list()
    for fname in actual:
        a_prices = self._getPrices( fname )
        i, j = 0, 0
        # a/b collect the matched rank-values for the Kendall-tau term.
        a, b = list(), list()
        error = 0
        # Merge-join on field 0 of each record.
        while i < len( g_prices ) and j < len( a_prices ):
            if g_prices[ i ][ 0 ] == a_prices[ j ][ 0 ]:
                a.append( g_prices[ i ][ 1 ] )
                b.append( a_prices[ j ][ 1 ] )
                # Relative error between prices (field 2), capped at 1.
                if g_prices[ i ][ 2 ] == 0:
                    e = abs( a_prices[ j ][ 2 ] )
                elif g_prices[ i ][ 2 ] < a_prices[ j ][ 2 ]:
                    e = 1 - g_prices[ i ][ 2 ] / a_prices[ j ][ 2 ]
                else:
                    e = 1 - a_prices[ j ][ 2 ] / g_prices[ i ][ 2 ]
                if isnan( e ) or isinf( e ):
                    e = 1
                error += e
                i += 1
                j += 1
            elif g_prices[ i ][ 0 ] < a_prices[ j ][ 0 ]:
                # Golden-only record: count as missing.
                i += 1
                error += 1
            else:
                # Actual-only record: count as spurious.
                j += 1
                error += 1
        # Whatever remains on either side is unmatched; 1 each.
        error += len( g_prices ) - i
        error += len( a_prices ) - j
        if len( a ) < 2:
            # Too few matches for a meaningful correlation: flat penalty.
            error += 1
        else:
            # Rescale tau from [-1, 1] to [0, 1] where 0 is perfect order.
            # NOTE(review): kendalltau can return NaN on constant input,
            # which would propagate into the score -- confirm inputs vary.
            error += 0.5 - 0.5 * kendalltau( a, b )[ 0 ]
        # Map total error onto (0, 1]: 1.0 is a perfect match.
        self.error.append( 1 / ( error + 1.0 ) )
    return True
def diff( self, golden, actual ):
    """Score image outputs with diff-img.sh; inverted error total per variant.

    When error-based fitness is disabled, defer to the parent's exact
    diff.  Otherwise sum the third field of every "total ..." line the
    tool prints per variant and store 1 / (total + 1) in self.error.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    with Multitmp( len( actual ) ) as result:
        with open( "/dev/null", 'w' ) as null:
            tool = os.path.join( root, "bin", "diff-img.sh")
            Multitmp.check_call(
                [ tool, golden, actual ],
                stdout = result, stderr = null,
                verbose = self.options.verbose,
            )
        scores = list()
        for fname in result:
            total = 0
            with open( fname ) as fh:
                for line in fh:
                    if not line.startswith( "total" ):
                        continue
                    total += float( line.split()[ 2 ] )
            scores.append( 1 / ( total + 1 ) )
        self.error = scores
    return True
def diff( self, golden, actual ):
    """Skip exact diffing when error-based fitness is enabled.

    Error scoring happens elsewhere in this class, so any output is
    accepted here; otherwise defer to the parent's exact comparison.
    """
    if not self.options.error:
        return ParallelTest.diff( self, golden, actual )
    return True
def validateCorrectness( self, outfile ):
    """Validate query/ranking outputs, optionally computing a graded error.

    Runs the parent's check first; any hard failure short-circuits.  With
    error-based fitness enabled, each output is parsed (self.readFile)
    into queries mapped to ranked results and compared to the golden
    parse.  The score combines penalties for missing/extra queries (q),
    missing/extra ranks (r), Kendall-tau order disagreements (t1, t2, t3)
    and absolute weight error (w), weighted 1000/100/10/10/5/1, then
    inverted into (0, 1] per variant and stored in self.error.
    """
    correctness = ParallelTest.validateCorrectness( self, outfile )
    if not correctness:
        return False
    if self.options.error:
        golden = self.getGolden()
        gold_queries = self.readFile( golden )
        errors = list()
        # Error function for missing/extra things. Max good is 0, max bad is 1
        def errorFun( missing, extra ):
            return 1 - ( 1 / ( 2 + ( 2 * missing ) ) ) - ( 1 / ( 2 + ( 2 * extra ) ) )
        for fname in outfile:
            # No output file is max error
            if not os.path.isfile(fname):
                return False
            # NOTE(review): this dict() is immediately overwritten below.
            test_queries = dict()
            test_queries = self.readFile( fname )
            t1 = 0 # Kendall tau penalty for weights
            t2 = 0 # Kendall tau penalty for rank output order
            t3 = 0 # Kendall tau penalty for query output order
            w = 0 # Weighting error penalty
            r = 0 # Penalty for missing or extra ranks
            # self.comm splits entries into (extra, test-common,
            # gold-common, missing) -- presumably like comm(1); verify.
            extra_queries, test_queries_int, gold_queries_int, missing_queries = \
                self.comm( test_queries, gold_queries )
            # Penalty for missing or extra queries
            q = errorFun( len( missing_queries ), len( extra_queries ) )
            if len( gold_queries_int ) < 2:
                t3 = 1
            else:
                t3, _ = stats.kendalltau( [ x for x, _ in test_queries_int ],
                                          [ x for x, _ in gold_queries_int ] )
                t3 = (.5) - (.5 * t3) # Change tau scale to [0-1] where 0 is good
                if isclose( t3, 0 ): t3 = 0.0
            for test_query, gold_query in zip( test_queries_int, gold_queries_int ):
                extra_ranks, test_ranks_int, gold_ranks_int, missing_ranks = \
                    self.comm( test_query[1], gold_query[1] )
                r += errorFun( len( missing_ranks ), len( extra_ranks ) )
                if len( gold_ranks_int ) < 2:
                    tau1 = 1
                else:
                    # NOTE(review): kendalltau can return NaN on constant
                    # input; unlike w below, the taus are not NaN-guarded.
                    tau1, _ = stats.kendalltau( [x for _, x in gold_ranks_int],
                                                [x for _, x in test_ranks_int] )
                    tau1 = (.5) - (.5 * tau1) # Change tau scale to [0-1] where 0 is good
                    if isclose( tau1, 0 ): tau1 = 0.0
                # Total absolute weight disagreement over the common ranks.
                absolute_error = sum( [ abs( a[1] - b[1] )
                                        for a, b in zip( gold_ranks_int, test_ranks_int ) ] )
                if isnan( absolute_error ) or isinf( absolute_error ):
                    w += 1
                else:
                    # Squash onto [0, 1).
                    w += absolute_error / ( absolute_error + 1 )
                t1 += tau1
                if len( gold_ranks_int ) < 2:
                    tau2 = 1
                else:
                    tau2, _ = \
                        stats.kendalltau( [x for x, _ in gold_ranks_int],
                                          [x for x, _ in test_ranks_int] )
                    tau2 = (.5) - (.5 * tau2) # Change tau scale to [0-1] where 0 is good
                    if isclose( tau2, 0 ): tau2 = 0.0
                t2 += tau2
            # Average the per-query penalties.
            # NOTE(review): raises ZeroDivisionError if no queries are in
            # common -- confirm self.comm guarantees a non-empty overlap.
            t1 = t1 / len( gold_queries_int )
            t2 = t2 / len( gold_queries_int )
            w = w / len( gold_queries_int )
            r = r / len( gold_queries_int )
            error = 1000 * q + 100 * r + 10 * t1 + 10 * t2 + 5 * t3 + w
            # Map error onto (0, 1]: 1.0 means a perfect match.
            errors.append( 1 / ( error + 1 ) )
        self.error = errors
        return True
    else:
        return correctness
def checkArgs( self, parser, args ):
    """Validate CLI arguments, then record the second positional argument
    as the test name in self.test (after the parent's own checks)."""
    ParallelTest.checkArgs( self, parser, args )
    self.test = args[ 1 ]
def validateCorrectness(self, outfile):
    """Validate output after removing run-specific artifacts.

    These files differ between runs and would spoil the comparison, so
    they are deleted from every output directory before delegating to
    the parent's correctness check.
    """
    junk = ["Face_Data", "Storytelling/output/log.txt"]
    Multitmp.check_call(["rm", "-rf"] + junk, cwd=outfile)
    return ParallelTest.validateCorrectness(self, outfile)
def checkArgs(self, parser, args):
    """Validate CLI arguments and clamp the problem size.

    After the parent's checks, every size other than "huge" is coerced
    to "large" -- only those two inputs are supported here.
    """
    ParallelTest.checkArgs(self, parser, args)
    self.size = "huge" if self.size == "huge" else "large"