            user_tputs = []
            user_resp_times = []
            user_gets_avg = []
            user_gets_90th = []
            user_gets_99th = []
            user_puts_avg = []
            user_puts_90th = []
            user_puts_99th = []

            for run in range(0, repeats):
                log_fname = "expt_{0}_users_{1}_size_{2}_run_{3}.log".format( expt_tag, u, size, run )
                results_file = None
                try:
                    input_fname = "{0}/{1}".format( results_dir, log_fname )
                    results_file = open( input_fname, "r" )
                    results = results_file.read()
                    track_results = RainOutputParser.parse_output( results )
                    for result in track_results:
                        result.pct_overhead_ops_threshold = 10.0
                        user_tputs.append( result.effective_load_ops_per_sec )
                        user_resp_times.append( result.average_op_response_time_sec )
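                        # op_response_times maps op name -> tuple indexed as
                        # [0]=90th pctile, [1]=99th pctile, [2]=average (seconds)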
                        user_gets_avg.append( result.op_response_times["Get"][2] )
                        user_gets_90th.append( result.op_response_times["Get"][0] )
                        user_gets_99th.append( result.op_response_times["Get"][1] )
                        user_puts_avg.append( result.op_response_times["Put"][2] )
                        user_puts_90th.append( result.op_response_times["Put"][0] )
                        user_puts_99th.append( result.op_response_times["Put"][1] )

                    if not header:
                        print( RainOutputParser.RESULTS_HEADER + '\n' )
                        header = True
                except IOError as err:
                    # The excerpt leaves the try block unclosed; the handling and
                    # cleanup below follow the results_file = None pattern above
                    print( "Could not process {0}: {1}".format( log_fname, err ) )
                finally:
                    if results_file is not None:
                        results_file.close()
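            # Hedged sketch: the excerpt ends before the lists gathered above
            # are consumed; a minimal aggregation (the helper name `summarize`
            # is hypothetical, not from the original) might look like:
            def summarize( label, samples ):
                if samples:
                    print( "{0}: n={1}, mean={2:.4f}".format(
                            label, len( samples ),
                            sum( samples ) / float( len( samples ) ) ) )

            summarize( "get_avg_sec", user_gets_avg )
            summarize( "put_99th_sec", user_puts_99th )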
    def run( self, hostlist_fname, popular_host_fraction,\
             mean_think_time, users_per_popular_host,\
             users_per_less_popular_host,\
             connection_timeout_msecs, socket_timeout_msecs,\
             results_dir="./results", run_duration_secs=60, \
             config_dir="./config", pipe_port=7851 ):

        # Some pre-reqs:
        # 1) create the config_dir if it doesn't exist
        # 2) create the results_dir if it doesn't exist
        self.create_dir(config_dir)
        self.create_dir(results_dir)
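        # create_dir is a helper on this runner whose definition is not shown
        # in this excerpt; a minimal sketch of the assumed behaviour:
        #
        #     import os
        #     def create_dir(self, path):
        #         if not os.path.exists(path):
        #             os.makedirs(path)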

        num_tests = 1
        for i in range(num_tests):
            # With a single Rain launch, load an entire block of IPs
            config = FixedUrlTestConfig()
            config.hostListFile = hostlist_fname
            config.duration = run_duration_secs
            config.popularHostFraction = popular_host_fraction
            config.usersPerPopularHost = users_per_popular_host
            config.usersPerLessPopularHost = users_per_less_popular_host
            config.meanThinkTime = mean_think_time
            config.pipePort = pipe_port
            # Add in the parameters for the workload generator
            # the operation mixes etc.
            generatorParams = FixedUrlGeneratorParameters()
            generatorParams.connectionTimeoutMsecs = connection_timeout_msecs
            generatorParams.socketTimeoutMsecs = socket_timeout_msecs
            config.generatorParameters = generatorParams

            json_data = \
                json.dumps(config, sort_keys=True,\
                               default=FixedUrlTestConfig.to_json)
            # Write this data out to a file, then invoke the run manager,
            # passing in the path to this file
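            # json.dumps calls the `default` hook for objects it cannot
            # serialize natively; the real to_json is not shown in this
            # excerpt, but a plausible minimal form (hypothetical) is:
            #
            #     @staticmethod
            #     def to_json(obj):
            #         return obj.__dict__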

            print( "[FixedUrlTestRunner] json config: {0}"\
                       .format(json_data) )

            run_classpath = ".:rain.jar:workloads/httptest.jar"
            run_config_filename = config_dir + "/" + \
                "run_fixed_url_config" + "_nodes.json"
            run_output_filename = results_dir + "/" + \
                "run_fixed_url_log" + "_nodes.txt"
            run_results_filename = results_dir + "/" + \
                "run_fixed_url_result" + "_nodes.txt"

            # write the json data out to the config file
            # invoke the run manager passing the location of the config file
            # collect the results and write them out to the results_dir

            print( "[FixedUrlTestRunner] Writing config file: {0}"
                   .format( run_config_filename ) )
            config_file = open(run_config_filename, 'w')
            config_file.write(json_data)
            config_file.flush()
            config_file.close()
            run_output = RunManager.run_rain( run_config_filename,\
                                               run_classpath )
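            # RunManager.run_rain presumably launches the Rain harness
            # (rain.jar on the classpath above) with this config file and
            # returns its console output for parsing below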
            #print run_output
            track_results = RainOutputParser.parse_output(run_output)
            # Validate each of the track_results instances

            for result in track_results:
                # Set overhead/failure-rate thresholds
                result.pct_overhead_ops_threshold = 10.0
                result.pct_failed_ops_threshold = 5.0
                # Set the desired 90th and 99th percentile response-time
                # thresholds for the FixedUrl operation to 500 ms = 0.5 s.
                # Threshold units = seconds
                result.op_response_time_thresholds['FixedUrl'] = (0.5, 0.5)

            # Write out the run output
            print( "[FixedUrlTestRunner] Writing output: {0}"
                   .format( run_output_filename ) )
            run_output_file = open(run_output_filename, 'w')
            run_output_file.write(run_output)
            run_output_file.flush()
            run_output_file.close()

            # Write out the run results
            print( "[FixedUrlTestRunner] Writing results: {0}"
                   .format( run_results_filename ) )
            run_results_file = open(run_results_filename, 'w')
            RainOutputParser.print_results(track_results, run_results_file)

            run_results_file.write("\n")
            # After writing out the table for all the tracks
            # Spit out the 90th and 99th percentiles
            for result in track_results:
                for k, v in result.op_response_times.items():
                    run_results_file.write( "{0},{1},{2},{3}\n"\
                               .format(result.name, k, v[0], v[1]) )

            run_results_file.flush()
            run_results_file.close()
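    # Hypothetical usage sketch for the run method above (the runner class
    # name is taken from its log tag; all parameter values are illustrative
    # assumptions, not from the excerpt):
    #
    #     runner = FixedUrlTestRunner()
    #     runner.run( "hosts.txt", popular_host_fraction=0.1,
    #                 mean_think_time=5.0, users_per_popular_host=20,
    #                 users_per_less_popular_host=5,
    #                 connection_timeout_msecs=1000, socket_timeout_msecs=1000 )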
    def run(
        self,
        start_ip,
        num_apps_to_load,
        apps_powered_on,
        host_port,
        popular_host_fraction,
        operation_work_done,
        operation_mix,
        operation_busy_pct,
        memory_sizes,
        memory_mix,
        mean_think_time,
        users_per_popular_host,
        users_per_less_popular_host,
        connection_timeout_msecs,
        socket_timeout_msecs,
        results_dir="./results",
        run_duration_secs=60,
        config_dir="./config",
    ):
        """
        Given a starting IP and a step size, e.g.:
        1) run servers on IP addresses 11.0.0.1 - 11.0.0.200
        2) with a step size of 10, run experiments on 11.0.0.1 - 10,
           11.0.0.1 - 20, ... 11.0.0.1 - 200
        """
        # Some pre-reqs:
        # 1) create the config_dir if it doesn't exist
        # 2) create the results_dir if it doesn't exist
        self.create_dir(config_dir)
        self.create_dir(results_dir)

        num_tests = apps_powered_on // num_apps_to_load  # integer number of steps
        for i in range(num_tests):
            # With a single Rain launch, load an entire block of ip's
            config = PreditableAppTestConfig()
            config.baseHostIp = start_ip
            config.numHostTargets = (i + 1) * num_apps_to_load
            config.duration = run_duration_secs
            config.hostPort = host_port
            config.popularHostFraction = popular_host_fraction
            config.usersPerPopularHost = users_per_popular_host
            config.usersPerLessPopularHost = users_per_less_popular_host
            # config.popularHostLoadFraction = popular_host_load_fraction
            # config.userPopulation = user_population
            config.meanThinkTime = mean_think_time
            # Add in the parameters for the workload generator
            # the operation mixes etc.
            generatorParams = PredictableAppGeneratorParameters()
            generatorParams.operationWorkDone = operation_work_done
            generatorParams.operationMix = operation_mix
            generatorParams.operationBusyPct = operation_busy_pct
            generatorParams.memorySizes = memory_sizes
            generatorParams.memoryMix = memory_mix
            generatorParams.connectionTimeoutMsecs = connection_timeout_msecs
            generatorParams.socketTimeoutMsecs = socket_timeout_msecs
            config.generatorParameters = generatorParams

            json_data = json.dumps(config, sort_keys=True, default=PreditableAppTestConfig.to_json)
            # Write this data out to a file, then invoke the run manager,
            # passing in the path to this file

            print("[PredictableAppTestRunner] json config: {0}".format(json_data))

            run_classpath = ".:rain.jar:workloads/httptest.jar"
            run_config_filename = (
                config_dir
                + "/"
                + "run_predictable_config_"
                + start_ip
                + "_"
                + str(config.numHostTargets)
                + "_nodes.json"
            )
            run_output_filename = (
                results_dir + "/" + "run_predictable_log_" + start_ip + "_" + str(config.numHostTargets) + "_nodes.txt"
            )
            run_results_filename = (
                results_dir
                + "/"
                + "run_predictable_result_"
                + start_ip
                + "_"
                + str(config.numHostTargets)
                + "_nodes.txt"
            )

            # write the json data out to the config file
            # invoke the run manager passing the location of the config file
            # collect the results and write them out to the results_dir

            print("[PredictableAppTestRunner] Writing config file: {0}".format(run_config_filename))
            config_file = open(run_config_filename, "w")
            config_file.write(json_data)
            config_file.flush()
            config_file.close()
            run_output = RunManager.run_rain(run_config_filename, run_classpath)
            # print run_output
            track_results = RainOutputParser.parse_output(run_output)
            # Validate each of the track_results instances

            for result in track_results:
                # Set some 90th and 99th pctile thresholds
                result.pct_overhead_ops_threshold = 10.0
                result.pct_failed_ops_threshold = 5.0
                # Set the desired 90th and 99th percentile thresholds for
                # the 50ms, 100ms, 200ms operations - set everything to
                # 500 ms = 0.5s. Threshold units = seconds
                result.op_response_time_thresholds["PredicatableOp_50"] = (0.5, 0.5)
                result.op_response_time_thresholds["PredicatableOp_100"] = (0.5, 0.5)
                result.op_response_time_thresholds["PredicatableOp_200"] = (0.5, 0.5)

            # Write out the run output
            print("[PredictableAppTestRunner] Writing output: {0}".format(run_output_filename))
            run_output_file = open(run_output_filename, "w")
            run_output_file.write(run_output)
            run_output_file.flush()
            run_output_file.close()

            # Write out the run results
            print("[PredictableAppTestRunner] Writing results: {0}".format(run_results_filename))
            run_results_file = open(run_results_filename, "w")
            RainOutputParser.print_results(track_results, run_results_file)

            run_results_file.write("\n")
            # After writing out the table for all the tracks
            # Spit out the 90th and 99th percentiles
            for result in track_results:
                for k, v in result.op_response_times.items():
                    run_results_file.write("{0},{1},{2},{3}\n".format(result.name, k, v[0], v[1]))

            run_results_file.flush()
            run_results_file.close()
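    # For the docstring example (num_apps_to_load=10, apps_powered_on=200),
    # the loop above issues num_tests = 20 runs whose numHostTargets value,
    # (i + 1) * num_apps_to_load, steps through 10, 20, ..., 200.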
    def step_run( self, start_ip, num_apps_to_load, apps_powered_on, \
                      results_dir="./results", run_duration_secs=60, \
                      config_dir="./config" ):
        '''
        Given a starting IP and a step size, e.g.:
        1) run servers on IP addresses 11.0.0.1 - 11.0.0.200
        2) with a step size of 10, run experiments on 11.0.0.1 - 10,
           11.0.0.1 - 20, ... 11.0.0.1 - 200
        '''

        # Some pre-reqs:
        # 1) create the config_dir if it does not exist
        # 2) create the results_dir if it does not exist
        self.create_dir(config_dir)
        self.create_dir(results_dir)

        num_tests = apps_powered_on // num_apps_to_load  # integer number of steps
        for i in range(num_tests):
            # with one Rain launch we can load an entire block of IPs
            # using the track feature
            #ip_address_parts = start_ip.split( "." )
            #print len(ip_address_parts)
            # throw exception if we don't find a numeric ip v4 address
            #if len(ip_address_parts) != 4:
            #    raise Exception( "Expected a numeric IPv4 address"\
            #                         + " (format N.N.N.N)" )
            #lastOctet = int( ip_address_parts[3] )
            #base_ip = "{0}.{1}.{2}.{3}".format( ip_address_parts[0],\
            #                        ip_address_parts[1],\
            #                        ip_address_parts[2],\
            #                        str(lastOctet+(num_apps_to_load*i)))

            # Create config objects to write out as files
            base_ip = start_ip
            config = HttpTestConfig()
            config.baseHostIp = base_ip
            config.numHostTargets = (i + 1) * num_apps_to_load
            config.duration = run_duration_secs

            json_data = \
                json.dumps(config, sort_keys=True,\
                               default=HttpTestConfig.to_json)
            # Write this data out to a file, then invoke the run manager,
            # passing in the path to this file
            print("[HttpTestStepRunner] json config: {0}".format(json_data))

            run_classpath = ".:rain.jar:workloads/httptest.jar"
            run_config_filename = config_dir + "/" + \
                "run_config_" + base_ip + "_" + \
                str(config.numHostTargets) + "_nodes.json"
            run_output_filename = results_dir + "/" + \
                "run_log_" + base_ip + "_" + \
                str(config.numHostTargets) + "_nodes.txt"
            run_results_filename = results_dir + "/" + \
                "run_result_" + base_ip + "_" + \
                str(config.numHostTargets) + "_nodes.txt"
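            # e.g. for the docstring example, iteration 0 writes
            # ./config/run_config_11.0.0.1_10_nodes.json plus matching
            # run_log_/run_result_ files under ./results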

            # write the json data out to the config file
            # invoke the run manager passing the location of the config file
            # collect the results and write them out to the results_dir
            print( "[HttpTestStepRunner] Writing config file: {0}"
                   .format( run_config_filename ) )
            config_file = open(run_config_filename, 'w')
            config_file.write(json_data)
            config_file.flush()
            config_file.close()
            run_output = RunManager.run_rain( run_config_filename,\
                                               run_classpath )
            #print run_output
            track_results = RainOutputParser.parse_output(run_output)

            # Write out the run output
            print( "[HttpTestStepRunner] Writing output: {0}"
                   .format( run_output_filename ) )
            run_output_file = open(run_output_filename, 'w')
            run_output_file.write(run_output)
            run_output_file.flush()
            run_output_file.close()

            # Write out the run results
            print( "[HttpTestStepRunner] Writing results: {0}"
                   .format( run_results_filename ) )
            run_results_file = open(run_results_filename, 'w')
            RainOutputParser.print_results(track_results, run_results_file)
            run_results_file.flush()
            run_results_file.close()
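    # Hypothetical usage sketch for step_run above (the runner class name is
    # taken from its log tag; values mirror the docstring example):
    #
    #     runner = HttpTestStepRunner()
    #     runner.step_run( "11.0.0.1", num_apps_to_load=10, apps_powered_on=200 )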