Example #1
    def process_output(cls, config, short_name, opt_list, stdout_filename, stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        result = ""
        if results_token not in output:
            result = "FAILED"
        else:
            # grab the first line that carries the results payload
            result_line = [x for x in output.split("\n") if results_token in x][0]
            result_list = result_line.replace(results_token, "").split(",")
            err_msg = ("Expecting at least %s results "
                       "but only found %s" % (config.IGNORED_TRIALS + 1, len(result_list)))
            assert len(result_list) > config.IGNORED_TRIALS, err_msg
            # drop the warm-up trials that should not count toward the stats
            result_list = result_list[config.IGNORED_TRIALS:]
            result += "Runtime: %s, %.3f, %s, %s, %s\n" % \
                      stats_for_results([x.split(";")[0] for x in result_list])
            result += "Train Set Metric: %s, %.3f, %s, %s, %s\n" %\
                      stats_for_results([x.split(";")[1] for x in result_list])
            result += "Test Set Metric: %s, %.3f, %s, %s, %s" %\
                      stats_for_results([x.split(";")[2] for x in result_list])

        result_string = "%s, %s\n%s" % (short_name, " ".join(opt_list), result)

        sys.stdout.flush()
        return result_string
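
Example #1 above (like the variants that follow) relies on a few pieces that are not shown on this page: a module-level import sys (plus import json for the JSON-parsing variants), a config object exposing IGNORED_TRIALS, and a stats_for_results helper that reduces a list of per-trial values to the five fields consumed by the "%s, %.3f, %s, %s, %s" format strings. A minimal sketch of that helper, with the exact statistics (min, mean, median, max, standard deviation) assumed rather than taken from the source:

# Hypothetical helper -- the real stats_for_results is not shown in these
# examples; the field order below is a guess based on the callers'
# "%s, %.3f, %s, %s, %s" format strings.
def stats_for_results(results):
    """Return a five-value summary (min, mean, median, max, stdev)."""
    values = sorted(float(r) for r in results)
    mean = sum(values) / len(values)
    variance = sum((v - mean) ** 2 for v in values) / len(values)
    return (values[0],                 # min
            mean,                      # rendered with %.3f by the callers
            values[len(values) // 2],  # median (upper middle for even counts)
            values[-1],                # max
            variance ** 0.5)           # population standard deviation

Any helper that returns a five-element tuple whose second slot is a float would satisfy the format strings above; the specific statistics here are only a plausible guess.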
Example #2
    def process_output(cls, config, short_name, opt_list, stdout_filename,
                       stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        result = ""
        if results_token not in output:
            result = "FAILED"
        else:
            # grab the first line that carries the results payload
            result_line = [x for x in output.split("\n")
                           if results_token in x][0]
            result_list = result_line.replace(results_token, "").split(",")
            err_msg = ("Expecting at least %s results "
                       "but only found %s" %
                       (config.IGNORED_TRIALS + 1, len(result_list)))
            assert len(result_list) > config.IGNORED_TRIALS, err_msg
            result_list = result_list[config.IGNORED_TRIALS:]
            result += "Runtime: %s, %.3f, %s, %s, %s\n" % \
                      stats_for_results([x.split(";")[0] for x in result_list])
            result += "Train Set Metric: %s, %.3f, %s, %s, %s\n" %\
                      stats_for_results([x.split(";")[1] for x in result_list])
            result += "Test Set Metric: %s, %.3f, %s, %s, %s" %\
                      stats_for_results([x.split(";")[2] for x in result_list])

        result_string = "%s, %s\n%s" % (short_name, " ".join(opt_list), result)

        sys.stdout.flush()
        return result_string
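
For reference, Examples #1 and #2 expect the benchmark's stdout to carry a line of the form "results: runtime;trainMetric;testMetric,runtime;trainMetric;testMetric,..." with one semicolon-separated triple per trial. A minimal emitter sketch; the numbers below are made up purely for illustration:

# Hypothetical emitter for the line Examples #1 and #2 parse: one
# "runtime;trainMetric;testMetric" triple per trial, joined by commas.
# The values are made up for illustration.
trials = [(12.4, 0.91, 0.88), (11.9, 0.92, 0.89), (12.1, 0.90, 0.87)]
print("results: " + ",".join("%s;%s;%s" % t for t in trials))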
Example #3
    def process_output(cls, config, short_name, opt_list, stdout_filename,
                       stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        result_string = ""
        if results_token not in output:
            result_string = "FAILED"
        else:
            # grab the last line that carries the results payload
            result_line = [x for x in output.split("\n")
                           if results_token in x][-1]
            result_json = result_line.replace(results_token, "")
            try:
                result_dict = json.loads(result_json)
            except ValueError:
                print("Failed to parse JSON:\n" + result_json)
                raise

            num_results = len(result_dict['results'])
            err_msg = ("Expecting at least %s results "
                       "but only found %s" %
                       (config.IGNORED_TRIALS + 1, num_results))
            assert num_results > config.IGNORED_TRIALS, err_msg

            # 2 modes: prediction problems (4 metrics) and others (time only)
            if 'trainingTime' in result_dict['results'][0]:
                # prediction problem
                trainingTimes = [
                    r['trainingTime'] for r in result_dict['results']
                ]
                testTimes = [r['testTime'] for r in result_dict['results']]
                trainingMetrics = [
                    r['trainingMetric'] for r in result_dict['results']
                ]
                testMetrics = [r['testMetric'] for r in result_dict['results']]
                trainingTimes = trainingTimes[config.IGNORED_TRIALS:]
                testTimes = testTimes[config.IGNORED_TRIALS:]
                trainingMetrics = trainingMetrics[config.IGNORED_TRIALS:]
                testMetrics = testMetrics[config.IGNORED_TRIALS:]
                result_string += "Training time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(trainingTimes)
                result_string += "Test time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(testTimes)
                result_string += "Training Set Metric: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(trainingMetrics)
                result_string += "Test Set Metric: %s, %.3f, %s, %s, %s" % \
                                 stats_for_results(testMetrics)
            else:
                # non-prediction problem
                times = [r['time'] for r in result_dict['results']]
                times = times[config.IGNORED_TRIALS:]
                result_string += "Time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(times)

        result_string = "%s, %s\n%s" % (short_name, " ".join(opt_list),
                                        result_string)

        sys.stdout.flush()
        return result_string
Example #4
    def process_output(cls, config, short_name, opt_list, stdout_filename, stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        result_string = ""
        if results_token not in output:
            result_string = "FAILED"
        else:
            # grab the last line that carries the results payload
            result_line = [x for x in output.split("\n") if results_token in x][-1]
            result_json = result_line.replace(results_token, "")
            try:
                result_dict = json.loads(result_json)
            except ValueError:
                print("Failed to parse JSON:\n" + result_json)
                raise

            num_results = len(result_dict['results'])
            err_msg = ("Expecting at least %s results "
                       "but only found %s" % (config.IGNORED_TRIALS + 1, num_results))
            assert num_results > config.IGNORED_TRIALS, err_msg

            # 2 modes: prediction problems (4 metrics) and others (time only)
            if 'trainingTime' in result_dict['results'][0]:
                # prediction problem
                trainingTimes = [r['trainingTime'] for r in result_dict['results']]
                testTimes = [r['testTime'] for r in result_dict['results']]
                trainingMetrics = [r['trainingMetric'] for r in result_dict['results']]
                testMetrics = [r['testMetric'] for r in result_dict['results']]
                trainingTimes = trainingTimes[config.IGNORED_TRIALS:]
                testTimes = testTimes[config.IGNORED_TRIALS:]
                trainingMetrics = trainingMetrics[config.IGNORED_TRIALS:]
                testMetrics = testMetrics[config.IGNORED_TRIALS:]
                result_string += "Training time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(trainingTimes)
                result_string += "Test time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(testTimes)
                result_string += "Training Set Metric: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(trainingMetrics)
                result_string += "Test Set Metric: %s, %.3f, %s, %s, %s" % \
                                 stats_for_results(testMetrics)
            else:
                # non-prediction problem
                times = [r['time'] for r in result_dict['results']]
                times = times[config.IGNORED_TRIALS:]
                result_string += "Time: %s, %.3f, %s, %s, %s\n" % \
                                 stats_for_results(times)

        result_string = "%s, %s\n%s" % (short_name, " ".join(opt_list), result_string)

        sys.stdout.flush()
        return result_string
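
Examples #3 and #4 instead expect the last "results: " line to carry a JSON object with a results list, where each entry describes one trial using either trainingTime/testTime/trainingMetric/testMetric (prediction problems) or a single time field. A minimal emitter sketch with made-up values:

# Hypothetical emitter for the JSON line Examples #3 and #4 parse.  Each
# entry in "results" describes one trial; all numeric values are made up.
import json

payload = {
    "results": [
        {"trainingTime": 12.4, "testTime": 1.8,
         "trainingMetric": 0.91, "testMetric": 0.88},
        {"trainingTime": 11.9, "testTime": 1.7,
         "trainingMetric": 0.92, "testMetric": 0.89},
    ]
}
print("results: " + json.dumps(payload))

A non-prediction problem would emit entries like {"time": 12.4} instead, which routes the parser into the time-only branch.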
Example #5
    def process_output(cls, config, short_name, opt_list, stdout_filename,
                       stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        if results_token not in output:
            print("Test did not produce expected results. Output was:")
            print(output)
            sys.exit(1)
        # grab the last line that carries the results payload
        result_line = [x for x in output.split("\n")
                       if results_token in x][-1]
        result_list = result_line.replace(results_token, "").split(",")
        err_msg = ("Expecting at least %s results "
                   "but only found %s" %
                   (config.IGNORED_TRIALS + 1, len(result_list)))
        assert len(result_list) > config.IGNORED_TRIALS, err_msg
        result_list = result_list[config.IGNORED_TRIALS:]

        result_string = "%s, %s, " % (short_name, " ".join(opt_list))

        result_string += "%s, %.3f, %s, %s, %s\n" % stats_for_results(
            result_list)

        sys.stdout.flush()
        return result_string
Example #6
    def process_output(cls, config, short_name, opt_list, stdout_filename, stderr_filename):
        with open(stdout_filename, "r") as stdout_file:
            output = stdout_file.read()
        results_token = "results: "
        if results_token not in output:
            print("Test did not produce expected results. Output was:")
            print(output)
            sys.exit(1)
        # grab the first line that carries the results payload
        result_line = [x for x in output.split("\n") if results_token in x][0]
        result_list = result_line.replace(results_token, "").split(",")
        err_msg = ("Expecting at least %s results "
                   "but only found %s" % (config.IGNORED_TRIALS + 1, len(result_list)))
        assert len(result_list) > config.IGNORED_TRIALS, err_msg
        result_list = result_list[config.IGNORED_TRIALS:]

        result_string = "%s, %s, " % (short_name, " ".join(opt_list))

        result_string += "%s, %.3f, %s, %s, %s\n" % stats_for_results(result_list)

        sys.stdout.flush()
        return result_string
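
Examples #5 and #6 consume the simplest format: a single "results: " line holding a plain comma-separated list with one value per trial. A minimal emitter sketch, again with made-up values:

# Hypothetical emitter for the line Examples #5 and #6 parse: one value per
# trial, comma-separated.  The values are made up for illustration.
times = [12.4, 11.9, 12.1, 11.8]
print("results: " + ",".join(str(t) for t in times))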