Example #1
    def test_conservative_predict_cancel(self):
        """
            The termination criterion expects the learning_curve in a file
            called learning_curve.txt as well as the current best value in 
            ybest.txt. We create both files and see if the termination criterion
            correctly predicts to cancel or continue running under various artificial
            ybest.
        """
        for prob_x_greater_type in [
                "posterior_mean_prob_x_greater_than",
                "posterior_prob_x_greater_than"
        ]:
            np.random.seed(13)
            #generate some data:
            for model_name in ["pow3", "log_power"]:
                function = all_models[model_name]
                params = model_defaults[model_name]
                xlim = 500
                x = np.arange(1, xlim, 1)
                y = function(x, **params)
                noise = 0.0005 * np.random.randn(len(y))
                y_noisy = y + noise
                y_final = y_noisy[-1]
                num_train = 200
                np.savetxt("learning_curve.txt", y_noisy[:num_train])
                write_xlim(xlim)

                print(("Actual ybest: %f" % y_noisy[-1]))

                #we set ybest to be higher than the final value of this curve
                #hence we DO want the evaluation to stop!
                open("ybest.txt", "w").write(str(y_final + 0.05))
                open("termination_criterion_running", "w").write("running")

                ret = main(mode="conservative",
                           prob_x_greater_type=prob_x_greater_type,
                           nthreads=4)
                self.assertEqual(ret, 1)

                self.assertTrue(os.path.exists("y_predict.txt"))
                y_predict = float(open("y_predict.txt").read())
                abserr = np.abs(y_predict - y_noisy[-1])
                print(("abs error %f" % abserr))
                self.assertTrue(abserr < 0.03)

                #we set ybest to be lower than the final value of this curve
                #hence we DON'T want the evaluation to stop!
                open("ybest.txt", "w").write(str(y_final - 0.05))
                open("termination_criterion_running", "w").write("running")

                ret = main(mode="conservative",
                           prob_x_greater_type=prob_x_greater_type,
                           nthreads=4)
                self.assertEqual(ret, 0)
                self.assertFalse(os.path.exists("y_predict.txt"))
                self.assertFalse(
                    os.path.exists("termination_criterion_running"))
                self.assertFalse(
                    os.path.exists("termination_criterion_running_pid"))
        self.cleanup()
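Each of these tests drives the criterion purely through files in the working directory, as the docstring describes, and relies on a write_xlim helper defined elsewhere in the test module. A minimal sketch of that setup, assuming write_xlim simply persists the horizon to a file named xlim.txt (the helper body and that file name are assumptions, not shown in this listing):

import numpy as np

def write_xlim(xlim, path="xlim.txt"):
    # Assumed helper: store the extrapolation horizon for the criterion to read.
    with open(path, "w") as f:
        f.write(str(xlim))

def prepare_inputs(y_partial, xlim, ybest):
    # The three inputs the tests above create by hand.
    np.savetxt("learning_curve.txt", y_partial)   # observed prefix of the learning curve
    write_xlim(xlim)                              # total number of steps the run would take
    with open("ybest.txt", "w") as f:             # best value seen so far across runs
        f.write(str(ybest))
    with open("termination_criterion_running", "w") as f:
        f.write("running")                        # marker the tests expect to be gone afterwards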
Example #2
    def test_conservative_predict_cancel(self):
        """
            The termination criterion expects the learning curve in a file
            called learning_curve.txt as well as the current best value in
            ybest.txt. We create both files and check whether the termination
            criterion correctly predicts to cancel or continue the run under
            various artificial values of ybest.
        """
        for prob_x_greater_type in ["posterior_mean_prob_x_greater_than", "posterior_prob_x_greater_than"]:
            np.random.seed(13)
            #generate some data:
            for model_name in ["pow3", "log_power"]:
                function = all_models[model_name]
                params = model_defaults[model_name]
                xlim = 500
                x = np.arange(1, xlim, 1)
                y = function(x, **params)
                noise = 0.0005 * np.random.randn(len(y))
                y_noisy = y + noise
                y_final = y_noisy[-1]
                num_train = 200
                np.savetxt("learning_curve.txt", y_noisy[:num_train])
                write_xlim(xlim)

                print "Actual ybest: %f" % y_noisy[-1]

                #we set ybest to be higher than the final value of this curve
                #hence we DO want the evaluation to stop!
                open("ybest.txt", "w").write(str(y_final + 0.05))
                open("termination_criterion_running", "w").write("running")

                ret = main(mode="conservative",
                    prob_x_greater_type=prob_x_greater_type,
                    nthreads=4)
                self.assertEqual(ret, 1)

                self.assertTrue(os.path.exists("y_predict.txt"))
                y_predict = float(open("y_predict.txt").read())
                abserr = np.abs(y_predict - y_noisy[-1])
                print "abs error %f" % abserr
                self.assertTrue(abserr < 0.03)

                #we set ybest to be lower than the final value of this curve
                #hence we DON'T want the evaluation to stop!
                open("ybest.txt", "w").write(str(y_final - 0.05))
                open("termination_criterion_running", "w").write("running")

                ret = main(mode="conservative",
                    prob_x_greater_type=prob_x_greater_type,
                    nthreads=4)
                self.assertEqual(ret, 0)
                self.assertFalse(os.path.exists("y_predict.txt"))
                self.assertFalse(os.path.exists("termination_criterion_running"))
                self.assertFalse(os.path.exists("termination_criterion_running_pid"))
        self.cleanup()
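The tests above fix the return-code contract: 1 means the run should be cancelled (and y_predict.txt then holds the predicted final value), 0 means training should continue. A sketch of the caller side under those assumptions, importing main from pylrpredictor.terminationcriterion as the subprocess invocation further down suggests:

from pylrpredictor.terminationcriterion import main  # module path as invoked via -m below

def check_termination():
    # Returns (should_stop, predicted_final_value_or_None).
    ret = main(mode="conservative",
               prob_x_greater_type="posterior_prob_x_greater_than",
               nthreads=4)
    if ret == 1:
        # On cancellation the criterion writes its prediction of the final value.
        with open("y_predict.txt") as f:
            return True, float(f.read())
    return False, None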
Example #3
    def test_conservative_predictive_std_predict_cancel(self):
        for prob_x_greater_type in [
                "posterior_mean_prob_x_greater_than",
                "posterior_prob_x_greater_than"
        ]:
            np.random.seed(13)
            #generate some data:

            model_name = "pow3"
            function = all_models[model_name]

            params = {'a': 0.52, 'alpha': 0.2, 'c': 0.84}
            xlim = 500
            x = np.arange(1, xlim, 1)
            y = function(x, **params)
            noise = 0.01 * np.random.randn(len(y))
            y_noisy = y + noise
            y_final = y_noisy[-1]
            num_train = 30
            np.savetxt("learning_curve.txt", y_noisy[:num_train])
            write_xlim(xlim)

            #first check:
            #if there's no ybest and the predictive_std is high
            #then we want the evaluation to continue
            if os.path.exists("ybest.txt"):
                os.remove("ybest.txt")
            ret = main(mode="conservative",
                       prob_x_greater_type=prob_x_greater_type,
                       predictive_std_threshold=0.00001,
                       nthreads=4)
            self.assertEqual(ret, 0)

            print(("Actual ybest: %f" % y_noisy[-1]))

            #we set ybest to be higher than the final value of this curve
            #BUT because the predictive std is still high we don't want to stop
            open("ybest.txt", "w").write(str(y_final + 0.05))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="conservative",
                       prob_x_greater_type=prob_x_greater_type,
                       predictive_std_threshold=0.00001,
                       nthreads=4)
            self.assertEqual(ret, 0)

            self.assertFalse(os.path.exists("y_predict.txt"))

        self.cleanup()
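This test pins predictive_std_threshold to a tiny value so the criterion never becomes confident enough to cancel from only 30 noisy points. A conceptual illustration of such a gate, not pylrpredictor's actual code, operating on hypothetical posterior samples of the curve's value at xlim (the 0.05 probability cutoff is likewise an assumption):

import numpy as np

def would_cancel(samples_at_xlim, ybest, predictive_std_threshold):
    # samples_at_xlim: hypothetical posterior samples of y(xlim).
    if np.std(samples_at_xlim) > predictive_std_threshold:
        return False  # extrapolation still too uncertain: keep training
    # Cancel only if the run is unlikely to ever beat the current best.
    return np.mean(samples_at_xlim > ybest) < 0.05

# With a wide posterior the gate always says "continue", whatever ybest is.
samples = 0.84 + 0.02 * np.random.randn(1000)
print(would_cancel(samples, ybest=0.89, predictive_std_threshold=0.00001))  # False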
Example #4
    def test_conservative_real_example(self):
        """
            The termination criterion expects the learning_curve in a file
            called learning_curve.txt as well as the current best value in 
            ybest.txt. We create both files and see if the termination criterion
            correctly predicts to cancel or continue running under various artificial
            ybest.
        """
        for prob_x_greater_type in ["posterior_mean_prob_x_greater_than", "posterior_prob_x_greater_than"]:
            np.savetxt("learning_curve.txt", real_abort_learning_curve)
            write_xlim(real_abort_xlim)

            open("ybest.txt", "w").write(str(real_abort_ybest))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="conservative",
                prob_x_greater_type=prob_x_greater_type,
                nthreads=4)
            #ybest is higher than what the curve will ever reach
            #hence we expect to cancel the run:
            self.assertEqual(ret, 1)

            self.assertTrue(os.path.exists("y_predict.txt"))
            self.assertFalse(os.path.exists("termination_criterion_running"))
            self.assertFalse(os.path.exists("termination_criterion_running_pid"))
        self.cleanup()
Example #5
    def test_data2_optimistic_prob_example(self):
        prob_types = ["posterior_prob_x_greater_than"]
        for mode in ["optimistic"]:
            for prob_x_greater_type in prob_types:
                np.savetxt("learning_curve.txt", lr_p)
                write_xlim(xlim)

                open("ybest.txt", "w").write(str(ybest))
                open("termination_criterion_running", "w").write("running")

                ret = main(mode=mode,
                           prob_x_greater_type=prob_x_greater_type,
                           nthreads=4)

                self.assertTrue(os.path.exists("y_predict.txt"))
                y_predict = float(open("y_predict.txt").read())
                print("{} predicted accuracy: {}".format(
                    prob_x_greater_type, y_predict))
                self.update_result(mode, prob_x_greater_type, lr,
                                   num_checkpoint, y_predict)
                self.assertFalse(
                    os.path.exists("termination_criterion_running"))
                self.assertFalse(
                    os.path.exists("termination_criterion_running_pid"))
        self.cleanup()
Example #6
    def test_conservative_predictive_std_predict_cancel(self):
        for prob_x_greater_type in ["posterior_mean_prob_x_greater_than", "posterior_prob_x_greater_than"]:
            np.random.seed(13)
            #generate some data:

            model_name = "pow3"
            function = all_models[model_name]

            params = {'a': 0.52, 'alpha': 0.2, 'c': 0.84}
            xlim = 500
            x = np.arange(1, xlim, 1)
            y = function(x, **params)
            noise = 0.01 * np.random.randn(len(y))
            y_noisy = y + noise
            y_final = y_noisy[-1]
            num_train = 30
            np.savetxt("learning_curve.txt", y_noisy[:num_train])
            write_xlim(xlim)

            #first check:
            #if there's no ybest and the predictive_std is high
            #then we want the evaluation to continue
            if os.path.exists("ybest.txt"):
                os.remove("ybest.txt")
            ret = main(mode="conservative",
                prob_x_greater_type=prob_x_greater_type,
                predictive_std_threshold=0.00001,
                nthreads=4)
            self.assertEqual(ret, 0)

            print "Actual ybest: %f" % y_noisy[-1]

            #we set ybest to be higher than the final value of this curve
            #BUT because the predictive std is still high we don't want to stop
            open("ybest.txt", "w").write(str(y_final + 0.05))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="conservative",
                prob_x_greater_type=prob_x_greater_type,
                predictive_std_threshold=0.00001,
                nthreads=4)
            self.assertEqual(ret, 0)

            self.assertFalse(os.path.exists("y_predict.txt"))

        self.cleanup()
Example #7
    def test_error_logging(self):
        """
            Test that, in case of an error, the error is logged.
        """
        open("ybest.txt", "w").write(str(0.5))
        #Let's e.g. run main without creating any files
        if os.path.exists("learning_curve.txt"):
            os.remove("learning_curve.txt")
        ret = main()
        self.assertTrue(os.path.exists("term_crit_error.txt"))

        os.remove("ybest.txt")
Example #8
    def test_optimistic_predict_cancel(self):
        """
            Optimisitic mode

            The termination criterion expects the learning_curve in a file
            called learning_curve.txt as well as the current best value in 
            ybest.txt. We create both files and see if the termination criterion
            correctly predicts to cancel or continue running under various artificial
            ybest.
        """
        for prob_x_greater_type in ["posterior_mean_prob_x_greater_than", "posterior_prob_x_greater_than"]:
            np.random.seed(13)
            #generate some data:

            model_name = "pow3"
            function = all_models[model_name]

            params = {'a': 0.52, 'alpha': 0.2, 'c': 0.84}
            xlim = 500
            x = np.arange(1, xlim, 1)
            y = function(x, **params)
            noise = 0.01 * np.random.randn(len(y))
            y_noisy = y + noise
            y_final = y_noisy[-1]
            num_train = 30
            np.savetxt("learning_curve.txt", y_noisy[:num_train])
            write_xlim(xlim)

            #first check:
            #if there's no ybest and the predictive_std is high
            #then we want the evaluation to continue
            if os.path.exists("ybest.txt"):
                os.remove("ybest.txt")
            ret = main(mode="optimistic",
                prob_x_greater_type=prob_x_greater_type,
                nthreads=4)
            self.assertEqual(ret, 0)

            print "Actual ybest: %f" % y_noisy[-1]

            #we set ybest to be higher than the final value of this curve
            #hence we DO want the evaluation to stop!
            open("ybest.txt", "w").write(str(y_final + 0.05))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="optimistic",
                prob_x_greater_type=prob_x_greater_type,
                nthreads=4)
            self.assertEqual(ret, 1)

            self.assertTrue(os.path.exists("y_predict.txt"))
            y_predict = float(open("y_predict.txt").read())
            abserr = np.abs(y_predict - y_noisy[-1])
            self.assertTrue(abserr < 0.05)
            print("abs error %f" % abserr)

            #we set ybest to be lower than the final value of this curve
            #hence we DON'T want the evaluation to stop!
            #we assume here that, because the model was set up like this,
            #the predictive_std is above the threshold (it should actually be around 0.019)
            open("ybest.txt", "w").write(str(y_final - 0.05))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="optimistic", nthreads=4)
            self.assertEqual(ret, 0)
            self.assertFalse(os.path.exists("y_predict.txt"))
            self.assertFalse(os.path.exists("termination_criterion_running"))
            self.assertFalse(os.path.exists("termination_criterion_running_pid"))

            num_train = 300
            np.savetxt("learning_curve.txt", y_noisy[:num_train])
            #we set ybest to be lower than the final value of this curve
            #HOWEVER we expect the predictive std to be around .0027
            #so the run should be cancelled nevertheless
            open("ybest.txt", "w").write(str(y_final - 0.05))
            open("termination_criterion_running", "w").write("running")

            ret = main(mode="optimistic",
                prob_x_greater_type=prob_x_greater_type,
                nthreads=4)
            self.assertEqual(ret, 1)
            self.assertTrue(os.path.exists("y_predict.txt"))
            y_predict = float(open("y_predict.txt").read())
            abserr = np.abs(y_predict - y_noisy[-1])
            self.assertTrue(abserr < 0.05)
            print("abs error %f" % abserr)

            self.assertFalse(os.path.exists("termination_criterion_running"))
            self.assertFalse(os.path.exists("termination_criterion_running_pid"))

        self.cleanup()
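The two prob_x_greater_type values looped over in these tests name two ways of estimating the probability that the curve will end above ybest; read literally, one averages that probability over posterior samples while the other evaluates it at the posterior mean. The sketch below is only a conceptual reading of those names, not the library's implementation, and the Gaussian noise_std input is an assumption:

import numpy as np
from scipy.stats import norm

def posterior_prob_greater(samples_at_xlim, ybest):
    # Fraction of posterior samples of y(xlim) that beat the current best.
    return float(np.mean(samples_at_xlim > ybest))

def posterior_mean_prob_greater(samples_at_xlim, ybest, noise_std):
    # Exceedance probability evaluated at the posterior mean prediction,
    # with Gaussian observation noise of (assumed) scale noise_std.
    mu = float(np.mean(samples_at_xlim))
    return 1.0 - norm.cdf(ybest, loc=mu, scale=noise_std)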
Example #9
    def run(self,
            index,
            modes=None,
            prob_types=None,
            checkpoints=None,
            as_process=True,
            restore=True):
        if modes is None:
            modes = self.modes
        if prob_types is None:
            prob_types = self.prob_types

        lr = self.lcr.get_lr(index)
        if checkpoints is None:
            checkpoints = [self.num_checkpoint]
        r = {"max_acc": max(lr)}
        if str(index) in self.results:
            r = self.results[str(index)]

        start_time = None
        eval_time = None

        for num_checkpoint in checkpoints:
            for mode in modes:
                for prob_type in prob_types:
                    key = "{}-{}-{}".format(num_checkpoint, mode, prob_type)
                    ybest = max(lr) + 0.5  # Unrealistically high fantasy value, used to obtain a stopping prediction.
                    ret = 0
                    y_predict = None
                    if key in r and restore:
                        y_predict = r[key]['y_predict']
                        if 'y_best' in r[key]:
                            ybest = r[key]['y_best']
                        if y_predict is not None:
                            print("Restore [{}] {}: {}".format(
                                index, key, y_predict))

                    if y_predict is None:
                        print("Run [{}] {}".format(index, key))
                        while ybest <= max(lr) + 0.5:
                            start_time = time.time()
                            self.prepare(lr, num_checkpoint, ybest)
                            if not as_process:
                                ret = main(mode=mode,
                                           prob_x_greater_type=prob_type,
                                           nthreads=4)
                            else:
                                ret = run_program([
                                    "python", "-m",
                                    "pylrpredictor.terminationcriterion",
                                    "--nthreads", "5", "--mode", mode,
                                    "--prob-x-greater-type", prob_type
                                ])

                            print("{}:{}-{}-{}:{} returns {}".format(
                                self.lcr.name, index, num_checkpoint, mode,
                                prob_type, ret))
                            if ret == 1:
                                break
                            else:
                                #ybest += 0.5
                                # reaching here means no termination was predicted
                                break

                        if os.path.exists("y_predict.txt"):
                            y_predict = float(open("y_predict.txt").read())
                        else:
                            y_predict = 0.0
                    if start_time is not None:
                        eval_time = time.time() - start_time
                    r[key] = {
                        "num_checkpoint": num_checkpoint,
                        "y_predict": y_predict,
                        "y_best": ybest,
                        "est_time": eval_time
                    }

                    self.cleanup()

        self.add_result(index, r)
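run_program, used above to exercise the criterion as a separate process, is not shown in this listing. A minimal sketch of what such a helper could look like, assuming it just forwards the argument list to a subprocess and hands back the exit code (which then plays the same role as main()'s return value in the in-process branch, as_process=False):

import subprocess

def run_program(args):
    # Assumed helper: run the termination criterion in its own process and
    # return its exit code (1 = cancel the run, 0 = keep training).
    return subprocess.call(args)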