示例#1
0
 def test_prediction_long_term_hard_brake(self):
     """Smoke-train an AsyncTD predictor on the heuristic hard-brake risk
     env and print the value estimate for one fixed observation.

     NOTE(review): nothing is asserted here — the test only verifies that
     training runs to ``n_global_steps`` without raising.
     """
     # Training / environment hyperparameters.
     config = TestConfig()
     config.n_global_steps = 20000
     config.env_id = 'HeuristicRiskEnv-v0'
     config.discount = 0.  # 599. / 600
     config.max_timesteps = 10000
     config.prime_timesteps = 50
     config.learning_rate = 1e-3
     config.adam_beta1 = .995
     config.adam_beta2 = .999
     config.dropout_keep_prob = 1.
     config.l2_reg = 0.
     config.local_steps_per_update = 20
     config.hidden_layer_sizes = [32, 16]
     config.hard_brake_threshold = -3.
     config.hard_brake_n_past_frames = 1
     config.target_loss_index = 3
     config.loss_type = 'mse'
     env = build_envs.create_env(config)
     # Fixed observation used to watch the value estimate evolve.
     test_state = env.reset()
     summary_writer = tf.summary.FileWriter('/tmp/test')
     with tf.Session() as sess:
         trainer = async_td.AsyncTD(env, 0, config)
         sess.run(tf.global_variables_initializer())
         # presumably copies the global network weights into the local
         # worker network before training — confirm in async_td
         sess.run(trainer.sync)
         trainer.start(sess, summary_writer)
         global_step = sess.run(trainer.global_step)
         # Initial recurrent (cell, hidden) state for value queries.
         c, h = trainer.network.get_initial_features()
         while global_step < config.n_global_steps:
             trainer.process(sess)
             global_step = sess.run(trainer.global_step)
             value = trainer.network.value(test_state, c, h)
             print(value)
示例#2
0
    def test_validate_const_reward_discounted_env(self):
        """Train on a constant-reward env with discount .9 and periodically
        run validation against a hand-built dataset.

        NOTE(review): ``avg_loss`` is never asserted on, so this is a
        smoke test only. Also, ``global_step % 10 == 0`` may rarely (or
        never) hit exactly if the step counter advances by more than one
        per ``process`` call — confirm against async_td.
        """
        # config
        config = TestConfig()
        config.n_global_steps = 50000
        config.env_id = 'RandObsConstRewardEnv-v0'
        config.discount = .9
        config.value_dim = 2
        config.adam_beta1 = .9
        config.local_steps_per_update = 1000
        config.hidden_layer_sizes = [256]
        config.learning_rate = 1e-3
        config.learning_rate_end = 1e-5
        config.loss_type = 'mse'
        config.target_loss_index = None

        # build env
        const_reward = .01
        horizon = 10000000
        rand_obs = False
        env = debug_envs.RandObsConstRewardEnv(horizon=horizon,
                                               reward=const_reward,
                                               value_dim=config.value_dim,
                                               rand_obs=rand_obs)
        # Attach a spec so wrappers that read max_episode_steps work.
        env.spec = gym.envs.registration.EnvSpec(
            id='RandObsConstRewardEnv-v0',
            tags={'wrapper_config.TimeLimit.max_episode_steps': horizon + 1})

        # Build a tiny validation set: constant observations, targets equal
        # to the (undiscounted) reward accumulated over the prediction
        # window.
        n_samples = 2
        n_timesteps = 10  # predict after seeing this many timesteps
        n_prediction_timesteps = 10  # determines discount
        input_dim = 1
        obs_gen = np.random.randn if rand_obs else np.ones
        x = obs_gen(np.prod((n_samples, n_timesteps, input_dim))).reshape(
            (n_samples, n_timesteps, input_dim))
        y = (const_reward * np.ones(
            (n_samples, config.value_dim)) * n_prediction_timesteps)
        w = np.ones((n_samples, 1))
        dataset = validation.Dataset(x, y, w)

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                if global_step % 10 == 0:
                    avg_loss = trainer.validate(sess, dataset)
                global_step = sess.run(trainer.global_step)
示例#3
0
    def test_validate(self):
        """Train on SeqSumDebugEnv and assert validation loss drops below .1.

        The validation set contains constant sequences of 1s and -1s; the
        correct value prediction is the remaining (undiscounted) reward.
        """
        # config
        config = TestConfig()
        config.n_global_steps = 5000
        config.env_id = 'SeqSumDebugEnv-v0'
        config.discount = 1.
        config.value_dim = 1
        config.adam_beta1 = .99
        config.local_steps_per_update = 100000
        config.hidden_layer_sizes = [128]
        config.learning_rate = 5e-4
        config.learning_rate_end = 5e-6
        config.loss_type = 'mse'
        config.target_loss_index = None

        # build env
        env = gym.make(config.env_id)

        # build validation set
        # in this case just sequences of either 1s or 0s (const per sequence)
        # e.g.,
        # horizon = 4, and seeing 1s: [1 1 1 1]
        # then after seeing [1 1], should predict a value from this point of 2
        # because that is the amount of reward expect to accrue in the future
        horizon = 4
        n_samples = 2
        n_timesteps = 2  # predict after seeing this many timesteps
        input_dim = 1
        # half ones and half neg ones
        x = np.ones((n_samples, n_timesteps, input_dim))
        x[int(n_samples / 2):] = -1
        # expect value to be how many timesteps have left * -1 or 1
        y = x[:, 0, :] * (horizon - n_timesteps + 1)
        w = np.ones(n_samples)
        dataset = validation.Dataset(x, y, w)

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                # Periodically validate; avg_loss keeps the latest result.
                if global_step % 10 == 0:
                    avg_loss = trainer.validate(sess, dataset)
                global_step = sess.run(trainer.global_step)

        self.assertTrue(avg_loss < .1)
示例#4
0
    'order: 5',
    'Goal accepted with ID:',
    'Result:',
    'sequence:',
    '0',
    '1',
    '2',
    '3',
    '5',
    'Goal finished with status: SUCCEEDED',
]

configs = [
    TestConfig(
        command='action',
        arguments=['info', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output + ['/fibonacci_action_server'],
    ),
    TestConfig(
        command='action',
        arguments=['info', '-t', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output + [
            '/fibonacci_action_server [action_tutorials_interfaces/action/Fibonacci]'
        ],
    ),
    TestConfig(
        command='action',
        arguments=['info', '-c', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output,
示例#5
0
    def __init__(self, test_path='', test_config=None):
        """Create a new instance of a TestCase. Must be called by inheriting
        classes.

        Keyword Arguments:
        test_path Optional parameter that specifies the path where this test
                  resides
        test_config Loaded YAML test configuration
        """

        # Fall back to the script's directory when no explicit path given.
        if not len(test_path):
            self.test_name = os.path.dirname(sys.argv[0])
        else:
            self.test_name = test_path

        # We're not using /tmp//full//test//name because it gets so long that
        # it doesn't fit in AF_UNIX paths (limited to around 108 chars) used
        # for the rasterisk CLI connection. As a quick fix, we hash the path
        # using md5, to make it unique enough.
        # NOTE(review): md5() requires bytes in Python 3 — this call
        # presumably targets Python 2 (or a wrapper that encodes); confirm.
        self.realbase = self.test_name.replace("tests/", "", 1)
        self.base = md5(self.realbase).hexdigest()
        # We provide a symlink to it from a named path.
        named_dir = os.path.join(Asterisk.test_suite_root, self.realbase)
        try:
            os.makedirs(os.path.dirname(named_dir))
        except OSError:
            # Best effort: directory may already exist.
            pass
        try:
            join_path = os.path.relpath(
                os.path.join(Asterisk.test_suite_root, self.base),
                os.path.dirname(named_dir)
            )
            os.symlink(join_path, named_dir)
        except OSError:
            # Best effort: symlink may already exist.
            pass

        # Per-test state: Asterisk instances, AMI/FastAGI connections, etc.
        self.ast = []
        self.ami = []
        self.fastagi = []
        self.base_config_path = None
        self.reactor_timeout = 30
        self.passed = None
        self.fail_tokens = []
        self.timeout_id = None
        self.global_config = TestConfig(os.getcwd())
        self.test_config = TestConfig(self.test_name, self.global_config)
        self.condition_controller = None
        self.pcap = None
        self.pcapfilename = None
        self.create_pcap = False
        self._stopping = False
        self.testlogdir = self._set_test_log_directory()
        self.ast_version = AsteriskVersion()
        self._start_callbacks = []
        self._stop_callbacks = []
        self._ami_callbacks = []
        self._pcap_callbacks = []
        self._stop_deferred = None
        log_full = True
        log_messages = True

        # Valgrind slows everything down considerably; stretch the timeout.
        if os.getenv("VALGRIND_ENABLE") == "true":
            self.reactor_timeout *= 20

        # Pull additional configuration from YAML config if possible
        if test_config:
            if 'config-path' in test_config:
                self.base_config_path = test_config['config-path']
            if 'reactor-timeout' in test_config:
                self.reactor_timeout = test_config['reactor-timeout']
            self.ast_conf_options = test_config.get('ast-config-options')
            log_full = test_config.get('log-full', True)
            log_messages = test_config.get('log-messages', True)
        else:
            self.ast_conf_options = None

        os.makedirs(self.testlogdir)

        # Set up logging
        setup_logging(self.testlogdir, log_full, log_messages)

        LOGGER.info("Executing " + self.test_name)

        if PCAP_AVAILABLE and self.create_pcap:
            self.pcapfilename = os.path.join(self.testlogdir, "dumpfile.pcap")
            self.pcap = self.create_pcap_listener(dumpfile=self.pcapfilename)

        self._setup_conditions()

        # Enable twisted logging
        observer = log.PythonLoggingObserver()
        observer.start()

        # Kick off the test once the twisted reactor starts running.
        reactor.callWhenRunning(self._run)
示例#6
0
import sys

# NOTE(review): `os` is used here but not imported in this chunk —
# presumably imported earlier in the file; confirm.
sys.path.append(os.path.dirname(__file__))

from test_config import TestConfig  # noqa

# Representative std_msgs message types expected in `ros2 msg` CLI output.
some_messages_from_std_msgs = [
    'std_msgs/msg/Bool',
    'std_msgs/msg/Float32',
    'std_msgs/msg/Float64',
]

configs = [
    TestConfig(
        command='msg',
        arguments=['list'],
        expected_output=some_messages_from_std_msgs,
    ),
    TestConfig(
        command='msg',
        arguments=['package', 'std_msgs'],
        expected_output=some_messages_from_std_msgs,
    ),
    TestConfig(
        command='msg',
        arguments=['packages'],
        expected_output=['std_msgs'],
    ),
    TestConfig(
        command='msg',
        arguments=['show', 'std_msgs/msg/String'],
示例#7
0
def before_all(context):
    """Behave hook: attach a fresh test configuration to the context.

    The configuration starts with no webdriver and an empty component
    registry.
    """
    cfg = TestConfig()
    cfg.driver = None
    cfg.components = Components()
    context.config = cfg
    def test_model_losses(self):
        """Train an LSTMPredictor with both MSE and cross-entropy losses on a
        constant-target dataset and assert the final prediction is ~.5.

        NOTE(review): the manual cross-entropy below takes tf.log of a
        sigmoid, which is numerically unstable for large-magnitude logits;
        tf.nn.sigmoid_cross_entropy_with_logits would be the stable form.
        """
        config = TestConfig()
        config.hidden_layer_sizes = [32]
        config.value_dim = 1
        config.learning_rate = 5e-3

        # simple dataset, learn to output the sum
        n_samples = 10
        n_timesteps = 4
        input_dim = 1
        x = np.random.rand(n_samples, n_timesteps, input_dim)
        # Targets are constant .5 for every timestep of every sample.
        y = np.ones((n_samples, n_timesteps)) * 0.5

        for loss_type in ['mse', 'ce']:
            # Fresh graph per loss type so variables don't collide.
            tf.reset_default_graph()
            with tf.Session() as session:
                predictor = model.LSTMPredictor((input_dim, ), config)
                target_placeholder = tf.placeholder(tf.float32, [None, 1],
                                                    'target')
                if loss_type == 'mse':
                    loss = tf.reduce_sum(
                        (predictor.vf - target_placeholder)**2)
                elif loss_type == 'ce':
                    # Manual sigmoid cross-entropy over the raw logits.
                    yt = target_placeholder
                    yp = predictor.vf
                    loss = tf.reduce_sum(
                        (yt * -tf.log(tf.nn.sigmoid(yp)) +
                         (1 - yt) * -tf.log(1 - tf.nn.sigmoid(yp))))
                opt = tf.train.AdamOptimizer(config.learning_rate)
                train_op = opt.minimize(loss)
                session.run(tf.global_variables_initializer())

                def run_sample(p, x, y, state_in, train=True):
                    # Run one sequence through the network; optionally apply
                    # a training step. Returns the sample loss.
                    feed_dict = {
                        p.x: x,
                        target_placeholder: y,
                        p.dropout_keep_prob_ph: 1.,
                        p.state_in[0]: state_in[0],
                        p.state_in[1]: state_in[1],
                    }
                    outputs_list = [loss]
                    if train:
                        outputs_list += [train_op]
                    fetched = session.run(outputs_list, feed_dict=feed_dict)

                    if train:
                        val_loss, _ = fetched
                    else:
                        val_loss = fetched[0]
                    return val_loss

                # 80/20 train/validation split over the samples.
                n_epochs = 100
                n_train = int(n_samples * .8)
                n_val = n_samples - n_train
                verbose = False
                for epoch in range(n_epochs):

                    # train
                    train_loss_mean = 0
                    for sidx in range(n_train):
                        train_loss = run_sample(predictor, x[sidx, :, :],
                                                y[sidx].reshape(-1, 1),
                                                predictor.state_init)
                        train_loss_mean += train_loss / n_train

                    # val
                    val_loss_mean = 0
                    for sidx in range(n_val):
                        val_loss = run_sample(predictor,
                                              x[sidx, :, :],
                                              y[sidx].reshape(-1, 1),
                                              predictor.state_init,
                                              train=False)
                        val_loss_mean += val_loss / n_val

                        value = predictor.value(x[sidx, :, :],
                                                predictor.state_init[0],
                                                predictor.state_init[1],
                                                sequence=True)
                        # For cross-entropy, outputs are logits; squash to
                        # probabilities before comparing against the target.
                        if loss_type == 'ce':
                            value = 1 / (1 + np.exp(-value))
                        # print('x: {}\ny: {}\ny_pred: {}\n'.format(
                        #     x[sidx,:,:], y[sidx].reshape(-1,1), value))
                        # input()

                    # report
                    if verbose:
                        print('epoch: {} / {}\ttrain loss: {}\tval loss: {}'.
                              format(epoch, n_epochs, train_loss_mean,
                                     val_loss_mean))

                # Asserts only on the value from the last validation sample
                # of the last epoch.
                self.assertTrue(np.abs(value - .5) < 1e-2)
    def test_full_sequence_prediction(self):
        """Train an LSTMPredictor to output the sum of a 4-step sequence from
        its final-timestep value head, asserting train and val MSE < 1e-2.
        """
        config = TestConfig()
        config.hidden_layer_sizes = [32, 32]
        config.value_dim = 1
        config.learning_rate = 1e-3

        # simple dataset, learn to output the sum
        n_samples = 100
        n_timesteps = 4
        input_dim = 1
        x = np.random.rand(n_samples, n_timesteps, input_dim)
        y = np.sum(x, axis=(1, 2)).reshape(-1, 1)

        with tf.Session() as session:
            predictor = model.LSTMPredictor((input_dim, ), config)
            target_placeholder = tf.placeholder(tf.float32, [None, 1],
                                                'target')
            # Only the last-timestep prediction is penalized.
            loss = tf.reduce_sum((predictor.vf[-1] - target_placeholder)**2)
            opt = tf.train.AdamOptimizer(config.learning_rate)
            train_op = opt.minimize(loss)
            session.run(tf.global_variables_initializer())

            def run_sample(p, x, y, state_in, train=True):
                # Run one sequence; in train mode apply a step and return
                # the loss, otherwise return (loss, final-step prediction).
                feed_dict = {
                    p.x: x,
                    target_placeholder: y,
                    p.dropout_keep_prob_ph: 1.,
                    p.state_in[0]: state_in[0],
                    p.state_in[1]: state_in[1],
                }
                outputs_list = [loss]
                if train:
                    outputs_list += [train_op]
                else:
                    outputs_list += [p.vf[-1]]
                fetched = session.run(outputs_list, feed_dict=feed_dict)

                if train:
                    val_loss, _ = fetched
                    return val_loss
                else:
                    val_loss, val_vf = fetched
                    return val_loss, val_vf

            # 80/20 train/validation split over the samples.
            n_epochs = 10
            n_train = int(n_samples * .8)
            n_val = n_samples - n_train
            verbose = False
            for epoch in range(n_epochs):

                # train
                train_loss_mean = 0
                for sidx in range(n_train):
                    train_loss = run_sample(predictor, x[sidx, :, :],
                                            y[sidx].reshape(1, -1),
                                            predictor.state_init)
                    train_loss_mean += train_loss / n_train

                # val
                val_loss_mean = 0
                for sidx in range(n_val):
                    val_loss, val_vf = run_sample(predictor,
                                                  x[sidx, :, :],
                                                  y[sidx].reshape(1, -1),
                                                  predictor.state_init,
                                                  train=False)
                    val_loss_mean += val_loss / n_val
                    # print('x: {}\ny: {}\ny_pred: {}'.format(
                    #     x[sidx,:,:], y[sidx].reshape(1,-1), val_vf))
                    # input()

                # report
                if verbose:
                    print(
                        'epoch: {} / {}\ttrain loss: {}\tval loss: {}'.format(
                            epoch, n_epochs, train_loss_mean, val_loss_mean))

            self.assertTrue(train_loss_mean < 1e-2)
            self.assertTrue(val_loss_mean < 1e-2)
示例#10
0
    def test_heuristic_deterministic_case(self):
        """Train AsyncTD on BayesNetRiskEnv with periodic validation against
        a dataset loaded from disk.

        NOTE(review): relies on absolute, machine-specific dataset paths, so
        this test only runs on the author's machine. It also makes no
        assertion — smoke test only.
        """

        config = TestConfig()
        config.n_global_steps = 50000
        config.max_timesteps = 50
        config.env_id = 'BayesNetRiskEnv-v0'
        config.discount = 1.  # 49. / 50
        config.value_dim = 5
        config.adam_beta1 = .9
        config.local_steps_per_update = 100
        config.hidden_layer_sizes = [128]
        config.learning_rate = 1e-3
        config.learning_rate_end = 5e-6
        config.loss_type = 'mse'
        config.target_loss_index = 1
        config.validation_dataset_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/subselect_proposal_prediction_data.h5'
        # NOTE(review): overwritten to 1000 a few lines below.
        config.max_validation_samples = 1
        config.validate_every = 1000
        config.visualize_every = 10000
        config.summarize_features = True

        # Mirror dataset settings (e.g. feature layout) onto the config.
        validation.transfer_dataset_settings_to_config(
            config.validation_dataset_filepath, config)

        config.base_bn_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/base_bn_filepath.h5'
        config.base_prop_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/prop_bn_filepath.h5'
        config.max_validation_samples = 1000

        # config.roadway_radius = 400.
        # config.roadway_length = 100.
        # config.lon_accel_std_dev = 0.
        # config.lat_accel_std_dev = 0.
        # config.overall_response_time = .2
        # config.lon_response_time = .0
        # config.err_p_a_to_i = .15
        # config.err_p_i_to_a = .3
        # config.max_num_vehicles = 50
        # config.min_num_vehicles = 50
        # config.hard_brake_threshold = -3.
        # config.hard_brake_n_past_frames = 2
        # config.min_base_speed = 30.
        # config.max_base_speed = 30.
        # config.min_vehicle_length = 5.
        # config.max_vehicle_length = 5.
        # config.min_vehicle_width = 2.5
        # config.max_vehicle_width = 2.5
        # config.min_init_dist = 10.
        # config.heuristic_behavior_type = "normal"

        # build env
        env = build_envs.create_env(config)
        dataset = validation.build_dataset(config, env)
        print('mean validation targets: {}'.format(np.mean(dataset.y, axis=0)))

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        last_global_step_val = 0
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                # Validate whenever enough global steps have elapsed since
                # the last validation pass.
                if (global_step -
                        last_global_step_val) > config.validate_every:
                    avg_loss = trainer.validate(sess, dataset)
                    last_global_step_val = global_step
                global_step = sess.run(trainer.global_step)
示例#11
0
def test_prediction_across_target_variance(val_x,
                                           val_y,
                                           sigma=0,
                                           n_mc=1,
                                           hidden_layer_sizes=None,
                                           n_epochs=50,
                                           n_samples=100,
                                           input_dim=1,
                                           n_timesteps=2):
    """Train an LSTMPredictor on data generated with target noise ``sigma``
    and evaluate it each epoch on a fixed validation set.

    Args:
        val_x: validation inputs, shape (n_val, n_timesteps, input_dim).
        val_y: validation targets, one scalar target per sample.
        sigma: target noise level passed to generate_data.
        n_mc: Monte Carlo sample count passed to generate_data.
        hidden_layer_sizes: LSTM layer sizes; defaults to [32, 32].
        n_epochs: number of training epochs.
        n_samples: number of training samples to generate.
        input_dim: feature dimension of each timestep.
        n_timesteps: sequence length.

    Returns:
        List of per-epoch mean validation losses.
    """
    # Fixed: was a mutable default argument ([32, 32] shared across calls);
    # use a None sentinel with the same effective default.
    if hidden_layer_sizes is None:
        hidden_layer_sizes = [32, 32]

    config = TestConfig()
    config.hidden_layer_sizes = hidden_layer_sizes
    config.value_dim = 1
    config.learning_rate = 1e-3
    config.n_epochs = n_epochs

    # simple dataset
    x, y = generate_data(sigma, n_mc, n_samples, n_timesteps, input_dim)
    val_losses = []
    with tf.Session() as session:
        predictor = model.LSTMPredictor((input_dim, ), config)
        target_placeholder = tf.placeholder(tf.float32, [None, 1], 'target')
        # Only the final-timestep prediction is penalized.
        loss = tf.reduce_sum((predictor.vf[-1] - target_placeholder)**2)
        opt = tf.train.AdamOptimizer(config.learning_rate)
        train_op = opt.minimize(loss)
        session.run(tf.global_variables_initializer())

        def run_sample(p, x, y, state_in, train=True):
            # Run one sequence; in train mode apply a step and return the
            # loss, otherwise return (loss, final-step prediction).
            feed_dict = {
                p.x: x,
                target_placeholder: y,
                p.dropout_keep_prob_ph: 1.,
                p.state_in[0]: state_in[0],
                p.state_in[1]: state_in[1],
            }
            outputs_list = [loss]
            if train:
                outputs_list += [train_op]
            else:
                outputs_list += [p.vf[-1]]
            fetched = session.run(outputs_list, feed_dict=feed_dict)

            if train:
                val_loss, _ = fetched
                return val_loss
            else:
                val_loss, val_vf = fetched
                return val_loss, val_vf

        n_val = len(val_x)
        for epoch in range(n_epochs):

            # train
            train_loss_mean = 0
            for sidx in range(n_samples):
                train_loss = run_sample(predictor, x[sidx, :, :],
                                        y[sidx].reshape(1, -1),
                                        predictor.state_init)
                train_loss_mean += train_loss / n_samples

            # val
            val_loss_mean = 0
            for sidx in range(n_val):
                val_loss, val_vf = run_sample(predictor,
                                              val_x[sidx, :, :],
                                              val_y[sidx].reshape(1, -1),
                                              predictor.state_init,
                                              train=False)
                val_loss_mean += val_loss / n_val
                # print('x: {}\ny: {}\ny_pred: {}'.format(
                #     x[sidx,:,:], y[sidx].reshape(1,-1), val_vf))
                # input()

            # report, track
            val_losses.append(val_loss_mean)
            print('epoch: {} / {}\ttrain loss: {}\tval loss: {}'.format(
                epoch, n_epochs, train_loss_mean, val_loss_mean))

    return val_losses
示例#12
0
def pytest_configure(config):
    """Configure webdriver fixtures based on ini and cmd-line arguments.

    Loads the test configuration, downloads local drivers when running
    against neither BrowserStack nor Selenoid, and registers a plugin that
    provides a per-test, per-browser `driver` fixture.
    """

    # load config (ini and cmd-line)
    test_config = TestConfig(config)
    TestLog.configure()

    log.debug("pytest_configure")
    log.debug(test_config)

    # download drivers (if using Local drivers)
    if not (test_config.use_browserstack or test_config.use_selenoid):
        LocalDriverFactory.download_drivers(test_config.browsers_list)

    class DriverPlugin:
        """ Driver plugin class """
        @pytest.fixture(autouse=True,
                        params=test_config.browsers_list,
                        scope="function")
        def driver(self, request):
            """Web driver fixture: yields a driver for request.param's
            browser, attaches a screenshot on failure, then quits it."""

            # init browser options
            options = BrowserOptions()

            options.browser_type = Browser[request.param]
            options.headless = test_config.headless
            options.window_size = test_config.win_size
            options.timeout = test_config.timeout
            options.use_browserstack = test_config.use_browserstack
            options.use_selenoid = test_config.use_selenoid
            options.hub_url = test_config.hub_url

            log.debug("Create 'driver' fixture: {}".format(options))

            # get webdriver instance
            if options.use_browserstack:
                d = BsDriverFactory.get_driver(options)
            elif options.use_selenoid:
                d = SelenoidDriverFactory.get_driver(options)
            else:
                d = LocalDriverFactory.get_driver(options)

            yield d

            # Teardown: best-effort screenshot on failure, always quit.
            try:
                if request.node.rep_call.failed:
                    log.error("Test '{}' failed!".format(
                        request.function.__name__))
                    try:
                        allure.attach(
                            d.get_screenshot_as_png(),
                            name='screenshot on fail',
                            attachment_type=allure.attachment_type.PNG)
                    # Fixed: was a bare `except:`, which also swallows
                    # SystemExit/KeyboardInterrupt; also fixed the "attch"
                    # typo in the log message and dropped the dead `pass`.
                    except Exception:
                        log.warn("Unable to attach screenshot to allure report")
            finally:
                # finalization
                d.quit()
                log.debug("'driver' fixture finalized")

    # register plugin
    config.pluginmanager.register(DriverPlugin())