Example #1
def generic_test_main(test_file,
                      test_data_file,
                      test_func,
                      comparator=None,
                      res_printer=None):
    """
    The main test starter.
    :param test_file - name of the test file
    :param test_data_file - file with test data
    :param test_func - function to be tested
    :param comparator - custom comparator. A function that accepts
        (expected, computed result) and returns a boolean value
    :param res_printer - function for customized printing
    """
    try:
        with open(
                get_file_path_in_judge_dir('config.json')) as config_file_data:
            config_override = json.load(config_file_data)

        commandline_args = sys.argv[1:]
        config = TestConfig.from_command_line(
            test_file, test_data_file, config_override['timeoutSeconds'],
            config_override['numFailedTestsBeforeStop'], commandline_args)

        set_output_opts(config.tty_mode, config.color_mode)

        test_handler = GenericTestHandler(test_func, comparator=comparator)
        return run_tests(test_handler, config, res_printer)
    except RuntimeError as e:
        print(
            '\nCritical error({}): {}'.format(e.__class__.__name__, e),
            file=sys.stderr)
        return TestResult.RUNTIME_ERROR
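A minimal usage sketch for this entry point; the EPI-Judge-style file layout is assumed, and count_bits together with its .tsv data file are hypothetical:

def count_bits(x):
    # Count set bits in a non-negative integer.
    num_bits = 0
    while x:
        num_bits += x & 1
        x >>= 1
    return num_bits


if __name__ == '__main__':
    # comparator and res_printer are optional keyword arguments here.
    exit(generic_test_main('count_bits.py', 'count_bits.tsv', count_bits))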
Example #2
    def test_WriteToXMP_JPG(self):
        method_name = sys._getframe(0).f_code.co_name
        print("**** %s ****" % method_name)
        fn = self.data['ferns']
        backup_folder = TestConfig.getInstance().output_path
        backup_file = backupFile(fn, outputPath=backup_folder, overwrite=True)
        print(backup_file)
        ex = ExifTool(backup_file, False)
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        ex.setAttributes("exif", {"DateTimeOriginal": now})
        dates = ex.getDateAttributes()
        for k, v in dates.items():
            print("%40s = %s" % (k, v))
Example #3
import os
import sys

sys.path.append(os.path.dirname(__file__))

from test_config import TestConfig  # noqa

some_messages_from_std_msgs = [
    'std_msgs/msg/Bool',
    'std_msgs/msg/Float32',
    'std_msgs/msg/Float64',
]

configs = [
    TestConfig(
        command='msg',
        arguments=['list'],
        expected_output=some_messages_from_std_msgs,
    ),
    TestConfig(
        command='msg',
        arguments=['package', 'std_msgs'],
        expected_output=some_messages_from_std_msgs,
    ),
    TestConfig(
        command='msg',
        arguments=['packages'],
        expected_output=['std_msgs'],
    ),
    TestConfig(
        command='msg',
        arguments=['show', 'std_msgs/msg/String'],
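A hedged sketch of how a runner might consume such configs; the actual harness is not part of this excerpt, so the subprocess-based check below is illustrative only (it assumes the keyword arguments above are exposed as attributes):

import subprocess


def check_config(config):
    # Run e.g. `ros2 msg list` and require each expected line in its output.
    completed = subprocess.run(
        ['ros2', config.command] + config.arguments,
        capture_output=True, text=True, check=True)
    lines = completed.stdout.splitlines()
    for expected in config.expected_output:
        assert expected in lines, 'missing expected line: %s' % expected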
Example #4
def before_all(context):
    context.config = TestConfig()
    context.config.driver = None
    context.config.components = Components()
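before_all here is a behave environment.py hook; a hedged sketch of the matching after_all teardown, assuming the driver stored above ends up being a Selenium WebDriver:

def after_all(context):
    # Quit the WebDriver if one was created during the run.
    if context.config.driver is not None:
        context.config.driver.quit()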
Example #5
    def test_model_losses(self):
        config = TestConfig()
        config.hidden_layer_sizes = [32]
        config.value_dim = 1
        config.learning_rate = 5e-3

        # simple dataset, learn to output the sum
        n_samples = 10
        n_timesteps = 4
        input_dim = 1
        x = np.random.rand(n_samples, n_timesteps, input_dim)
        y = np.ones((n_samples, n_timesteps)) * 0.5

        for loss_type in ['mse', 'ce']:
            tf.reset_default_graph()
            with tf.Session() as session:
                predictor = model.LSTMPredictor((input_dim, ), config)
                target_placeholder = tf.placeholder(tf.float32, [None, 1],
                                                    'target')
                if loss_type == 'mse':
                    loss = tf.reduce_sum(
                        (predictor.vf - target_placeholder)**2)
                elif loss_type == 'ce':
                    yt = target_placeholder
                    yp = predictor.vf
                    loss = tf.reduce_sum(
                        (yt * -tf.log(tf.nn.sigmoid(yp)) +
                         (1 - yt) * -tf.log(1 - tf.nn.sigmoid(yp))))
                opt = tf.train.AdamOptimizer(config.learning_rate)
                train_op = opt.minimize(loss)
                session.run(tf.global_variables_initializer())

                def run_sample(p, x, y, state_in, train=True):
                    feed_dict = {
                        p.x: x,
                        target_placeholder: y,
                        p.dropout_keep_prob_ph: 1.,
                        p.state_in[0]: state_in[0],
                        p.state_in[1]: state_in[1],
                    }
                    outputs_list = [loss]
                    if train:
                        outputs_list += [train_op]
                    fetched = session.run(outputs_list, feed_dict=feed_dict)

                    if train:
                        val_loss, _ = fetched
                    else:
                        val_loss = fetched[0]
                    return val_loss

                n_epochs = 100
                n_train = int(n_samples * .8)
                n_val = n_samples - n_train
                verbose = False
                for epoch in range(n_epochs):

                    # train
                    train_loss_mean = 0
                    for sidx in range(n_train):
                        train_loss = run_sample(predictor, x[sidx, :, :],
                                                y[sidx].reshape(-1, 1),
                                                predictor.state_init)
                        train_loss_mean += train_loss / n_train

                    # val
                    val_loss_mean = 0
                    for sidx in range(n_val):
                        val_loss = run_sample(predictor,
                                              x[sidx, :, :],
                                              y[sidx].reshape(-1, 1),
                                              predictor.state_init,
                                              train=False)
                        val_loss_mean += val_loss / n_val

                        value = predictor.value(x[sidx, :, :],
                                                predictor.state_init[0],
                                                predictor.state_init[1],
                                                sequence=True)
                        if loss_type == 'ce':
                            value = 1 / (1 + np.exp(-value))
                        # print('x: {}\ny: {}\ny_pred: {}\n'.format(
                        #     x[sidx,:,:], y[sidx].reshape(-1,1), value))
                        # input()

                    # report
                    if verbose:
                        print('epoch: {} / {}\ttrain loss: {}\tval loss: {}'.
                              format(epoch, n_epochs, train_loss_mean,
                                     val_loss_mean))

                self.assertTrue(np.abs(value - .5) < 1e-2)
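Note that the hand-rolled cross-entropy above is numerically unstable for large-magnitude logits; a hedged, mathematically equivalent alternative using TF1's fused op would be:

loss = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=yt, logits=yp))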
Example #6
    def test_full_sequence_prediction(self):
        config = TestConfig()
        config.hidden_layer_sizes = [32, 32]
        config.value_dim = 1
        config.learning_rate = 1e-3

        # simple dataset, learn to output the sum
        n_samples = 100
        n_timesteps = 4
        input_dim = 1
        x = np.random.rand(n_samples, n_timesteps, input_dim)
        y = np.sum(x, axis=(1, 2)).reshape(-1, 1)

        with tf.Session() as session:
            predictor = model.LSTMPredictor((input_dim, ), config)
            target_placeholder = tf.placeholder(tf.float32, [None, 1],
                                                'target')
            loss = tf.reduce_sum((predictor.vf[-1] - target_placeholder)**2)
            opt = tf.train.AdamOptimizer(config.learning_rate)
            train_op = opt.minimize(loss)
            session.run(tf.global_variables_initializer())

            def run_sample(p, x, y, state_in, train=True):
                feed_dict = {
                    p.x: x,
                    target_placeholder: y,
                    p.dropout_keep_prob_ph: 1.,
                    p.state_in[0]: state_in[0],
                    p.state_in[1]: state_in[1],
                }
                outputs_list = [loss]
                if train:
                    outputs_list += [train_op]
                else:
                    outputs_list += [p.vf[-1]]
                fetched = session.run(outputs_list, feed_dict=feed_dict)

                if train:
                    val_loss, _ = fetched
                    return val_loss
                else:
                    val_loss, val_vf = fetched
                    return val_loss, val_vf

            n_epochs = 10
            n_train = int(n_samples * .8)
            n_val = n_samples - n_train
            verbose = False
            for epoch in range(n_epochs):

                # train
                train_loss_mean = 0
                for sidx in range(n_train):
                    train_loss = run_sample(predictor, x[sidx, :, :],
                                            y[sidx].reshape(1, -1),
                                            predictor.state_init)
                    train_loss_mean += train_loss / n_train

                # val
                val_loss_mean = 0
                for sidx in range(n_val):
                    val_loss, val_vf = run_sample(predictor,
                                                  x[sidx, :, :],
                                                  y[sidx].reshape(1, -1),
                                                  predictor.state_init,
                                                  train=False)
                    val_loss_mean += val_loss / n_val
                    # print('x: {}\ny: {}\ny_pred: {}'.format(
                    #     x[sidx,:,:], y[sidx].reshape(1,-1), val_vf))
                    # input()

                # report
                if verbose:
                    print(
                        'epoch: {} / {}\ttrain loss: {}\tval loss: {}'.format(
                            epoch, n_epochs, train_loss_mean, val_loss_mean))

            self.assertTrue(train_loss_mean < 1e-2)
            self.assertTrue(val_loss_mean < 1e-2)
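test_full_sequence_prediction supervises only the final-timestep output (predictor.vf[-1]); a quick hedged NumPy check of its sum targets:

import numpy as np

x = np.random.rand(3, 4, 1)                # (n_samples, n_timesteps, input_dim)
y = np.sum(x, axis=(1, 2)).reshape(-1, 1)  # one scalar target per sequence
assert y.shape == (3, 1)
assert np.isclose(y[0, 0], x[0].sum())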
Example #7
import random

from behave import then, step
from utils import random_string, filter_list_by_parameter_start_with, validate_json
from hamcrest import *
import requests
from test_config import TestConfig
from datetime import datetime
from random import choice

dataset_name_prefix = "test_dataset_name_"

configs = TestConfig.configs()
orb_url = configs.get('orb_url')


@step("{amount_of_datasets} new dataset is created using the policy, {group_order} group and {amount_of_sinks}"
      " {sink_number}")
def create_new_dataset(context, amount_of_datasets, group_order, amount_of_sinks, sink_number):
    assert_that(sink_number, any_of(equal_to("sink"), equal_to("sinks")), "Unexpected value for sink")
    assert_that(group_order, any_of(equal_to("first"), equal_to("second"), equal_to("last"), equal_to("an existing")),
                "Unexpected value for group.")

    if group_order == "an existing":
        groups_to_be_used = random.sample(list(context.agent_groups.keys()), int(amount_of_datasets))
    else:
        assert_that(str(amount_of_datasets), equal_to(str(1)),
                    "For more than one dataset, pass 'an existing' as group parameter")
        order_convert = {"first": 0, "last": -1, "second": 1}
        groups_to_be_used = [list(context.agent_groups.keys())[order_convert[group_order]]]
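For context, an illustrative feature line this step pattern would bind, plus the hamcrest guard it applies first (values hypothetical):

from hamcrest import assert_that, any_of, equal_to

# e.g. matched step text:
#   When 1 new dataset is created using the policy, first group and 1 sink
assert_that("sink", any_of(equal_to("sink"), equal_to("sinks")),
            "Unexpected value for sink")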
Example #8
from users import authenticate
from behave import given, when, then, step
from test_config import TestConfig
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from ui_utils import go_to_page, input_text_by_id
from hamcrest import *

configs = TestConfig.configs()
user_email = configs.get('email')
user_password = configs.get('password')
orb_url = configs.get('orb_url')


@given("the Orb user logs in through the UI")
def logs_in_orb_ui(context):
    orb_page(context)
    use_credentials(context)
    check_home_page(context)
    context.token = authenticate(user_email, user_password)['token']


@step("that the user is on the orb page")
def orb_page(context):
    current_url = go_to_page(orb_url, context)
    assert_that(current_url, equal_to(f"{orb_url}/auth/login"),
                "user not enabled to access orb login page")


@when("the Orb user logs in Orb UI")
def use_credentials(context):
Example #9
    def test_prediction_long_term_hard_brake(self):
        config = TestConfig()
        config.n_global_steps = 20000
        config.env_id = 'HeuristicRiskEnv-v0'
        config.discount = 0.  # 599. / 600
        config.max_timesteps = 10000
        config.prime_timesteps = 50
        config.learning_rate = 1e-3
        config.adam_beta1 = .995
        config.adam_beta2 = .999
        config.dropout_keep_prob = 1.
        config.l2_reg = 0.
        config.local_steps_per_update = 20
        config.hidden_layer_sizes = [32, 16]
        config.hard_brake_threshold = -3.
        config.hard_brake_n_past_frames = 1
        config.target_loss_index = 3
        config.loss_type = 'mse'
        env = build_envs.create_env(config)
        test_state = env.reset()
        summary_writer = tf.summary.FileWriter('/tmp/test')
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            c, h = trainer.network.get_initial_features()
            while global_step < config.n_global_steps:
                trainer.process(sess)
                global_step = sess.run(trainer.global_step)
                value = trainer.network.value(test_state, c, h)
                print(value)
Example #10
    def test_heuristic_deterministic_case(self):

        config = TestConfig()
        config.n_global_steps = 50000
        config.max_timesteps = 50
        config.env_id = 'BayesNetRiskEnv-v0'
        config.discount = 1.  # 49. / 50
        config.value_dim = 5
        config.adam_beta1 = .9
        config.local_steps_per_update = 100
        config.hidden_layer_sizes = [128]
        config.learning_rate = 1e-3
        config.learning_rate_end = 5e-6
        config.loss_type = 'mse'
        config.target_loss_index = 1
        config.validation_dataset_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/subselect_proposal_prediction_data.h5'
        config.max_validation_samples = 1
        config.validate_every = 1000
        config.visualize_every = 10000
        config.summarize_features = True

        validation.transfer_dataset_settings_to_config(
            config.validation_dataset_filepath, config)

        config.base_bn_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/base_bn_filepath.h5'
        config.base_prop_filepath = '/Users/wulfebw/Dropbox/School/Stanford/research/risk/risk_prediction/data/experiments/heuristic_determinstic_1_lane_5_sec/data/prop_bn_filepath.h5'
        config.max_validation_samples = 1000

        # config.roadway_radius = 400.
        # config.roadway_length = 100.
        # config.lon_accel_std_dev = 0.
        # config.lat_accel_std_dev = 0.
        # config.overall_response_time = .2
        # config.lon_response_time = .0
        # config.err_p_a_to_i = .15
        # config.err_p_i_to_a = .3
        # config.max_num_vehicles = 50
        # config.min_num_vehicles = 50
        # config.hard_brake_threshold = -3.
        # config.hard_brake_n_past_frames = 2
        # config.min_base_speed = 30.
        # config.max_base_speed = 30.
        # config.min_vehicle_length = 5.
        # config.max_vehicle_length = 5.
        # config.min_vehicle_width = 2.5
        # config.max_vehicle_width = 2.5
        # config.min_init_dist = 10.
        # config.heuristic_behavior_type = "normal"

        # build env
        env = build_envs.create_env(config)
        dataset = validation.build_dataset(config, env)
        print('mean validation targets: {}'.format(np.mean(dataset.y, axis=0)))

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        last_global_step_val = 0
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                if (global_step -
                        last_global_step_val) > config.validate_every:
                    avg_loss = trainer.validate(sess, dataset)
                    last_global_step_val = global_step
                global_step = sess.run(trainer.global_step)
Example #11
    def test_validate(self):
        # config
        config = TestConfig()
        config.n_global_steps = 5000
        config.env_id = 'SeqSumDebugEnv-v0'
        config.discount = 1.
        config.value_dim = 1
        config.adam_beta1 = .99
        config.local_steps_per_update = 100000
        config.hidden_layer_sizes = [128]
        config.learning_rate = 5e-4
        config.learning_rate_end = 5e-6
        config.loss_type = 'mse'
        config.target_loss_index = None

        # build env
        env = gym.make(config.env_id)

        # build validation set
        # in this case just sequences of either 1s or -1s (constant per
        # sequence), e.g., horizon = 4 and seeing 1s: [1 1 1 1]
        # after seeing [1 1], the target value from that point is
        # horizon - n_timesteps + 1 = 3, i.e. the reward expected to accrue
        # from the current timestep onward
        horizon = 4
        n_samples = 2
        n_timesteps = 2  # predict after seeing this many timesteps
        input_dim = 1
        # half ones and half neg ones
        x = np.ones((n_samples, n_timesteps, input_dim))
        x[int(n_samples / 2):] = -1
        # expected value: remaining timesteps (counting the current one) * 1 or -1
        y = x[:, 0, :] * (horizon - n_timesteps + 1)
        w = np.ones(n_samples)
        dataset = validation.Dataset(x, y, w)

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                if global_step % 10 == 0:
                    avg_loss = trainer.validate(sess, dataset)
                global_step = sess.run(trainer.global_step)

        self.assertTrue(avg_loss < .1)
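A quick hedged check of the validation targets constructed above, in plain NumPy and independent of the trainer:

import numpy as np

horizon, n_samples, n_timesteps, input_dim = 4, 2, 2, 1
x = np.ones((n_samples, n_timesteps, input_dim))
x[n_samples // 2:] = -1
y = x[:, 0, :] * (horizon - n_timesteps + 1)
print(y)  # [[ 3.] [-3.]]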
Example #13
class TestCase(object):
    """The base class object for python tests. This class provides common
    functionality to all tests, including management of Asterisk instances, AMI,
    twisted reactor, and various other utilities.
    """

    def __init__(self, test_path='', test_config=None):
        """Create a new instance of a TestCase. Must be called by inheriting
        classes.

        Keyword Arguments:
        test_path Optional parameter that specifies the path where this test
                  resides
        test_config Loaded YAML test configuration
        """

        if not len(test_path):
            self.test_name = os.path.dirname(sys.argv[0])
        else:
            self.test_name = test_path

        # We're not using /tmp//full//test//name because it gets so long that
        # it doesn't fit in AF_UNIX paths (limited to around 108 chars) used
        # for the rasterisk CLI connection. As a quick fix, we hash the path
        # using md5, to make it unique enough.
        self.realbase = self.test_name.replace("tests/", "", 1)
        self.base = md5(self.realbase).hexdigest()
        # We provide a symlink to it from a named path.
        named_dir = os.path.join(Asterisk.test_suite_root, self.realbase)
        try:
            os.makedirs(os.path.dirname(named_dir))
        except OSError:
            pass
        try:
            join_path = os.path.relpath(
                os.path.join(Asterisk.test_suite_root, self.base),
                os.path.dirname(named_dir)
            )
            os.symlink(join_path, named_dir)
        except OSError:
            pass

        self.ast = []
        self.ami = []
        self.fastagi = []
        self.base_config_path = None
        self.reactor_timeout = 30
        self.passed = None
        self.fail_tokens = []
        self.timeout_id = None
        self.global_config = TestConfig(os.getcwd())
        self.test_config = TestConfig(self.test_name, self.global_config)
        self.condition_controller = None
        self.pcap = None
        self.pcapfilename = None
        self.create_pcap = False
        self._stopping = False
        self.testlogdir = self._set_test_log_directory()
        self.ast_version = AsteriskVersion()
        self._start_callbacks = []
        self._stop_callbacks = []
        self._ami_callbacks = []
        self._pcap_callbacks = []
        self._stop_deferred = None
        log_full = True
        log_messages = True

        if os.getenv("VALGRIND_ENABLE") == "true":
            self.reactor_timeout *= 20

        # Pull additional configuration from YAML config if possible
        if test_config:
            if 'config-path' in test_config:
                self.base_config_path = test_config['config-path']
            if 'reactor-timeout' in test_config:
                self.reactor_timeout = test_config['reactor-timeout']
            self.ast_conf_options = test_config.get('ast-config-options')
            log_full = test_config.get('log-full', True)
            log_messages = test_config.get('log-messages', True)
        else:
            self.ast_conf_options = None

        os.makedirs(self.testlogdir)

        # Set up logging
        setup_logging(self.testlogdir, log_full, log_messages)

        LOGGER.info("Executing " + self.test_name)

        if PCAP_AVAILABLE and self.create_pcap:
            self.pcapfilename = os.path.join(self.testlogdir, "dumpfile.pcap")
            self.pcap = self.create_pcap_listener(dumpfile=self.pcapfilename)

        self._setup_conditions()

        # Enable twisted logging
        observer = log.PythonLoggingObserver()
        observer.start()

        reactor.callWhenRunning(self._run)

    def _set_test_log_directory(self):
        """Determine which logging directory we should use for this test run

        Returns:
        The full path that should be used as the directory for all log data
        """
        i = 1
        base_path = os.path.join(Asterisk.test_suite_root, self.base)
        while os.path.isdir(os.path.join(base_path, "run_%d" % i)):
            i += 1
        full_path = os.path.join(base_path, "run_%d" % i)
        return full_path

    def _setup_conditions(self):
        """Register pre and post-test conditions.

        Note that we have to first register condition checks without related
        conditions, so that those that have dependencies can find them
        """
        self.condition_controller = TestConditionController(self.test_config,
                                                            self.ast,
                                                            self.stop_reactor)
        global_conditions = self.global_config.get_conditions()
        conditions = self.test_config.get_conditions()

        # Get those global conditions that are not in the self conditions
        for g_cond in global_conditions:
            disallowed = [i for i in conditions
                          if i[0].get_name() == g_cond[0].get_name()
                          and i[1] == g_cond[1]]
            if len(disallowed) == 0:
                conditions.append(g_cond)

        for cond in conditions:
            # cond is a 3-tuple of object, pre-post type, and related name
            obj, pre_post_type, related_name = cond
            if pre_post_type == "PRE":
                self.condition_controller.register_pre_test_condition(obj)
            elif pre_post_type == "POST":
                self.condition_controller.register_post_test_condition(obj, related_name)
            else:
                msg = "Unknown condition type [%s]" % pre_post_type
                LOGGER.warning(msg)
        self.condition_controller.register_observer(
            self.handle_condition_failure, 'Failed')

    def get_asterisk_hosts(self, count):
        """Return a list of host dictionaries for Asterisk instances

        Keyword Arguments:
        count  The number of Asterisk instances to create, if no remote
               Asterisk instances have been specified
        """
        if (self.global_config.config and
                'asterisk-instances' in self.global_config.config):
            asterisks = self.global_config.config.get('asterisk-instances')
        else:
            asterisks = [{'num': i + 1, 'host': '127.0.0.%d' % (i + 1)}
                         for i in range(count)]
        return asterisks

    def create_asterisk(self, count=1, base_configs_path=None):
        """Create n instances of Asterisk

        Note: if the instances of Asterisk being created are remote, the
        keyword arguments to this function are ignored.

        Keyword arguments:
        count             The number of Asterisk instances to create.  Each
                          Asterisk instance will be hosted on 127.0.0.x, where x
                          is the 1-based index of the instance created.
        base_configs_path Provides common configuration for Asterisk instances
                          to use. This is useful for certain test types that use
                          the same configuration all the time. This
                          configuration can be overwritten by individual tests,
                          however.
        """
        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            local_num = ast_config.get('num')
            host = ast_config.get('host')

            if not host:
                msg = "Cannot manage Asterisk instance without 'host'"
                raise Exception(msg)

            if local_num:
                LOGGER.info("Creating Asterisk instance %d" % local_num)
                ast_instance = Asterisk(base=self.testlogdir, host=host,
                                        ast_conf_options=self.ast_conf_options)
            else:
                LOGGER.info("Managing Asterisk instance at %s" % host)
                ast_instance = Asterisk(base=self.testlogdir, host=host,
                                        remote_config=ast_config)
            self.ast.append(ast_instance)
            self.condition_controller.register_asterisk_instance(self.ast[i])

            if local_num:
                # If a base configuration for this Asterisk instance has been
                # provided, install it first
                if base_configs_path is None:
                    base_configs_path = self.base_config_path
                if base_configs_path:
                    ast_dir = "%s/ast%d" % (base_configs_path, local_num)
                    self.ast[i].install_configs(ast_dir,
                                                self.test_config.get_deps())
                # Copy test specific config files
                self.ast[i].install_configs("%s/configs/ast%d" %
                                            (self.test_name, local_num),
                                            self.test_config.get_deps())

    def create_ami_factory(self, count=1, username="******", secret="mysecret",
                           port=5038):
        """Create n instances of AMI.  Each AMI instance will attempt to connect
        to a previously created instance of Asterisk.  When a connection is
        complete, the ami_connect method will be called.

        Keyword arguments:
        count    The number of instances of AMI to create
        username The username to login with
        secret   The password to login with
        port     The port to connect over
        """

        def on_reconnect(login_deferred):
            """Called if the connection is lost and re-made"""
            login_deferred.addCallbacks(self._ami_connect, self.ami_login_error)

        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            host = ast_config.get('host')
            ami_config = ast_config.get('ami', {})
            actual_user = ami_config.get('username', username)
            actual_secret = ami_config.get('secret', secret)
            actual_port = ami_config.get('port', port)

            self.ami.append(None)
            LOGGER.info("Creating AMIFactory %d to %s" % ((i + 1), host))
            try:
                ami_factory = manager.AMIFactory(actual_user, actual_secret, i,
                                                 on_reconnect=on_reconnect)
            except TypeError:
                # Older starpy versions lack the on_reconnect keyword
                ami_factory = manager.AMIFactory(actual_user, actual_secret, i)
            deferred = ami_factory.login(ip=host, port=actual_port)
            deferred.addCallbacks(self._ami_connect, self.ami_login_error)

    def create_fastagi_factory(self, count=1):
        """Create n instances of AGI.  Each AGI instance will attempt to connect
        to a previously created instance of Asterisk.  When a connection is
        complete, the fastagi_connect method will be called.

        Keyword arguments:
        count The number of instances of AGI to create
        """

        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            host = ast_config.get('host')

            self.fastagi.append(None)
            LOGGER.info("Creating FastAGI Factory %d" % (i + 1))
            fastagi_factory = fastagi.FastAGIFactory(self.fastagi_connect)
            reactor.listenTCP(4573, fastagi_factory,
                              self.reactor_timeout, host)

    def fastagi_connect(self, agi):
        """Callback called by starpy when FastAGI connects

        This method should be overridden by derived classes that use
        create_fastagi_factory

        Keyword arguments:
        agi The AGI manager
        """
        pass

    def create_pcap_listener(self, device=None, bpf_filter=None, dumpfile=None,
                             snaplen=None, buffer_size=None):
        """Create a single instance of a pcap listener.

        Keyword arguments:
        device      The interface to listen on. Defaults to the first interface
                    beginning with 'lo'.
        bpf_filter  BPF (filter) describing what packets to match, i.e.
                    "port 5060"
        dumpfile    The filename at which to save a pcap capture
        snaplen     Number of bytes to capture from each packet. Defaults to
                    65535.
        buffer_size The ring buffer size. Defaults to 0.

        """

        if not PCAP_AVAILABLE:
            msg = ("PCAP not available on this machine. "
                   "Test config is missing pcap dependency.")
            raise Exception(msg)

        # TestCase will create a listener for logging purposes, and individual
        # tests can create their own. Tests may only want to watch a specific
        # port, while a general logger will want to watch more general traffic
        # which can be filtered later.
        return PcapListener(device, bpf_filter, dumpfile, self._pcap_callback,
                            snaplen, buffer_size)

    def start_asterisk(self):
        """This method will be called when the reactor is running, but
        immediately before instances of Asterisk are launched. Derived classes
        can override this if needed.
        """
        pass

    def _start_asterisk(self):
        """Start the instances of Asterisk that were previously created. See
        create_asterisk. Note that this should be the first thing called
        when the reactor has started to run
        """
        def __check_success_failure(result):
            """Make sure the instances started properly"""
            for (success, value) in result:
                if not success:
                    LOGGER.error(value.getErrorMessage())
                    self.stop_reactor()
            return result

        def __perform_pre_checks(result):
            """Execute the pre-condition checks"""
            deferred = self.condition_controller.evaluate_pre_checks()
            if deferred is None:
                return result
            else:
                return deferred

        def __run_callback(result):
            """Notify the test that we are running"""
            for callback in self._start_callbacks:
                callback(self.ast)
            self.run()
            return result

        # Call the method that derived objects can override
        self.start_asterisk()

        # Gather up the deferred objects from each of the instances of Asterisk
        # and wait until all are finished before proceeding
        start_defers = []
        for index, ast in enumerate(self.ast):
            LOGGER.info("Starting Asterisk instance %d" % (index + 1))
            temp_defer = ast.start(self.test_config.get_deps())
            start_defers.append(temp_defer)

        deferred = defer.DeferredList(start_defers, consumeErrors=True)
        deferred.addCallback(__check_success_failure)
        deferred.addCallback(__perform_pre_checks)
        deferred.addCallback(__run_callback)

    def stop_asterisk(self):
        """This method is called when the reactor is running but immediately
        before instances of Asterisk are stopped. Derived classes can override
        this method if needed.
        """
        pass

    def _stop_asterisk(self):
        """Stops the instances of Asterisk.

        Returns:
        A deferred object that can be used to be notified when all instances
        of Asterisk have stopped.
         """
        def __check_success_failure(result):
            """Make sure the instances stopped properly"""
            for (success, value) in result:
                if not success:
                    LOGGER.warning(value.getErrorMessage())
                    # This should already be called when the reactor is being
                    # terminated. If we couldn't stop the instance of Asterisk,
                    # there isn't much else to do here other than complain
            self._stop_deferred.callback(self)
            return result

        def __stop_instances(result):
            """Stop the instances"""

            # Call the overridable method now
            self.stop_asterisk()
            # Gather up the stopped defers; check success failure of stopping
            # when all instances of Asterisk have stopped
            stop_defers = []
            for index, ast in enumerate(self.ast):
                LOGGER.info("Stopping Asterisk instance %d" % (index + 1))
                temp_defer = ast.stop()
                stop_defers.append(temp_defer)

            defer.DeferredList(stop_defers).addCallback(
                __check_success_failure)
            return result

        self._stop_deferred = defer.Deferred()
        deferred = self.condition_controller.evaluate_post_checks()
        if deferred:
            deferred.addCallback(__stop_instances)
        else:
            __stop_instances(None)
        return self._stop_deferred

    def stop_reactor(self):
        """Stop the reactor and cancel the test."""

        def __stop_reactor(result):
            """Called when the Asterisk instances are stopped"""
            LOGGER.info("Stopping Reactor")
            if reactor.running:
                try:
                    reactor.stop()
                except twisted_error.ReactorNotRunning:
                    # Something stopped it between our checks - at least we're
                    # stopped
                    pass
            return result
        if not self._stopping:
            self._stopping = True
            deferred = self._stop_asterisk()
            for callback in self._stop_callbacks:
                deferred.addCallback(callback)
            deferred.addCallback(__stop_reactor)

    def _reactor_timeout(self):
        """A wrapper function for stop_reactor(), so we know when a reactor
        timeout has occurred.
        """
        LOGGER.warning("Reactor timeout: '%s' seconds" % self.reactor_timeout)
        self.on_reactor_timeout()
        self.stop_reactor()

    def on_reactor_timeout(self):
        """Virtual method called when reactor times out"""
        pass

    def _run(self):
        """Private entry point called when the reactor first starts up. This
        needs to first ensure that Asterisk is fully up and running before
        moving on.
        """
        if self.ast:
            self._start_asterisk()
        else:
            # If no instances of Asterisk are needed, go ahead and just run
            self.run()

    def run(self):
        """Base implementation of the test execution method, run. Derived
        classes should override this and start their Asterisk dependent logic
        from this method.

        Derived classes must call this implementation, as this method provides
        a fail-out mechanism in case the test hangs.
        """
        if self.reactor_timeout > 0:
            self.timeout_id = reactor.callLater(self.reactor_timeout,
                                                self._reactor_timeout)

    def ami_login_error(self, reason):
        """Handler for login errors into AMI. This will stop the test.

        Keyword arguments:
        reason The failure reason for the AMI login error
        """
        LOGGER.error("Error logging into AMI: %s" % reason.getErrorMessage())
        LOGGER.error(reason.getTraceback())
        self.stop_reactor()
        return reason

    def ami_connect(self, ami):
        """Virtual method used after create_ami_factory() successfully logs into
        the Asterisk AMI.
        """
        pass

    def _ami_connect(self, ami):
        """Callback when AMI first connects"""
        LOGGER.info("AMI Connect instance %s" % (ami.id + 1))
        self.ami[ami.id] = ami
        try:
            for callback in self._ami_callbacks:
                callback(ami)
            self.ami_connect(ami)
        except Exception:
            LOGGER.error("Exception raised in ami_connect:")
            LOGGER.error(traceback.format_exc())
            self.stop_reactor()
        return ami

    def pcap_callback(self, packet):
        """Virtual method used to receive captured packets."""
        pass

    def _pcap_callback(self, packet):
        """Packet capture callback"""
        self.pcap_callback(packet)
        for callback in self._pcap_callbacks:
            callback(packet)

    def handle_originate_failure(self, reason):
        """Fail the test on an Originate failure

        Convenience callback handler for twisted deferred errors for an AMI
        originate call. Derived classes can choose to add this handler to
        originate calls in order to handle them safely when they fail.
        This will stop the test if called.

        Keyword arguments:
        reason The reason the originate failed
        """
        LOGGER.error("Error sending originate: %s" % reason.getErrorMessage())
        LOGGER.error(reason.getTraceback())
        self.stop_reactor()
        return reason

    def reset_timeout(self):
        """Resets the reactor timeout"""
        if self.timeout_id is not None:
            original_time = datetime.fromtimestamp(self.timeout_id.getTime())
            self.timeout_id.reset(self.reactor_timeout)
            new_time = datetime.fromtimestamp(self.timeout_id.getTime())
            msg = ("Reactor timeout originally scheduled for %s, "
                   "rescheduled for %s" % (str(original_time), str(new_time)))
            LOGGER.info(msg)

    def handle_condition_failure(self, test_condition):
        """Callback handler for condition failures"""
        if test_condition.pass_expected:
            msg = ("Test Condition %s failed; setting passed status to False" %
                   test_condition.get_name())
            LOGGER.error(msg)
            self.passed = False
        else:
            msg = ("Test Condition %s failed but expected failure was set; "
                   "test status not modified" % test_condition.get_name())
            LOGGER.info(msg)

    def evaluate_results(self):
        """Return whether or not the test has passed"""

        while len(self.fail_tokens):
            fail_token = self.fail_tokens.pop(0)
            LOGGER.error("Fail token present: %s" % fail_token['message'])
            self.passed = False

        return self.passed

    def register_pcap_observer(self, callback):
        """Register an observer that will be called when a packet is received
        from a created pcap listener

        Keyword Arguments:
        callback The callback to receive the packet. The callback function
                 should take in a single parameter, which will be the packet
                 received
        """
        self._pcap_callbacks.append(callback)

    def register_start_observer(self, callback):
        """Register an observer that will be called when all Asterisk instances
        have started

        Keyword Arguments:
        callback The deferred callback function to be called when all instances
                 of Asterisk have started. The callback should take no
                 parameters.
        """
        self._start_callbacks.append(callback)

    def register_stop_observer(self, callback):
        """Register an observer that will be called when Asterisk is stopped

        Keyword Arguments:
        callback The deferred callback function to be called when Asterisk is
                 stopped

        Note:
        This appends a callback to the deferred chain of callbacks executed when
        all instances of Asterisk are stopped.
        """
        self._stop_callbacks.append(callback)

    def register_ami_observer(self, callback):
        """Register an observer that will be called when TestCase connects with
        Asterisk over the Manager interface

        Parameters:
        callback The deferred callback function to be called when AMI connects
        """
        self._ami_callbacks.append(callback)

    def create_fail_token(self, message):
        """Add a fail token to the test. If any fail tokens exist at the end of
        the test, the test will fail.

        Keyword Arguments:
        message A text message describing the failure

        Returns:
        A token that can be removed from the test at a later time, if the test
        should pass
        """
        fail_token = {'uuid': uuid.uuid4(), 'message': message}
        self.fail_tokens.append(fail_token)
        return fail_token

    def remove_fail_token(self, fail_token):
        """Remove a fail token from the test.

        Keyword Arguments:
        fail_token A previously created fail token to be removed from the test
        """
        if fail_token not in self.fail_tokens:
            LOGGER.warning('Attempted to remove an unknown fail token: %s',
                           fail_token['message'])
            self.passed = False
            return
        self.fail_tokens.remove(fail_token)

    def set_passed(self, value):
        """Accumulate pass/fail value.

        If a test module has already claimed that the test has failed, then this
        method will ignore any further attempts to change the pass/fail status.
        """
        if self.passed is False:
            return
        self.passed = value
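A hedged sketch of a minimal derived test using only the hooks documented above; the class name, counts, and pass criterion are illustrative:

class SimpleConnectTest(TestCase):
    """Start one Asterisk instance and pass once AMI connects."""

    def __init__(self):
        super(SimpleConnectTest, self).__init__()
        self.create_asterisk(count=1)

    def run(self):
        # The base run() arms the reactor-timeout fail-safe.
        super(SimpleConnectTest, self).run()
        self.create_ami_factory(count=1)

    def ami_connect(self, ami):
        # Login succeeded; mark the test passed and shut everything down.
        self.set_passed(True)
        self.stop_reactor()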
Example #14
def test_prediction_across_target_variance(val_x,
                                           val_y,
                                           sigma=0,
                                           n_mc=1,
                                           hidden_layer_sizes=[32, 32],
                                           n_epochs=50,
                                           n_samples=100,
                                           input_dim=1,
                                           n_timesteps=2):
    config = TestConfig()
    config.hidden_layer_sizes = hidden_layer_sizes
    config.value_dim = 1
    config.learning_rate = 1e-3
    config.n_epochs = n_epochs

    # simple dataset
    x, y = generate_data(sigma, n_mc, n_samples, n_timesteps, input_dim)
    val_losses = []
    with tf.Session() as session:
        predictor = model.LSTMPredictor((input_dim, ), config)
        target_placeholder = tf.placeholder(tf.float32, [None, 1], 'target')
        loss = tf.reduce_sum((predictor.vf[-1] - target_placeholder)**2)
        opt = tf.train.AdamOptimizer(config.learning_rate)
        train_op = opt.minimize(loss)
        session.run(tf.global_variables_initializer())

        def run_sample(p, x, y, state_in, train=True):
            feed_dict = {
                p.x: x,
                target_placeholder: y,
                p.dropout_keep_prob_ph: 1.,
                p.state_in[0]: state_in[0],
                p.state_in[1]: state_in[1],
            }
            outputs_list = [loss]
            if train:
                outputs_list += [train_op]
            else:
                outputs_list += [p.vf[-1]]
            fetched = session.run(outputs_list, feed_dict=feed_dict)

            if train:
                val_loss, _ = fetched
                return val_loss
            else:
                val_loss, val_vf = fetched
                return val_loss, val_vf

        n_val = len(val_x)
        for epoch in range(n_epochs):

            # train
            train_loss_mean = 0
            for sidx in range(n_samples):
                train_loss = run_sample(predictor, x[sidx, :, :],
                                        y[sidx].reshape(1, -1),
                                        predictor.state_init)
                train_loss_mean += train_loss / n_samples

            # val
            val_loss_mean = 0
            for sidx in range(n_val):
                val_loss, val_vf = run_sample(predictor,
                                              val_x[sidx, :, :],
                                              val_y[sidx].reshape(1, -1),
                                              predictor.state_init,
                                              train=False)
                val_loss_mean += val_loss / n_val
                # print('x: {}\ny: {}\ny_pred: {}'.format(
                #     x[sidx,:,:], y[sidx].reshape(1,-1), val_vf))
                # input()

            # report, track
            val_losses.append(val_loss_mean)
            print('epoch: {} / {}\ttrain loss: {}\tval loss: {}'.format(
                epoch, n_epochs, train_loss_mean, val_loss_mean))

    return val_losses
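A hedged usage sketch sweeping the noise parameter; generate_data is assumed from the surrounding module (called positionally as above), and the sigma values are illustrative:

val_x, val_y = generate_data(0., 1, 20, 2, 1)
for sigma in [0., .1, .5]:
    tf.reset_default_graph()  # fresh graph for each run
    losses = test_prediction_across_target_variance(
        val_x, val_y, sigma=sigma, n_epochs=10)
    print('sigma = {}\tfinal val loss = {}'.format(sigma, losses[-1]))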
示例#15
0
class TestCase(object):
    """The base class object for python tests. This class provides common
    functionality to all tests, including management of Asterisk instances, AMI,
    twisted reactor, and various other utilities.
    """

    def __init__(self, test_path='', test_config=None):
        """Create a new instance of a TestCase. Must be called by inheriting
        classes.

        Keyword Arguments:
        test_path Optional parameter that specifies the path where this test
                  resides
        test_config Loaded YAML test configuration
        """

        if not len(test_path):
            self.test_name = os.path.dirname(sys.argv[0])
        else:
            self.test_name = test_path

        # We're not using /tmp//full//test//name because it gets so long that
        # it doesn't fit in AF_UNIX paths (limited to around 108 chars) used
        # for the rasterisk CLI connection. As a quick fix, we hash the path
        # using md5, to make it unique enough.
        self.realbase = self.test_name.replace("tests/", "", 1)
        self.base = md5(self.realbase).hexdigest()
        # We provide a symlink to it from a named path.
        named_dir = os.path.join(Asterisk.test_suite_root, self.realbase)
        try:
            os.makedirs(os.path.dirname(named_dir))
        except OSError:
            pass
        try:
            join_path = os.path.relpath(
                os.path.join(Asterisk.test_suite_root, self.base),
                os.path.dirname(named_dir)
            )
            os.symlink(join_path, named_dir)
        except OSError:
            pass

        self.ast = []
        self.ami = []
        self.fastagi = []
        self.base_config_path = None
        self.reactor_timeout = 30
        self.passed = None
        self.fail_tokens = []
        self.timeout_id = None
        self.global_config = TestConfig(os.getcwd())
        self.test_config = TestConfig(self.test_name, self.global_config)
        self.condition_controller = None
        self.pcap = None
        self.pcapfilename = None
        self.create_pcap = False
        self._stopping = False
        self.testlogdir = self._set_test_log_directory()
        self.ast_version = AsteriskVersion()
        self._start_callbacks = []
        self._stop_callbacks = []
        self._ami_callbacks = []
        self._pcap_callbacks = []
        self._stop_deferred = None
        log_full = True
        log_messages = True

        if os.getenv("VALGRIND_ENABLE") == "true":
            self.reactor_timeout *= 20

        # Pull additional configuration from YAML config if possible
        if test_config:
            if 'config-path' in test_config:
                self.base_config_path = test_config['config-path']
            if 'reactor-timeout' in test_config:
                self.reactor_timeout = test_config['reactor-timeout']
            self.ast_conf_options = test_config.get('ast-config-options')
            log_full = test_config.get('log-full', True)
            log_messages = test_config.get('log-messages', True)
        else:
            self.ast_conf_options = None

        os.makedirs(self.testlogdir)

        # Set up logging
        setup_logging(self.testlogdir, log_full, log_messages)

        LOGGER.info("Executing " + self.test_name)

        if PCAP_AVAILABLE and self.create_pcap:
            self.pcapfilename = os.path.join(self.testlogdir, "dumpfile.pcap")
            self.pcap = self.create_pcap_listener(dumpfile=self.pcapfilename)

        self._setup_conditions()

        # Enable twisted logging
        observer = log.PythonLoggingObserver()
        observer.start()

        reactor.callWhenRunning(self._run)

    def _set_test_log_directory(self):
        """Determine which logging directory we should use for this test run

        Returns:
        The full path that should be used as the directory for all log data
        """
        i = 1
        base_path = os.path.join(Asterisk.test_suite_root, self.base)
        while os.path.isdir(os.path.join(base_path, "run_%d" % i)):
            i += 1
        full_path = os.path.join(base_path, "run_%d" % i)
        return full_path

    def _setup_conditions(self):
        """Register pre and post-test conditions.

        Note that we have to first register condition checks without related
        conditions, so that those that have dependencies can find them
        """
        self.condition_controller = TestConditionController(self.test_config,
                                                            self.ast,
                                                            self.stop_reactor)
        global_conditions = self.global_config.get_conditions()
        conditions = self.test_config.get_conditions()

        # Get those global conditions that are not in the self conditions
        for g_cond in global_conditions:
            disallowed = [i for i in conditions
                          if i[0].get_name() == g_cond[0].get_name() and
                          i[1] == g_cond[1]]
            if len(disallowed) == 0:
                conditions.append(g_cond)

        for cond in conditions:
            # cond is a 3-tuple of object, pre-post type, and related name
            obj, pre_post_type, related_name = cond
            if pre_post_type == "PRE":
                self.condition_controller.register_pre_test_condition(obj)
            elif pre_post_type == "POST":
                self.condition_controller.register_post_test_condition(obj, related_name)
            else:
                msg = "Unknown condition type [%s]" % pre_post_type
                LOGGER.warning(msg)
        self.condition_controller.register_observer(
            self.handle_condition_failure, 'Failed')

    def get_asterisk_hosts(self, count):
        """Return a list of host dictionaries for Asterisk instances

        Keyword Arguments:
        count  The number of Asterisk instances to create, if no remote
               Asterisk instances have been specified
        """
        if (self.global_config.config and
                'asterisk-instances' in self.global_config.config):
            asterisks = self.global_config.config.get('asterisk-instances')
        else:
            asterisks = [{'num': i + 1, 'host': '127.0.0.%d' % (i + 1)}
                         for i in range(count)]
        return asterisks
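
    # Shape of a global-config 'asterisk-instances' entry, as consumed here
    # and by create_ami_factory() below (sketched from the code; the host
    # and credentials are illustrative):
    #
    #     asterisk-instances:
    #         - num: 1
    #           host: '10.0.0.5'
    #           ami: {username: 'user', secret: 'mysecret', port: 5038}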

    def create_asterisk(self, count=1, base_configs_path=None):
        """Create n instances of Asterisk

        Note: if the instances of Asterisk being created are remote, the
        keyword arguments to this function are ignored.

        Keyword arguments:
        count             The number of Asterisk instances to create.  Each
                          Asterisk instance will be hosted on 127.0.0.x, where x
                          is the 1-based index of the instance created.
        base_configs_path Provides common configuration for Asterisk instances
                          to use. This is useful for certain test types that use
                          the same configuration all the time. This
                          configuration can be overwritten by individual tests,
                          however.
        """
        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            local_num = ast_config.get('num')
            host = ast_config.get('host')

            if not host:
                msg = "Cannot manage Asterisk instance without 'host'"
                raise Exception(msg)

            if local_num:
                LOGGER.info("Creating Asterisk instance %d" % local_num)
                ast_instance = Asterisk(base=self.testlogdir, host=host,
                                        ast_conf_options=self.ast_conf_options)
            else:
                LOGGER.info("Managing Asterisk instance at %s" % host)
                ast_instance = Asterisk(base=self.testlogdir, host=host,
                                        remote_config=ast_config)
            self.ast.append(ast_instance)
            self.condition_controller.register_asterisk_instance(self.ast[i])

            if local_num:
                # If a base configuration for this Asterisk instance has been
                # provided, install it first
                if base_configs_path is None:
                    base_configs_path = self.base_config_path
                if base_configs_path:
                    ast_dir = "%s/ast%d" % (base_configs_path, local_num)
                    self.ast[i].install_configs(ast_dir,
                                                self.test_config.get_deps())
                # Copy test specific config files
                self.ast[i].install_configs("%s/configs/ast%d" %
                                            (self.test_name, local_num),
                                            self.test_config.get_deps())

    def create_ami_factory(self, count=1, username="******", secret="mysecret",
                           port=5038):
        """Create n instances of AMI.  Each AMI instance will attempt to connect
        to a previously created instance of Asterisk.  When a connection is
        complete, the ami_connect method will be called.

        Keyword arguments:
        count    The number of instances of AMI to create
        username The username to login with
        secret   The password to login with
        port     The port to connect over
        """

        def on_reconnect(login_deferred):
            """Called if the connection is lost and re-made"""
            login_deferred.addCallbacks(self._ami_connect, self.ami_login_error)

        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            host = ast_config.get('host')
            ami_config = ast_config.get('ami', {})
            actual_user = ami_config.get('username', username)
            actual_secret = ami_config.get('secret', secret)
            actual_port = ami_config.get('port', port)

            self.ami.append(None)
            LOGGER.info("Creating AMIFactory %d to %s" % ((i + 1), host))
            try:
                ami_factory = manager.AMIFactory(actual_user, actual_secret, i,
                                                 on_reconnect=on_reconnect)
            except TypeError:
                ami_factory = manager.AMIFactory(actual_user, actual_secret, i)
            deferred = ami_factory.login(ip=host, port=actual_port)
            deferred.addCallbacks(self._ami_connect, self.ami_login_error)
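
    # Usage sketch (hypothetical handler): derived tests override
    # ami_connect() to start driving Asterisk once the AMI login completes:
    #
    #     def ami_connect(self, ami):
    #         ami.registerEvent('Newchannel', self.on_new_channel)
    #
    # registerEvent is the starpy AMIProtocol call commonly used for this;
    # the event and handler names are assumptions.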

    def create_fastagi_factory(self, count=1):
        """Create n instances of AGI.  Each AGI instance will attempt to connect
        to a previously created instance of Asterisk.  When a connection is
        complete, the fastagi_connect method will be called.

        Keyword arguments:
        count The number of instances of AGI to create
        """

        for i, ast_config in enumerate(self.get_asterisk_hosts(count)):
            host = ast_config.get('host')

            self.fastagi.append(None)
            LOGGER.info("Creating FastAGI Factory %d" % (i + 1))
            fastagi_factory = fastagi.FastAGIFactory(self.fastagi_connect)
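            # Twisted's listenTCP signature is (port, factory, backlog,
            # interface), so reactor_timeout is passed positionally here as
            # the listen backlog.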
            reactor.listenTCP(4573, fastagi_factory,
                              self.reactor_timeout, host)

    def fastagi_connect(self, agi):
        """Callback called by starpy when FastAGI connects

        This method should be overridden by derived classes that use
        create_fastagi_factory

        Keyword arguments:
        agi The AGI manager
        """
        pass

    def create_pcap_listener(self, device=None, bpf_filter=None, dumpfile=None,
                             snaplen=None, buffer_size=None):
        """Create a single instance of a pcap listener.

        Keyword arguments:
        device      The interface to listen on. Defaults to the first interface
                    beginning with 'lo'.
        bpf_filter  BPF (filter) describing what packets to match, i.e.
                    "port 5060"
        dumpfile    The filename at which to save a pcap capture
        snaplen     Number of bytes to capture from each packet. Defaults to
                    65535.
        buffer_size The ring buffer size. Defaults to 0.

        """

        if not PCAP_AVAILABLE:
            msg = ("PCAP not available on this machine. "
                   "Test config is missing pcap dependency.")
            raise Exception(msg)

        # TestCase will create a listener for logging purposes, and individual
        # tests can create their own. Tests may only want to watch a specific
        # port, while a general logger will want to watch more general traffic
        # which can be filtered later.
        return PcapListener(device, bpf_filter, dumpfile, self._pcap_callback,
                            snaplen, buffer_size)
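
    # Usage sketch (hypothetical filter and handler): a test interested only
    # in SIP traffic could create its own filtered listener and observe the
    # matching packets:
    #
    #     self.create_pcap_listener(bpf_filter='port 5060')
    #     self.register_pcap_observer(self.on_sip_packet)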

    def start_asterisk(self):
        """This method will be called when the reactor is running, but
        immediately before instances of Asterisk are launched. Derived classes
        can override this if needed.
        """
        pass

    def _start_asterisk(self):
        """Start the instances of Asterisk that were previously created. See
        create_asterisk. Note that this should be the first thing called
        when the reactor has started to run
        """
        def __check_success_failure(result):
            """Make sure the instances started properly"""
            for (success, value) in result:
                if not success:
                    LOGGER.error(value.getErrorMessage())
                    self.stop_reactor()
            return result

        def __perform_pre_checks(result):
            """Execute the pre-condition checks"""
            deferred = self.condition_controller.evaluate_pre_checks()
            if deferred is None:
                return result
            else:
                return deferred

        def __run_callback(result):
            """Notify the test that we are running"""
            for callback in self._start_callbacks:
                callback(self.ast)
            self.run()
            return result

        # Call the method that derived objects can override
        self.start_asterisk()

        # Gather up the deferred objects from each of the instances of Asterisk
        # and wait until all are finished before proceeding
        start_defers = []
        for index, ast in enumerate(self.ast):
            LOGGER.info("Starting Asterisk instance %d" % (index + 1))
            temp_defer = ast.start(self.test_config.get_deps())
            start_defers.append(temp_defer)

        deferred = defer.DeferredList(start_defers, consumeErrors=True)
        deferred.addCallback(__check_success_failure)
        deferred.addCallback(__perform_pre_checks)
        deferred.addCallback(__run_callback)

    def stop_asterisk(self):
        """This method is called when the reactor is running but immediately
        before instances of Asterisk are stopped. Derived classes can override
        this method if needed.
        """
        pass

    def _stop_asterisk(self):
        """Stops the instances of Asterisk.

        Returns:
        A deferred object that can be used to be notified when all instances
        of Asterisk have stopped.
         """
        def __check_success_failure(result):
            """Make sure the instances stopped properly"""
            for (success, value) in result:
                if not success:
                    LOGGER.warning(value.getErrorMessage())
                    # This should already be called when the reactor is being
                    # terminated. If we couldn't stop the instance of Asterisk,
                    # there isn't much else to do here other than complain
            self._stop_deferred.callback(self)
            return result

        def __stop_instances(result):
            """Stop the instances"""

            # Call the overridable method now
            self.stop_asterisk()
            # Gather up the stopped defers; check success failure of stopping
            # when all instances of Asterisk have stopped
            stop_defers = []
            for index, ast in enumerate(self.ast):
                LOGGER.info("Stopping Asterisk instance %d" % (index + 1))
                temp_defer = ast.stop()
                stop_defers.append(temp_defer)

            defer.DeferredList(stop_defers).addCallback(
                __check_success_failure)
            return result

        self._stop_deferred = defer.Deferred()
        deferred = self.condition_controller.evaluate_post_checks()
        if deferred:
            deferred.addCallback(__stop_instances)
        else:
            __stop_instances(None)
        return self._stop_deferred

    def stop_reactor(self):
        """Stop the reactor and cancel the test."""

        def __stop_reactor(result):
            """Called when the Asterisk instances are stopped"""
            LOGGER.info("Stopping Reactor")
            if reactor.running:
                try:
                    reactor.stop()
                except twisted_error.ReactorNotRunning:
                    # Something stopped it between our checks - at least we're
                    # stopped
                    pass
            return result
        if not self._stopping:
            self._stopping = True
            deferred = self._stop_asterisk()
            for callback in self._stop_callbacks:
                deferred.addCallback(callback)
            deferred.addCallback(__stop_reactor)

    def _reactor_timeout(self):
        """A wrapper function for stop_reactor(), so we know when a reactor
        timeout has occurred.
        """
        if not self._stopping:
            LOGGER.warning("Reactor timeout: '%s' seconds" % self.reactor_timeout)
            self.on_reactor_timeout()
            self.stop_reactor()
        else:
            LOGGER.info("Reactor timeout: '%s' seconds (ignored; already stopping)"
                        % self.reactor_timeout)

    def on_reactor_timeout(self):
        """Virtual method called when reactor times out"""
        pass

    def _run(self):
        """Private entry point called when the reactor first starts up. This
        needs to first ensure that Asterisk is fully up and running before
        moving on.
        """
        if self.ast:
            self._start_asterisk()
        else:
            # If no instances of Asterisk are needed, go ahead and just run
            self.run()

    def run(self):
        """Base implementation of the test execution method, run. Derived
        classes should override this and start their Asterisk dependent logic
        from this method.

        Derived classes must call this implementation, as this method provides a
        fail out mechanism in case the test hangs.
        """
        if self.reactor_timeout > 0:
            self.timeout_id = reactor.callLater(self.reactor_timeout,
                                                self._reactor_timeout)
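
    # Assumed override pattern: derived classes call up first so the
    # fail-out timer is armed, then begin their own logic:
    #
    #     def run(self):
    #         super(MyTest, self).run()
    #         self.create_ami_factory()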

    def ami_login_error(self, reason):
        """Handler for login errors into AMI. This will stop the test.

        Keyword arguments:
        reason The twisted failure describing why the AMI login failed
        """
        LOGGER.error("Error logging into AMI: %s" % reason.getErrorMessage())
        LOGGER.error(reason.getTraceback())
        self.stop_reactor()
        return reason

    def ami_connect(self, ami):
        """Virtual method used after create_ami_factory() successfully logs into
        the Asterisk AMI.
        """
        pass

    def _ami_connect(self, ami):
        """Callback when AMI first connects"""
        LOGGER.info("AMI Connect instance %s" % (ami.id + 1))
        self.ami[ami.id] = ami
        try:
            for callback in self._ami_callbacks:
                callback(ami)
            self.ami_connect(ami)
        except Exception:
            LOGGER.error("Exception raised in ami_connect:")
            LOGGER.error(traceback.format_exc())
            self.stop_reactor()
        return ami

    def pcap_callback(self, packet):
        """Virtual method used to receive captured packets."""
        pass

    def _pcap_callback(self, packet):
        """Packet capture callback"""
        self.pcap_callback(packet)
        for callback in self._pcap_callbacks:
            callback(packet)

    def handle_originate_failure(self, reason):
        """Fail the test on an Originate failure

        Convenience callback handler for twisted deferred errors for an AMI
        originate call. Derived classes can choose to add this handler to
        originate calls in order to handle them safely when they fail.
        This will stop the test if called.

        Keyword arguments:
        reason The reason the originate failed
        """
        LOGGER.error("Error sending originate: %s" % reason.getErrorMessage())
        LOGGER.error(reason.getTraceback())
        self.stop_reactor()
        return reason

    def reset_timeout(self):
        """Resets the reactor timeout"""
        if self.timeout_id is not None:
            original_time = datetime.fromtimestamp(self.timeout_id.getTime())
            self.timeout_id.reset(self.reactor_timeout)
            new_time = datetime.fromtimestamp(self.timeout_id.getTime())
            msg = ("Reactor timeout originally scheduled for %s, "
                   "rescheduled for %s" % (str(original_time), str(new_time)))
            LOGGER.info(msg)

    def handle_condition_failure(self, test_condition):
        """Callback handler for condition failures"""
        if test_condition.pass_expected:
            msg = ("Test Condition %s failed; setting passed status to False" %
                   test_condition.get_name())
            LOGGER.error(msg)
            self.passed = False
        else:
            msg = ("Test Condition %s failed but expected failure was set; "
                   "test status not modified" % test_condition.get_name())
            LOGGER.info(msg)

    def evaluate_results(self):
        """Return whether or not the test has passed"""

        while self.fail_tokens:
            fail_token = self.fail_tokens.pop(0)
            LOGGER.error("Fail token present: %s" % fail_token['message'])
            self.passed = False

        return self.passed

    def register_pcap_observer(self, callback):
        """Register an observer that will be called when a packet is received
        from a created pcap listener

        Keyword Arguments:
        callback The callback to receive the packet. The callback function
                 should take in a single parameter, which will be the packet
                 received
        """
        self._pcap_callbacks.append(callback)

    def register_start_observer(self, callback):
        """Register an observer that will be called when all Asterisk instances
        have started

        Keyword Arguments:
        callback The deferred callback function to be called when all instances
                 of Asterisk have started. The callback should take no
                 parameters.
        """
        self._start_callbacks.append(callback)

    def register_stop_observer(self, callback):
        """Register an observer that will be called when Asterisk is stopped

        Keyword Arguments:
        callback The deferred callback function to be called when Asterisk is
                 stopped

        Note:
        This appends a callback to the deferred chain of callbacks executed when
        all instances of Asterisk are stopped.
        """
        self._stop_callbacks.append(callback)
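
    # Sketch of a stop observer: it is appended to the shutdown deferred
    # chain, so it should return its result to keep the chain intact:
    #
    #     def on_asterisk_stopped(result):
    #         LOGGER.info("All Asterisk instances are down")
    #         return result
    #
    #     self.register_stop_observer(on_asterisk_stopped)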

    def register_ami_observer(self, callback):
        """Register an observer that will be called when TestCase connects with
        Asterisk over the Manager interface

        Parameters:
        callback The deferred callback function to be called when AMI connects
        """
        self._ami_callbacks.append(callback)

    def create_fail_token(self, message):
        """Add a fail token to the test. If any fail tokens exist at the end of
        the test, the test will fail.

        Keyword Arguments:
        message A text message describing the failure

        Returns:
        A token that can be removed from the test at a later time, if the test
        should pass
        """
        fail_token = {'uuid': uuid.uuid4(), 'message': message}
        self.fail_tokens.append(fail_token)
        return fail_token

    def remove_fail_token(self, fail_token):
        """Remove a fail token from the test.

        Keyword Arguments:
        fail_token A previously created fail token to be removed from the test
        """
        if fail_token not in self.fail_tokens:
            LOGGER.warning('Attempted to remove an unknown fail token: %s',
                           fail_token['message'])
            self.passed = False
            return
        self.fail_tokens.remove(fail_token)
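
    # Usage sketch: pair a fail token with an expected event so the test
    # fails if the event never arrives (the message is illustrative):
    #
    #     token = self.create_fail_token('BridgeEnter was never received')
    #     # ...later, in the handler that observes BridgeEnter...
    #     self.remove_fail_token(token)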

    def set_passed(self, value):
        """Accumulate pass/fail value.

        If a test module has already claimed that the test has failed, then this
        method will ignore any further attempts to change the pass/fail status.
        """
        if self.passed is False:
            return
        self.passed = value
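
A minimal end-to-end sketch of a derived test (the class name and pass/fail logic are hypothetical; the APIs are the TestCase methods defined above):

class SimpleStartupTest(TestCase):
    def __init__(self):
        super(SimpleStartupTest, self).__init__()
        self.create_asterisk(count=1)

    def run(self):
        # Arms the fail-out timer before starting test logic
        super(SimpleStartupTest, self).run()
        self.create_ami_factory(count=1)

    def ami_connect(self, ami):
        # AMI login succeeded; mark the test as passed and shut down
        self.set_passed(True)
        self.stop_reactor()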
Example #16
    def __init__(self, test_path='', test_config=None):
        """Create a new instance of a TestCase. Must be called by inheriting
        classes.

        Keyword Arguments:
        test_path Optional parameter that specifies the path where this test
                  resides
        test_config Loaded YAML test configuration
        """

        if not test_path:
            self.test_name = os.path.dirname(sys.argv[0])
        else:
            self.test_name = test_path

        # We're not using the full test name under /tmp because it gets so
        # long that it doesn't fit in AF_UNIX paths (limited to around 108
        # chars) used for the rasterisk CLI connection. As a quick fix, we
        # hash the path using md5 to make it unique enough.
        self.realbase = self.test_name.replace("tests/", "", 1)
        self.base = md5(self.realbase).hexdigest()
        # We provide a symlink to it from a named path.
        named_dir = os.path.join(Asterisk.test_suite_root, self.realbase)
        try:
            os.makedirs(os.path.dirname(named_dir))
        except OSError:
            pass
        try:
            join_path = os.path.relpath(
                os.path.join(Asterisk.test_suite_root, self.base),
                os.path.dirname(named_dir)
            )
            os.symlink(join_path, named_dir)
        except OSError:
            pass

        self.ast = []
        self.ami = []
        self.fastagi = []
        self.base_config_path = None
        self.reactor_timeout = 30
        self.passed = None
        self.fail_tokens = []
        self.timeout_id = None
        self.global_config = TestConfig(os.getcwd())
        self.test_config = TestConfig(self.test_name, self.global_config)
        self.condition_controller = None
        self.pcap = None
        self.pcapfilename = None
        self.create_pcap = False
        self._stopping = False
        self.testlogdir = self._set_test_log_directory()
        self.ast_version = AsteriskVersion()
        self._start_callbacks = []
        self._stop_callbacks = []
        self._ami_callbacks = []
        self._pcap_callbacks = []
        self._stop_deferred = None
        log_full = True
        log_messages = True

        if os.getenv("VALGRIND_ENABLE") == "true":
            self.reactor_timeout *= 20

        # Pull additional configuration from YAML config if possible
        if test_config:
            if 'config-path' in test_config:
                self.base_config_path = test_config['config-path']
            if 'reactor-timeout' in test_config:
                self.reactor_timeout = test_config['reactor-timeout']
            self.ast_conf_options = test_config.get('ast-config-options')
            log_full = test_config.get('log-full', True)
            log_messages = test_config.get('log-messages', True)
        else:
            self.ast_conf_options = None

        os.makedirs(self.testlogdir)

        # Set up logging
        setup_logging(self.testlogdir, log_full, log_messages)

        LOGGER.info("Executing " + self.test_name)

        if PCAP_AVAILABLE and self.create_pcap:
            self.pcapfilename = os.path.join(self.testlogdir, "dumpfile.pcap")
            self.pcap = self.create_pcap_listener(dumpfile=self.pcapfilename)

        self._setup_conditions()

        # Enable twisted logging
        observer = log.PythonLoggingObserver()
        observer.start()

        reactor.callWhenRunning(self._run)
Example #17
    def test_validate_const_reward_discounted_env(self):
        # config
        config = TestConfig()
        config.n_global_steps = 50000
        config.env_id = 'RandObsConstRewardEnv-v0'
        config.discount = .9
        config.value_dim = 2
        config.adam_beta1 = .9
        config.local_steps_per_update = 1000
        config.hidden_layer_sizes = [256]
        config.learning_rate = 1e-3
        config.learning_rate_end = 1e-5
        config.loss_type = 'mse'
        config.target_loss_index = None

        # build env
        const_reward = .01
        horizon = 10000000
        rand_obs = False
        env = debug_envs.RandObsConstRewardEnv(horizon=horizon,
                                               reward=const_reward,
                                               value_dim=config.value_dim,
                                               rand_obs=rand_obs)
        env.spec = gym.envs.registration.EnvSpec(
            id='RandObsConstRewardEnv-v0',
            tags={'wrapper_config.TimeLimit.max_episode_steps': horizon + 1})

        n_samples = 2
        n_timesteps = 10  # predict after seeing this many timesteps
        n_prediction_timesteps = 10  # determines discount
        input_dim = 1
        obs_gen = np.random.randn if rand_obs else np.ones
        x = obs_gen(np.prod((n_samples, n_timesteps, input_dim))).reshape(
            (n_samples, n_timesteps, input_dim))
        y = (const_reward * np.ones(
            (n_samples, config.value_dim)) * n_prediction_timesteps)
        w = np.ones((n_samples, 1))
        dataset = validation.Dataset(x, y, w)

        # run it
        summary_writer = tf.summary.FileWriter('/tmp/test')
        avg_loss = -1
        with tf.Session() as sess:
            trainer = async_td.AsyncTD(env, 0, config)
            sess.run(tf.global_variables_initializer())
            sess.run(trainer.sync)
            trainer.start(sess, summary_writer)
            global_step = sess.run(trainer.global_step)
            while global_step < config.n_global_steps:
                trainer.process(sess)
                if global_step % 10 == 0:
                    avg_loss = trainer.validate(sess, dataset)
                global_step = sess.run(trainer.global_step)
Example #18
    'order: 5',
    'Goal accepted with ID:',
    'Result:',
    'sequence:',
    '0',
    '1',
    '2',
    '3',
    '5',
    'Goal finished with status: SUCCEEDED',
]

configs = [
    TestConfig(
        command='action',
        arguments=['info', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output + ['/fibonacci_action_server'],
    ),
    TestConfig(
        command='action',
        arguments=['info', '-t', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output + [
            '/fibonacci_action_server [action_tutorials_interfaces/action/Fibonacci]'
        ],
    ),
    TestConfig(
        command='action',
        arguments=['info', '-c', '/fibonacci'],
        actions=[get_action_server_node_action()],
        expected_output=common_info_output,
Example #19
def pytest_configure(config):
    """ configure webdriver based on cmd-line arguments """

    # load config (ini and cmd-line)
    test_config = TestConfig(config)
    TestLog.configure()

    log.debug("pytest_configure")
    log.debug(test_config)

    # download drivers (if using Local drivers)
    if not (test_config.use_browserstack or test_config.use_selenoid):
        LocalDriverFactory.download_drivers(test_config.browsers_list)

    class DriverPlugin:
        """ Driver plugin class """
        @pytest.fixture(autouse=True,
                        params=test_config.browsers_list,
                        scope="function")
        def driver(self, request):
            """ web driver fixture """

            # init browser options
            options = BrowserOptions()

            options.browser_type = Browser[request.param]
            options.headless = test_config.headless
            options.window_size = test_config.win_size
            options.timeout = test_config.timeout
            options.use_browserstack = test_config.use_browserstack
            options.use_selenoid = test_config.use_selenoid
            options.hub_url = test_config.hub_url

            log.debug("Create 'driver' fixture: {}".format(options))

            # get webdriver instance
            if options.use_browserstack:
                d = BsDriverFactory.get_driver(options)
            elif options.use_selenoid:
                d = SelenoidDriverFactory.get_driver(options)
            else:
                d = LocalDriverFactory.get_driver(options)

            yield d

            try:
                if request.node.rep_call.failed:
                    log.error("Test '{}' failed!".format(
                        request.function.__name__))
                    try:
                        allure.attach(
                            d.get_screenshot_as_png(),
                            name='screenshot on fail',
                            attachment_type=allure.attachment_type.PNG)
                    except Exception:
                        log.warning(
                            "Unable to attach screenshot to allure report")
            finally:
                # finalization
                d.quit()
                log.debug("'driver' fixture finalized")

    # register plugin
    config.pluginmanager.register(DriverPlugin())
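
A quick sketch of a test that would pick up the autouse driver fixture registered above (the URL and assertion are illustrative assumptions):

def test_homepage_title(driver):
    # DriverPlugin yields one configured webdriver per browser param
    driver.get("https://example.com")
    assert "Example" in driver.title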