Example #1
    def test_reload(self, mock_logging):
        logger = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
        logger0 = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
        logger2 = multistructlog.create_logger({'version': 1, 'foo': 'notbar'})
        self.assertEqual(logger, logger0)
        self.assertNotEqual(logger, logger2)

        # Only two distinct configs, so only two handlers are created
        self.assertEqual(mock_logging.StreamHandler.call_count, 2)
Example #2
    def test_reload(self):
        '''
        Test that creating multiple identical loggers will reuse
        existing loggers
        '''

        logger0 = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
        logger1 = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
        logger2 = multistructlog.create_logger()

        self.assertEqual(logger0, logger1)
        self.assertNotEqual(logger0, logger2)
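
The reuse behavior exercised above can also be checked directly. A minimal sketch (assumes only that multistructlog is importable; the equality check mirrors the assertions in the tests rather than assuming object identity):

import multistructlog

log_a = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
log_b = multistructlog.create_logger({'version': 1, 'foo': 'bar'})
log_c = multistructlog.create_logger({'version': 1, 'foo': 'other'})

assert log_a == log_b  # identical configs: the existing logger is reused
assert log_a != log_c  # different config: a new logger is created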
Example #3
def main():

    log = create_logger(Config().get("logging"))

    models_active = False
    wait = False
    while not models_active:
        try:
            _ = Instance.objects.first()
            _ = NetworkTemplate.objects.first()
            models_active = True
        except Exception as e:
            log.exception("Exception", e=e)
            log.info("Waiting for data model to come up before starting...")
            time.sleep(10)
            wait = True

    if wait:
        # Safety factor, seeing that we stumbled waiting for the data model to come up.
        time.sleep(60)

    # start model policies thread
    policies_dir = Config.get("model_policies_dir")

    XOSPolicyEngine(policies_dir=policies_dir, log=log).run()
Example #4
    def test_level(self, mock_logging):
        logger = multistructlog.create_logger({'version':1, 'foo':'x'})
        logger.info('Test 1')
        logger.debug('Test 2')

        # Default level is INFO
        self.assertEqual(mock_logging.StreamHandler.call_count, 1)
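
Example #9 below overrides this default explicitly. A minimal sketch of the same idea (only the level keyword is taken from that example; the config here is illustrative):

import multistructlog

log = multistructlog.create_logger({'version': 1}, level='DEBUG')
log.debug('now emitted')  # would be filtered at the default INFO level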
Example #5
def main(args):
    # check if config path is set
    config_file_path = DEFAULT_CONFIG_FILE_PATH
    if args.config:
        config_file_path = args.config

    if os.path.exists(config_file_path):
        # read config
        config = read_config(config_file_path)
        if config:
            global progargs
            for k in progargs:
                # overwrite
                if k in config:
                    progargs[k] = config[k]

    global log
    log = create_logger(progargs["logging"])

    if args.controller:
        progargs['controller_url'] = args.controller

    if args.cmd:
        if args.cmd.strip().lower() in ['reg', 'register', 'register_workflow']:
            results = register_workflow(args.cmd_args)
            print(results)
        elif args.cmd.strip().lower() in ['emit', 'send', 'event', 'message']:
            results = emit_event(args.cmd_args)
            print(results)
        else:
            log.error('unknown command %s' % args.cmd)
            raise InputError('unknown command %s' % args.cmd)
Example #6
    def init():

        global log
        global kafka_producer

        if not log:
            log = create_logger(Config().get("logging"))

        if kafka_producer:
            raise Exception("XOSKafkaProducer already initialized")

        else:
            log.info("Connecting to Kafka with bootstrap servers: %s" %
                     Config.get("kafka_bootstrap_servers"))

            try:
                producer_config = {
                    "bootstrap.servers":
                    ",".join(Config.get("kafka_bootstrap_servers"))
                }

                kafka_producer = confluent_kafka.Producer(**producer_config)

                log.info("Connected to Kafka: %s" % kafka_producer)

            except confluent_kafka.KafkaError as e:
                log.exception("Kafka Error: %s" % e)
Example #7
    def init():

        global log
        global kafka_producer

        if not log:
            log = create_logger(Config().get("logging"))

        if kafka_producer:
            raise Exception("XOSKafkaProducer already initialized")

        else:
            log.info(
                "Connecting to Kafka with bootstrap servers: %s"
                % Config.get("kafka_bootstrap_servers")
            )

            try:
                producer_config = {
                    "bootstrap.servers": ",".join(Config.get("kafka_bootstrap_servers"))
                }

                kafka_producer = confluent_kafka.Producer(**producer_config)

                log.info("Connected to Kafka: %s" % kafka_producer)

            except confluent_kafka.KafkaError as e:
                log.exception("Kafka Error: %s" % e)
Example #8
    def __init__(self):
        self.db_name = Config().get("database.name")
        self.db_username = Config().get("database.username")
        self.db_password = Config().get("database.password")
        self.db_host = "xos-db"
        self.db_port = "5432"
        self.log = create_logger(Config().get("logging"))
Example #9
    def test_override_level(self, mock_logging):
        logger = multistructlog.create_logger(self.config, level='DEBUG')

        logger.info('Test 1')
        logger.debug('Test 2')

        self.assertEqual(mock_logging.StreamHandler.call_count, 2)
Example #10
File: main.py Project: opencord/xos
def configure_logging(verbose):
    global log
    # INITIALIZING LOGGER
    Config.init()

    cfg = Config().get("logging")
    if verbose:
        cfg["handlers"]["console"]["level"] = "DEBUG"

    log = create_logger(cfg)
Example #12
    def setUp(self):

        self.sys_path_save = sys.path

        # Setting up the config module
        from xosconfig import Config
        config = os.path.join(test_path, "../test_config.yaml")
        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")
        from multistructlog import create_logger
        log = create_logger(Config().get('logging'))
        # END Setting up the config module

        from xossynchronizer.mock_modelaccessor_build import mock_modelaccessor_config
        mock_modelaccessor_config(test_path, [("dt-workflow-driver", "dt-workflow-driver.xproto"),
                                              ("olt-service", "volt.xproto"),
                                              ("rcord", "rcord.xproto")])

        import xossynchronizer.modelaccessor
        import mock_modelaccessor
        reload(mock_modelaccessor)  # in case nose2 loaded it in a previous test
        reload(xossynchronizer.modelaccessor)  # in case nose2 loaded it in a previous test

        from xossynchronizer.modelaccessor import model_accessor
        from pppoe_event import SubscriberPppoeEventStep

        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v

        self.model_accessor = model_accessor
        self.log = log

        self.event_step = SubscriberPppoeEventStep(model_accessor=self.model_accessor, log=self.log)

        self.event = Mock()

        self.volt = Mock()
        self.volt.name = "vOLT"
        self.volt.leaf_model = Mock()

        # self.subscriber = RCORDSubscriber()
        # self.subscriber.onu_device = "BRCM1234"
        # self.subscriber.save = Mock()

        self.mac_address = "00:AA:00:00:00:01"
        self.ip_address = "192.168.3.5"
        self.pppoe_session_id = "12"

        self.si = DtWorkflowDriverServiceInstance()
        self.si.serial_number = "BRCM1234"
        self.si.save = Mock()
Example #13
    def setUpClass(cls):

        global log

        config = os.path.join(test_path, "test_config.yaml")
        from xosconfig import Config

        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")

        if not log:
            from multistructlog import create_logger

            log = create_logger(Config().get("logging"))
Example #14
    def setUp(self):

        self.sys_path_save = sys.path
        sys.path.append(xos_dir)
        sys.path.append(os.path.join(xos_dir, 'synchronizers', 'new_base'))

        # Setting up the config module
        from xosconfig import Config
        config = os.path.join(test_path, "../test_config.yaml")
        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")
        from multistructlog import create_logger
        log = create_logger(Config().get('logging'))
        # END Setting up the config module

        from synchronizers.new_base.mock_modelaccessor_build import build_mock_modelaccessor
        # build_mock_modelaccessor(xos_dir, services_dir, [get_models_fn("olt-service", "volt.xproto")])

        build_mock_modelaccessor(xos_dir, services_dir, [
            get_models_fn("hippie-oss", "hippie-oss.xproto"),
            get_models_fn("olt-service", "volt.xproto"),
            get_models_fn("rcord", "rcord.xproto")
        ])
        import synchronizers.new_base.modelaccessor
        from dhcp_event import SubscriberDhcpEventStep, model_accessor

        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v

        self.log = log

        self.event_step = SubscriberDhcpEventStep(self.log)

        self.event = Mock()

        self.volt = Mock()
        self.volt.name = "vOLT"
        self.volt.leaf_model = Mock()

        self.subscriber = RCORDSubscriber()
        self.subscriber.onu_device = "BRCM1234"
        self.subscriber.save = Mock()

        self.mac_address = "aa:bb:cc:dd:ee"
        self.ip_address = "192.168.3.5"
Example #15
    def setUp(self):

        self.sys_path_save = sys.path

        # Setting up the config module
        from xosconfig import Config
        config = os.path.join(test_path, "../test_config.yaml")
        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")
        from multistructlog import create_logger
        log = create_logger(Config().get('logging'))
        # END Setting up the config module

        from xossynchronizer.mock_modelaccessor_build import mock_modelaccessor_config
        mock_modelaccessor_config(
            test_path, [("att-workflow-driver", "att-workflow-driver.xproto"),
                        ("olt-service", "volt.xproto"),
                        ("rcord", "rcord.xproto")])

        import xossynchronizer.modelaccessor
        import mock_modelaccessor
        reload(mock_modelaccessor)  # in case nose2 loaded it in a previous test
        reload(xossynchronizer.modelaccessor)  # in case nose2 loaded it in a previous test

        from xossynchronizer.modelaccessor import model_accessor
        from auth_event import SubscriberAuthEventStep

        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v

        self.model_accessor = model_accessor
        self.log = log

        self.event_step = SubscriberAuthEventStep(
            model_accessor=self.model_accessor, log=self.log)

        self.event = Mock()

        self.att_si = AttWorkflowDriverServiceInstance()
        self.att_si.serial_number = "BRCM1234"
        self.att_si.save = Mock()
Example #16
    def setUp(self):
        global log, steps, event_loop

        self.sys_path_save = sys.path
        self.cwd_save = os.getcwd()
        sys.path.append(xos_dir)
        sys.path.append(os.path.join(xos_dir, 'synchronizers', 'new_base'))
        sys.path.append(os.path.join(xos_dir, 'synchronizers', 'new_base', 'tests', 'steps'))

        config = os.path.join(test_path, "test_config.yaml")
        from xosconfig import Config
        Config.clear()
        Config.init(config, 'synchronizer-config-schema.yaml')

        from synchronizers.new_base.mock_modelaccessor_build import build_mock_modelaccessor
        build_mock_modelaccessor(xos_dir,
                                 services_dir=None,
                                 service_xprotos=[])

        # config references tests/model-deps
        os.chdir(os.path.join(test_path, '..'))

        import event_loop
        reload(event_loop)
        import backend
        reload(backend)
        import steps.sync_instances
        import steps.sync_controller_slices
        from modelaccessor import model_accessor

        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v

        from multistructlog import create_logger
        log = create_logger()

        b = backend.Backend()
        steps_dir = Config.get("steps_dir")
        self.steps = b.load_sync_step_modules(steps_dir)
        self.synchronizer = event_loop.XOSObserver(self.steps)
Example #17
def main(args):
    # check if config path is set
    config_file_path = DEFAULT_CONFIG_FILE_PATH
    if args.config:
        config_file_path = args.config

    if os.path.exists(config_file_path):
        # read config
        config = read_config(config_file_path)
        if config:
            global progargs
            for k in progargs:
                # overwrite
                if k in config:
                    progargs[k] = config[k]

    log = create_logger(progargs["logging"])

    code_filepath = args.input_file
    if not os.path.exists(code_filepath):
        raise IOError('cannot find an input file - %s' % code_filepath)

    output_filepath = './essence.json'
    if args.output:
        output_filepath = args.output

    print_console = False
    if args.stdout or output_filepath == '-':
        print_console = True

    extractor = EssenceExtractor(logger=log)
    extractor.parse_codefile(code_filepath)
    essence = extractor.extract()
    json_string = pretty_format_json(essence)
    if print_console:
        print(json_string)
    else:
        print_graffiti()
        with open(output_filepath, 'w') as f:
            f.write(json_string)
Example #18
import inspect
import pytz
from protos import xos_pb2
from google.protobuf.empty_pb2 import Empty

from django.db.models import F, Q
from core.models import Site, User, XOSBase
from xos.exceptions import (
    XOSPermissionDenied,
    XOSNotFound,
)

from xosconfig import Config
from multistructlog import create_logger

log = create_logger(Config().get("logging"))


class XOSDefaultSecurityContext(object):
    grant_access = True
    write_access = True
    read_access = True


xos_anonymous_site = Site(
    name="XOS Anonymous Site",
    enabled=True,
    hosts_nodes=False,
    hosts_users=True,
    login_base="xos",
    abbreviated_name="xos-anonymous",
Example #19
File: diag.py Project: vpramo/xos-1

import traceback
import json

from xosconfig import Config
from multistructlog import create_logger

log = create_logger(Config().get('logging'))


def update_diag(diag_class, loop_end=None, loop_start=None, syncrecord_start=None, sync_start=None,
                backend_status=None, backend_code=0):
    observer_name = Config.get("name")

    try:
        diag = diag_class.objects.filter(name=observer_name).first()
        if (not diag):
            if hasattr(diag_class.objects, "new"):
                # api style
                diag = diag_class.objects.new(name=observer_name)
            else:
                # django style
                diag = diag_class(name=observer_name)
Example #20
    def __init__(self):
        self.log = create_logger(Config().get("logging"))
Example #21
def main(args):
    print_graffiti()

    # check if config path is set
    config_file_path = DEFAULT_CONFIG_FILE_PATH
    if args.config:
        config_file_path = args.config

    if os.path.exists(config_file_path):
        # read config
        config = read_config(config_file_path)
        if config:
            global progargs
            for k in progargs:
                # overwrite
                if k in config:
                    progargs[k] = config[k]

    global log
    log = create_logger(progargs["logging"])

    if args.controller:
        progargs['controller_url'] = args.controller

    print('=CONFIG=')
    config_json_string = pretty_format_json(progargs)
    print(config_json_string)
    print('\n')

    # checking controller and airflow web interface
    log.info('Checking if Workflow Controller (%s) is live...' %
             progargs['controller_url'])
    controller_live = check_web_live(progargs['controller_url'])
    if not controller_live:
        log.error('Controller (%s) appears to be down' %
                  progargs['controller_url'])
        raise IOError('Controller (%s) appears to be down' %
                      progargs['controller_url'])

    airflow_live = check_airflow_live()
    if not airflow_live:
        log.error('Airflow appears to be down')
        raise IOError('Airflow appears to be down')

    # connect to workflow controller
    log.info('Connecting to Workflow Controller (%s)...' %
             progargs['controller_url'])
    global manager
    manager = Manager(logger=log)
    manager.connect(progargs['controller_url'])
    manager.set_handlers({
        'kickstart': on_kickstart,
        'check_status': on_check_status,
        'check_status_bulk': on_check_status_bulk
    })

    # connect to airflow
    # global airflow_client
    # log.info('Connecting to Airflow...')

    # api.load_auth()
    # api_module = import_module(AirflowConf.get('cli', 'api_client'))
    # airflow_client = api_module.Client(
    #     api_base_url=AirflowConf.get('cli', 'endpoint_url'),
    #     auth=api.api_auth.client_auth
    # )

    log.info('Waiting for kickstart events from Workflow Controller...')
    try:
        manager.wait()
    finally:
        log.info('Terminating the program...')
        manager.disconnect()
Example #22
# We can't use experimental APIs for managing workflows/workflow runs of Airflow
# - REST API does not provide sufficient features at this version
# - API_Client does not work if a caller is not in main thread

# from importlib import import_module
# from airflow import configuration as AirflowConf
# from airflow import api
# from airflow.models import DagRun

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

log = create_logger()
manager = None
# airflow_client = None

airflow_bin = os.getenv('AIRFLOW_BIN', '/usr/local/bin')

progargs = {
    'controller_url': 'http://localhost:3030',
    'airflow_bin': airflow_bin,
    'logging': None
}

DEFAULT_CONFIG_FILE_PATH = '/etc/cord_workflow_airflow_extensions/config.json'
SOCKET_CONNECTION_TEST_TIMEOUT = 5
DEFAULT_CONNECTION_TEST_DELAY = 5
DEFAULT_CONNECTION_TEST_RETRY = 999999
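
The merge loops in the main() examples above only honor keys that already exist in progargs. A minimal sketch of that behavior (the file contents here are invented):

import json

# Hypothetical on-disk config; unknown keys are silently ignored by the loop.
config = json.loads('{"controller_url": "http://controller:3030", "unknown": 1}')
for k in progargs:
    if k in config:
        progargs[k] = config[k]

assert progargs['controller_url'] == 'http://controller:3030'
assert 'unknown' not in progargs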
Example #23
    def __init__(self):
        self.log = create_logger(Config().get("logging"))
Example #24
class SyncFabricCrossconnectServiceInstance(SyncStep):
    provides = [FabricCrossconnectServiceInstance]
    log = create_logger(Config().get('logging'))

    observes = FabricCrossconnectServiceInstance

    @staticmethod
    def format_url(url):
        if url.startswith('http'):
            return url
        else:
            return 'http://%s' % url

    @staticmethod
    def get_fabric_onos_info(si):

        # get the fabric-crossconnect service
        fabric_crossconnect = si.owner

        # get the onos_fabric service
        fabric_onos = [
            s.leaf_model for s in fabric_crossconnect.provider_services
            if "onos" in s.name.lower()
        ]

        if len(fabric_onos) == 0:
            raise Exception(
                'Cannot find ONOS service in provider_services of Fabric-Crossconnect'
            )

        fabric_onos = fabric_onos[0]

        return {
            'url': SyncFabricCrossconnectServiceInstance.format_url(
                "%s:%s" % (fabric_onos.rest_hostname, fabric_onos.rest_port)),
            'user': fabric_onos.rest_username,
            'pass': fabric_onos.rest_password
        }

    def make_handle(self, s_tag, switch_datapath_id):
        # Generate a backend_handle that uniquely identifies the cross connect. ONOS doesn't provide us a handle, so
        # we make up our own. This helps us to detect other FabricCrossconnectServiceInstance using the same
        # entry, as well as to be able to extract the necessary information to delete the entry later.
        return "%d/%s" % (s_tag, switch_datapath_id)

    def extract_handle(self, backend_handle):
        (s_tag, switch_datapath_id) = backend_handle.split("/", 1)
        s_tag = int(s_tag)
        return (s_tag, switch_datapath_id)

    def range_matches(self, value, pattern):
        value = int(value)
        for this_range in pattern.split(","):
            this_range = this_range.strip()
            if "-" in this_range:
                (first, last) = this_range.split("-")
                first = int(first.strip())
                last = int(last.strip())
                if (value >= first) and (value <= last):
                    return True
            elif this_range.lower() == "any":
                return True
            else:
                if (value == int(this_range)):
                    return True
        return False

    def find_bng(self, s_tag):
        # See if there's a mapping for our s-tag directly
        bng_mappings = BNGPortMapping.objects.filter(s_tag=str(s_tag))
        if bng_mappings:
            return bng_mappings[0]

        # TODO(smbaker): Examine miss performance, and if necessary set a flag in the save method to allow filtering
        # of mappings based on whether they are ranges or any.

        # See if there are any ranges or "any" that match
        for bng_mapping in BNGPortMapping.objects.all():
            if self.range_matches(s_tag, bng_mapping.s_tag):
                return bng_mapping

        return None

    def sync_record(self, o):
        self.log.info("Sync'ing Fabric Crossconnect Service Instance",
                      service_instance=o)

        if (o.policed is None) or (o.policed < o.updated):
            raise DeferredException(
                "Waiting for model_policy to run on fcsi %s" % o.id)

        onos = self.get_fabric_onos_info(o)

        si = ServiceInstance.objects.get(id=o.id)

        if (o.s_tag is None):
            raise Exception(
                "Cannot sync FabricCrossconnectServiceInstance if s_tag is None on fcsi %s"
                % o.id)

        if (o.source_port is None):
            raise Exception(
                "Cannot sync FabricCrossconnectServiceInstance if source_port is None on fcsi %s"
                % o.id)

        if (not o.switch_datapath_id):
            raise Exception(
                "Cannot sync FabricCrossconnectServiceInstance if switch_datapath_id is unset on fcsi %s"
                % o.id)

        bng_mapping = self.find_bng(s_tag=o.s_tag)
        if not bng_mapping:
            raise Exception("Unable to determine BNG port for s_tag %s" %
                            o.s_tag)
        east_port = bng_mapping.switch_port

        data = {
            "deviceId": o.switch_datapath_id,
            "vlanId": o.s_tag,
            "ports": [int(o.source_port), int(east_port)]
        }

        url = onos['url'] + '/onos/segmentrouting/xconnect'

        self.log.info("Sending request to ONOS", url=url, body=data)

        r = requests.post(url,
                          json=data,
                          auth=HTTPBasicAuth(onos['user'], onos['pass']))

        if r.status_code != 200:
            raise Exception(
                "Failed to create fabric crossconnect in ONOS: %s" % r.text)

        # TODO(smbaker): If the o.backend_handle changed, then someone must have changed the
        #   FabricCrossconnectServiceInstance. If so, then we potentially need to clean up the old
        #   entry in ONOS. Furthermore, we might want to also save the two port numbers that we used,
        #   to detect someone changing those.

        o.backend_handle = self.make_handle(o.s_tag, o.switch_datapath_id)
        o.save(update_fields=["backend_handle"])

        self.log.info("ONOS response", res=r.text)

    def delete_record(self, o):
        self.log.info("Deleting Fabric Crossconnect Service Instance",
                      service_instance=o)

        if o.backend_handle:
            onos = self.get_fabric_onos_info(o)

            # backend_handle has everything we need in it to delete this entry.
            (s_tag, switch_datapath_id) = self.extract_handle(o.backend_handle)

            data = {"deviceId": switch_datapath_id, "vlanId": s_tag}

            url = onos['url'] + '/onos/segmentrouting/xconnect'

            r = requests.delete(url,
                                json=data,
                                auth=HTTPBasicAuth(onos['user'], onos['pass']))

            if r.status_code != 204:
                raise Exception(
                    "Failed to remove fabric crossconnect in ONOS: %s" %
                    r.text)

            self.log.info("ONOS response", res=r.text)
Example #25
    def __init__(self):
        self.backup_request_dir = "/var/run/xos/backup/requests"
        self.backup_response_dir = "/var/run/xos/backup/responses"
        self.backup_file_dir = "/var/run/xos/backup/local"
        self.log = create_logger(Config().get("logging"))
Example #26
# limitations under the License.


import os
import sys
from xossynchronizer.steps.syncstep import SyncStep
from xossynchronizer.modelaccessor import MCordSubscriberInstance, ServiceInstanceLink, ProgranServiceInstance

from xosconfig import Config
from multistructlog import create_logger
import json
import requests
from requests.auth import HTTPBasicAuth


log = create_logger(Config().get('logging'))

parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)
sys.path.insert(0, os.path.dirname(__file__))
from helpers import ProgranHelpers

class SyncProgranIMSILink(SyncStep):
    provides = [ServiceInstanceLink]

    observes = ServiceInstanceLink

    # NOTE: Override the default fetch_pending method to operate only on links
    # between MCordSubscriberInstances and ProgranServiceInstances
    def fetch_pending(self, deleted):

        objs = super(SyncProgranIMSILink, self).fetch_pending(deleted)
Example #27
    def setUp(self):

        self.sys_path_save = sys.path

        # Setting up the config module
        from xosconfig import Config
        config = os.path.join(test_path, "test_config.yaml")
        Config.clear()
        Config.init(config, "synchronizer-config-schema.yaml")
        # END Setting up the config module

        from multistructlog import create_logger
        self.log = create_logger(Config().get('logging'))

        from xossynchronizer.mock_modelaccessor_build import mock_modelaccessor_config
        mock_modelaccessor_config(
            test_path, [("ntt-workflow-driver", "ntt-workflow-driver.xproto"),
                        ("olt-service", "volt.xproto"),
                        ("rcord", "rcord.xproto")])

        import xossynchronizer.modelaccessor
        import mock_modelaccessor
        reload(mock_modelaccessor)  # in case nose2 loaded it in a previous test
        reload(xossynchronizer.modelaccessor)  # in case nose2 loaded it in a previous test

        from xossynchronizer.modelaccessor import model_accessor
        from helpers import NttHelpers

        # import all class names to globals
        for (k, v) in model_accessor.all_model_classes.items():
            globals()[k] = v

        self.helpers = NttHelpers
        self.model_accessor = model_accessor

        self._volt = VOLTService()
        self._volt.id = 1

        self.volt = Service()
        self.volt.id = 1
        self.volt.name = "vOLT"
        self.volt.leaf_model = self._volt

        self.pon_port = PONPort()
        self.pon_port.port_no = 1234

        self.onu = ONUDevice()
        self.onu.pon_port = self.pon_port
        self.onu.serial_number = "BRCM1234"

        self.technologyProfile = TechnologyProfile()
        self.technologyProfile.profile_id = 64
        self.technologyProfile.profile_value = '{"profile_type": "EPON","epon_attribute": {"package_type": "A"}}'

        self.ntt_si = NttWorkflowDriverServiceInstance(
            serial_number="BRCM1234",
            owner=self.volt,
            owner_id=self.volt.id,
            mac_address="0a0a0a",
            of_dpid="of:1234")

        self.whitelist_entry = NttWorkflowDriverWhiteListEntry(
            mac_address="0a0a0a",
            owner=self.volt,
            owner_id=self.volt.id,
            pon_port_from=1234,
            pon_port_to=1235,
        )
Example #28
from collections import defaultdict
from networkx import (
    DiGraph,
    weakly_connected_component_subgraphs,
    all_shortest_paths,
    NetworkXNoPath,
)
from networkx.algorithms.dag import topological_sort

from xossynchronizer.steps.syncstep import InnocuousException, DeferredException, SyncStep

from xosconfig import Config
from multistructlog import create_logger

log = create_logger(Config().get("logging"))


class StepNotReady(Exception):
    pass


class ExternalDependencyFailed(Exception):
    pass


# FIXME: Move drivers into a context shared across sync steps.


class NoOpDriver:
    def __init__(self):
        pass
Example #29
    def test_different_formatters(self):
        '''
        Test different formatters and levels to different output streams
        NOTE: Only one test as logger has global state that is hard to reset
        between tests without breaking other things.
        '''

        f1 = os.path.join(test_scratch, 'different_formatters_test_file1')
        f2 = os.path.join(test_scratch, 'different_formatters_test_file2.json')

        logging_config = {
            'version': 1,
            'handlers': {
                'file1': {
                    'class': 'logging.handlers.RotatingFileHandler',
                    'level': 'WARNING',
                    'formatter': 'structured',
                    'filename': f1,
                },
                'file2': {
                    'class': 'logging.handlers.RotatingFileHandler',
                    'level': 'INFO',
                    'formatter': 'json',
                    'filename': f2,
                },
            },
            'formatters': {
                'json': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.processors.JSONRenderer(),
                },
                'structured': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': structlog.dev.ConsoleRenderer(colors=False),
                },
            },
            'loggers': {
                '': {
                    'handlers': ['file1', 'file2'],
                    'level': 'WARNING',
                    'propagate': True
                },
            }
        }

        # reset level to debug, overriding 'loggers' directive above
        logger = multistructlog.create_logger(logging_config,
                                              level=logging.DEBUG)

        extra_data = {'number': 42}

        logger.warning("should be in both files", extra=extra_data)

        # filtered by file1 handler
        logger.info("should only be in file2")

        # filtered by both handlers, but not by loggers
        logger.debug("should not be in either file")

        # test new trace level
        logger.trace("testing trace, shouldn't be in either file")

        # check contents of file1
        with open(f1) as f1fh:

            # regex at start should roughly match ISO8601 datetime
            f1_desired = r"[\dTZ\-\.\:]+ \[warning  \] should be in both files        extra={'number': 42}"
            self.assertRegexpMatches(f1fh.read(), f1_desired)

        # check contents of file2
        f2_read = []
        f2_desired = [
            {
                "event": "should be in both files",
                "extra": {
                    "number": 42
                },
                "level": "warning",
                "timestamp": "removed"
            },
            {
                "event": "should only be in file2",
                "level": "info",
                "timestamp": "removed"
            },
        ]

        with open(f2) as f2fh:
            for line in f2fh:
                jl = json.loads(line)

                # assert there is a timestamp, and remove it as it changes
                self.assertIn("timestamp", jl)
                jl['timestamp'] = "removed"

                f2_read.append(jl)

        self.assertEqual(f2_read, f2_desired)