Example #1
0
    def __init__(self, serverdb=None, log_file=None):
        """Bind a server DB handle and a server-manager logger.

        serverdb -- pre-built db handle; when falsy, the default database at
                    DEF_SERVER_DB_LOCATION is opened instead.
        log_file -- accepted for interface compatibility; unused here.
        """
        # Fall back to the default database only when no handle was supplied.
        self._serverDb = serverdb if serverdb else db(DEF_SERVER_DB_LOCATION)
        self._smgr_log = ServerMgrlogger()
class PlayLogger:
    """
    Store log output in a single object
    One object per Ansible run
    """
    def __init__(self):
        self.log = ''       # accumulated log text
        self.runtime = 0    # total playbook runtime, filled in by the caller
        try:
            self._sm_logger = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed; file handle closed via context manager.
            with open("/var/log/contrail-server-manager/debug.log", "a") as f:
                f.write("Ansible Callback Init - ServerMgrlogger init failed\n")

    def append(self, log_line):
        """Accumulate log_line (newline-terminated) and mirror it to the SM logger."""
        # NOTE(review): if logger creation failed in __init__, _sm_logger is
        # unset and this raises AttributeError -- confirm callers guard that.
        self.log += log_line + "\n"
        self._sm_logger.log(self._sm_logger.INFO, log_line)

    def banner(self, msg):
        """Return msg followed by '*' filler padded toward an 80-column line."""
        width = 78 - len(msg)
        if width < 3:
            width = 3  # always emit at least a minimal filler
        filler = "*" * width
        return "\n%s %s " % (msg, filler)
Example #3
0
    def __init__(self, json_entity, args):
        """Set up common state for running Ansible playbooks.

        json_entity -- list whose first element carries hosts_in_inv, tasks
                       and parameters (optionally a kolla inventory).
        args        -- parsed server-manager arguments, stored for later use.
        """
        super(ContrailAnsiblePlaybooks, self).__init__()
        try:
            self.logger = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed; debug-log handle closed via
            # context manager instead of explicit close().
            with open("/var/log/contrail-server-manager/debug.log", "a") as f:
                f.write("Ansible Callback Init - ServerMgrlogger init failed\n")

        #Initialize common stuff
        self.json_entity = json_entity
        self.args = args
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        if "kolla_inv" in json_entity[0]["parameters"]:
            self.hosts_in_kolla_inv = \
                    SMAnsibleUtils(self.logger).hosts_in_kolla_inventory(\
                        json_entity[0]['parameters']['kolla_inv'])

        # Tasks arrive as a comma/space separated string.
        self.tasks = re.split(r'[,\ ]+', json_entity[0]["tasks"])

        #Initialize vars required for Ansible Playbook APIs
        self.options = None
        self.extra_vars = None
        self.pbook_path = None
        self.var_mgr = None
        self.inventory = None
        self.pb_executor = None
Example #4
0
    def __init__(self, vm_params):
        """Create the Contrail VM: store settings, set up networking,
        create the VM and install the contrail VM package.

        vm_params -- dict of VM, network and credential settings (see the
                     keys read below).
        """
        global _smgr_log
        _smgr_log = ServerMgrlogger()
        _smgr_log.log(_smgr_log.DEBUG, "ContrailVM Init")

        self.vm = vm_params['vm']
        self.vmdk = vm_params['vmdk']
        self.datastore = vm_params['datastore']
        self.eth0_mac = vm_params['eth0_mac']
        self.eth0_ip = vm_params['eth0_ip']
        self.eth0_pg = vm_params['eth0_pg']
        self.eth0_vswitch = vm_params['eth0_vswitch']
        self.eth0_vlan = vm_params['eth0_vlan']
        self.eth1_vswitch = vm_params['eth1_vswitch']
        self.eth1_pg = vm_params['eth1_pg']
        self.eth1_vlan = vm_params['eth1_vlan']
        self.uplink_nic = vm_params['uplink_nic']
        self.uplink_vswitch = vm_params['uplink_vswitch']
        self.server = vm_params['server']
        self.username = vm_params['username']
        self.password = vm_params['password']
        self.thindisk = vm_params['thindisk']
        # Fix: the following lines were tab-indented while the rest of the
        # method used spaces; indentation normalized to spaces.
        self.vm_domain = vm_params['domain']
        self.vm_id = 0
        self.smgr_ip = vm_params['smgr_ip']
        self.vm_server = vm_params['vm_server']
        self.vm_password = vm_params['vm_password']
        self.vm_deb = vm_params['vm_deb']
        self._create_networking()
        # print(expr) is equivalent to the old single-argument print
        # statement and also valid Python 3 syntax.
        print(self._create_vm())
        print(self._install_contrailvm_pkg(self.eth0_ip, "root",
                                           self.vm_password, self.vm_domain,
                                           self.vm_server, self.vm_deb,
                                           self.smgr_ip))
    def run(self):
        """Serve the server-status REST endpoints (blocks in bottle.run)."""
        # Create the logger first so later failures can be reported.
        try:
            self._smgr_log = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed. NOTE(review): if this fires,
            # self._smgr_log stays unset and the DB error path below would
            # raise AttributeError -- confirm intended behavior.
            print("Error Creating logger object")

        # Connect to the cluster-servers database
        try:
            self._status_serverDb = db(
                self._smgr_main._args.server_manager_base_dir +
                self._smgr_main._args.database_name)
        except Exception:
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "Error Connecting to Server Database %s" %
                (self._smgr_main._args.server_manager_base_dir +
                 self._smgr_main._args.database_name))
            exit()

        # Register both verbs for the status endpoint.
        status_bottle_app = Bottle()
        status_bottle_app.route('/server_status', 'POST',
                                self.put_server_status)
        status_bottle_app.route('/server_status', 'PUT',
                                self.put_server_status)
        self._base_obj = self._status_thread_config['base_obj']

        try:
            bottle.run(status_bottle_app,
                       host=self._status_thread_config['listen_ip'],
                       port=self._status_thread_config['listen_port'])
        except Exception as e:
            # cleanup gracefully
            exit()
    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Connect and log in to the cobbler XMLRPC API.

        Subprocess failures are reported via log_and_raise_exception();
        any other error propagates with its original traceback.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")

        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # The port is optional; omit it from the URL when unset.
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" % (e.returncode, e.cmd))
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
        except Exception:
            # Fix: 'raise e' resets the traceback; bare 'raise' preserves it.
            raise
Example #7
0
class PlayLogger:
    """
    Store log output in a single object
    One object per Ansible run
    """
    # NOTE(review): class-level dict -- shared across all PlayLogger
    # instances; __init__ writes into it.  (Moved below the docstring so the
    # triple-quoted string actually becomes the class docstring.)
    default_config = dict()

    def __init__(self):
        self.log = ''
        self.runtime = 0
        self.defaults_file = "/etc/contrail/sm-client-config.ini"
        config = ConfigParser.SafeConfigParser()
        config.read([self.defaults_file])
        self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
        self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
        # Fix: this debug-log handle was previously opened and never closed.
        with open("/var/log/contrail-server-manager/debug.log", "a") as f:
            f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
        try:
            self._sm_logger = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed; handle closed via 'with'.
            with open("/var/log/contrail-server-manager/debug.log", "a") as f:
                f.write("Ansible Callback Init - ServerMgrlogger init failed\n")

    def append(self, log_line):
        """Accumulate log_line and mirror it to the server-manager logger."""
        self.log += log_line + "\n"
        self._sm_logger.log(self._sm_logger.INFO, log_line)

    def banner(self, msg):
        """Return msg followed by '*' filler padded toward an 80-column line."""
        width = 78 - len(msg)
        if width < 3:
            width = 3
        filler = "*" * width
        return "\n%s %s " % (msg, filler)
    def __init__(self, smgr_base_dir, puppet_dir):
        """Record the base/puppet directories and make sure the parent
        directory of puppet_dir exists on disk."""
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrPuppet Init")

        self.smgr_base_dir = smgr_base_dir
        self.puppet_directory = puppet_dir
        # Create the parent directory on first run.
        parent_dir = os.path.dirname(puppet_dir)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
Example #9
0
    def __init__(self, db_file_name=def_server_db_file):
        """Open the server-manager sqlite database, creating missing tables.

        Also back-fills generated storage UUID parameters for clusters that
        lack them.  Any setup error is re-raised with its original traceback.
        """
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " + image_table +
                               """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table.  Fix: 'tag6 TAXT' typo corrected to
                # TEXT (changes column affinity for newly created DBs only).
                cursor.execute("CREATE TABLE IF NOT EXISTS " + server_table +
                               """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         UNIQUE (id))""")
                # Create server tags table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_tags_table +
                               """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
            self._get_table_columns()
            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # During init, we check if any of the Cluster in DB are missing
            # any Storage Parameters (Generated UUIDs)
            cluster_list = self._get_items(cluster_table, None, None, True,
                                           None)
            for cluster in cluster_list:
                # NOTE(review): eval() of stored parameters is unsafe on
                # untrusted DB content -- consider ast.literal_eval.
                params = eval(cluster['parameters'])
                if ('storage_fsid' not in params or
                        'storage_virsh_uuid' not in params):
                    self.update_cluster_uuids(cluster)
        except Exception:
            # Fix: 'except e:' referenced an undefined name (NameError at
            # runtime); re-raise preserving the original traceback instead.
            raise
Example #10
0
    def __init__(self,
                 base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Log in to the cobbler XMLRPC API and (re)create the contrail
        centos repo, then kick off a 'cobbler reposync' for it.

        Raises ServerMgrException on subprocess failure; any other error
        propagates with its original traceback.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")

        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # The port is optional; omit it from the URL when unset.
            if self._cobbler_port:
                self._server = xmlrpclib.Server("http://" + self._cobbler_ip +
                                                ":" + self._cobbler_port +
                                                "/cobbler_api")
            else:
                self._server = xmlrpclib.Server("http://" + self._cobbler_ip +
                                                "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)

            # Copy contrail centos repo to cobber repos, so that target
            # systems can install and run puppet agent from kickstart.
            repo = self._server.find_repo({"name": _CONTRAIL_CENTOS_REPO})
            if repo:
                rid = self._server.get_repo_handle(_CONTRAIL_CENTOS_REPO,
                                                   self._token)
            else:
                rid = self._server.new_repo(self._token)
            self._server.modify_repo(rid, "arch", "x86_64", self._token)
            repo_dir = base_dir + "contrail-centos-repo"
            self._server.modify_repo(rid, "name", _CONTRAIL_CENTOS_REPO,
                                     self._token)
            self._server.modify_repo(rid, "mirror", repo_dir, self._token)
            self._server.modify_repo(rid, "keep_updated", True, self._token)
            self._server.modify_repo(rid, "priority", "99", self._token)
            self._server.modify_repo(rid, "rpm_list", [], self._token)
            self._server.modify_repo(rid, "yumopts", {}, self._token)
            self._server.modify_repo(rid, "mirror_locally", True, self._token)
            self._server.modify_repo(rid, "environment", {}, self._token)
            self._server.modify_repo(rid, "comment", "...", self._token)
            self._server.save_repo(rid, self._token)
            # Issue cobbler reposync for this repo
            cmd = "cobbler reposync --only=" + _CONTRAIL_CENTOS_REPO
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" % (e.returncode, e.cmd))
            raise ServerMgrException(msg)
        except Exception:
            # Fix: 'raise e' resets the traceback; bare 'raise' preserves it.
            raise
    def __init__(self, json_entity, args):
        """Prepare an Ansible PlaybookExecutor for the described cluster:
        validate provisioning parameters, write the per-cluster inventory
        file, and build the executor with fixed ssh/sudo options.
        """
        super(ContrailAnsiblePlayBook, self).__init__()
        # Fix: removed unused locals 'lb', 'agent' and the redundant
        # 'inv_file = None' pre-assignment.
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        cluster_id = json_entity[0]["cluster_id"]
        parameters = json_entity[0]["parameters"]
        inventory = parameters["inventory"]
        self.current_status = self.validate_provision_params(inventory, args)
        self.pbook_path = inventory["[all:vars]"]["ansible_playbook"]
        pbook_dir = os.path.dirname(self.pbook_path)

        # One inventory file per cluster, written next to the playbook.
        inv_dir = pbook_dir + '/inventory/'
        inv_file = inv_dir + cluster_id + ".inv"
        create_inv_file(inv_file, inventory)

        self.var_mgr = VariableManager()
        self.ldr = DataLoader()
        self.args = args
        self.inventory = Inventory(loader=self.ldr,
                                   variable_manager=self.var_mgr,
                                   host_list=inv_file)
        self.var_mgr.set_inventory(self.inventory)

        # Minimal Options tuple expected by PlaybookExecutor.
        Options = namedtuple('Options', [
            'connection', 'forks', 'module_path', 'become', 'become_method',
            'become_user', 'check', 'listhosts', 'listtasks', 'listtags',
            'syntax', 'verbosity'
        ])
        self.options = Options(connection='ssh',
                               forks=100,
                               module_path=None,
                               become=True,
                               become_method='sudo',
                               become_user='******',
                               check=False,
                               listhosts=None,
                               listtasks=None,
                               listtags=None,
                               syntax=None,
                               verbosity=None)

        self.pws = {}
        self.pb_executor = PlaybookExecutor(playbooks=[self.pbook_path],
                                            inventory=self.inventory,
                                            variable_manager=self.var_mgr,
                                            loader=self.ldr,
                                            options=self.options,
                                            passwords=self.pws)
        try:
            self._sm_logger = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed; handle closed via 'with'.
            with open("/var/log/contrail-server-manager/debug.log", "a") as f:
                f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
Example #12
0
 def __init__(self):
     ''' Constructor '''
     # Seed the plugin configuration with built-in defaults.
     self.MonitoringCfg = dict(
         monitoring_frequency=_DEF_MON_FREQ,
         monitoring_plugin=_DEF_MONITORING_PLUGIN,
     )
     self.InventoryCfg = dict(inventory_plugin=_DEF_INVENTORY_PLUGIN)
     self._smgr_log = ServerMgrlogger()
     # No monitoring/inventory configuration has been parsed yet.
     self.monitoring_args = None
     self.monitoring_config_set = False
     self.inventory_args = None
     self.inventory_config_set = False
     self.server_monitoring_obj = None
     self.server_inventory_obj = None
     self.monitoring_gevent_thread_obj = None
class Joiner(threading.Thread):
    """Thread that joins finished child processes handed to it via a queue."""

    def __init__(self, q):
        super(Joiner, self).__init__()
        self._smgr_log = ServerMgrlogger()
        self.queue = q  # queue of child processes; None is the stop sentinel

    def run(self):
        """Join children from the queue until the None sentinel arrives."""
        while True:
            child = self.queue.get()
            self._smgr_log.log(self._smgr_log.INFO, "Joining a process")
            print("joining a process")
            if child is None:  # fix: identity check instead of '== None'
                return
            child.join()
            self._smgr_log.log(self._smgr_log.INFO, "Process Done")
            print("process done")
    def run(self):
        """Serve the server-status REST endpoints (blocks in bottle.run)."""
        # Create the logger first so later failures can be reported.
        try:
            self._smgr_log = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed. NOTE(review): if this fires,
            # self._smgr_log stays unset and the DB error path below would
            # raise AttributeError -- confirm intended behavior.
            print("Error Creating logger object")

        # Connect to the cluster-servers database
        try:
            self._status_serverDb = db(
                self._smgr_main._args.server_manager_base_dir+self._smgr_main._args.database_name)
        except Exception:
            self._smgr_log.log(self._smgr_log.DEBUG,
                     "Error Connecting to Server Database %s"
                    % (self._smgr_main._args.server_manager_base_dir+self._smgr_main._args.database_name))
            exit()

        # Register both verbs for the status endpoint.
        status_bottle_app = Bottle()
        status_bottle_app.route('/server_status', 'POST', self.put_server_status)
        status_bottle_app.route('/server_status', 'PUT', self.put_server_status)
        self._base_obj = self._status_thread_config['base_obj']

        try:
            bottle.run(status_bottle_app,
                       host=self._status_thread_config['listen_ip'],
                       port=self._status_thread_config['listen_port'])
        except Exception as e:
            # cleanup gracefully
            exit()
Example #15
0
class Joiner(threading.Thread):
    """Thread that joins finished child processes handed to it via a queue."""

    def __init__(self, q):
        super(Joiner, self).__init__()
        self._smgr_log = ServerMgrlogger()
        self.queue = q  # queue of child processes; None is the stop sentinel

    def run(self):
        """Join children from the queue until the None sentinel arrives."""
        while True:
            child = self.queue.get()
            self._smgr_log.log(self._smgr_log.INFO, "Joining a process")
            print("joining a process")
            if child is None:  # fix: identity check instead of '== None'
                return
            child.join()
            self._smgr_log.log(self._smgr_log.INFO, "Process Done")
            print("process done")
    def __init__(self, json_entity, args):
        """Set up common state for running Ansible playbooks.

        json_entity -- list whose first element carries hosts_in_inv, tasks
                       and parameters (optionally a kolla inventory).
        args        -- parsed server-manager arguments, stored for later use.
        """
        super(ContrailAnsiblePlaybooks, self).__init__()
        try:
            self.logger = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed; debug-log handle closed via
            # context manager instead of explicit close().
            with open("/var/log/contrail-server-manager/debug.log", "a") as f:
                f.write("Ansible Callback Init - ServerMgrlogger init failed\n")

        #Initialize common stuff
        self.json_entity = json_entity
        self.args = args
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        if "kolla_inv" in json_entity[0]["parameters"]:
            self.hosts_in_kolla_inv = \
                    SMAnsibleUtils(self.logger).hosts_in_kolla_inventory(\
                        json_entity[0]['parameters']['kolla_inv'])

        # Tasks arrive as a comma/space separated string.
        self.tasks = re.split(r'[,\ ]+', json_entity[0]["tasks"])

        #Initialize vars required for Ansible Playbook APIs
        self.options = None
        self.extra_vars = None
        self.pbook_path = None
        self.var_mgr = None
        self.inventory = None
        self.pb_executor = None
Example #17
0
 def __init__(self):
     """Read the SM client config, record the SM listen ip, and create the
     server-manager logger (best effort).

     NOTE(review): self.default_config is assumed to be a class-level dict
     defined on the enclosing class -- it is not created here; confirm.
     """
     self.log = ''
     self.runtime = 0
     self.defaults_file = "/etc/contrail/sm-client-config.ini"
     config = ConfigParser.SafeConfigParser()
     config.read([self.defaults_file])
     self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
     self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
     # Fix: this debug-log handle was previously opened and never closed.
     with open("/var/log/contrail-server-manager/debug.log", "a") as f:
         f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
     try:
         self._sm_logger = ServerMgrlogger()
     except Exception:
         # Fix: bare 'except:' narrowed; handle closed via 'with'.
         with open("/var/log/contrail-server-manager/debug.log", "a") as f:
             f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
    def __init__(self, serverdb=None, log_file=None):
        """Bind a server DB handle and a server-manager logger.

        serverdb -- pre-built db handle; when falsy, the default database at
                    DEF_SERVER_DB_LOCATION is opened instead.
        log_file -- accepted for interface compatibility; unused here.
        """
        # Fall back to the default database only when no handle was supplied.
        self._serverDb = serverdb if serverdb else db(DEF_SERVER_DB_LOCATION)
        self._smgr_log = ServerMgrlogger()
    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Connect and log in to the cobbler XMLRPC API.

        Subprocess failures are reported via log_and_raise_exception();
        any other error propagates with its original traceback.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")

        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # The port is optional; omit it from the URL when unset.
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" % (e.returncode, e.cmd))
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
        except Exception:
            # Fix: 'raise e' resets the traceback; bare 'raise' preserves it.
            raise
    def __init__(self, args_str=None):
        """Start the SM Ansible server: build the logger, parse args,
        launch the joiner thread and register the bottle route.

        args_str -- argument list; defaults to sys.argv[1:] when falsy.
        """
        try:
            self._smgr_log = ServerMgrlogger()
        except Exception:
            # Fix: bare 'except:' narrowed. NOTE(review): if this fires,
            # self._smgr_log stays unset and the next line raises -- confirm.
            print("Error Creating logger object")

        self._smgr_log.log(self._smgr_log.INFO, "Starting SM Ansible Server")
        if not args_str:
            args_str = sys.argv[1:]
        self._parse_args(args_str)
        # Joiner thread reaps finished playbook child processes.
        self.joinq = Queue.Queue()
        self.joiner = Joiner(self.joinq)
        self.joiner.start()
        self._smgr_log.log(self._smgr_log.INFO, 'Initializing Bottle App')
        self.app = bottle.app()
        bottle.route('/run_ansible_playbooks', 'POST',
                     self.start_ansible_playbooks)
    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Log in to the cobbler XMLRPC API and (re)create the contrail
        centos repo, then kick off a 'cobbler reposync' for it.

        Raises ServerMgrException on subprocess failure; any other error
        propagates with its original traceback.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")

        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # The port is optional; omit it from the URL when unset.
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)

            # Copy contrail centos repo to cobber repos, so that target
            # systems can install and run puppet agent from kickstart.
            repo = self._server.find_repo({"name": _CONTRAIL_CENTOS_REPO})
            if repo:
                rid = self._server.get_repo_handle(
                    _CONTRAIL_CENTOS_REPO, self._token)
            else:
                rid = self._server.new_repo(self._token)
            self._server.modify_repo(rid, "arch", "x86_64", self._token)
            repo_dir = base_dir + "contrail-centos-repo"
            self._server.modify_repo(
                rid, "name", _CONTRAIL_CENTOS_REPO, self._token)
            self._server.modify_repo(rid, "mirror", repo_dir, self._token)
            self._server.modify_repo(rid, "keep_updated", True, self._token)
            self._server.modify_repo(rid, "priority", "99", self._token)
            self._server.modify_repo(rid, "rpm_list", [], self._token)
            self._server.modify_repo(rid, "yumopts", {}, self._token)
            self._server.modify_repo(rid, "mirror_locally", True, self._token)
            self._server.modify_repo(rid, "environment", {}, self._token)
            self._server.modify_repo(rid, "comment", "...", self._token)
            self._server.save_repo(rid, self._token)
            # Issue cobbler reposync for this repo
            cmd = "cobbler reposync --only=" + _CONTRAIL_CENTOS_REPO
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" % (e.returncode, e.cmd))
            raise ServerMgrException(msg)
        except Exception:
            # Fix: 'raise e' resets the traceback; bare 'raise' preserves it.
            raise
Example #22
0
class SM_Docker():
    '''
    This class deals with all things docker that server manager needs
    '''

    _docker_client = None

    def __init__(self):
        self._docker_client = Client()
        self._smgr_log = ServerMgrlogger()

    def new_image(self, pre, post):
        """Return the first image dict in post whose 'Id' is absent from
        pre, or None when every post image already existed.

        Fix: the old version rescanned pre for every post entry (O(n*m))
        and used '== False'/'== True' comparisons; a set of known ids makes
        this a single linear pass.
        """
        pre_ids = set(img['Id'] for img in pre)
        for img in post:
            if img['Id'] not in pre_ids:
                return img
        return None

    def load_containers(self, image):
        """Load a docker image tarball; return [ok, new_image_or_None]."""
        try:
            pre = self._docker_client.images()

            # Fix: open the tarball via a context manager so the handle is
            # closed even if load_image raises.
            with open(image, 'r') as f:
                self._docker_client.load_image(f)
            msg = "docker loaded image %s" % (image)
            self._smgr_log.log(self._smgr_log.INFO, msg)

            post = self._docker_client.images()
            return [True, self.new_image(pre, post)]
        except Exception as e:
            msg = "docker load failed for image %s: %s" % (image, e)
            self._smgr_log.log(self._smgr_log.INFO, msg)
            return [False, None]

    def tag_containers(self, image, repo, tag):
        """Tag image into repo:tag via the docker client."""
        return self._docker_client.tag(image, repo, tag)

    def push_containers(self, image):
        """Push image, streaming progress lines into the SM log."""
        try:
            stream = self._docker_client.push(image, stream=True)
            for line in stream:
                self._smgr_log.log(self._smgr_log.INFO, line)
        except Exception as e:
            msg = "docker push failed for image %s: %s" % (image, e)
            #raise ServerMgrException(msg, ERR_OPR_ERROR)
            self._smgr_log.log(self._smgr_log.INFO, msg)
    def __init__(self, db_file_name=def_server_db_file):
        """Open the server-manager sqlite database, creating missing tables.

        Also back-fills generated storage UUID parameters for clusters that
        lack them.  Any setup error is re-raised with its original traceback.
        """
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               image_table + """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table.  Fix: 'tag6 TAXT' typo corrected to
                # TEXT (changes column affinity for newly created DBs only).
                cursor.execute(
                    "CREATE TABLE IF NOT EXISTS " + server_table +
                    """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         UNIQUE (id))""")
                # Create server tags table
                cursor.execute(
                    "CREATE TABLE IF NOT EXISTS " + server_tags_table +
                    """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
            self._get_table_columns()
            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # During init, we check if any of the Cluster in DB are missing
            # any Storage Parameters (Generated UUIDs)
            cluster_list = self._get_items(cluster_table, None,
                                       None, True, None)
            for cluster in cluster_list:
                # NOTE(review): eval() of stored parameters is unsafe on
                # untrusted DB content -- consider ast.literal_eval.
                params = eval(cluster['parameters'])
                if ('storage_fsid' not in params or
                        'storage_virsh_uuid' not in params):
                    self.update_cluster_uuids(cluster)
        except Exception:
            # Fix: 'except e:' referenced an undefined name (NameError at
            # runtime); re-raise preserving the original traceback instead.
            raise
 def __init__(self):
     """Initialize empty log state and a ServerMgrlogger (best effort)."""
     self.log = ''
     self.runtime = 0
     try:
         self._sm_logger = ServerMgrlogger()
     except Exception:
         # Fix: bare 'except:' narrowed; debug-log handle closed via 'with'.
         with open("/var/log/contrail-server-manager/debug.log", "a") as f:
             f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
    def __init__(self, smgr_base_dir, puppet_dir):
        """Record the base/puppet directories and make sure the parent
        directory of puppet_dir exists on disk."""
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrPuppet Init")

        self.smgr_base_dir = smgr_base_dir
        self.puppet_directory = puppet_dir
        # Create the parent directory on first run.
        parent_dir = os.path.dirname(puppet_dir)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
    def __init__(self, package, v1 = 0, v2 = 0, v3 = 0, v4 = 0):
        """Derive contrail version components (and openstack sku) from a
        package record.

        package -- package dict with a "parameters" field, or None to use
                   the explicitly supplied v1..v4 version components.
        """
        self.logger = ServerMgrlogger()
        if package is None:  # fix: identity comparison instead of '== None'
            self.os_sku = 14
            self.major_version = v1
            self.moderate_version = v2
            self.minor_version_1 = v3
            self.minor_version_2 = v4
        elif eval(str(package["parameters"])).get("playbooks_version", False) == False:
            # Could not detect playbooks in image. Set version such
            # that puppet gets triggered
            # NOTE(review): eval() of stored parameters is unsafe for
            # untrusted input -- consider ast.literal_eval.
            self.os_sku = 14
            self.major_version = 4
            self.moderate_version = 0
            self.minor_version_1 = 0
            self.minor_version_2 = 0
        else:
            try:
                version_list = re.split(r'[\.\-]',
                        eval(str(package["parameters"]))["playbooks_version"])

                if "sku" in package["parameters"]:
                    # sku is of the following format:
                    # 2:14.0.2-0ubuntu1~cloud0.1contrail
                    # here, 14 represents the openstack sku - newton
                    sku_list = re.split(r'[:.]',
                            eval(str(package["parameters"]))["sku"])
                    self.os_sku = sku_list[1]
                else:
                    self.os_sku = 14
                self.major_version = version_list[0]
                self.moderate_version = version_list[1]
                self.minor_version_1 = version_list[2]
                self.minor_version_2 = version_list[3]
            except Exception as e:
                self.logger.log(self.logger.ERROR,
                        "ContrailVersion failed: %s. Falling back to 14.4.0.0.0" % e)
                # Could not detect SKU. Fall back to puppet scheme of things
                self.os_sku = 14
                self.major_version = 4
                self.moderate_version = 0
                self.minor_version_1 = 0
                self.minor_version_2 = 0
class PlayLogger:
    """
    Store log output in a single object
    One object per Ansible run
    """
    # Fix: the docstring above was previously placed after this
    # assignment, so it was never bound to __doc__.
    # NOTE(review): class-level dict is shared by all instances.
    default_config = dict()

    def __init__(self, cluster_id):
        self.log = ''
        self.runtime = 0
        self.defaults_file = "/etc/contrail/sm-client-config.ini"
        config = ConfigParser.SafeConfigParser()
        config.read([self.defaults_file])
        self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
        self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
        self._sm_prov_logger = None
        # Fix: this debug-log handle was opened but never closed (leak);
        # close it right after the write.
        f = open("/var/log/contrail-server-manager/debug.log", "a")
        try:
            f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
        finally:
            f.close()
        try:
            self._sm_logger = ServerMgrlogger()
            if cluster_id:
                self._sm_prov_logger = ServerMgrProvlogger(cluster_id)
            else:
                self._sm_logger.log(self._sm_logger.ERROR,
                   "cluster_id not found in inventory - provision specific "\
                   "logging will not be done")
        except:
            f = open("/var/log/contrail-server-manager/debug.log", "a")
            f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
            f.close()

    def append(self, log_line):
        """Accumulate log_line and forward it to the SM (and, when
        available, the provision-specific) logger."""
        self.log += log_line+"\n"
        if self._sm_prov_logger:
            self._sm_prov_logger.log("info", log_line)
        self._sm_logger.log(self._sm_logger.INFO, log_line)

    def banner(self, msg):
        """Return msg padded with '*' filler toward a 78-column banner
        line (minimum 3 stars)."""
        width = 78 - len(msg)
        if width < 3:
            width = 3
        filler = "*" * width
        return "\n%s %s " % (msg, filler)
class PlayLogger:
    """
    Store log output in a single object
    One object per Ansible run
    """
    # NOTE(review): this is a duplicate definition of PlayLogger within
    # the same module; the later definition wins - consider removing one.
    # Fix: the docstring above was previously placed after this
    # assignment, so it was never bound to __doc__.
    default_config = dict()

    def __init__(self, cluster_id):
        self.log = ''
        self.runtime = 0
        self.defaults_file = "/etc/contrail/sm-client-config.ini"
        config = ConfigParser.SafeConfigParser()
        config.read([self.defaults_file])
        self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
        self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
        self._sm_prov_logger = None
        # Fix: this debug-log handle was opened but never closed (leak);
        # close it right after the write.
        f = open("/var/log/contrail-server-manager/debug.log", "a")
        try:
            f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
        finally:
            f.close()
        try:
            self._sm_logger = ServerMgrlogger()
            if cluster_id:
                self._sm_prov_logger = ServerMgrProvlogger(cluster_id)
            else:
                self._sm_logger.log(self._sm_logger.ERROR,
                   "cluster_id not found in inventory - provision specific "\
                   "logging will not be done")
        except:
            f = open("/var/log/contrail-server-manager/debug.log", "a")
            f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
            f.close()

    def append(self, log_line):
        """Accumulate log_line and forward it to the SM (and, when
        available, the provision-specific) logger."""
        self.log += log_line + "\n"
        if self._sm_prov_logger:
            self._sm_prov_logger.log("info", log_line)
        self._sm_logger.log(self._sm_logger.INFO, log_line)

    def banner(self, msg):
        """Return msg padded with '*' filler toward a 78-column banner
        line (minimum 3 stars)."""
        width = 78 - len(msg)
        if width < 3:
            width = 3
        filler = "*" * width
        return "\n%s %s " % (msg, filler)
 def __init__(self, cluster_id):
     """Capture per-run log state and open the server-manager loggers.

     cluster_id: used to open a provision-specific logger; when falsy,
     provision logging is skipped (an error is logged instead).
     """
     self.log = ''
     self.runtime = 0
     self.defaults_file = "/etc/contrail/sm-client-config.ini"
     config = ConfigParser.SafeConfigParser()
     config.read([self.defaults_file])
     self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
     self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
     self._sm_prov_logger = None
     # Fix: this debug-log handle was opened but never closed (leak);
     # close it right after the write.
     f = open("/var/log/contrail-server-manager/debug.log", "a")
     try:
         f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
     finally:
         f.close()
     try:
         self._sm_logger = ServerMgrlogger()
         if cluster_id:
             self._sm_prov_logger = ServerMgrProvlogger(cluster_id)
         else:
             self._sm_logger.log(self._sm_logger.ERROR,
                "cluster_id not found in inventory - provision specific "\
                "logging will not be done")
     except:
         f = open("/var/log/contrail-server-manager/debug.log", "a")
         f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
         f.close()
 def __init__(self):
     """Initialise default monitoring/inventory configuration and empty
     runtime state."""
     # Default plugin configuration blocks.
     self.MonitoringCfg = {
         'monitoring_frequency': _DEF_MON_FREQ,
         'monitoring_plugin': _DEF_MONITORING_PLUGIN
     }
     self.InventoryCfg = {'inventory_plugin': _DEF_INVENTORY_PLUGIN}
     self._smgr_log = ServerMgrlogger()
     # Runtime state, populated once args/config are parsed.
     self.monitoring_args = None
     self.monitoring_config_set = False
     self.inventory_args = None
     self.inventory_config_set = False
     self.server_monitoring_obj = None
     self.server_inventory_obj = None
     self.monitoring_gevent_thread_obj = None
    def __init__(self, args_str=None):
        """Start the SM Ansible server: logger, argument parsing, joiner
        thread and the bottle route for running ansible playbooks.

        args_str: CLI-style argument list; defaults to sys.argv[1:].
        """
        try:
            self._smgr_log = ServerMgrlogger()
        except:
            print "Error Creating logger object"
            # Fix: execution previously fell through and crashed with an
            # AttributeError on the next line; propagate the real error.
            raise

        self._smgr_log.log(self._smgr_log.INFO, "Starting SM Ansible Server")
        if not args_str:
            args_str = sys.argv[1:]
        self._parse_args(args_str)
        self.joinq  = Queue.Queue()
        self.joiner = Joiner(self.joinq)
        self.joiner.start()
        self._smgr_log.log(self._smgr_log.INFO,  'Initializing Bottle App')
        self.app = bottle.app()
        bottle.route('/run_ansible_playbooks', 'POST',
                self.start_ansible_playbooks)
    def __init__(self, args_str=None):
        """Start the SM Ansible provision server: logger, per-host result
        store, argument parsing, joiner thread and bottle routes.

        args_str: CLI-style argument list; defaults to sys.argv[1:].
        """
        try:
            self._smgr_log = ServerMgrlogger()
        except:
            print "Error Creating logger object"
            # Fix: execution previously fell through and crashed with an
            # AttributeError on the next line; propagate the real error.
            raise

        self._smgr_log.log(self._smgr_log.INFO, "Starting SM Ansible Server")
        # Playbook run results keyed by host.
        self.host_run_results = defaultdict(dict)
        if not args_str:
            args_str = sys.argv[1:]
        self._parse_args(args_str)
        self.joinq  = Queue.Queue()
        self.joiner = Joiner(self.joinq)
        self.joiner.start()
        self._smgr_log.log(self._smgr_log.INFO,  'Initializing Bottle App')
        self.app = bottle.app()
        bottle.route('/start_provision', 'POST', self.start_provision)
        bottle.route('/run_playbook', 'POST', self.start_playbook)
        bottle.route('/playbook_status', 'PUT', self.playbook_status)
    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Connect to the cobbler XMLRPC API, log in, and create the
        contrail centos/redhat repos used by kickstarted targets.

        Raises ServerMgrException when repo-creation commands fail.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")


        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # Port may legitimately be empty, in which case the default
            # HTTP port is used.
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)
            # Copy contrail centos/redhat repo to cobber repos, so that target
            # systems can install and run puppet agent from kickstart.
            self._init_create_repo(_CONTRAIL_CENTOS_REPO, self._server,
                                   self._token, base_dir)
            self._init_create_repo(_CONTRAIL_REDHAT_REPO, self._server,
                                   self._token, base_dir)
        except subprocess.CalledProcessError as e:
            # Fix: added the missing space between "executing" and the
            # quoted command in the message.
            msg = ("Cobbler Init: error %d when executing "
                   "\"%s\"" %(e.returncode, e.cmd))
            raise ServerMgrException(msg)
        except Exception:
            # Fix: "raise e" discarded the original traceback; a bare
            # raise preserves it.
            raise
 def __init__(self, cluster_id):
     """Capture per-run log state and open the server-manager loggers.

     cluster_id: used to open a provision-specific logger; when falsy,
     provision logging is skipped (an error is logged instead).
     """
     self.log = ''
     self.runtime = 0
     self.defaults_file = "/etc/contrail/sm-client-config.ini"
     config = ConfigParser.SafeConfigParser()
     config.read([self.defaults_file])
     self.default_config["smgr"] = dict(config.items("SERVER-MANAGER"))
     self.smgr_ip = self.default_config["smgr"]["listen_ip_addr"]
     self._sm_prov_logger = None
     # Fix: this debug-log handle was opened but never closed (leak);
     # close it right after the write.
     f = open("/var/log/contrail-server-manager/debug.log", "a")
     try:
         f.write("Ansible callback init - smgr_ip: %s" % self.smgr_ip)
     finally:
         f.close()
     try:
         self._sm_logger = ServerMgrlogger()
         if cluster_id:
             self._sm_prov_logger = ServerMgrProvlogger(cluster_id)
         else:
             self._sm_logger.log(self._sm_logger.ERROR,
                "cluster_id not found in inventory - provision specific "\
                "logging will not be done")
     except:
         f = open("/var/log/contrail-server-manager/debug.log", "a")
         f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
         f.close()
class ServerMgrStatusThread(threading.Thread):
    """Bottle server thread that receives server status callbacks
    (reimage/provision state transitions), updates the DB, and triggers
    follow-up actions (inventory runs, puppet node-map updates, e-mail)."""

    # Shared handles, bound in __init__ / run().
    _smgr_log = None
    _status_serverDb = None
    _base_obj = None
    _smgr_puppet = None
    _smgr_main = None


    ''' Class to run function that keeps validating the cobbler token
        periodically (every 30 minutes) on a new thread. '''
    _pipe_start_app = None
    def __init__(self, timer, server, status_thread_config):
        # NOTE(review): the "timer" and "server" parameters are accepted
        # but never stored or used here - confirm against callers.
        threading.Thread.__init__(self)
        self._status_thread_config = status_thread_config
        self._smgr_puppet = status_thread_config['smgr_puppet']
        self._smgr_main = status_thread_config['smgr_main']

    def run(self):
        # Thread entry point: set up logging and DB access, then serve
        # the /server_status endpoint until the bottle server exits.
        #create the logger
        try:
            self._smgr_log = ServerMgrlogger()
        except:
            print "Error Creating logger object"

        # Connect to the cluster-servers database
        try:
            self._status_serverDb = db(
                self._smgr_main._args.server_manager_base_dir+self._smgr_main._args.database_name)
        except:
            self._smgr_log.log(self._smgr_log.DEBUG,
                     "Error Connecting to Server Database %s"
                    % (self._smgr_main._args.server_manager_base_dir+self._smgr_main._args.database_name))
            exit()

        #set the status related handlers
        status_bottle_app = Bottle()
        status_bottle_app.route('/server_status', 'POST', self.put_server_status)
        status_bottle_app.route('/server_status', 'PUT', self.put_server_status)
        self._base_obj = self._status_thread_config['base_obj']

        try:
            bottle.run(status_bottle_app,
                       host=self._status_thread_config['listen_ip'],
                       port=self._status_thread_config['listen_port'])
        except Exception as e:
            # cleanup gracefully
            exit()

    def put_server_status(self):
        # HTTP handler: record a server state transition in the DB and
        # fire the matching follow-up action for the new state.
        print "put-status"
        #query_args = parse_qs(urlparse(bottle.request.url).query,
                                      #keep_blank_values=True)
        #match_key, match_value = query_args.popitem()
        server_id = request.query['server_id']
        server_state = request.query['state']
        body = request.body.read()
        server_data = {}
        server_data['id'] = server_id
        # "post_provision_completed" is persisted as "provision_completed".
        if server_state == "post_provision_completed":
            server_data['status'] = "provision_completed"
        else:
            server_data['status'] = server_state
        try:
            time_str = strftime("%Y_%m_%d__%H_%M_%S", localtime())
            message = server_id + ' ' + server_state + time_str
            self._smgr_log.log(self._smgr_log.DEBUG, "Server status Data %s" % server_data)
            servers = self._status_serverDb.modify_server(
                                                    server_data)
            # After a reimage completes, refresh inventory asynchronously.
            if server_state == "reimage_completed":
                payload = dict()
                payload["id"] = server_id
                self._smgr_log.log(self._smgr_log.DEBUG, "Spawning Gevent for Id: %s" % payload["id"])
                if self._base_obj:
                    gevent.spawn(self._base_obj.reimage_run_inventory, self._status_thread_config["listen_ip"],
                                 self._status_thread_config["listen_port"], payload)
            if server_state == "provision_started":
                self._smgr_main.update_provision_started_flag(server_id, server_state)
            self._smgr_main.update_provision_role_sequence(server_id,
                                                           server_state)
            if server_state == "post_provision_completed":
                server_state = "provision_completed"

            # On completed provision, switch the node's puppet environment
            # off via the node map file.
            if server_state == "provision_completed":
                domain = self._status_serverDb.get_server_domain(server_id)
                environment_name = 'TurningOffPuppetAgent__' + time_str
                if domain:
                    server_fqdn = server_id + "." + domain
                    self._smgr_puppet.update_node_map_file(
                        server_fqdn, environment_name)
            if server_state in email_events:
                self.send_status_mail(server_id, message, message)
        except Exception as e:
#            self.log_trace()
            self._smgr_log.log(self._smgr_log.ERROR, "Error adding to db %s" % repr(e))
            abort(404, repr(e))

    def get_email_list(self, email):
        # Accept either a python-list-style string ("[...]", parsed with
        # eval) or a comma-separated string; return a list of addresses.
        email_to = []
        if not email:
            return email_to
        if email.startswith('[') and email.endswith(']'):
            email_to = eval(email)
        else:
            email_to = [s.strip() for s in email.split(',')]
        return email_to
    # end get_email_list

    def send_status_mail(self, server_id, event, message):
        # E-mail the status message to the server's configured address,
        # falling back to the cluster's address when the server has none.
        # Returns -1 when the server is unknown, 0 when no address is
        # configured, None on success.
        # Get server entry and find configured e-mail
        servers = self._status_serverDb.get_server(
            {"id" : server_id}, detail=True)
        if not servers:
            msg = "No server found with server_id " + server_id
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return -1
        server = servers[0]
        email_to = []
        if 'email' in server and server['email']:
            email_to = self.get_email_list(server['email'])
        else:
            # Get cluster entry to find configured e-mail
            if 'cluster_id' in server and server['cluster_id']:
                cluster_id = server['cluster_id']
                cluster = self._status_serverDb.get_cluster(
                    {"id" : cluster_id}, detail=True)
                if cluster and 'email' in cluster[0] and cluster[0]['email']:
                        email_to = self.get_email_list(cluster[0]['email'])
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG,
                                       "cluster or server doesn't configured for email")
                    return 0
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "server not associated with a cluster")
                return 0
        send_mail(event, message, '', email_to,
                                    self._status_thread_config['listen_ip'], '25')
        msg = "An email is sent to " + ','.join(email_to) + " with content " + message
        self._smgr_log.log(self._smgr_log.DEBUG, msg)
Example #36
0
class ServerMgrDb:

    # Cached column-name lists for each table, populated by
    # _get_table_columns() during __init__.
    _cluster_table_cols = []
    _server_table_cols = []
    _image_table_cols = []
    _status_table_cols = []
    _server_tags_table_cols = []

    # Keep list of table columns
    def _get_table_columns(self):
        """Cache each table's column names by issuing a dummy SELECT
        against it and reading cursor.description."""
        # Fix: dropped the "except Exception as e: raise e" wrapper -
        # it only destroyed the traceback.
        with self._con:
            cursor = self._con.cursor()
            cursor.execute("SELECT * FROM " + server_table + " WHERE id=?",
                           (_DUMMY_STR, ))
            self._server_table_cols = [x[0] for x in cursor.description]
            cursor.execute(
                "SELECT * FROM " + server_tags_table + " WHERE tag_id=?",
                (_DUMMY_STR, ))
            self._server_tags_table_cols = [
                x[0] for x in cursor.description
            ]
            cursor.execute("SELECT * FROM " + image_table + " WHERE id=?",
                           (_DUMMY_STR, ))
            self._image_table_cols = [x[0] for x in cursor.description]
            cursor.execute(
                "SELECT * FROM " + cluster_table + " WHERE id=?",
                (_DUMMY_STR, ))
            self._cluster_table_cols = [x[0] for x in cursor.description]
            cursor.execute(
                "SELECT * FROM " + server_status_table + " WHERE id=?",
                (_DUMMY_STR, ))
            self._status_table_cols = [x[0] for x in cursor.description]

    # end _get_table_columns

    def __init__(self, db_file_name=def_server_db_file):
        """Open (or create) the server-manager sqlite database, create
        any missing tables, cache their column lists, and back-fill
        storage UUIDs on clusters that lack them."""
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " + image_table +
                               """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table
                # Fix: tag6 was declared "TAXT" (typo); sqlite accepted it
                # as a no-affinity column - corrected to TEXT.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + server_table +
                               """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         UNIQUE (id))""")
                # Create server tags table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_tags_table +
                               """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
            self._get_table_columns()
            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # During init, we check if any of the Cluster in DB are missing any Storage Parameters (Generated UUIDs)
            cluster_list = self._get_items(cluster_table, None, None, True,
                                           None)
            for cluster in cluster_list:
                # Check if storage parameters are present in Cluster, else generate them
                if 'storage_fsid' not in set(
                        eval(cluster['parameters']
                             )) or 'storage_virsh_uuid' not in set(
                                 eval(cluster['parameters'])):
                    self.update_cluster_uuids(cluster)
        except Exception:
            # Fix: the original "except e:" raised a NameError at handler
            # time; re-raise the real exception with its traceback intact.
            raise

    # End of __init__

    def delete_tables(self):
        """Wipe all rows from every server-manager table."""
        # Fix: the original "except: raise e" raised NameError (e was
        # never bound); let the real exception propagate instead.
        with self._con:
            cursor = self._con.cursor()
            cursor.executescript("""
            DELETE FROM """ + cluster_table + """;
            DELETE FROM """ + server_table + """;
            DELETE FROM """ + server_tags_table + """;
            DELETE FROM """ + server_status_table + """;
            DELETE FROM """ + image_table + ";")

    # End of delete_tables

    def get_server_id(self, server_mac):
        """Return the server id stored for a MAC address (normalised to
        colon-separated form), or None when absent or on any error."""
        try:
            if server_mac:
                server_mac = str(EUI(server_mac)).replace("-", ":")
            with self._con:
                cur = self._con.cursor()
                cur.execute(
                    "SELECT id FROM " + server_table + " WHERE mac_address=?",
                    (server_mac, ))
                match = cur.fetchone()
                return match[0] if match else None
        except:
            return None

    # end get_server_id

    # Lookup helper: return the value stored against tag_id in the
    # server tags table, or None when missing / on error.
    def get_server_tag(self, tag_id):
        try:
            with self._con:
                cur = self._con.cursor()
                cur.execute(
                    "SELECT value FROM " + server_tags_table +
                    " WHERE tag_id=?", (tag_id, ))
                match = cur.fetchone()
                return match[0] if match else None
        except:
            return None

    # end get_server_tag

    def get_server_mac(self, id):
        """Return the mac_address recorded for server "id", or None when
        absent or on any error."""
        try:
            with self._con:
                cur = self._con.cursor()
                cur.execute(
                    "SELECT mac_address FROM " + server_table + " WHERE id=?",
                    (id, ))
                match = cur.fetchone()
                return match[0] if match else None
        except:
            return None

    def _add_row(self, table_name, dict):
        """INSERT OR IGNORE one row into table_name.

        dict: column-name -> value mapping. (NOTE: parameter name shadows
        the builtin; kept for interface compatibility.)
        """
        # Fix: dropped the "except Exception as e: raise e" wrapper -
        # it only destroyed the traceback.
        keys, values = zip(*dict.items())
        insert_str = "INSERT OR IGNORE INTO %s (%s) values (%s)" \
            % (table_name,
               (",".join(keys)),
               (",".join('?' * len(keys))))
        with self._con:
            cursor = self._con.cursor()
            cursor.execute(insert_str, values)

    # end _add_row

    # Generic row-deletion helper: match_dict supplies equality criteria,
    # unmatch_dict inequality criteria; both are optional.
    # NOTE: criteria values are interpolated into the SQL string.
    def _delete_row(self, table_name, match_dict=None, unmatch_dict=None):
        try:
            clauses = []
            if match_dict:
                for k, v in match_dict.iteritems():
                    clauses.append("%s = \'%s\'" % (k, v))
            if unmatch_dict:
                for k, v in unmatch_dict.iteritems():
                    clauses.append("%s != \'%s\'" % (k, v))
            delete_str = "DELETE FROM %s" % (table_name)
            if clauses:
                delete_str += " WHERE " + " and ".join(clauses)
            with self._con:
                self._con.cursor().execute(delete_str)
        except Exception as e:
            raise e

    # End _delete_row

    def _modify_row(self,
                    table_name,
                    dict,
                    match_dict=None,
                    unmatch_dict=None):
        """UPDATE table_name, setting the column/value pairs in "dict"
        (shadows the builtin; kept for interface compatibility),
        restricted by match_dict (equality) / unmatch_dict (inequality).
        """
        try:
            keys, values = zip(*dict.items())
            modify_str = "UPDATE %s SET " % (table_name)
            update_list = ",".join(key + "=?" for key in keys)
            modify_str += update_list
            match_list = []
            # Fix: match_values was only bound when match_dict was given,
            # so passing unmatch_dict alone raised UnboundLocalError.
            match_values = []
            if match_dict:
                match_list = ["%s = ?" % (k) for k in match_dict.iterkeys()]
                match_values = [v for v in match_dict.itervalues()]
            if unmatch_dict:
                match_list += [
                    "%s != ?" % (k) for k in unmatch_dict.iterkeys()
                ]
                match_values += [v for v in unmatch_dict.itervalues()]
            if match_list:
                match_str = " and ".join(match_list)
                modify_str += " WHERE " + match_str
                # Fix: criteria values were comma-joined into a single
                # bound parameter, which mismatched the placeholder count
                # whenever more than one criterion was given; bind each
                # value individually.
                values += tuple(match_values)
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(modify_str, values)
        except Exception as e:
            raise e

    def _get_items(self,
                   table_name,
                   match_dict=None,
                   unmatch_dict=None,
                   detail=False,
                   always_fields=None):
        """Generic SELECT helper; returns a list of dicts (column -> value).

        detail=True selects all columns, otherwise only always_fields.
        match_dict / unmatch_dict become equality / inequality WHERE
        criteria.

        NOTE(review): criteria values are interpolated directly into the
        SQL string - callers must not pass untrusted values here.
        """
        try:
            with self._con:
                cursor = self._con.cursor()
                if detail:
                    sel_cols = "*"
                else:
                    sel_cols = ",".join(always_fields)
                select_str = "SELECT %s FROM %s" % (sel_cols, table_name)
                # form a string to provide to where match clause
                match_list = []
                if match_dict:
                    match_list = [
                        "%s = \'%s\'" % (k, v)
                        for k, v in match_dict.iteritems()
                    ]
                if unmatch_dict:
                    match_list += [
                        "%s != \'%s\'" % (k, v)
                        for k, v in unmatch_dict.iteritems()
                    ]
                if match_list:
                    match_str = " and ".join(match_list)
                    select_str += " WHERE " + match_str
                cursor.execute(select_str)
            # The cursor is read after the with-block exits; the
            # transaction context does not close the cursor itself.
            rows = [x for x in cursor]
            cols = [x[0] for x in cursor.description]
            items = []
            for row in rows:
                item = {}
                for prop, val in zip(cols, row):
                    item[prop] = val
                items.append(item)
            return items
        except Exception as e:
            raise e

    # End _get_items

    def add_cluster(self, cluster_data):
        """Insert one cluster row; the "parameters" dict and "email" list
        are stringified before storage."""
        try:
            for field in ("parameters", "email"):
                value = cluster_data.pop(field, None)
                if value is not None:
                    cluster_data[field] = str(value)
            self._add_row(cluster_table, cluster_data)
        except Exception as e:
            raise e

    # End of add_cluster

    def add_server(self, server_data):
        """Insert one server row, normalising/stringifying the composite
        fields (mac, roles, interfaces, email, tags, parameters) first.

        Raises when a referenced cluster does not exist; returns 0 on
        success.
        """
        try:
            if 'mac_address' in server_data:
                # Normalise the MAC to colon-separated form.
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            # Store roles list as a text field
            roles = server_data.pop("roles", None)
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Raises ServerMgrException when the cluster is absent.
                self.check_obj("cluster", {"id": cluster_id})
            if roles is not None:
                server_data['roles'] = str(roles)
            # NOTE(review): the interface/email fields below use truthiness
            # checks while roles/parameters use "is not None" - empty
            # values are dropped here but kept for roles/parameters.
            intf_control = server_data.pop("control_data_network", None)
            if intf_control:
                server_data['intf_control'] = str(intf_control)
            intf_bond = server_data.pop("bond_interface", None)
            if intf_bond:
                server_data['intf_bond'] = str(intf_bond)
            # Store email list as text field
            email = server_data.pop("email", None)
            if email:
                server_data['email'] = str(email)
            # store tags if any
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                # Map human-readable tag names back to tagN column names.
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v, k) for k, v in tags_dict.iteritems())
                for k, v in server_tags.iteritems():
                    server_data[rev_tags_dict[k]] = v
            # Store server_params dictionary as a text field
            server_parameters = server_data.pop("parameters", None)
            if server_parameters is not None:
                server_data['parameters'] = str(server_parameters)
            self._add_row(server_table, server_data)
        except Exception as e:
            raise e
        return 0

    # End of add_server

    # Replaces the entire server-tags table: callers always pass the
    # complete tag set (tag_data holds information for all tags), so the
    # table is cleared before re-inserting every row.
    def add_server_tags(self, tag_data):
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.executescript("""
                DELETE FROM """ + server_tags_table + ";")
            for key, value in tag_data.iteritems():
                self._add_row(server_tags_table,
                              {'tag_id': key, 'value': value})
        except Exception as e:
            raise e

    # End of add_server_tags

    def server_discovery(self, action, entity):
        """Handle a discovery event: "add" inserts/refreshes a server row
        keyed by MAC, "delete" removes it only if it was discovered (not
        user-configured). All errors are deliberately swallowed."""
        try:
            if 'mac_address' in entity:
                # Normalise the MAC to colon-separated form.
                entity['mac_address'] = str(EUI(
                    entity['mac_address'])).replace("-", ":")
            mac_address = entity.get("mac_address", None)
            if action.lower() == "add":
                # If this server is already present in our table,
                # update IP address if DHCP was not static.
                servers = self._get_items(server_table,
                                          {"mac_address": mac_address},
                                          detail=True)
                if servers:
                    server = servers[0]
                    self._modify_row(server_table, entity,
                                     {"mac_address": mac_address}, {})
                    return
                entity['discovered'] = "true"
                entity['status'] = "server_discovered"
                self._add_row(server_table, entity)
            elif action.lower() == "delete":
                # Only remove rows that were auto-discovered.
                servers = self.get_server({"mac_address": mac_address},
                                          detail=True)
                if ((servers) and (servers[0]['discovered'] == "true")):
                    self._delete_row(server_table,
                                     {"mac_address": mac_address})
            else:
                return
        except:
            return

    # End of server_discovery

    def add_image(self, image_data):
        """Insert one image row; the "parameters" dict is stringified
        before storage."""
        try:
            image_params = image_data.pop("parameters", None)
            if image_params is not None:
                image_data['parameters'] = str(image_params)
            self._add_row(image_table, image_data)
        except Exception as e:
            raise e

    # End of add_image

    def delete_cluster(self, match_dict=None, unmatch_dict=None):
        """Delete matching cluster rows; refuses (raises) when servers
        are still associated with the cluster id."""
        try:
            self.check_obj("cluster", match_dict, unmatch_dict)
            cluster_id = match_dict.get("id", None)
            attached_servers = None
            if cluster_id:
                attached_servers = self.get_server(
                    {'cluster_id': cluster_id}, detail=True)
            if attached_servers:
                msg = ("Servers are present in this cluster, "
                       "remove cluster association, prior to cluster delete.")
                raise ServerMgrException(msg)
            self._delete_row(cluster_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e

    # End of delete_cluster

    def check_obj(self,
                  type,
                  match_dict=None,
                  unmatch_dict=None,
                  raise_exception=True):
        """Return True if at least one server/cluster/image row matches.

        When nothing matches (or *type* is unknown), raises
        ServerMgrException unless raise_exception is False, in which case
        False is returned.

        Fix: an unrecognised *type* previously fell through the if/elif
        chain and hit an UnboundLocalError on db_obj; it now reports
        "<type> not found" like any other miss.
        """
        if type == "server":
            db_obj = self.get_server(match_dict, unmatch_dict, detail=False)
        elif type == "cluster":
            db_obj = self.get_cluster(match_dict, unmatch_dict, detail=False)
        elif type == "image":
            db_obj = self.get_image(match_dict, unmatch_dict, detail=False)
        else:
            db_obj = None

        if not db_obj:
            msg = "%s not found" % (type)
            if raise_exception:
                raise ServerMgrException(msg)
            return False
        return True

    #end of check_obj

    def delete_server(self, match_dict=None, unmatch_dict=None):
        """Delete matching server rows.

        Any mac_address criterion is first canonicalised to the
        colon-separated form used as the table's primary key.
        """
        try:
            for criteria in (match_dict, unmatch_dict):
                if criteria and criteria.get("mac_address", None):
                    criteria["mac_address"] = str(
                        EUI(criteria["mac_address"])).replace("-", ":")
            self.check_obj("server", match_dict, unmatch_dict)
            self._delete_row(server_table, match_dict, unmatch_dict)
        except Exception as err:
            raise err

    # End of delete_server

    def delete_server_tag(self, match_dict=None, unmatch_dict=None):
        """Remove rows from the server tags table matching the criteria."""
        try:
            self._delete_row(server_tags_table, match_dict, unmatch_dict)
        except Exception as err:
            raise err

    # End of delete_server_tag

    def delete_image(self, match_dict=None, unmatch_dict=None):
        """Delete image rows after verifying at least one match exists."""
        try:
            self.check_obj("image", match_dict, unmatch_dict)
            self._delete_row(image_table, match_dict, unmatch_dict)
        except Exception as err:
            raise err

    # End of delete_image

    def modify_cluster(self, cluster_data):
        """Update an existing cluster row.

        Merges incoming 'parameters' over the stored ones (a value of the
        literal string '""' clears the key), assigns a uuid parameter when
        the stored params lack one, and serialises parameters/email back
        to text columns.

        Fix: when the stored params had no 'uuid' and the caller supplied
        no 'parameters' key, the uuid update raised KeyError; setdefault
        now creates the dict on demand.
        """
        try:
            cluster_id = cluster_data.get('id', None)
            if not cluster_id:
                raise Exception("No cluster id specified")
            self.check_obj("cluster", {"id": cluster_id})
            db_cluster = self.get_cluster({"id": cluster_id}, detail=True)
            if not db_cluster:
                msg = "%s is not valid" % cluster_id
                raise ServerMgrException(msg)
            db_cluster_params_str = db_cluster[0]['parameters']
            db_cluster_params = {}
            if db_cluster_params_str:
                # params are persisted as str(dict) and decoded with eval();
                # unsafe on untrusted data, retained for compatibility
                db_cluster_params = eval(db_cluster_params_str)
            if 'uuid' not in db_cluster_params:
                str_uuid = str(uuid.uuid4())
                cluster_data.setdefault("parameters", {}).update(
                    {"uuid": str_uuid})
            # Merge incoming params over stored ones; store as text.
            cluster_params = cluster_data.pop("parameters", {})
            for k, v in cluster_params.items():
                if v == '""':
                    v = ''
                db_cluster_params[k] = v
            cluster_data['parameters'] = str(db_cluster_params)

            # Store email list as text field
            email = cluster_data.pop("email", None)
            if email is not None:
                cluster_data['email'] = str(email)
            self._modify_row(cluster_table, cluster_data, {'id': cluster_id},
                             {})
        except Exception as e:
            raise e

    # End of modify_cluster

    def modify_image(self, image_data):
        """Update an existing image row.

        'path' and 'type' are immutable; attempts to change them raise
        ServerMgrException. The 'parameters' dict is serialised to text.

        Fixes: an unknown image id previously raised a bare IndexError
        (now ServerMgrException "image not found"); omitting 'path' or
        'type' from a partial modify previously raised KeyError (the
        immutability check now applies only to keys that are present);
        "cannnot" typo in the error messages.
        """
        try:
            image_id = image_data.get('id', None)
            if not image_id:
                raise Exception("No image id specified")
            #Reject if non mutable field changes
            db_image = self.get_image({'id': image_id}, detail=True)
            if not db_image:
                raise ServerMgrException("image %s not found" % image_id)
            if 'path' in image_data and \
                    image_data['path'] != db_image[0]['path']:
                raise ServerMgrException('Image path cannot be modified')
            if 'type' in image_data and \
                    image_data['type'] != db_image[0]['type']:
                raise ServerMgrException('Image type cannot be modified')
            # Store image_params dictionary as a text field
            image_parameters = image_data.pop("parameters", None)
            if image_parameters is not None:
                image_data['parameters'] = str(image_parameters)
            self._modify_row(image_table, image_data, {'id': image_id}, {})
        except Exception as e:
            raise e

    # End of modify_image

    def modify_server(self, server_data):
        """Update a server row identified by 'id' or 'mac_address'.

        Serialises roles/interface/parameter/email fields to text columns,
        maps user-visible tag names to tagN columns, merges incoming
        'parameters' over the stored ones, and writes via _modify_row
        keyed on the (possibly looked-up) MAC address.
        """
        # Fetch the stored row for later comparisons / parameter merge.
        # NOTE(review): if neither 'id' nor 'mac_address' is supplied,
        # db_server stays None and the parameters merge below would raise
        # TypeError — presumably callers always pass one of them; confirm.
        db_server = None
        if 'id' in server_data.keys():
            db_server = self.get_server({'id': server_data['id']}, detail=True)
        elif 'mac_address' in server_data.keys():
            db_server = self.get_server(
                {'mac_address': server_data['mac_address']}, detail=True)
        try:
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Reject moves into a non-existent cluster.
                self.check_obj("cluster", {"id": cluster_id})

            # Canonicalise MAC to colon-separated form (table primary key).
            if 'mac_address' in server_data:
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            server_mac = server_data.get('mac_address', None)
            if not server_mac:
                server_id = server_data.get('id', None)
                if not server_id:
                    raise Exception("No server MAC or id specified")
                else:
                    # Resolve the row key from the id.
                    server_mac = self.get_server_mac(server_id)
            #Check if object exists
            # NOTE(review): 'server_mac' is never a key of server_data (the
            # key is 'mac_address'), so this whole branch looks unreachable
            # and the primary-key-change rejection never fires — confirm
            # intent before "fixing".
            if 'id' in server_data.keys() and \
                    'server_mac' in server_data.keys():
                self.check_obj('server', {'id': server_data['id']})
                #Reject if primary key values change
                if server_data['mac_address'] != db_server[0]['mac_address']:
                    raise ServerMgrException('MAC address cannnot be modified')

            # Store roles list as a text field
            roles = server_data.pop("roles", None)
            if roles is not None:
                server_data['roles'] = str(roles)
            intf_control = server_data.pop("control_data_network", None)
            if intf_control:
                server_data['intf_control'] = str(intf_control)
            intf_bond = server_data.pop("bond_interface", None)
            if intf_bond:
                server_data['intf_bond'] = str(intf_bond)
            # store tags if any: map tag values ({name: value}) onto the
            # tagN columns via the reverse of the tag-definition table.
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v, k) for k, v in tags_dict.iteritems())
                for k, v in server_tags.iteritems():
                    server_data[rev_tags_dict[k]] = v
            # Store server_params dictionary as a text field
            server_params = server_data.pop("parameters", None)
            #if server_params is not None:
            #    server_data['server_params'] = str(server_params)
            #check for modify in db server_params
            #Always Update DB server params: merge incoming keys over the
            # stored dict; the literal string '""' clears a key.
            db_server_params = {}
            # NOTE(review): stored params are decoded with eval() — unsafe
            # on untrusted data, retained for compatibility.
            db_server_params_str = db_server[0]['parameters']
            if db_server_params_str:
                db_server_params = eval(db_server_params_str)
                if server_params:
                    for k, v in server_params.iteritems():
                        if v == '""':
                            v = ''
                        db_server_params[k] = v
            server_data['parameters'] = str(db_server_params)

            # Store email list as text field
            email = server_data.pop("email", None)
            if email is not None:
                server_data['email'] = str(email)
            self._modify_row(server_table, server_data,
                             {'mac_address': server_mac}, {})
        except Exception as e:
            raise e

    # End of modify_server

    # This function for modifying server tag is slightly different
    # compared with modify function for other tables. The tag_data
    # contains tag information for all tags.
    def modify_server_tags(self, tag_data):
        """Update every tag definition in tag_data ({tag_id: value}).

        Unlike the other modify_* helpers this touches one row per entry.
        """
        try:
            for tag_id, tag_value in tag_data.items():
                self._modify_row(server_tags_table,
                                 {'tag_id': tag_id, 'value': tag_value},
                                 {'tag_id': tag_id}, {})
        except Exception as err:
            raise err

    # End of modify_server_tags

    def get_image(self, match_dict=None, unmatch_dict=None, detail=False):
        """Return image rows; the summary view selects only the id column."""
        try:
            return self._get_items(image_table, match_dict, unmatch_dict,
                                   detail, ["id"])
        except Exception as err:
            raise err

    # End of get_image

    def get_server_tags(self, match_dict=None, unmatch_dict=None, detail=True):
        """Return the tag table as a {tag_id: value} dict.

        The detail argument is accepted for signature compatibility but
        ignored; rows are always fetched in full.
        """
        try:
            rows = self._get_items(server_tags_table, match_dict,
                                   unmatch_dict, True, ["tag_id"])
            tag_dict = dict((row['tag_id'], row['value']) for row in rows)
        except Exception as err:
            raise err
        return tag_dict

    # End of get_server_tags

    def get_status(self, match_key=None, match_value=None, detail=False):
        """Return status rows where match_key equals match_value.

        Fix: the keyword was spelled 'always_field', but _get_items()
        declares 'always_fields', so every call raised TypeError.
        """
        try:
            status = self._get_items(server_status_table,
                                     {match_key: match_value},
                                     detail=detail,
                                     always_fields=["id"])
        except Exception as e:
            raise e
        return status

    # End of get_status

    def put_status(self, server_data):
        """Insert or update the status row keyed by server id."""
        try:
            server_id = server_data.get('id', None)
            if not server_id:
                raise Exception("No server id specified")
            existing = self._get_items(server_status_table, {"id": server_id},
                                       detail=True)
            if not existing:
                self._add_row(server_status_table, server_data)
            else:
                self._modify_row(server_status_table, server_data,
                                 {'id': server_id}, {})
        except Exception as err:
            raise err

    # End of put_status

    def get_server(self,
                   match_dict=None,
                   unmatch_dict=None,
                   detail=False,
                   field_list=None):
        """Return server rows.

        A mac_address criterion is canonicalised to colon form first.
        The summary view returns id/mac_address/ip_address unless an
        explicit field_list is supplied.
        """
        try:
            if match_dict and match_dict.get("mac_address", None):
                match_dict["mac_address"] = str(
                    EUI(match_dict["mac_address"])).replace("-", ":")
            fields = field_list if field_list \
                else ["id", "mac_address", "ip_address"]
            return self._get_items(server_table, match_dict, unmatch_dict,
                                   detail, fields)
        except Exception as err:
            raise err

    # End of get_server

    def get_cluster(self, match_dict=None, unmatch_dict=None, detail=False):
        """Return cluster rows; the summary view selects only the id column."""
        try:
            return self._get_items(cluster_table, match_dict, unmatch_dict,
                                   detail, ["id"])
        except Exception as err:
            raise err

    # End of get_cluster

    # If any UUIDs are missing from an existing Cluster, we add them during ServerManager DB init
    def update_cluster_uuids(self, cluster):
        """Backfill missing uuid / storage_fsid / storage_virsh_uuid params.

        Called during DB init; generates a fresh uuid4 for each absent key
        and persists the updated parameters text back to the cluster row.
        """
        try:
            params_text = cluster['parameters']
            # NOTE(review): params are stored via str(dict) and decoded
            # with eval(); unsafe on untrusted data, kept for compatibility.
            params = eval(params_text) if params_text else {}
            for key in ('uuid', 'storage_fsid', 'storage_virsh_uuid'):
                if key not in params:
                    params[key] = str(uuid.uuid4())
        except Exception as err:
            raise err

        cluster['parameters'] = str(params)
        self._modify_row(cluster_table, cluster, {'id': cluster['id']}, {})
 # NOTE(review): orphaned fragment — the 1-space indentation matches no
 # visible class here; presumably this __init__ belongs to a
 # ServerMgrValidations-style class defined elsewhere (it logs
 # "ServerMgrValidations Init"). Left byte-identical pending confirmation.
 def __init__(self):
     self._smgr_log = ServerMgrlogger()
     self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrValidations Init")
class ServerMgrDb:

    # Input: Specify table name by giving the object name and '_table' will be added by
    # this function while looking for the table
    # Output: Dict of columns, if table matches. Otherwise, empty list will be returned
    def get_table_columns(self, table_name):
        """Describe the columns of <table_name>_table.

        Returns {'header': {'column name': 'type'},
                 'columns': {name: type}} when the table exists, or an
        empty dict for a falsy/unknown table name.
        """
        table_columns = {}
        if not table_name:
            return table_columns
        with self._con:
            cursor = self._con.cursor()
            cursor.execute("PRAGMA table_info(%s)" % (table_name + '_table'))
            info_rows = cursor.fetchall()
            if cursor.description and info_rows:
                headers = [col[0] for col in cursor.description]
                if 'name' in headers and 'type' in headers:
                    name_idx = headers.index('name')
                    type_idx = headers.index('type')
                    table_columns['header'] = {'column name': 'type'}
                    table_columns['columns'] = dict(
                        (str(row[name_idx]), str(row[type_idx]))
                        for row in info_rows)
            return table_columns

    # end get_table_columns

    def _add_table_column(self, cursor, table, column, column_type):
        try:
            cmd = "ALTER TABLE " + table + " ADD COLUMN " + column + " " + column_type
            cursor.execute(cmd)
        except lite.OperationalError:
            pass

    # end _add_table_column

    def log_and_raise_exception(self, msg, err_code=ERR_OPR_ERROR):
        """Log msg at ERROR level, then raise it as a ServerMgrException."""
        self._smgr_log.log(self._smgr_log.ERROR, msg)
        raise ServerMgrException(msg, err_code)

    def __init__(self, db_file_name=def_server_db_file):
        """Open (creating if needed) the server-manager sqlite database.

        Creates all tables, adds columns introduced after the original
        schema shipped, backfills missing cluster UUID parameters and
        refreshes image version parameters.

        Fixes: the handler was "except e:" which raised NameError and
        masked the real failure; the tag6 column type was misspelled
        "TAXT" (wrong sqlite type affinity).
        """
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " + image_table +
                               """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table
                cursor.execute("CREATE TABLE IF NOT EXISTS " + server_table +
                               """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         network TEXT, contrail TEXT, top_of_rack TEXT,
                         UNIQUE (id))""")
                # Create inventory table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               inventory_table +
                               """ (fru_description TEXT PRIMARY KEY NOT NULL,
                         id TEXT, board_serial_number TEXT, chassis_type TEXT,
                         chassis_serial_number TEXT, board_mfg_date TEXT,
                         board_manufacturer TEXT, board_product_name TEXT,
                         board_part_number TEXT, product_manfacturer TEXT,
                         product_name TEXT, product_part_number TEXT,
                         UNIQUE (fru_description))""")
                # Create server tags table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_tags_table +
                               """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
                # Columns added after the original schema shipped;
                # _add_table_column is a no-op when the column exists.
                # Add columns for image_table
                self._add_table_column(cursor, image_table, "category", "TEXT")
                # Add columns for cluster_table
                self._add_table_column(cursor, cluster_table, "base_image_id",
                                       "TEXT")
                self._add_table_column(cursor, cluster_table,
                                       "package_image_id", "TEXT")
                self._add_table_column(cursor, cluster_table, "provisioned_id",
                                       "TEXT")
                self._add_table_column(cursor, cluster_table,
                                       "provision_role_sequence", "TEXT")
                # Add columns for server_table
                self._add_table_column(cursor, server_table, "reimaged_id",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "provisioned_id",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "network", "TEXT")
                self._add_table_column(cursor, server_table, "top_of_rack",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "contrail",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ssh_public_key",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ssh_private_key",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ipmi_interface",
                                       "TEXT")

            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # During init, we check if any of the Cluster in DB are missing
            # any Storage Parameters (Generated UUIDs)
            cluster_list = self._get_items(cluster_table, None, None, True,
                                           None)
            for cluster in cluster_list:
                # Check if storage parameters are present in Cluster,
                # else generate them
                if 'storage_fsid' not in set(
                        eval(cluster['parameters']
                             )) or 'storage_virsh_uuid' not in set(
                                 eval(cluster['parameters'])):
                    self.update_cluster_uuids(cluster)

            self.update_image_table()
        except Exception as e:
            raise e

    # End of __init__

    def update_image_version(self, image):
        """Fill in parameters['version'] for ubuntu package images.

        Reads the version via 'dpkg-deb -f <path> Version' and persists it
        through modify_image(). No-op when the image already carries a
        real version or is not a contrail(-storage) ubuntu package.

        Fix: when the stored 'parameters' field was empty/falsy it stayed
        a string, and the later parameters['version'] assignment raised
        TypeError; it now defaults to an empty dict.
        """
        #pdb.set_trace()
        if not image:
            return

        parameters = image.get('parameters', "")
        # Parameters are persisted as str(dict); decode with eval()
        # (retained for compatibility), defaulting to an empty dict.
        parameters = eval(parameters) if parameters else {}

        # if version is not there or NO_VERSION (not found) last time, try
        # to find the version using dpkg-deb
        if (parameters and 'version' in parameters
                and parameters['version'] != ''):
            # version is already found, no action needed
            if parameters['version'] != 'NO_VERSION':
                return

        # only ubuntu packages are processed for finding version
        if not (image['type'] == 'contrail-storage-ubuntu-package'
                or image['type'] == 'contrail-ubuntu-package'):
            return

        # following used for getting details about ubuntu package
        # dpkg-deb -f /path/to/package.deb Version
        extn = os.path.splitext(image['path'])[1]
        image_path = '/etc/contrail_smgr/images/' + image['id'] + extn
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "update_image_version path : %s" % image_path)
        version = subprocess.check_output(
            ['dpkg-deb', '-f', image_path, 'Version'])
        parameters['version'] = version.strip('\n')

        image['parameters'] = parameters
        self.modify_image(image)

    ##End of update_image_version

    def update_image_table(self):
        """Refresh the version parameter of every image row."""
        #pdb.set_trace()
        for image in self.get_image(None, detail=True):
            self.update_image_version(image)

    def get_subnet_mask(self, server):
        """Return the server's own subnet mask, else its cluster's (if any)."""
        mask = server.get('subnet_mask', None)
        cluster = server.get('cluster', None) if not mask else None
        if cluster:
            mask = cluster.get('subnet_mask', None)
        return mask

    # End get_subnet_mask

    def get_server_domain(self, server_id):
        """Return the DNS domain for server_id.

        Falls back from the server row to its cluster's 'domain' parameter;
        returns "" (after logging an error) when nothing is found.
        """
        server_domain = ""
        if not server_id:
            return server_domain
        servers = self.get_server({"id": server_id}, detail=True)
        if not servers:
            self._smgr_log.log(self._smgr_log.ERROR,
                               "No server found with server_id " + server_id)
            return server_domain
        server_domain = servers[0].get('domain', "")
        if server_domain:
            return server_domain
        cluster_id = servers[0].get('cluster_id', "")
        if not cluster_id:
            self._smgr_log.log(self._smgr_log.ERROR,
                               "No domain found for server_id " + server_id)
            return server_domain
        clusters = self.get_cluster({"id": cluster_id}, detail=True)
        if not clusters:
            self._smgr_log.log(
                self._smgr_log.ERROR,
                "No domain found for server_id %s, no cluster for cluster_id %s,"
                % (server_id, cluster_id))
            return server_domain
        # Cluster params are persisted as str(dict); decoded with eval().
        server_domain = eval(clusters[0]['parameters']).get('domain', "")
        if not server_domain:
            self._smgr_log.log(
                self._smgr_log.ERROR,
                "No domain found for server_id %s, cluster_id %s"
                % (server_id, cluster_id))
        return server_domain

    # End get_server_domain

    def delete_tables(self):
        """Delete every row from all server-manager tables (schema kept).

        Fix: the handler was a bare "except:" followed by "raise e" with
        'e' never bound, so any failure surfaced as a NameError that
        masked the real error.
        """
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.executescript("""
                DELETE FROM """ + cluster_table + """;
                DELETE FROM """ + server_table + """;
                DELETE FROM """ + server_tags_table + """;
                DELETE FROM """ + server_status_table + """;
                DELETE FROM """ + inventory_table + """;
                DELETE FROM """ + image_table + ";")
        except Exception as e:
            raise e

    # End of delete_tables

    def get_server_id(self, server_mac):
        """Return the server id for a MAC, or None on no match / any error.

        The MAC is canonicalised to colon form before the lookup.
        """
        try:
            if server_mac:
                server_mac = str(EUI(server_mac)).replace("-", ":")
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(
                    "SELECT id FROM " + server_table + " WHERE mac_address=?",
                    (server_mac, ))
                row = cursor.fetchone()
                return row[0] if row else None
        except:
            # deliberate best-effort: any failure reads as "unknown server"
            return None

    # end get_server_id

    # Below function returns value corresponding to tag_id from
    # server_tags_table
    def get_server_tag(self, tag_id):
        """Return the value for tag_id from the tags table, or None."""
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(
                    "SELECT value FROM " + server_tags_table +
                    " WHERE tag_id=?", (tag_id, ))
                row = cursor.fetchone()
                return row[0] if row else None
        except:
            # deliberate best-effort: any failure reads as "no such tag"
            return None

    # end get_server_tag

    def get_server_mac(self, id):
        """Return the MAC address for a server id, or None."""
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(
                    "SELECT mac_address FROM " + server_table + " WHERE id=?",
                    (id, ))
                row = cursor.fetchone()
                return row[0] if row else None
        except:
            # deliberate best-effort: any failure reads as "unknown server"
            return None

    def _add_row(self, table_name, dict):
        try:
            keys, values = zip(*dict.items())
            insert_str = "INSERT OR IGNORE INTO %s (%s) values (%s)" \
                % (table_name,
                   (",".join(keys)),
                   (",".join('?' * len(keys))))
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(insert_str, values)
        except Exception as e:
            raise e

    # end _add_row

    # Generic function to delete rows matching given criteria
    # from given table.
    # match_dict maps columns to values that rows must match.
    # unmatch_dict maps columns to values that rows must NOT match.
    def _delete_row(self, table_name, match_dict=None, unmatch_dict=None):
        try:
            delete_str = "DELETE FROM %s" % (table_name)
            # form a string to provide to where match clause
            match_list = []
            where = None
            if match_dict:
                where = match_dict.get("where", None)

            if where:
                delete_str += " WHERE " + where
            else:
                if match_dict:
                    match_list = [
                        "%s = \'%s\'" % (k, v)
                        for k, v in match_dict.iteritems()
                    ]
                if unmatch_dict:
                    match_list += [
                        "%s != \'%s\'" % (k, v)
                        for k, v in unmatch_dict.iteritems()
                    ]
                if match_list:
                    match_str = " and ".join(match_list)
                    delete_str += " WHERE " + match_str

            with self._con:
                cursor = self._con.cursor()
                cursor.execute(delete_str)
        except Exception as e:
            raise e

    # End _delete_row

    def _modify_row(self,
                    table_name,
                    dict,
                    match_dict=None,
                    unmatch_dict=None):
        try:
            keys, values = zip(*dict.items())
            modify_str = "UPDATE %s SET " % (table_name)
            update_list = ",".join(key + "=?" for key in keys)
            modify_str += update_list
            match_list = []
            if match_dict:
                match_list = ["%s = ?" % (k) for k in match_dict.iterkeys()]
                match_values = [v for v in match_dict.itervalues()]
            if unmatch_dict:
                match_list += [
                    "%s != ?" % (k) for k in unmatch_dict.iterkeys()
                ]
                match_values += [v for v in unmatch_dict.itervalues()]
            if match_list:
                match_str = " and ".join(match_list)
                match_values_str = ",".join(match_values)
                modify_str += " WHERE " + match_str
                values += (match_values_str, )
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(modify_str, values)
        except Exception as e:
            raise e

    def _get_items(self,
                   table_name,
                   match_dict=None,
                   unmatch_dict=None,
                   detail=False,
                   always_fields=None):
        try:
            with self._con:
                cursor = self._con.cursor()
                if detail:
                    sel_cols = "*"
                else:
                    sel_cols = ",".join(always_fields)
                select_str = "SELECT %s FROM %s" % (sel_cols, table_name)
                # form a string to provide to where match clause
                match_list = []
                where = None
                if match_dict:
                    where = match_dict.get("where", None)
                if where:
                    select_str += " WHERE " + where
                else:
                    if match_dict:
                        match_list = [
                            "%s = \'%s\'" % (k, v)
                            for k, v in match_dict.iteritems()
                        ]
                    if unmatch_dict:
                        match_list += [
                            "%s != \'%s\'" % (k, v)
                            for k, v in unmatch_dict.iteritems()
                        ]
                    if match_list:
                        match_str = " and ".join(match_list)
                        select_str += " WHERE " + match_str
                cursor.execute(select_str)
            rows = [x for x in cursor]
            cols = [x[0] for x in cursor.description]
            items = []
            for row in rows:
                item = {}
                for prop, val in zip(cols, row):
                    item[prop] = val
                items.append(item)
            return items
        except Exception as e:
            raise e

    # End _get_items

    def add_cluster(self, cluster_data):
        """Insert a cluster row.

        Unicode strings are normalised, None entries are stripped from
        'parameters', and dict/list fields (parameters, role sequence,
        email) are serialised with str() into their text columns.
        """
        try:
            # covert all unicode strings in dict
            cluster_data = ServerMgrUtil.convert_unicode(cluster_data)
            if 'parameters' in cluster_data:
                params = DictUtils.remove_none_from_dict(
                    cluster_data.pop("parameters"))
                cluster_data['parameters'] = str(params if params else {})
            sequence = cluster_data.pop("provision_role_sequence", None)
            if sequence is not None:
                cluster_data['provision_role_sequence'] = str(sequence)
            email = cluster_data.pop("email", None)
            if email is not None:
                cluster_data['email'] = str(email)
            self._add_row(cluster_table, cluster_data)
        except Exception as err:
            raise err

    # End of add_cluster

    def add_inventory(self, fru_data):
        """Add a FRU (inventory) row for a server.

        Returns 0 on success. On failure returns the error message
        string instead of raising (callers treat a non-zero return as
        the error text).
        """
        try:
            fru_data = dict(fru_data)
            # Only rows carrying a server 'id' are meaningful inventory.
            if fru_data and 'id' in fru_data:
                self._add_row(inventory_table, fru_data)
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "ADDED FRU INFO FOR " + fru_data['id'])
        except Exception as e:
            # Bug fix: e.message is deprecated (removed in py3) and is
            # empty for exceptions built with multiple args; str(e)
            # always yields the message text.
            return str(e)
        return 0

    # End of add_inventory

    def add_server(self, server_data):
        """Insert a new server row.

        Dict/list-valued fields (roles, network, contrail, parameters,
        email, top_of_rack, interface blocks) are flattened to their
        str() form before insert; caller-supplied tag names are mapped
        to tag_id columns via the server tags table. Returns 0 on
        success; any underlying error is re-raised.
        """
        try:
            # Convert all unicode strings in the dict to plain str.
            server_data = ServerMgrUtil.convert_unicode(server_data)
            # Canonicalize the MAC to colon-separated form (EUI's
            # default str() uses '-').
            if 'mac_address' in server_data and server_data['mac_address']:
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            # Store roles list as a text field.
            roles = server_data.pop("roles", None)
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Raises if the referenced cluster does not exist.
                self.check_obj("cluster", {"id": cluster_id})
            if roles is not None:
                server_data['roles'] = str(roles)
            intf_control = server_data.pop("control_data_network", None)
            if intf_control:
                server_data['intf_control'] = str(intf_control)
            intf_bond = server_data.pop("bond_interface", None)
            if intf_bond:
                server_data['intf_bond'] = str(intf_bond)
            # Add network block: None values stripped, stored as text.
            if 'network' in server_data:
                network = server_data.pop('network')
                network = DictUtils.remove_none_from_dict(network)
                if not network:
                    network = {}
                server_data['network'] = str(network)
            # Add top_of_rack configuration (stored as str, even if None).
            if 'top_of_rack' in server_data:
                top_of_rack_data_str = str(server_data.pop(
                    "top_of_rack", None))
                server_data['top_of_rack'] = top_of_rack_data_str
            # Add contrail block, same handling as network above.
            if 'contrail' in server_data:
                contrail = server_data.pop('contrail')
                contrail = DictUtils.remove_none_from_dict(contrail)
                if not contrail:
                    contrail = {}
                server_data['contrail'] = str(contrail)
            # Store email list as a text field.
            email = server_data.pop("email", None)
            if email:
                server_data['email'] = str(email)
            # Map caller-supplied tag names to the tag_id columns used
            # in the server table (reverse lookup of the tags table).
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v, k) for k, v in tags_dict.iteritems())
                for k, v in server_tags.iteritems():
                    server_data[rev_tags_dict[k]] = v
            # Store server_params dictionary as a text field.
            if 'parameters' in server_data:
                server_parameters = server_data.pop('parameters')
                server_parameters = DictUtils.remove_none_from_dict(
                    server_parameters)
                if not server_parameters:
                    server_parameters = {}
                server_data['parameters'] = str(server_parameters)
            self._add_row(server_table, server_data)
        except Exception as e:
            raise e
        return 0

    # End of add_server

    # Unlike the add functions for other tables, tag_data always holds
    # the complete set of tags, so the table is wiped before re-adding
    # every entry.
    def add_server_tags(self, tag_data):
        """Replace all rows of the server tags table with tag_data."""
        try:
            # Clear the whole table first; tag_data is authoritative.
            with self._con:
                cur = self._con.cursor()
                cur.executescript("\n                DELETE FROM "
                                  + server_tags_table + ";")
            for tag_id, tag_value in tag_data.iteritems():
                self._add_row(server_tags_table,
                              {'tag_id': tag_id, 'value': tag_value})
        except Exception as e:
            raise e

    # End of add_server_tags

    def server_discovery(self, action, entity):
        """Handle a discovery event for a server.

        action "add": insert a newly discovered server, or refresh the
        existing row (e.g. its IP) when the MAC is already known.
        action "delete": remove the row, but only if it was created by
        discovery. Any other action is a no-op. All errors are
        deliberately swallowed -- discovery is best-effort.
        """
        try:
            # Canonicalize the MAC to colon-separated form before matching.
            if 'mac_address' in entity:
                entity['mac_address'] = str(EUI(
                    entity['mac_address'])).replace("-", ":")
            mac_address = entity.get("mac_address", None)
            if action.lower() == "add":
                # If this server is already present in our table,
                # update IP address if DHCP was not static.
                servers = self._get_items(server_table,
                                          {"mac_address": mac_address},
                                          detail=True)
                if servers:
                    server = servers[0]
                    self._modify_row(server_table, entity,
                                     {"mac_address": mac_address}, {})
                    return
                # New server: seed empty parameter/network/contrail
                # blocks and mark the row as discovered.
                entity['parameters'] = "{}"
                entity['network'] = "{}"
                entity['contrail'] = "{}"
                entity['discovered'] = "true"
                entity['status'] = "server_discovered"
                self._add_row(server_table, entity)
            elif action.lower() == "delete":
                servers = self.get_server({"mac_address": mac_address},
                                          detail=True)
                # Only remove rows that discovery itself created; leave
                # user-configured servers alone.
                if ((servers) and (servers[0]['discovered'] == "true")):
                    self._delete_row(server_table,
                                     {"mac_address": mac_address})
            else:
                return
        except Exception as e:
            # Intentional: discovery failures must not propagate.
            return

    # End of server_discovery

    def add_image(self, image_data):
        """Insert a new image row; the parameters dict is stored as text."""
        try:
            # Normalize unicode strings to plain str.
            image_data = ServerMgrUtil.convert_unicode(image_data)
            params = image_data.pop("parameters", None)
            if params is not None:
                image_data['parameters'] = str(params)
            self._add_row(image_table, image_data)
        except Exception as e:
            raise e

    # End of add_image

    def delete_cluster(self, match_dict=None, unmatch_dict=None):
        """Delete a cluster after verifying it exists and has no servers."""
        try:
            # Raises if no matching cluster exists.
            self.check_obj("cluster", match_dict, unmatch_dict)
            cluster_id = match_dict.get("id", None)
            servers = self.get_server({'cluster_id': cluster_id},
                                      detail=True) if cluster_id else None
            if servers:
                # Refuse to delete while servers still reference it.
                msg = ("Servers are present in this cluster, "
                       "remove cluster association, prior to cluster delete.")
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            self._delete_row(cluster_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e

    # End of delete_cluster

    def check_obj(self,
                  type,
                  match_dict=None,
                  unmatch_dict=None,
                  raise_exception=True):
        """Check that at least one object of the given type matches.

        type is one of "server", "cluster" or "image". Returns True if
        a match exists; otherwise raises (when raise_exception is True)
        or returns False.
        """
        # Bug fix: the original if/elif chain left db_obj unbound for an
        # unknown type, crashing with UnboundLocalError. A dispatch map
        # lets unknown types fall through to the "not found" path.
        getters = {
            "server": self.get_server,
            "cluster": self.get_cluster,
            "image": self.get_image,
        }
        cb = getters.get(type)
        db_obj = cb(match_dict, unmatch_dict, detail=False) if cb else None

        if not db_obj:
            msg = "%s not found" % (type)
            if raise_exception:
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            return False
        return True

    #end of check_obj

    def delete_server(self, match_dict=None, unmatch_dict=None):
        """Delete server rows matching the given criteria."""
        try:
            # Canonicalize any MAC criteria to colon-separated form so
            # they match the stored representation.
            for criteria in (match_dict, unmatch_dict):
                if criteria and criteria.get("mac_address"):
                    criteria["mac_address"] = str(
                        EUI(criteria["mac_address"])).replace("-", ":")
            # Raises if no matching server exists.
            self.check_obj("server", match_dict, unmatch_dict)
            self._delete_row(server_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e

    # End of delete_server

    def delete_server_tag(self, match_dict=None, unmatch_dict=None):
        """Remove matching rows from the server tags table."""
        try:
            self._delete_row(server_tags_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e

    # End of delete_server_tag

    def delete_image(self, match_dict=None, unmatch_dict=None):
        """Delete image rows matching the given criteria."""
        try:
            # Raises if no matching image exists.
            self.check_obj("image", match_dict, unmatch_dict)
            self._delete_row(image_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e

    # End of delete_image

    def modify_cluster(self, cluster_data):
        """Update an existing cluster row identified by cluster_data['id'].

        A supplied parameters dict is merged into the stored one (None
        values removed); passing parameters=None resets them to {}.
        Raises if the id is missing or unknown.
        """
        try:
            # Convert all unicode strings in the dict to plain str.
            cluster_data = ServerMgrUtil.convert_unicode(cluster_data)
            cluster_id = cluster_data.get('id', None)
            if not cluster_id:
                raise Exception("No cluster id specified")
            self.check_obj("cluster", {"id": cluster_id})
            db_cluster = self.get_cluster({"id": cluster_id}, detail=True)
            if not db_cluster:
                msg = "%s is not valid" % cluster_id
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)

            # Merge new cluster parameters into the stored ones.
            if 'parameters' in cluster_data:
                cluster_params = cluster_data.pop('parameters')
                if cluster_params is None:
                    # Explicit None resets the stored parameters.
                    db_cluster_params = {}
                else:
                    # NOTE(review): parameters are persisted as str(dict)
                    # and re-hydrated with eval -- safe only as long as
                    # the DB content is trusted.
                    db_cluster_params = eval(db_cluster[0].get(
                        'parameters', '{}'))
                    db_cluster_params = DictUtils.merge_dict(
                        db_cluster_params, cluster_params)
                    db_cluster_params = DictUtils.remove_none_from_dict(
                        db_cluster_params)
                cluster_data['parameters'] = str(db_cluster_params)

            # Store email list as a text field.
            email = cluster_data.pop("email", None)
            if email is not None:
                cluster_data['email'] = str(email)

            # Provision sequence list is stored as a text field.
            provision_role_sequence = cluster_data.pop(
                "provision_role_sequence", None)
            if provision_role_sequence is not None:
                cluster_data['provision_role_sequence'] = str(
                    provision_role_sequence)
            self._modify_row(cluster_table, cluster_data, {'id': cluster_id},
                             {})
        except Exception as e:
            raise e

    # End of modify_cluster

    def modify_image(self, image_data):
        """Update an existing image row identified by image_data['id'].

        'path' and 'type' are immutable; supplying a changed value for
        either raises. Raises if the id is missing or unknown.
        """
        try:
            # Convert all unicode strings in the dict to plain str.
            image_data = ServerMgrUtil.convert_unicode(image_data)
            image_id = image_data.get('id', None)
            if not image_id:
                raise Exception("No image id specified")
            db_image = self.get_image({'id': image_id}, detail=True)
            # Bug fix: an unknown id used to crash with IndexError on
            # db_image[0]; report it explicitly instead.
            if not db_image:
                msg = "image not found"
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            # Bug fix: immutable fields are only compared when the
            # caller supplies them -- a partial update without 'path' or
            # 'type' used to crash with KeyError.
            if 'path' in image_data and \
                    image_data['path'] != db_image[0]['path']:
                msg = ('Image path cannnot be modified')
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            if 'type' in image_data and \
                    image_data['type'] != db_image[0]['type']:
                msg = ('Image type cannnot be modified')
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            # Store image_params dictionary as a text field.
            image_parameters = image_data.pop("parameters", None)
            if image_parameters is not None:
                image_data['parameters'] = str(image_parameters)
            self._modify_row(image_table, image_data, {'id': image_id}, {})
        except Exception as e:
            raise e

    # End of modify_image

    def modify_server(self, server_data):
        """Update an existing server row identified by MAC or id.

        Returns the pre-modification DB rows (list) on success, or the
        falsy lookup result when no such server exists. Dict-valued
        fields (network, contrail, parameters) are merged into the
        stored values; list-valued fields are stored as str().
        """
        # Convert all unicode strings in the dict to plain str.
        server_data = ServerMgrUtil.convert_unicode(server_data)
        db_server = None
        if 'mac_address' in server_data.keys() and \
                 server_data['mac_address'] != None:
            db_server = self.get_server(
                {'mac_address': server_data['mac_address']}, detail=True)
        elif 'id' in server_data.keys() and server_data['id'] != None:
            db_server = self.get_server({'id': server_data['id']}, detail=True)
        if not db_server:
            return db_server
        try:
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Raises if the referenced cluster does not exist.
                self.check_obj("cluster", {"id": cluster_id})

            # Canonicalize the MAC to colon-separated form.
            if 'mac_address' in server_data:
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            server_mac = server_data.get('mac_address', None)
            if not server_mac:
                server_id = server_data.get('id', None)
                if not server_id:
                    msg = ("No server MAC or id specified")
                    self.log_and_raise_exception(msg, ERR_OPR_ERROR)
                else:
                    server_mac = self.get_server_mac(server_id)
            # Bug fix: the immutable-key check used to test for a
            # 'server_mac' key that callers never send, so it never
            # fired; test for 'mac_address' instead.
            if 'id' in server_data.keys() and \
                    'mac_address' in server_data.keys():
                self.check_obj('server', {'id': server_data['id']})
                # Reject if primary key values change.
                if server_data['mac_address'] != db_server[0]['mac_address']:
                    # Bug fix: msg used to be built as a tuple.
                    msg = 'MAC address cannnot be modified'
                    self.log_and_raise_exception(msg, ERR_OPR_ERROR)

            # Store roles list as a text field.
            roles = server_data.pop("roles", None)
            if roles is not None:
                server_data['roles'] = str(roles)
            intf_control = server_data.pop("control_data_network", None)
            if intf_control:
                server_data['intf_control'] = str(intf_control)
            intf_bond = server_data.pop("bond_interface", None)
            if intf_bond:
                server_data['intf_bond'] = str(intf_bond)

            # Merge the network block into the stored value; an explicit
            # None resets it to {}.
            if 'network' in server_data:
                network = server_data.pop('network')
                if network is None:
                    db_network = {}
                else:
                    # NOTE(review): stored as str(dict), re-hydrated
                    # with eval -- safe only for trusted DB content.
                    db_network = eval(db_server[0].get('network', '{}'))
                    db_network = DictUtils.merge_dict(db_network, network)
                    db_network = DictUtils.remove_none_from_dict(db_network)
                server_data['network'] = str(db_network)

            # Merge the contrail block, same handling as network above.
            if 'contrail' in server_data:
                contrail = server_data.pop('contrail')
                if contrail is None:
                    db_contrail = {}
                else:
                    db_contrail = eval(db_server[0].get('contrail', '{}'))
                    db_contrail = DictUtils.merge_dict(db_contrail, contrail)
                    db_contrail = DictUtils.remove_none_from_dict(db_contrail)
                server_data['contrail'] = str(db_contrail)

            # Replace top_of_rack wholesale (stored as str, even if None).
            if 'top_of_rack' in server_data:
                top_of_rack_data_str = str(server_data.pop(
                    "top_of_rack", None))
                server_data['top_of_rack'] = top_of_rack_data_str

            # Map caller-supplied tag names to tag_id columns.
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v, k) for k, v in tags_dict.iteritems())
                for k, v in server_tags.iteritems():
                    server_data[rev_tags_dict[k]] = v

            # Bug fix: the original condition
            # '"ssh_private_key" and "ssh_public_key" in server_data'
            # only tested for the public key (the first operand is a
            # non-empty string, always truthy) and stored the literal
            # string "None" for a missing private key. Normalize each
            # supplied key independently instead.
            for ssh_key in ("ssh_private_key", "ssh_public_key"):
                if ssh_key in server_data:
                    server_data[ssh_key] = str(server_data[ssh_key])

            # Merge server_params dictionary into the stored text field;
            # an explicit None resets it to {}.
            if 'parameters' in server_data:
                server_params = server_data.pop('parameters')
                if server_params is None:
                    db_server_params = {}
                else:
                    db_server_params = eval(db_server[0].get(
                        'parameters', '{}'))
                    db_server_params = DictUtils.merge_dict(
                        db_server_params, server_params)
                    db_server_params = DictUtils.remove_none_from_dict(
                        db_server_params)
                server_data['parameters'] = str(db_server_params)

            # Store email list as a text field.
            email = server_data.pop("email", None)
            if email is not None:
                server_data['email'] = str(email)
            self._modify_row(server_table, server_data,
                             {'mac_address': server_mac}, {})
            return db_server
        except Exception as e:
            raise e

    # End of modify_server

    # Unlike the modify functions for other tables, tag_data carries
    # information for all tags; each one is updated in its own row.
    def modify_server_tags(self, tag_data):
        """Update the value of every tag_id present in tag_data."""
        try:
            for tag_id, tag_value in tag_data.iteritems():
                self._modify_row(server_tags_table,
                                 {'tag_id': tag_id, 'value': tag_value},
                                 {'tag_id': tag_id}, {})
        except Exception as e:
            raise e

    # End of modify_server_tags

    def get_image(self,
                  match_dict=None,
                  unmatch_dict=None,
                  detail=False,
                  field_list=None):
        """Fetch image rows; the brief view returns only the image id."""
        try:
            images = self._get_items(image_table, match_dict, unmatch_dict,
                                     detail, field_list or ["id"])
        except Exception as e:
            raise e
        return images

    # End of get_image

    def get_server_tags(self, match_dict=None, unmatch_dict=None, detail=True):
        """Return server tags as a {tag_id: value} dict."""
        try:
            # Always fetch full rows; 'value' is needed for the mapping.
            rows = self._get_items(server_tags_table, match_dict,
                                   unmatch_dict, True, ["tag_id"])
            tag_dict = dict((row['tag_id'], row['value']) for row in rows)
        except Exception as e:
            raise e
        return tag_dict

    # End of get_server_tags

    def get_status(self, match_key=None, match_value=None, detail=False):
        """Fetch server-status rows where match_key equals match_value.

        NOTE(review): this is the only call site that passes the
        'always_field' keyword to _get_items; every other caller passes
        the field list positionally as 'field_list' -- confirm
        _get_items actually accepts this keyword name.
        """
        try:
            status = self._get_items(server_status_table,
                                     {match_key: match_value},
                                     detail=detail,
                                     always_field=["id"])
        except Exception as e:
            raise e
        return status

    # End of get_status

    def put_status(self, server_data):
        """Insert or update the status row for a server (upsert by id)."""
        try:
            server_id = server_data.get('id', None)
            if not server_id:
                raise Exception("No server id specified")
            # Update in place when a row already exists, else insert.
            existing = self._get_items(server_status_table,
                                       {"id": server_id}, detail=True)
            if existing:
                self._modify_row(server_status_table, server_data,
                                 {'id': server_id}, {})
            else:
                self._add_row(server_status_table, server_data)
        except Exception as e:
            raise e

    # End of put_status

    def get_server(self,
                   match_dict=None,
                   unmatch_dict=None,
                   detail=False,
                   field_list=None):
        """Fetch server rows; the brief view returns id, MAC and IP."""
        try:
            # Canonicalize any MAC criterion to colon-separated form so
            # it matches the stored representation.
            if match_dict and match_dict.get("mac_address"):
                match_dict["mac_address"] = str(
                    EUI(match_dict["mac_address"])).replace("-", ":")
            servers = self._get_items(
                server_table, match_dict, unmatch_dict, detail,
                field_list or ["id", "mac_address", "ip_address"])
        except Exception as e:
            raise e
        return servers

    # End of get_server

    def get_inventory(self, match_dict=None, unmatch_dict=None):
        """Fetch FRU inventory rows (always the detailed view)."""
        try:
            frus = self._get_items(inventory_table, match_dict,
                                   unmatch_dict, True, None)
        except Exception as e:
            raise e
        return frus

    # End of get_inventory

    def get_cluster(self,
                    match_dict=None,
                    unmatch_dict=None,
                    detail=False,
                    field_list=None):
        """Fetch cluster rows; the brief view returns only the cluster id."""
        try:
            cluster = self._get_items(cluster_table, match_dict, unmatch_dict,
                                      detail, field_list or ["id"])
        except Exception as e:
            raise e
        return cluster

    # End of get_cluster

    # If any UUIDs are missing from an existing Cluster, we add them
    # during ServerManager DB init.
    def update_cluster_uuids(self, cluster):
        """Backfill uuid/storage_fsid/storage_virsh_uuid in cluster params."""
        try:
            params = {}
            params_str = cluster['parameters']
            if params_str:
                # NOTE(review): parameters are persisted as str(dict)
                # and re-hydrated with eval throughout this class.
                params = eval(params_str)
            # Generate any identifier that is not present yet.
            for key in ("uuid", "storage_fsid", "storage_virsh_uuid"):
                if key not in params:
                    params[key] = str(uuid.uuid4())
        except Exception as e:
            raise e

        cluster['parameters'] = str(params)
        self._modify_row(cluster_table, cluster, {'id': cluster['id']}, {})
class ServerMgrCobbler(object):
    """Wrapper around the cobbler XML-RPC API (distros, profiles,
    repos, systems) used by Server Manager."""

    # Connection defaults; the real values are set in __init__.
    _cobbler_ip = _DEF_COBBLER_IP
    _cobbler_port = _DEF_COBBLER_PORT
    _cobbler_username = _DEF_USERNAME
    _cobbler_password = _DEF_PASSWORD
    # XML-RPC proxy and auth token, populated on login in __init__.
    _server = None
    _token = None
    # 30 minute timer to keep validating the cobbler token
    _COB_TOKEN_CHECK_TIMER = 1800
    # Image types handled as VMware ESXi distros in create_distro.
    _vmware_types = ["esxi5.1", "esxi5.5", "esxi6.0", "esxi6.5"]

    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Connect to the cobbler XML-RPC endpoint and log in.

        Stores the connection details, builds the xmlrpclib proxy
        (omitting the port from the URL when none is given) and obtains
        an auth token via login.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")


        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)
        # NOTE(review): nothing in this try block invokes subprocess;
        # this handler looks copied from _init_create_repo -- confirm
        # whether it is ever reachable.
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" %(e.returncode, e.cmd))
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
        except Exception as e:
            raise e
    # End of __init__

    def log_and_raise_exception(self, msg, err_code = ERR_OPR_ERROR):
        """Log msg at ERROR level, then raise ServerMgrException."""
        self._smgr_log.log(self._smgr_log.ERROR, msg)
        raise ServerMgrException(msg, err_code)

    def _init_create_repo(self, repo_name, base_dir):
        """Create (or reset) the cobbler repo repo_name mirroring
        base_dir + repo_name, then run a reposync for it.

        Raises ServerMgrException (via log_and_raise_exception) if the
        reposync command fails.
        """
        try:
            cobbler_server = self._server
            token = self._token
            # Reuse the existing handle if cobbler already knows the repo.
            repo = cobbler_server.find_repo({"name": repo_name})
            if repo:
                rid = cobbler_server.get_repo_handle(
                    repo_name, token)
            else:
                rid = cobbler_server.new_repo(token)
            cobbler_server.modify_repo(rid, "arch", "x86_64", token)
            repo_dir = base_dir + repo_name
            # Consistency fix: use the local 'token' like every other
            # call in this method (same value as self._token here).
            cobbler_server.modify_repo(
                rid, "name", repo_name, token)
            cobbler_server.modify_repo(rid, "mirror", repo_dir, token)
            cobbler_server.modify_repo(rid, "keep_updated", True, token)
            cobbler_server.modify_repo(rid, "priority", "99", token)
            cobbler_server.modify_repo(rid, "rpm_list", [], token)
            cobbler_server.modify_repo(rid, "yumopts", {}, token)
            cobbler_server.modify_repo(rid, "mirror_locally", True, token)
            cobbler_server.modify_repo(rid, "environment", {}, token)
            cobbler_server.modify_repo(rid, "comment", "...", token)
            cobbler_server.save_repo(rid, token)
            # Issue cobbler reposync for this repo. Argument-list form
            # with shell=False avoids shell interpretation of repo_name.
            subprocess.check_call(
                ["cobbler", "reposync", "--only=" + repo_name])
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" %(e.returncode, e.cmd))
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
        except Exception as e:
            raise e
    # End of _init_create_repo

    # Called before any XML-RPC operation that needs a valid token;
    # re-authenticates with cobbler when the token has expired.
    def _validate_token(self, token):
        """Refresh self._token via login if token is no longer valid."""
        if not self._server.token_check(token):
            self._token = self._server.login(
                self._cobbler_username, self._cobbler_password)
    # end _validate_token

    def create_distro(self, distro_name, image_type, path,
                      kernel_file, initrd_file, cobbler_ip_address):
        """Create a cobbler distro for the given image, if not present.

        Sets kernel/initrd from path; for centos/fedora/redhat only the
        ksmeta install tree is added, for ubuntu the arch/breed/
        os_version triple, and for VMware ESXi types the tree plus boot
        and template file mappings. No-op when the distro exists.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token)
            # If distro already exists in cobbler, nothing to do.
            distro = self._server.find_distro({"name":  distro_name})
            if distro:
                return
            distro_id = self._server.new_distro(self._token)
            self._server.modify_distro(distro_id, 'name',
                                       distro_name, self._token)
            self._server.modify_distro(distro_id, 'kernel',
                                       path + kernel_file, self._token)
            self._server.modify_distro(distro_id, 'initrd',
                                       path + initrd_file, self._token)
            # RPM-based distros: point ksmeta at the HTTP install tree.
            if ((image_type == 'centos') or (image_type == 'fedora')
                or (image_type == 'redhat')):
                self._server.modify_distro(
                    distro_id, 'ksmeta',
                    'tree=http://' + cobbler_ip_address +
                    '/contrail/images/' + distro_name,
                    self._token)
            if (image_type == 'ubuntu'):
                self._server.modify_distro(distro_id, 'arch',
                                           'x86_64', self._token)
                self._server.modify_distro(distro_id, 'breed',
                                           'ubuntu', self._token)
                self._server.modify_distro(distro_id, 'os_version',
                                           'precise', self._token)
            elif (image_type in self._vmware_types):
                # e.g. "esxi5.5" -> cobbler os_version "esxi55".
                os_version = image_type.replace(".", "")
                self._server.modify_distro(
                    distro_id, 'ksmeta',
                    'tree=http://' + cobbler_ip_address +
                    '/contrail/images/' + distro_name,
                    self._token)
                self._server.modify_distro(
                    distro_id, 'arch', 'x86_64', self._token)
                self._server.modify_distro(
                    distro_id, 'breed', 'vmware', self._token)
                self._server.modify_distro(
                    distro_id, 'os_version', os_version, self._token)
                # Expose the image files and the per-version boot config
                # template to the PXE environment.
                self._server.modify_distro(
                    distro_id, 'boot_files',
                    '$local_img_path/*.*=' + path + '/*.*',
                    self._token)
                self._server.modify_distro(
                    distro_id, 'template_files',
                    '/etc/cobbler/pxe/bootcfg_%s.template=' %(
                        os_version) +
                    '$local_img_path/cobbler-boot.cfg',
                    self._token)
            else:
                pass
            self._server.save_distro(distro_id, self._token)
        except Exception as e:
            raise e
    # End of create_distro

    def create_profile(self, profile_name,
                       distro_name, image_type, ks_file, kernel_options,
                        ks_meta):
        """Create a cobbler profile bound to distro_name, if not present."""
        try:
            # Re-login if the cached cobbler token expired.
            self._validate_token(self._token)
            # Nothing to do when the profile already exists.
            if self._server.find_profile({"name":  profile_name}):
                return
            profile_id = self._server.new_profile(self._token)
            for field, value in (('name', profile_name),
                                 ("distro", distro_name),
                                 ("kickstart", ks_file),
                                 ("kernel_options", kernel_options),
                                 ("ks_meta", ks_meta)):
                self._server.modify_profile(profile_id, field, value,
                                            self._token)
            self._server.save_profile(profile_id, self._token)
        except Exception as e:
            raise e
    # End of create_profile

    def create_repo(self, repo_name, mirror):
        """Create or update a cobbler repo and reposync it.

        repo_name: cobbler repo name (reused if it already exists).
        mirror: mirror URL/path the repo is synced from.

        After saving the repo config, runs "cobbler reposync --only=<repo>"
        so the local mirror is populated immediately.  A failed reposync
        is logged and raised via log_and_raise_exception; other cobbler
        errors propagate to the caller.
        """
        # Validate cobbler token
        self._validate_token(self._token)
        repo = self._server.find_repo({"name": repo_name})
        if repo:
            rid = self._server.get_repo_handle(
                repo_name, self._token)
        else:
            rid = self._server.new_repo(self._token)
        self._server.modify_repo(rid, "arch", "x86_64", self._token)
        self._server.modify_repo(
            rid, "name", repo_name, self._token)
        self._server.modify_repo(rid, "mirror", mirror, self._token)
        self._server.modify_repo(rid, "mirror_locally", True, self._token)
        self._server.save_repo(rid, self._token)
        # Issue cobbler reposync for this repo. Use an argument list
        # (shell=False) so repo_name can never be shell-interpreted.
        cmd = ["cobbler", "reposync", "--only=" + repo_name]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            # Fixed: the old adjacent string literals produced
            # 'executing"..."' with no separating space.
            msg = ("create_repo: error %d when executing "
                   "\"%s\"" % (e.returncode, e.cmd))
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
    # End of create_repo

    def create_system(self, system_name, profile_name, package_image_id,
                      mac, ip, subnet, gway, system_domain,
                      ifname, enc_passwd, server_license, esx_nicname,
                      power_type, power_user, power_pass, power_address,
                      base_image, server_ip, partition=None,
                      node_cfg = None, ipmi_interface= None, kernel_version=None, kernel_repo_url=None):
        """Create or update the cobbler system record for one server.

        Sets hostname, power-management settings, the install network
        interface, and the ks_meta string consumed by the kickstart
        templates.  For centos with a contrail package image, a
        sub-profile named <profile>-<package_image_id> is created so
        the package repo is visible to this system.  Netboot is left
        disabled here; it is enabled later when the power-cycle API is
        called.  Exceptions from the cobbler server propagate to the
        caller.  NOTE(review): subnet and gway are accepted but unused
        in this method — presumably kept for interface compatibility;
        confirm against callers.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token)
            # Reuse an existing system record if present, else create one.
            system = self._server.find_system({"name":  system_name})
            if system:
                system_id = self._server.get_system_handle(
                    system_name, self._token)
            else:
                system_id = self._server.new_system(self._token)
                self._server.modify_system(system_id, 'name',
                                           system_name, self._token)
            self._server.modify_system(
                system_id, "hostname", system_name, self._token)
            # Remote power-management settings (e.g. IPMI) used when
            # the server is power-cycled through cobbler.
            self._server.modify_system(
                system_id, "power_type", power_type, self._token)
            self._server.modify_system(
                system_id, "power_user", power_user, self._token)
            self._server.modify_system(
                system_id, "power_pass", power_pass, self._token)
            self._server.modify_system(
                system_id, "power_address", power_address, self._token)
            # For centos, create a sub-profile that has the repo for
            # package_image_id also made available for this system.
            if ((base_image['type'] == "centos") and
                (package_image_id)):
                sub_profile_name = profile_name + "-" + package_image_id
                sub_profile = self._server.find_profile(
                    {"name":  sub_profile_name})
                if not sub_profile:
                    sub_profile_id = self._server.new_subprofile(self._token)
                    self._server.modify_profile(
                        sub_profile_id, 'name',
                        sub_profile_name, self._token)
                    self._server.modify_profile(
                        sub_profile_id, 'parent',
                        profile_name, self._token)
                    repos = [
                        package_image_id ]
                    self._server.modify_profile(
                        sub_profile_id, 'repos',
                        repos, self._token)
                    self._server.save_profile(
                        sub_profile_id, self._token)
                # end if sub_profile
            else:
                sub_profile_name = profile_name
            #end if
            self._server.modify_system(
                system_id, "profile", sub_profile_name, self._token)
            # Configure the install interface; only keys whose values
            # were supplied are set.
            interface = {}
            if mac:
                interface['macaddress-%s' % (ifname)] = mac
            if ip:
                interface['ipaddress-%s' % (ifname)] = ip
            if system_domain:
                interface['dnsname-%s' %
                          (ifname)] = system_name + '.' + system_domain
            self._server.modify_system(system_id, 'modify_interface',
                                       interface, self._token)
            # Build the space-separated ks_meta string consumed by the
            # kickstart/preseed templates.
            ks_metadata = 'passwd=' + enc_passwd
            ks_metadata += ' ip_address=' + ip
            ks_metadata += ' system_name=' + system_name
            ks_metadata += ' system_domain=' + system_domain
            if ipmi_interface:
                ks_metadata += ' ipmi_interface=' + ipmi_interface
            if partition:
                ks_metadata += ' partition=' + partition
            else:
                # Default: let the template match any sd? disk.
                ks_metadata += ' partition=' + '/dev/sd?'
            if package_image_id:
                ks_metadata += ' contrail_repo_name=' + \
                    package_image_id

            if node_cfg:
                ks_metadata += ' device_cfg=' + node_cfg
            # check if kernel_version is passed
            # then pass the kernel_repo_url info as well 
            if kernel_version:
                ks_metadata += ' kernel_version=' + kernel_version
                if kernel_repo_url:
                    ks_metadata += ' kernel_repo_url=' + kernel_repo_url

            if (base_image['type'] in self._vmware_types):
                ks_metadata += ' server_license=' + server_license
                ks_metadata += ' esx_nicname=' + esx_nicname

                # temporary patch to have kickstart work for esxi. ESXi seems
                # to take kickstart from profile instead of system. So need to copy
                # ks_meta parameters at profile level too. This is a hack that would
                # be removed later - TBD Abhay
                profile = self._server.find_profile({"name":  profile_name})
                if profile:
                    profile_id = self._server.get_profile_handle(
                        profile_name, self._token)
                    self._server.modify_profile(
                        profile_id, 'ksmeta', ks_metadata, self._token)
                # end hack workaround
            #end if



            self._server.modify_system(system_id, 'ksmeta',
                                       ks_metadata, self._token)

            if (base_image['type'] == "ubuntu"):
                # Ubuntu consumes these values via kernel (boot) options
                # rather than ks_meta.
                kernel_options = 'system_name=' + system_name
                kernel_options += ' system_domain=' + system_domain
                kernel_options += ' ip_address=' + ip
                kernel_options += ' server=' + server_ip
                if package_image_id:
                    kernel_options += ' contrail_repo_name=' + \
                        package_image_id
                self._server.modify_system(system_id, 'kernel_options',
                                           kernel_options, self._token)

            # Note : netboot is not enabled for the system yet. This is done
            # when API to power-cycle the server is called. For now set
            # net_enabled to False
            self._server.modify_system(
                system_id, 'netboot_enabled', False, self._token)
            self._server.save_system(system_id, self._token)
            #self._server.sync(self._token)
        except Exception as e:
            raise e
    # End of create_system

    def enable_system_netboot(self, system_name):
        """Enable netboot for an existing cobbler system.

        system_name: cobbler system to enable netboot for.
        Raises (via log_and_raise_exception) if the system is unknown;
        other cobbler errors propagate to the caller.  A cobbler sync
        is deliberately NOT done here — it is slow per system, so the
        caller syncs once after all systems are updated.
        """
        # Validate cobbler token
        self._validate_token(self._token)
        system = self._server.find_system({"name": system_name})
        if not system:
            msg = ("cobbler error : System %s not found" % system_name)
            self.log_and_raise_exception(msg, ERR_OPR_ERROR)
        system_id = self._server.get_system_handle(
            system_name, self._token)
        self._server.modify_system(
            system_id, 'netboot_enabled', True, self._token)
        self._server.save_system(system_id, self._token)
        #Sync per every system is long
        #Do it at end
        #self._server.sync(self._token)
    # End of enable_system_netboot

    def reboot_system(self, reboot_system_list):
        """Queue a reboot of the given systems via cobbler power management.

        reboot_system_list: list of cobbler system names to reboot.
        The reboot runs as a cobbler background task; this method does
        not wait for completion.  Cobbler errors propagate to the caller.
        """
        # Validate cobbler token
        self._validate_token(self._token)
        power = {
            "power" : "reboot",
            "systems" : reboot_system_list }
        self._smgr_log.log(self._smgr_log.DEBUG, "reboot_system list is %s" % reboot_system_list)
        self._smgr_log.log(self._smgr_log.DEBUG, "Reboot System Start")
        # Task id returned by cobbler is not tracked here.
        self._server.background_power_system(power, self._token)
        self._smgr_log.log(self._smgr_log.DEBUG, "Reboot System End")

        # Alternate way using direct cobbler api, not needed, but commented
        # and kept for reference.
        # system = self._capi_handle.get_item(
        #     "system", system_name)
        # if not system:
        #     raise Exception(
        #         "cobbler error : System %s not found" % system_name)
        # else:
        #     self._capi_handle.reboot(system)
    # End of reboot_system

    def delete_distro(self, distro_name):
        """Best-effort removal of a cobbler distro.

        Errors (e.g. distro already gone, cobbler unreachable) are
        deliberately ignored so cleanup paths never fail here.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token)
            self._server.remove_distro(distro_name, self._token)
        except Exception:
            # Deliberate best-effort: swallow deletion failures.
            pass
    # End of delete_distro

    def delete_repo(self, repo_name):
        """Best-effort removal of a cobbler repo.

        Errors (e.g. repo already gone, cobbler unreachable) are
        deliberately ignored so cleanup paths never fail here.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token)
            self._server.remove_repo(repo_name, self._token)
        except Exception:
            # Deliberate best-effort: swallow deletion failures.
            pass
    # End of delete_repo

    def delete_profile(self, profile_name):
        """Best-effort removal of a cobbler profile.

        Errors (e.g. profile already gone, cobbler unreachable) are
        deliberately ignored so cleanup paths never fail here.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token)
            self._server.remove_profile(profile_name, self._token)
        except Exception:
            # Deliberate best-effort: swallow deletion failures.
            pass
    # End of delete_profile

    def delete_system(self, system_name):
        """Remove a cobbler system record if it exists.

        Unknown systems are silently skipped; cobbler errors propagate
        to the caller (unlike delete_distro/repo/profile, which are
        best-effort).
        """
        # Validate cobbler token
        self._validate_token(self._token)
        if self._server.find_system({"name": system_name}):
            self._server.remove_system(system_name, self._token)
    # End of delete_system

    def sync(self):
        """Run a full cobbler sync to push all pending config changes.

        Called once after a batch of system/profile updates (per-system
        sync is too slow).  Cobbler errors propagate to the caller.
        """
        # Validate cobbler token
        self._validate_token(self._token)
        self._server.sync(self._token)
Example #40
0
class ServerMgrValidations:
    """Validation helpers for server-manager input data.

    Covers top-of-rack (ToR) switch configuration and the VIP/HA
    configuration of clusters.  Unless noted otherwise, validators
    return a (status, message) tuple where status 0 means the input is
    valid and a non-zero status means it is not (message explains why).
    """

    def validate_tor_config(self, input_data):
        """Validate the 'top_of_rack' section of the input.

        Checks that every mandatory switch parameter is present and
        well-formed, and that switch ids, IP addresses and hostnames
        are unique across the switch list.  Returns (0, "") on success,
        (1, reason) on the first failure found.
        """
        if 'top_of_rack' not in input_data:
            return (1, "top_of_rack configuration not found")
        tor_config = input_data['top_of_rack']
        switch_list = tor_config.get('switches', "")
        if not switch_list:
            return (1, "switches config not found")
        num_switch = len(switch_list)
        self._smgr_log.log(
            self._smgr_log.DEBUG,
            "validate input_data switch_len => %s" % (num_switch))
        if num_switch > 128:
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "validate input_data switch_len => %s" % (num_switch))
            return (1, "More than 128 switches are not supported")

        ## Mandatory params and how each one is validated.  A dict
        ## value means the param must be one of the 'fixed' choices.
        required_params = {
            'id': 'positive_number',
            'ip_address': 'ip_address',
            'tunnel_ip_address': 'ip_address',
            'switch_name': 'hostname',
            'type': {
                'fixed': ['ovs']
            },
            'ovs_protocol': {
                'fixed': ['tcp', 'pssl']
            },
            'ovs_port': 'port',
            'http_server_port': 'port',
            'vendor_name': 'string'
        }
        ## Track values already seen so duplicates can be rejected.
        id_set = set()
        ip_address_set = set()
        hostname_set = set()
        for switch in switch_list:
            if 'id' not in switch:
                return (1, "param 'id' not found for a switch")
            if not switch['id']:
                return (1, "param 'id' is empty for a switch config")

            for param in required_params:
                if param not in switch:
                    msg = "param '%s' not found for switch with 'id' as %s" \
                          %(param, switch['id'])
                    return (1, msg)
                if not switch[param]:
                    msg = "param '%s' is empty for %s" % (param,
                                                          switch['id'])
                    return (1, msg)
                validator = required_params[param]
                self._smgr_log.log(
                    self._smgr_log.DEBUG,
                    "validate switch-config => %s" % (validator))
                # Reset per-param: the old code left 'status' unset for
                # 'string' and for valid 'fixed' params, so a previous
                # param's result (or an UnboundLocalError) could leak
                # into this check.
                status = 0
                msg = ""
                if validator == 'positive_number':
                    status, msg = self.is_valid_number(switch[param])
                elif validator == 'ip_address':
                    status, msg = self.is_valid_ip_address(switch[param])
                elif validator == 'hostname':
                    status, msg = self.is_valid_hostname(switch[param])
                elif validator == 'port':
                    status, msg = self.is_valid_port(switch[param])
                elif validator == 'string':
                    self._smgr_log.log(
                        self._smgr_log.DEBUG,
                        "validate string => %s" % (switch[param]))
                elif isinstance(validator, dict):
                    if 'fixed' in validator and \
                            switch[param] not in validator['fixed']:
                        msg = "param %s for switch %s has invalid value" % (
                            param, switch['id'])
                        status = 1
                else:
                    self._smgr_log.log(
                        self._smgr_log.DEBUG,
                        "invalid type for => %s" % (validator))
                    msg = "param %s has invalid type for validation" % (
                        validator)
                    return (1, msg)

                if status != 0:
                    msg = "switch config %s has invalid value '%s' for %s" % (
                        switch['id'], switch[param], param)
                    # Fixed: the old code returned the string "1" here,
                    # unlike every other error path.
                    return (1, msg)

            ## Reject duplicate ids, IP addresses and hostnames.
            if switch['id'] in id_set:
                msg = "switch id %s is duplicate" % (switch['id'])
                return (1, msg)
            id_set.add(switch['id'])

            if switch['ip_address'] in ip_address_set:
                msg = "switch %s has duplicate ip_address" % (switch['id'])
                return (1, msg)
            ip_address_set.add(switch['ip_address'])

            if switch['switch_name'] in hostname_set:
                msg = "switch id %s has duplicate hostname" % (switch['id'])
                return (1, msg)
            hostname_set.add(switch['switch_name'])

        return (0, "")

    def is_valid_hostname(self, hostname):
        """Validate an RFC-1123 style hostname; returns (status, "")."""
        if not hostname:
            # Guard: the old code raised IndexError on an empty string.
            return (1, "hostname is empty")
        if len(hostname) > 255:
            return (1, "hostname length is more than 255")

        if hostname[-1] == ".":
            # strip exactly one dot from the right, if present
            hostname = hostname[:-1]

        # Each dot-separated label: 1-63 chars, alnum or '-', not
        # starting or ending with '-'.
        allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
        status = 0 if all(allowed.match(x) for x in hostname.split(".")) else 1
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "validate hostname=> %s , %s" % (hostname, status))
        return (status, "")

    def is_valid_ip_address(self, ip_address):
        """Validate a dotted-quad IPv4 address; returns (status, msg)."""
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "validate ip_address => %s" % (ip_address))
        octet = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
        pattern = r"\b%s\.%s\.%s\.%s\b" % (octet, octet, octet, octet)
        if re.match(pattern, ip_address):
            return (0, "")
        return (1, "Invalid IP Address")

    def is_valid_port(self, port_number):
        """Validate a TCP/UDP port string (1-65535); returns (status, msg)."""
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "validate port => %s" % (port_number))
        status, msg = self.is_valid_number(port_number)
        if status == 0:
            ## check for range of port number
            if int(port_number) > 65535 or int(port_number) < 1:
                msg = "port %s has invalid range" % (port_number)
                status = 1
        return (status, msg)

    def is_valid_protocol(self, protocol):
        """Placeholder protocol check; currently accepts anything."""
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "validate protocol => %s" % (protocol))
        return (0, "")

    def is_valid_number(self, number):
        """Validate a non-negative integer string; returns (status, msg)."""
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "validate valid number => %s" % (number))
        if number.isdigit():
            return (0, "")
        return (1, "invalid number")

    #The following logic finds the interface type of servers
    #Will return SINGLE_INTERFACE if all the servers are single
    #Will return MULTI_INTERFACE if all the servers are multi
    #Will return None if you have a combination
    def get_server_interface_type(self, servers_dict):
        """Classify the servers' interface configuration.

        Returns "SINGLE_INTERFACE" when every server is single-interface,
        "MULTI_INTERFACE" when every server is multi-interface, and None
        for a mix (or for a server with no interface_name).
        """
        all_single = False
        all_multi = False
        for server in servers_dict:
            # NOTE(security): 'parameters'/'contrail' are stored as
            # python literal strings and eval'd here; eval on untrusted
            # input is dangerous -- consider ast.literal_eval.
            param = eval(server['parameters'])
            contrail = eval(server['contrail'])
            if param['interface_name'] and contrail:
                all_multi = True
            elif param['interface_name']:
                all_single = True
            else:
                return None
        #If you have a mix of single and multi interface servers then return None
        if all_multi and all_single:
            return None
        if all_multi:
            return "MULTI_INTERFACE"
        return "SINGLE_INTERFACE"

    #This function returns the list of servers with a specific roles assigned to it
    def get_servers_roles_list(self, servers):
        """Split servers by role into (openstack+config, config-only,
        openstack-only) lists.  Servers with neither role are ignored."""
        openstack_only_list = []
        config_only_list = []
        openstack_config_list = []
        for server in servers:
            roles_list = server['roles']
            #Check if the server has both config and openstack role assigned
            # BUGFIX: the old condition was "'config' and 'openstack' in
            # roles_list", which only tested for 'openstack'.
            if 'config' in roles_list and 'openstack' in roles_list:
                openstack_config_list.append(server)
            #Check if the server has config role assigned to it
            elif 'config' in roles_list:
                config_only_list.append(server)
            #Check if the server has openstack role assigned to it
            elif 'openstack' in roles_list:
                openstack_only_list.append(server)
        return (openstack_config_list, config_only_list, openstack_only_list)

    #Function to get the vips defined for a cluster
    def get_vips_in_cluster(self, cluster):
        """Return (internal_vip, external_vip, contrail_internal_vip,
        contrail_external_vip) for the cluster; missing ones are None."""
        # NOTE(security): 'parameters' is a python literal string that is
        # eval'd here; consider ast.literal_eval for untrusted input.
        cluster_params = eval(cluster['parameters'])
        internal_vip = cluster_params.get('internal_vip')
        external_vip = cluster_params.get('external_vip')
        contrail_internal_vip = cluster_params.get('contrail_internal_vip')
        contrail_external_vip = cluster_params.get('contrail_external_vip')
        return (internal_vip, external_vip, contrail_internal_vip,
                contrail_external_vip)

    #Function to validate vip configuration for a multi interface server
    def validate_multi_interface_vip(self, cluster, servers):
        """Validate HA VIP settings for a multi-interface cluster.

        Raises Exception with a descriptive message when the VIP
        combination is invalid; returns None when valid or when no VIPs
        (i.e. no HA) are configured.
        """
        #Get the list of servers with specific roles
        openstack_config_list, config_only_list, openstack_only_list = \
            self.get_servers_roles_list(servers)
        #Get the values of all vips in a cluster
        internal_vip, external_vip, contrail_internal_vip, \
            contrail_external_vip = self.get_vips_in_cluster(cluster)
        #If no vips are configured then no HA is configured; skip validation
        if internal_vip is None and external_vip is None and \
                contrail_internal_vip is None and \
                contrail_external_vip is None:
            return
        #Validation for nodes configured for both contrail and openstack HA
        if len(openstack_config_list) > 1:
            #Both internal and external vip's have to be configured
            if not internal_vip or not external_vip:
                raise Exception(
                    "Both internal and external vips need to be configured")
            #If internal and external vips are specified they should not be equal
            if internal_vip and external_vip and internal_vip == external_vip:
                raise Exception(
                    "internal and external vips cannot be the same")
            #If contrail vips are specified they should equal the
            #internal and external vips
            if contrail_internal_vip and contrail_external_vip and \
                    contrail_internal_vip != internal_vip and \
                    contrail_external_vip != external_vip:
                raise Exception(
                    "If contrail internal and external vips are configured they need to be same as the internal and external vips"
                )
            return
        #Validation for nodes configured only for contrail HA
        if len(config_only_list) > 1:
            #Both contrail internal and external vips have to be configured
            if not contrail_internal_vip or not contrail_external_vip:
                raise Exception(
                    "contrail_internal_vip and contrail_external_vip have to be configured"
                )
            #Contrail internal and external vip cannot be the same
            if contrail_internal_vip and contrail_external_vip and \
                    contrail_internal_vip == contrail_external_vip:
                raise Exception(
                    "contrail_internal_vip and contrail_external_vip cannot be same"
                )
            return
        #Validation for nodes configured only for Openstack HA
        if len(openstack_only_list) > 1:
            #Both the internal and external vips have to be configured
            if not internal_vip or not external_vip:
                raise Exception(
                    "Both internal and external vips have to be configured")
            #internal and external vips cannot be the same
            if internal_vip and external_vip and internal_vip == external_vip:
                raise Exception(
                    "internal and external vips cannot be the same")
            return

    #Function to validate vip configuration for a single interface server
    def validate_single_interface_vip(self, cluster, servers):
        """Validate HA VIP settings for a single-interface cluster.

        Raises Exception with a descriptive message when the VIP
        combination is invalid; returns None when valid or when no VIPs
        (i.e. no HA) are configured.
        """
        #Get the list of servers with specific roles
        openstack_config_list, config_only_list, openstack_only_list = \
            self.get_servers_roles_list(servers)
        #Get the values of all vips in a cluster
        internal_vip, external_vip, contrail_internal_vip, \
            contrail_external_vip = self.get_vips_in_cluster(cluster)
        #If no vips are configured then no HA is configured; skip validation
        if internal_vip is None and external_vip is None and \
                contrail_internal_vip is None and \
                contrail_external_vip is None:
            return
        #Validation for nodes configured for both contrail and openstack HA
        if len(openstack_config_list) > 1:
            #internal vip has to be configured
            if not internal_vip:
                raise Exception(
                    "internal vip has to be configured. external vip or contrail external vip cannot be configured"
                )
            #external and internal vip if configured, has to be the same
            if external_vip and external_vip != internal_vip:
                raise Exception(
                    "internal vip and external vip have to be the same")
            #contrail external vip and internal vip if configured, has to be the same
            if contrail_external_vip and contrail_external_vip != internal_vip:
                raise Exception(
                    "internal vip and contrail external vip have to be the same"
                )
            #contrail internal vip and internal vip if configured, has to be the same
            if contrail_internal_vip and contrail_internal_vip != internal_vip:
                raise Exception(
                    "internal vip and contrail internal vip have to be the same"
                )
            return
        #Validation for nodes configured only for contrail HA
        if len(config_only_list) > 1:
            #contrail internal vip has to be configured
            if not contrail_internal_vip:
                raise Exception("Only contrail internal vip can be configured")
            #If contrail external vip is configured it has to equal the internal
            if contrail_external_vip and \
                    contrail_external_vip != contrail_internal_vip:
                raise Exception(
                    "contrail internal vip and contrail external vip have to be the same"
                )
            return
        #Validation for nodes configured only for Openstack HA
        if len(openstack_only_list) > 1:
            #internal vip has to be configured
            if not internal_vip:
                raise Exception("Only internal vip can be configured")
            #If external vip is configured it has to be the same as internal vip
            if external_vip and external_vip != internal_vip:
                raise Exception(
                    "contrail internal vip and contrail external vip have to be the same"
                )
            return

    #Function to do the configuration validation of vips
    def validate_vips(self, cluster_id, serverDb):
        """Validate the VIP/HA configuration of the given cluster.

        Looks up the cluster and its servers in serverDb and dispatches
        to the single- or multi-interface validator.  Raises Exception
        on invalid configuration; database errors propagate.
        """
        #Get the cluster given the cluster id
        cluster_list = serverDb.get_cluster({"id": cluster_id},
                                            detail=True)
        #Only one cluster matches a given id
        cluster = cluster_list[0]
        match_dict = {"cluster_id": cluster['id']}
        #Get the list of servers belonging to that cluster
        servers = serverDb.get_server(match_dict, detail=True)
        #Find out what type of interface the servers have
        interface_type = self.get_server_interface_type(servers)
        if interface_type == 'MULTI_INTERFACE':
            self.validate_multi_interface_vip(cluster, servers)
        elif interface_type == 'SINGLE_INTERFACE':
            self.validate_single_interface_vip(cluster, servers)

    def __init__(self):
        # Shared server-manager logger used by all validators.
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrValidations Init")
Example #41
0
class ContrailAnsiblePlaybooks(multiprocessing.Process):
    """Run the kolla/contrail Ansible playbooks requested for a cluster.

    Executes in a separate process; before and after each playbook run the
    per-host provisioning state is PUT back to the server-manager REST
    status endpoint.
    """

    def __init__(self, json_entity, args):
        """Cache the request entity/args, parse the task list and
        pre-create the attributes used by the Ansible playbook APIs."""
        super(ContrailAnsiblePlaybooks, self).__init__()
        try:
            self.logger = ServerMgrlogger()
        except Exception:
            # Best effort: if the server-manager logger cannot be created,
            # record it in the fallback debug log.  (self.logger stays
            # unset in that case -- later use will still fail; preserved
            # from the original behavior.)
            f = open("/var/log/contrail-server-manager/debug.log", "a")
            f.write("Ansible Callback Init - ServerMgrlogger init failed\n")
            f.close()

        #Initialize common stuff
        self.json_entity = json_entity
        self.args = args
        self.hosts_in_inv = json_entity[0]["hosts_in_inv"]
        if "kolla_inv" in json_entity[0]["parameters"]:
            self.hosts_in_kolla_inv = \
                    SMAnsibleUtils(self.logger).hosts_in_kolla_inventory(\
                        json_entity[0]['parameters']['kolla_inv'])

        # The task list arrives as a comma and/or space separated string.
        self.tasks = re.split(r'[,\ ]+', json_entity[0]["tasks"])

        #Initialize vars required for Ansible Playbook APIs
        self.options = None
        self.extra_vars = None
        self.pbook_path = None
        self.var_mgr = None
        self.inventory = None
        self.pb_executor = None

    def update_status(self, kolla=False):
        """PUT self.current_status for every host of the active inventory
        (kolla inventory when *kolla* is True) to the status endpoint."""
        if kolla:
            hosts = self.hosts_in_kolla_inv
        else:
            hosts = self.hosts_in_inv

        for h in hosts:
            status_resp = {"server_id": h, "state": self.current_status}
            SMAnsibleUtils(self.logger).send_REST_request(
                self.args.ansible_srvr_ip,
                SM_STATUS_PORT,
                "ansible_status",
                urllib.urlencode(status_resp),
                method='PUT',
                urlencode=True)

    def validate_provision_params(self, inv, defaults):
        """Validate the "[all:vars]" section of inventory *inv*.

        Missing well-known keys are back-filled from *defaults* (every
        remaining default attribute is copied in as well); the configured
        playbook file must exist.  Returns STATUS_VALID on success or an
        error string describing the problem.
        """
        keys_to_check = [
            "ansible_playbook", "docker_insecure_registries",
            "docker_registry_insecure"
        ]

        params = inv.get("[all:vars]", None)
        if params is None:
            return ("[all:vars] not defined")

        for x in keys_to_check:
            if x not in params:
                if x == "docker_insecure_registries":
                    params['docker_insecure_registries'] = \
                     defaults.docker_insecure_registries
                elif x == 'docker_registry_insecure':
                    params['docker_registry_insecure'] = \
                     defaults.docker_registry_insecure
                elif x == 'ansible_playbook':
                    params['ansible_playbook'] = \
                     defaults.ansible_playbook
                else:
                    return ("%s not defined in inventory" % x)

        # items() (not the Python-2-only iteritems()) works on both
        # interpreters; py2 just builds a small list.
        for k, v in vars(defaults).items():
            if k not in params:
                params[k] = v

        # The playbook file must exist and be readable.
        pbook = params['ansible_playbook']
        try:
            with open(pbook):
                pass
        except IOError:
            return ("Playbook not found : %s" % pbook)

        return STATUS_VALID

    def create_kolla_param_files(self, pw, glbl, pbook_dir):
        """(Re)write kolla's passwords.yml and globals.yml next to the
        playbook directory from the request-supplied dicts *pw*/*glbl*.
        The initial open only probes for existence so the log message can
        distinguish create from overwrite."""
        self.logger.log(self.logger.INFO,
                        "Changing globals and passwords files")
        pw_file_name = pbook_dir + '/../etc/kolla/passwords.yml'
        try:
            with open(pw_file_name):
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (pw_file_name))
        except IOError as e:
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, pw_file_name))
        finally:
            with open(pw_file_name, 'w+') as kolla_pws:
                yaml.dump(pw,
                          kolla_pws,
                          explicit_start=True,
                          default_flow_style=False,
                          width=1000)

        gl_file_name = pbook_dir + '/../etc/kolla/globals.yml'
        try:
            with open(gl_file_name):
                self.logger.log(self.logger.INFO,
                                "Creating %s" % (gl_file_name))
        except IOError as e:
            self.logger.log(self.logger.INFO,
                            "%s : Creating %s" % (e, gl_file_name))
        finally:
            with open(gl_file_name, 'w+') as kolla_globals:
                yaml.dump(glbl,
                          kolla_globals,
                          explicit_start=True,
                          default_flow_style=False,
                          width=1000)

    def run_playbook(self, pb, kolla, action):
        """Run the playbook named by parameters key *pb*.

        For kolla playbooks the kolla globals/passwords files are
        rewritten and merged into extra-vars.  Returns the executor's
        stats object on success, or None on failure (per-host status is
        reported either way).
        """
        cluster_id = self.json_entity[0]["cluster_id"]
        parameters = self.json_entity[0]["parameters"]
        self.pbook_path = parameters[pb]
        pbook_dir = os.path.dirname(self.pbook_path)
        inv_dir = pbook_dir + '/inventory/'

        ev = None
        try:
            if kolla:
                inv_file = inv_dir + cluster_id + "_kolla.inv"
                inv_dict = parameters["kolla_inv"]
                kolla_pwds = parameters['kolla_passwords']
                kolla_vars = parameters['kolla_globals']
                self.create_kolla_param_files(kolla_pwds, kolla_vars,
                                              pbook_dir)
                # Extra-vars = requested action + merged globals/passwords.
                # NOTE(review): yaml.load on server-generated local files;
                # switch to yaml.safe_load if they can ever be user input.
                ev = {'action': action}
                with open(pbook_dir + '/../etc/kolla/globals.yml') as info:
                    ev.update(yaml.load(info))
                with open(pbook_dir + '/../etc/kolla/passwords.yml') as info:
                    ev.update(yaml.load(info))
            else:
                inv_file = inv_dir + cluster_id + ".inv"
                inv_dict = parameters["inventory"]
                self.current_status = self.validate_provision_params(
                    inv_dict, self.args)

            # Minimal stand-in for the ansible CLI options object.
            Options = namedtuple('Options', [
                'connection', 'forks', 'module_path', 'become',
                'become_method', 'become_user', 'check', 'listhosts',
                'listtasks', 'listtags', 'syntax', 'verbosity', 'extra_vars'
            ])
            self.options = Options(connection='ssh',
                                   forks=100,
                                   module_path=None,
                                   become=True,
                                   become_method='sudo',
                                   become_user='******',
                                   check=False,
                                   listhosts=None,
                                   listtasks=None,
                                   listtags=None,
                                   syntax=None,
                                   verbosity=None,
                                   extra_vars=ev)

            self.logger.log(
                self.logger.INFO, "Creating inventory %s for playbook %s" %
                (inv_file, self.pbook_path))
            SMAnsibleUtils(None).create_inv_file(inv_file, inv_dict)
            self.logger.log(
                self.logger.INFO, "Created inventory %s for playbook %s" %
                (inv_file, self.pbook_path))
            self.var_mgr = VariableManager()
            self.inventory = Inventory(loader=DataLoader(),
                                       variable_manager=self.var_mgr,
                                       host_list=inv_file)
            self.var_mgr.set_inventory(self.inventory)
            if kolla:
                self.var_mgr.extra_vars = ev
            self.pb_executor = PlaybookExecutor(playbooks=[self.pbook_path],
                                                inventory=self.inventory,
                                                variable_manager=self.var_mgr,
                                                loader=DataLoader(),
                                                options=self.options,
                                                passwords={})
            self.logger.log(self.logger.INFO,
                            "Starting playbook %s" % self.pbook_path)

            # Update status before every playbook run
            if kolla:
                self.current_status = "openstack_" + action
            else:
                self.current_status = action
            self.update_status(kolla)

            rv = self.pb_executor.run()
            if rv != 0:
                self.current_status = STATUS_FAILED
                self.update_status(kolla)
                self.logger.log(self.logger.ERROR,
                                "Playbook Failed: %s" % self.pbook_path)
                rv = None
            else:
                # On success, hand back the task queue manager stats.
                rv = self.pb_executor._tqm._stats
        except Exception as e:
            self.logger.log(self.logger.ERROR, e)
            self.current_status = STATUS_FAILED
            self.update_status(kolla)
            rv = None
        return rv

    def run(self):
        """Process entry point: run the requested playbooks in a fixed
        order, stopping at the first failure (run_playbook → None)."""
        self.logger.log(self.logger.INFO,
                        "Executing Ansible Playbook Actions: %s" % self.tasks)
        if 'openstack_bootstrap' in self.tasks:
            rv = self.run_playbook("kolla_bootstrap_pb", True,
                                   "bootstrap-servers")
            if rv is None:
                return rv

        if 'openstack_deploy' in self.tasks:
            rv = self.run_playbook("kolla_deploy_pb", True, "deploy")
            if rv is None:
                return rv

        if 'openstack_post_deploy' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_pb", True, "post-deploy")
            if rv is None:
                return rv

        if 'openstack_destroy' in self.tasks:
            rv = self.run_playbook("kolla_destroy_pb", True, "destroy")
            if rv is None:
                return rv

        if 'contrail_deploy' in self.tasks:
            rv = self.run_playbook("contrail_deploy_pb", False,
                                   "contrail-deploy")
            if rv is None:
                return rv

        # This has to happen after contrail_deploy
        if 'openstack_post_deploy_contrail' in self.tasks:
            rv = self.run_playbook("kolla_post_deploy_contrail_pb", True,
                                   "post-deploy-contrail")
            if rv is None:
                return rv
class ServerMgrPuppet:
    _node_env_map_file = "puppet/node_mapping.json"

    def __init__(self, smgr_base_dir, puppet_dir):
        """Remember the server-manager base and puppet directories and
        make sure the puppet directory's parent exists on disk."""
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrPuppet Init")
        self.smgr_base_dir = smgr_base_dir
        self.puppet_directory = puppet_dir
        # NOTE(review): this creates the *parent* of puppet_dir, not
        # puppet_dir itself -- confirm that is intended.
        parent_dir = os.path.dirname(puppet_dir)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)

    # end __init__

    def storage_get_control_network_mask(self, provision_params, server,
                                         cluster):
        """Return the control-network CIDR for storage on this server.

        The result is a double-quoted string, e.g. '"10.1.1.0/24"'.  The
        openstack node IP is chosen in priority order: externally
        configured openstack IP, this server itself (when it has the
        openstack role), else the first openstack role member.  Raises
        ServerMgrException when no openstack node can be determined.
        """
        role_ips_dict = provision_params['roles']
        cluster_params = cluster.get('parameters', {})
        server_params = server.get('parameters', {})
        #openstack_ip = cluster_params.get("internal_vip", None)
        cluster_openstack_prov_params = (cluster_params.get(
            "provision", {})).get("openstack", {})
        configured_external_openstack_ip = cluster_openstack_prov_params.get(
            "external_openstack_ip", None)
        openstack_ip = ''
        self_ip = server.get("ip_address", "")
        if configured_external_openstack_ip:
            openstack_ip = configured_external_openstack_ip
        # NOTE(review): this membership test assumes role_ips_dict always
        # has an 'openstack' key; the presence check only happens in the
        # next branch -- confirm upstream guarantees the key.
        elif self_ip in role_ips_dict['openstack']:
            openstack_ip = self_ip
        elif 'openstack' in role_ips_dict and len(role_ips_dict['openstack']):
            openstack_ip = role_ips_dict['openstack'][0]
        else:
            msg = "Openstack role not defined for cluster AND External Openstack not configured in cluster parameters.\n " \
                  "The cluster needs to point to at least one Openstack node.\n"
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            raise ServerMgrException(msg, ERR_OPR_ERROR)

        # Server-level netmask wins; else cluster-level; else /24 default.
        subnet_mask = server.get("subnet_mask", "")
        if not subnet_mask:
            subnet_mask = cluster_params.get("subnet_mask", "255.255.255.0")
        subnet_address = ""
        intf_control = {}
        subnet_address = str(
            IPNetwork(openstack_ip + "/" + subnet_mask).network)

        # External openstack: no control-net lookup, use the plain subnet.
        if openstack_ip == configured_external_openstack_ip:
            return '"' + str(IPNetwork(subnet_address).network) + '/' + str(
                IPNetwork(subnet_address).prefixlen) + '"'

        self._smgr_log.log(
            self._smgr_log.DEBUG,
            "control-net : %s" % str(provision_params['control_net']))
        # control_net values are stored as stringified dict literals.
        # NOTE(review): eval of DB-stored text -- trusted content only;
        # consider ast.literal_eval.
        if provision_params['control_net'][openstack_ip]:
            intf_control = eval(provision_params['control_net'][openstack_ip])
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "openstack-control-net : %s" % str(intf_control))

        # Returns on the first interface entry examined (dict order).
        for intf, values in intf_control.items():
            if intf:
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "ip_address : %s" % values['ip_address'])
                return '"' + str(IPNetwork(
                    values['ip_address']).network) + '/' + str(
                        IPNetwork(values['ip_address']).prefixlen) + '"'
            else:
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "server_ip : %s" % values['server_ip'])
                return '"' + str(
                    IPNetwork(
                        provision_params['server_ip']).network) + '/' + str(
                            IPNetwork(
                                provision_params['server_ip']).prefixlen) + '"'

        # No control-net interface info: fall back to the derived subnet.
        return '"' + str(IPNetwork(subnet_address).network) + '/' + str(
            IPNetwork(subnet_address).prefixlen) + '"'

    def delete_node_entry(self, site_file, server_fqdn):
        tempfd, temp_file = tempfile.mkstemp()
        fh = os.fdopen(tempfd, "w")
        node_found = False
        brace_count = 0
        with open(site_file, "r") as site_fh:
            for line in site_fh:
                tokens = line.strip().split()
                if ((len(tokens) >= 2) and (tokens[0] == "node")
                        and ((re.findall(r"['\"](.*?)['\"]", tokens[1]))[0]
                             == server_fqdn)):
                    node_found = True
                #end if tokens...
                if not node_found:
                    fh.write(line)
                else:
                    # skip comments
                    if tokens[0].startswith("#"):
                        continue
                    # Skip lines till closing brace
                    if "{" in line:
                        brace_count += 1
                    if "}" in line:
                        brace_count -= 1
                    if brace_count == 0:
                        node_found = False
                # end else not node_found
            # end for
        # end with
        fh.close()
        shutil.copy(temp_file, site_file)
        os.chmod(site_file, 0644)
        os.remove(temp_file)

    # end def delete_node_entry

    def add_node_entry(self, site_file, server_fqdn, server, cluster,
                       cluster_servers, puppet_version):
        cluster_params = cluster.get('parameters', {})
        data = ''
        data += "node \'%s\' {\n" % server_fqdn
        # Add Stage relationships
        data += '    stage{ \'first\': }\n'
        data += '    stage{ \'last\': }\n'
        data += '    stage{ \'compute\': }\n'
        data += '    stage{ \'pre\': }\n'
        data += '    stage{ \'post\': }\n'
        if 'tsn' in server['roles']:
            data += '    stage{ \'tsn\': }\n'
        if 'toragent' in server['roles']:
            data += '    stage{ \'toragent\': }\n'
        if 'storage-compute' in server['roles'] or 'storage-master' in server[
                'roles']:
            data += '    stage{ \'storage\': }\n'
        data += '    Stage[\'pre\']->Stage[\'first\']->Stage[\'main\']->Stage[\'last\']->Stage[\'compute\']->'
        if 'tsn' in server['roles']:
            data += 'Stage[\'tsn\']->'
        if 'toragent' in server['roles']:
            data += 'Stage[\'toragent\']->'
        if 'storage-compute' in server['roles'] or 'storage-master' in server[
                'roles']:
            data += 'Stage[\'storage\']->'
        data += 'Stage[\'post\']\n'

        # Add pre role
        data += '    class { \'::contrail::provision_start\' : state => \'provision_started\', stage => \'pre\' }\n'
        # Add common role
        data += '    class { \'::sysctl::base\' : stage => \'first\' }\n'
        data += '    class { \'::apt\' : stage => \'first\' }\n'
        data += '    class { \'::contrail::profile::common\' : stage => \'first\' }\n'
        #Include all roles manifest,Each manifest will execute only if that host
        #is configured to have a role.
        #Uninstall manifest will get executed when host_roles doesnt have that
        #role and contrail_roles[] facts has that role.
        #This implies that a role which is not configured is present on
        #the target and uninstall manifest will get executed.

        # Add keepalived (This class is no-op if vip is not configured.)
        data += '    include ::contrail::profile::keepalived\n'
        # Add haproxy (for config node)
        data += '    include ::contrail::profile::haproxy\n'
        # Add database role.
        data += '    include ::contrail::profile::database\n'
        # Add webui role.
        data += '    include ::contrail::profile::webui\n'
        # Add openstack role.
        data += '    include ::contrail::profile::openstack_controller\n'
        # Add ha_config role.
        data += '    include ::contrail::ha_config\n'
        # Add config provision role.
        data += '    include ::contrail::profile::config\n'
        # Add controller role.
        data += '    include ::contrail::profile::controller\n'
        # Add collector role.
        data += '    include ::contrail::profile::collector\n'
        # Add config provision role.
        if ((puppet_version < 3.0) and ('config' in server['roles'])):
            data += '    class { \'::contrail::profile::provision\' : stage => \'last\' }\n'
        # Add compute role
        data += '    class { \'::contrail::profile::compute\' : stage => \'compute\' }\n'

        # Add Tsn Role
        if 'tsn' in server['roles']:
            data += '    class { \'::contrail::profile::tsn\' :  stage => \'tsn\' }\n'
        # Add Toragent Role
        if 'toragent' in server['roles']:
            data += '    class { \'::contrail::profile::toragent\' :  stage => \'toragent\' }\n'
        # Add Storage Role
        if 'storage-compute' in server['roles'] or 'storage-master' in server[
                'roles']:
            data += '    class { \'::contrail::profile::storage\' :  stage => \'storage\' }\n'
        # Add post role
        data += '    class { \'::contrail::provision_complete\' : state => \'post_provision_completed\', stage => \'post\' }\n'

        data += "}\n"
        with open(site_file, "a") as site_fh:
            site_fh.write(data)
        os.chmod(site_file, 0644)
        # end with

    # end def add_node_entry

    def add_node_entry_new(self, site_file, server_fqdn):
        data = "node \'%s\' {\n" % server_fqdn
        data += "   class { '::contrail::contrail_all': }\n"
        data += "}\n"
        with open(site_file, "a") as site_fh:
            site_fh.write(data)
        # end with
        os.chmod(site_file, 0644)

    # end def add_node_entry_new

    def initiate_esx_contrail_vm(self, server, esx_server):
        """Provision the ContrailVM on an ESX host by assembling the VM
        parameter dict from *server*/*esx_server* and handing it to the
        ContrailVM provisioning helper."""
        self._smgr_log.log(self._smgr_log.DEBUG, "esx_server")
        #call scripts to provision esx
        server_params = server.get("parameters", {})
        vm_params = {}
        vm_params['vm'] = "ContrailVM"
        vm_params['vmdk'] = "ContrailVM"
        vm_params['datastore'] = server_params.get('datastore',
                                                   "/vmfs/volumes/datastore1")
        vm_params['eth0_mac'] = server.get('mac_address', '')
        vm_params['eth0_ip'] = server.get('ip_address', '')
        vm_params['eth0_pg'] = server_params.get('esx_fab_port_group', '')
        vm_params['eth0_vswitch'] = server_params.get('esx_fab_vswitch', '')
        vm_params['eth0_vlan'] = None
        vm_params['eth1_vswitch'] = server_params.get('esx_vm_vswitch', '')
        vm_params['eth1_pg'] = server_params.get('esx_vm_port_group', '')
        vm_params['eth1_vlan'] = "4095"
        vm_params['uplink_nic'] = server_params.get('esx_uplink_nic', '')
        vm_params['uplink_vswitch'] = server_params.get('esx_fab_vswitch', '')
        vm_params['server'] = esx_server.get('esx_ip', '')
        vm_params['username'] = '******'
        vm_params['password'] = esx_server.get('esx_password', '')
        vm_params['thindisk'] = server_params.get('esx_vmdk', '')
        vm_params['smgr_ip'] = server_params.get('smgr_ip', '')
        vm_params['domain'] = server_params.get('domain', '')
        vm_params['vm_password'] = server_params.get('password', '')
        vm_params['vm_server'] = server_params.get('id', '')
        vm_params['vm_deb'] = server_params.get('vm_deb', '')
        out = ContrailVM(vm_params)
        # The original format string ("ContrilVM:" % (out)) had no %s
        # placeholder and raised TypeError at runtime; log properly.
        self._smgr_log.log(self._smgr_log.DEBUG, "ContrailVM: %s" % (out))

    # end initiate_esx_contrail_vm

    def generate_tor_certs(self, switch_info, server_id, domain):
        """Create a self-signed cert/key pair for a ToR switch under
        /etc/contrail_smgr/puppet/ssl/, unless one already exists."""
        tor_name = switch_info['switch_name']
        tor_vendor_name = switch_info['vendor_name']
        tor_server_fqdn = server_id + '.' + domain
        contrail_module_path = '/etc/contrail_smgr/puppet/ssl/'
        tor_cert_file = contrail_module_path + 'tor.' + tor_name + '.cert.pem'
        tor_key_file = contrail_module_path + 'tor.' + tor_name + '.privkey.pem'

        self._smgr_log.log(self._smgr_log.DEBUG,
                           'module path => %s' % contrail_module_path)
        if os.path.exists(tor_cert_file) and os.path.exists(tor_key_file):
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                'cert exists for %s host %s' % (tor_name, tor_server_fqdn))
            return
        # Build the command as an argv list and run it without a shell:
        # tor_name / vendor / fqdn come from user-supplied switch config,
        # so interpolating them into a shell=True string was a command
        # injection risk.
        cert_cmd = [
            'openssl', 'req', '-new', '-x509', '-days', '3650', '-sha256',
            '-newkey', 'rsa:4096', '-nodes', '-text',
            '-subj', '/C=US/ST=Global/L=' + tor_name + '/O=' +
            tor_vendor_name + '/CN=' + tor_server_fqdn,
            '-keyout', tor_key_file, '-out', tor_cert_file
        ]

        if not os.path.exists(contrail_module_path):
            os.makedirs(contrail_module_path)
        self._smgr_log.log(self._smgr_log.DEBUG, 'ssl_cmd => %s' % cert_cmd)

        subprocess.check_call(cert_cmd)

    # Function to change key name from new param key name to pre-3.0 puppet hiera names.
    def xlate_key_to_pre_3_0(self, long_key, key):
        xlate_dict = {
            "contrail::analytics::analytics_ip_list": "collector_ip_list",
            "contrail::analytics::analytics_name_list": "collector_name_list",
            "contrail::analytics::data_ttl": "analytics_data_ttl",
            "contrail::analytics::config_audit_ttl":
            "analytics_config_audit_ttl",
            "contrail::analytics::statistics_ttl": "analytics_statistics_ttl",
            "contrail::analytics::flow_ttl": "analytics_flow_ttl",
            "contrail::analytics::syslog_port": "analytics_syslog_port",
            "contrail::analytics::directory": "database_dir",
            "contrail::analytics::data_directory": "analytics_data_dir",
            "contrail::analytics::ssd_data_directory": "ssd_data_dir",
            "contrail::database::directory": "database_dir",
            "contrail::database::minimum_diskGB": "database_minimum_diskGB",
            "contrail::database::initial_token": "database_initial_token",
            "contrail::database::ip_port": "database_ip_port",
            "openstack::keystone::admin_password": "******",
            "openstack::keystone::admin_user": "******",
            "openstack::keystone::admin_tenant": "keystone_admin_tenant",
            "openstack::keystone::service_tenant": "keystone_service_tenant",
            "openstack::keystone::admin_token": "keystone_service_token",
            "openstack::keystone::auth_protocol": "keystone_auth_protocol",
            "openstack::keystone::auth_port": "keystone_auth_port",
            "openstack::keystone::insecure_flag": "keystone_insecure_flag",
            "openstack::region": "keystone_region_name",
            "contrail::ha::haproxy_enable": "haproxy_flag",
            "openstack::neutron::port": "quantum_port",
            "openstack::neutron::service_protocol": "neutron_service_protocol",
            "openstack::amqp::server_ip": "amqp_server_ip",
            "contrail::config::zookeeper_ip_port": "zk_ip_port",
            "contrail::config::healthcheck_interval": "hc_interval",
            "contrail::vmware::ip": "vmware_ip",
            "contrail::vmware::username": "******",
            "contrail::vmware::password": "******",
            "contrail::vmware::vswitch": "vmware_vswitch",
            "openstack::mysql::root_password": "******",
            "contrail::control::encapsulation_priority": "encap_priority",
            "contrail::vgw::public_subnet": "vgw_public_subnet",
            "contrail::vgw::public_vn_name": "vgw_public_vn_name",
            "contrail::vgw::public_interface": "vgw_public_interface",
            "contrail::vgw::public_gateway_routes":
            "vgw_public_gateway_routes",
            "contrail::storage::storage_name_list": "storage_hostnames"
        }
        return xlate_dict.get(long_key, key)

    # end of function to xlate key to pre_3_0

    def add_params_from_dict(self, in_dict, package, prefix=''):
        out_dict = {}
        package_params = package.get("parameters", {})
        if not (isinstance(in_dict, dict)):
            return out_dict
        for key, value in in_dict.iteritems():
            new_prefix = str("::".join(x for x in (prefix, key) if x))
            if (isinstance(value, dict) and (not value.pop("literal", False))):
                out_dict.update(
                    self.add_params_from_dict(value, package, new_prefix))
            else:
                # For pre3.0 contrail, we need to generate hiera data
                # in contrail::params::... format too. This code should
                # be removed when we stop supporting old format contrail (pre-3.0)
                if (package_params.get('puppet_version', 0.0) < 3.0):
                    out_dict[
                        "contrail::params::" +
                        self.xlate_key_to_pre_3_0(new_prefix, key)] = value
                out_dict[new_prefix] = value
        return out_dict

    # end add_params_from_dict

    def add_cluster_provisioning_params(self, cluster, package):
        cluster_parameters = cluster.get("parameters", {})
        provision_params = cluster_parameters.get("provision", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_cluster_provisioning_params

    def add_server_provisioning_params(self, server, package):
        server_parameters = server.get("parameters", {})
        provision_params = server_parameters.get("provision", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_server_provisioning_params

    def add_package_provisioning_params(self, package):
        package_parameters = package.get("parameters", {})
        provision_params = package_parameters.get("provision", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_package_provisioning_params

    def add_cluster_calculated_params(self, cluster, package):
        provision_params = cluster.get("calc_params", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_cluster_calculated_params

    def add_server_calculated_params(self, server, package):
        provision_params = server.get("calc_params", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_server_calculated_params

    def add_package_calculated_params(self, package):
        provision_params = package.get("calc_params", {})
        return self.add_params_from_dict(provision_params, package)

    # end of add_package_calculated_params

    def add_sequencing_params(self, cluster, package):
        cluster_params = cluster.get('parameters', {})
        package_params = package.get('parameters', {})
        sequence_provisioning_available = package_params.get(
            'sequence_provisioning_available', False)
        sequence_provisioning = cluster_params.get('sequence_provisioning',
                                                   True)
        if (package_params.get('puppet_version', 0.0) >= 3.0):
            key = "sequencing"
        else:
            key = "params"
        sequencing_params = {}
        if sequence_provisioning_available and sequence_provisioning:
            sequencing_params['contrail'] = {}
            sequencing_params['contrail'][key] = {}
            sequencing_params['contrail'][key]['enable_post_provision'] = False
            sequencing_params['contrail'][key][
                'enable_pre_exec_vnc_galera'] = False
            sequencing_params['contrail'][key][
                'enable_post_exec_vnc_galera'] = False
            sequencing_params['contrail'][key]['enable_keepalived'] = False
            sequencing_params['contrail'][key]['enable_haproxy'] = False
            sequencing_params['contrail'][key][
                'enable_sequence_provisioning'] = True
            sequencing_params['contrail'][key][
                'enable_provision_started'] = True
            sequencing_params['contrail'][key]['enable_storage_master'] = False
            sequencing_params['contrail'][key][
                'enable_storage_compute'] = False
            for role in [
                    'global_controller', 'loadbalancer', 'database', 'config',
                    'openstack', 'control', 'collector', 'webui', 'compute',
                    'tsn', 'toragent'
            ]:
                sequencing_params['contrail'][key]['enable_' + role] = False
        return self.add_params_from_dict(sequencing_params, package)

    # end add_sequencing_params

    def build_contrail_hiera_file_new(self, hiera_filename, server, cluster,
                                      cluster_servers, package):
        """Collect calculated, provisioning and sequencing parameters for
        this server and dump them as single-quoted YAML into
        *hiera_filename*."""
        hiera_params = {}
        # Calculated values first, then explicit provisioning values
        # (which therefore win on key collisions), then sequencing flags.
        for chunk in (
                self.add_cluster_calculated_params(cluster, package),
                self.add_server_calculated_params(server, package),
                self.add_package_calculated_params(package),
                self.add_cluster_provisioning_params(cluster, package),
                self.add_server_provisioning_params(server, package),
                self.add_package_provisioning_params(package),
                self.add_sequencing_params(cluster, package)):
            hiera_params.update(chunk)
        # Dump the merged parameters as the server's hiera data file.
        with open(hiera_filename, "w") as hiera_fh:
            hiera_fh.write(
                yaml.dump(hiera_params, default_style='\'', indent=4))

    # end def build_contrail_hiera_file_new

    #generate random string
    def random_string(self, string_length=10):
        """Return a random string of length string_length.

        Characters are uppercase hex digits (0-9, A-F) drawn from random
        UUIDs.  The previous implementation used a single UUID and so
        silently capped the result at 32 characters; this version
        concatenates as many UUIDs as needed to honor any requested length.
        """
        result = ""
        while len(result) < string_length:
            # uuid4().hex is 32 lowercase hex characters with no dashes.
            result += uuid.uuid4().hex.upper()
        return result[0:string_length]

    def build_hiera_files(self, hieradata_dir, provision_params, server,
                          cluster, cluster_servers, package, serverDb):
        """Create puppet hiera data files for a server.

        Writes <fqdn>-contrail.yaml when the cluster carries new-style
        "provision" parameters, and seeds the shared contrail-defaults.yaml
        from /etc/contrail_smgr if it is not already present.

        :param hieradata_dir: directory (with trailing slash) for hiera files
        :param provision_params: unused here; kept for interface compatibility
        :param server: server dict (needs 'host_name', optional 'domain')
        :param cluster: cluster dict; 'parameters' may supply the domain
        :param cluster_servers: all servers in the cluster (passed through)
        :param package: package dict from the server DB
        :param serverDb: unused here; kept for interface compatibility
        """
        cluster_params = cluster.get("parameters", {})
        # Server-level domain takes precedence over the cluster domain.
        domain = server.get('domain', '')
        if not domain:
            domain = cluster_params.get('domain', '')
        server_fqdn = server['host_name'] + "." + domain
        contrail_hiera_file = hieradata_dir + server_fqdn + \
            "-contrail.yaml"
        # if cluster parameters has provision key, use new way of building Hiera file, else
        # continue with old way.
        if ("provision" in cluster_params):
            self.build_contrail_hiera_file_new(contrail_hiera_file, server,
                                               cluster, cluster_servers,
                                               package)
        # Check and add contrail-defaults.yaml
        contrail_defaults_file = hieradata_dir + "contrail-defaults.yaml"
        contrail_defaults_source = "/etc/contrail_smgr/contrail-defaults.yaml"
        if not os.path.exists(contrail_defaults_file) and os.path.exists(
                contrail_defaults_source):
            shutil.copy(contrail_defaults_source, contrail_defaults_file)

    # end def build_hiera_files

    def modify_server_hiera_data(self,
                                 server_id,
                                 hiera_file,
                                 role_steps_list,
                                 enable=True):
        """Toggle per-role-step enable flags in a server's hiera file.

        For every (server_id, role_step) tuple matching this server, sets
        'contrail::sequencing::enable_<step>' (or, if that key is absent,
        'contrail::params::enable_<step>') to `enable` and rewrites the
        file.  Silently returns when any argument is empty or the file
        cannot be read.

        :param server_id: id of the server whose hiera data to modify
        :param hiera_file: path of the hiera YAML file
        :param role_steps_list: iterable of (server_id, role_step) tuples
        :param enable: value to store for each matched key
        """
        if not server_id or not hiera_file or not role_steps_list:
            return
        try:
            with open(hiera_file, 'r') as hiera_data_fp:
                # safe_load: hiera data is plain YAML; yaml.load without a
                # Loader is deprecated and can construct arbitrary objects.
                hiera_data_dict = yaml.safe_load(hiera_data_fp)
        except IOError:
            # Missing or unreadable hiera file -- nothing to modify.
            return
        if not hiera_data_dict:
            return
        for role_step_tuple in role_steps_list:
            if server_id == role_step_tuple[0]:
                # Role steps use dashes externally; hiera keys use underscores.
                role_step = role_step_tuple[1].replace('-', '_')
                key = 'contrail::sequencing::enable_' + role_step
                if key not in hiera_data_dict:
                    key = 'contrail::params::enable_' + role_step
                hiera_data_dict[key] = enable
        data = yaml.dump(hiera_data_dict, default_style='\'', indent=4)
        with open(hiera_file, "w") as hiera_fh:
            hiera_fh.write(data)

    # end modify_server_hiera_data

    def new_provision_server(self, provision_params, server, cluster,
                             cluster_servers, package, serverDb):
        """Provision a server using new-style puppet environments.

        Builds hiera data, refreshes the node entry in the environment's
        site.pp, records the server->environment mapping, and, for compute
        roles hosted on an ESX server, starts the contrail VM first.

        :param provision_params: passed through to build_hiera_files
        :param server: server dict (needs 'host_name', 'roles'; optional
            'domain', 'parameters')
        :param cluster: cluster dict from the server DB
        :param cluster_servers: all servers in the cluster
        :param package: package dict; parameters supply the puppet
            manifest version and puppet version
        :param serverDb: server DB handle used to look up the ESX server
        """
        import ast
        server_params = server.get("parameters", {})
        cluster_params = cluster.get("parameters", {})
        package_params = package.get("parameters", {})
        # Server-level domain takes precedence over the cluster domain.
        domain = server.get('domain', '')
        if not domain:
            domain = cluster_params.get('domain', '')
        server_fqdn = server['host_name'] + "." + domain
        env_name = package_params.get('puppet_manifest_version', "")
        env_name = env_name.replace('-', '_')
        site_file = self.puppet_directory + "environments/" + \
            env_name + "/manifests/site.pp"
        hieradata_dir = self.puppet_directory + "environments/" + \
            env_name + "/hieradata/"
        # Start contrail VM if running compute on esx_server.
        # literal_eval instead of eval: 'roles' is a stored list literal
        # and must never be evaluated as arbitrary python code.
        if 'compute' in ast.literal_eval(server['roles']):
            esx_server_id = server_params.get('esx_server', None)
            if esx_server_id:
                esx_servers = serverDb.get_server(
                    {'id': server_params['esx_server']}, detail=True)
                esx_server = esx_servers[0]
                if esx_server:
                    self.initiate_esx_contrail_vm(server, esx_server)
        # Build Hiera data for the server
        self.build_hiera_files(hieradata_dir, provision_params, server,
                               cluster, cluster_servers, package, serverDb)
        # Create an entry for this node in site.pp.
        # First, delete any existing entry and then add a new one.
        self.delete_node_entry(site_file, server_fqdn)
        # Now add a new node entry
        puppet_version = package_params.get("puppet_version", 0.0)
        if (puppet_version >= 3.0):
            self.add_node_entry_new(site_file, server_fqdn)
        else:
            self.add_node_entry(site_file, server_fqdn, server, cluster,
                                cluster_servers, puppet_version)

        # Add entry for the server to environment mapping in
        # node_mapping.json file.
        self.update_node_map_file(server_fqdn, env_name)

    # end def new_provision_server

    # Function to remove puppet files and entries created when provisioning the server. This is called
    # when server is being reimaged. We do not want old provisioning data to be retained.
    def new_unprovision_server(self, server_id, server_domain):
        """Remove puppet artifacts created for a previously provisioned server.

        Drops the server's entry from node_mapping.json, its node entry
        from the environment's site.pp, and its per-server hiera YAML
        files.  All removals are best-effort; missing pieces are ignored.

        :param server_id: host name of the server
        :param server_domain: DNS domain of the server
        """
        server_fqdn = server_id + "." + server_domain
        # Remove node to environment mapping from node_mapping.json file;
        # this also tells us which environment the node belonged to.
        env_name = self.update_node_map_file(server_fqdn, None)
        if env_name is None:
            return
        # Remove server node entry from site.pp.
        site_file = self.puppet_directory + "environments/" + \
            env_name + "/manifests/site.pp"
        try:
            self.delete_node_entry(site_file, server_fqdn)
        except Exception:
            # Best effort -- site.pp may already lack this node.
            pass
        # Remove Hiera Data files for the server.  Each file is removed
        # independently so one missing file does not block the other.
        hiera_datadir = self.puppet_directory + "environments/" + \
            env_name + "/hieradata/"
        try:
            os.remove(hiera_datadir + server_fqdn + "-contrail.yaml")
        except OSError:
            pass
        try:
            os.remove(hiera_datadir + server_fqdn + "-openstack.yaml")
        except OSError:
            pass

    # end new_unprovision_server()

    # env_name empty string or None removes the entry from the map file.
    # env_name value specified will be updated in the map file.
    # env_name may be a valid or an invalid manifest;
    #        an invalid manifest is used to turn off the agent puppet run.
    # server_fqdn is required for both update and delete of an entry.
    def update_node_map_file(self, server_fqdn, env_name):
        """Add, update or remove a server entry in node_mapping.json.

        :param server_fqdn: key of the entry; required for update and delete.
        :param env_name: environment to map the server to.  Empty/None
            removes the entry instead, and the removed value is returned.
        :returns: the effective env_name (the removed entry's previous
            value on delete), or None when server_fqdn is missing, the
            map file cannot be read/written, or there was no entry to
            remove.
        """
        if not server_fqdn:
            return None

        node_env_map_file = self.smgr_base_dir + self._node_env_map_file

        try:
            with open(node_env_map_file, "r") as env_file:
                node_env_dict = json.load(env_file)
            # end with
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed; read/parse failures are logged.
            msg = "Not able open environment map file %s" % (node_env_map_file)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return None

        if env_name:
            node_env_dict[server_fqdn] = env_name
            msg = "Add/Modify map file with env_name %s for server %s" % (
                env_name, server_fqdn)
            self._smgr_log.log(self._smgr_log.DEBUG, msg)
        else:
            env_name = node_env_dict.pop(server_fqdn, None)
            msg = "Remove server from map file for server %s" % (server_fqdn)
            self._smgr_log.log(self._smgr_log.DEBUG, msg)
            if not env_name:
                # Nothing was removed; skip the rewrite.
                return env_name

        try:
            with open(node_env_map_file, "w") as env_file:
                json.dump(node_env_dict, env_file, sort_keys=True, indent=4)
            # end with
        except Exception:
            msg = "Not able open environment map file %s for update" % (
                node_env_map_file)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return None
        return env_name

    # end update_node_map_file

    def is_new_provisioning(self, puppet_manifest_version):
        """Return True when a puppet environment directory exists for the
        given manifest version, i.e. new-style provisioning applies."""
        environment = puppet_manifest_version.replace('-', '_')
        # No manifest version means no environment to look for.
        if not environment:
            return False
        return os.path.isdir("/etc/puppet/environments/" + environment)

    # end is_new_provisioning

    def provision_server(self, provision_params, server, cluster,
                         cluster_servers, package, serverDb):
        """Entry point for provisioning a server.

        The new way to create necessary puppet manifest files and parameters
        data.  Old (pre-3.0) contrail puppet manifests are no longer
        supported: when no puppet environment exists for the package's
        manifest version this only logs a message and returns.

        :param provision_params: provisioning parameters, passed through
        :param server: server dict from the server DB
        :param cluster: cluster dict from the server DB
        :param cluster_servers: all servers in the cluster
        :param package: package dict; its parameters carry
            'puppet_manifest_version'
        :param serverDb: server DB handle, passed through
        """
        package_params = package.get("parameters", {})
        puppet_manifest_version = package_params.get('puppet_manifest_version',
                                                     "")
        if self.is_new_provisioning(puppet_manifest_version):
            self.new_provision_server(provision_params, server, cluster,
                                      cluster_servers, package, serverDb)
        else:
            # old puppet manifests not supported anymore, log message
            # and return
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "No environment for version found AND this version does not support old contrail puppet manifest (2.0 and before)"
            )
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "Use server manager version 2.21 or earlier if you have old style contrail puppet manifests"
            )
class ServerMgrCobbler:
    """Thin wrapper over the cobbler XMLRPC API.

    Maintains a login token and provides helpers to create/delete distros,
    profiles, repos and systems, and to control netboot/reboot of managed
    servers.  All calls go through self._server (an xmlrpclib proxy).
    """

    # Class-level connection defaults; instance values are set in __init__.
    _cobbler_ip = _DEF_COBBLER_IP
    _cobbler_port = _DEF_COBBLER_PORT
    _cobbler_username = _DEF_USERNAME
    _cobbler_password = _DEF_PASSWORD
    _server = None
    _token = None
    # 30 minute timer to keep validating the cobbler token
    _COB_TOKEN_CHECK_TIMER = 1800

    def __init__(self, base_dir=_DEF_BASE_DIR,
                 ip_address=_DEF_COBBLER_IP,
                 port=_DEF_COBBLER_PORT,
                 username=_DEF_USERNAME,
                 password=_DEF_PASSWORD):
        """Connect and log in to cobbler, then create/refresh and reposync
        the contrail centos repo.

        :raises ServerMgrException: when the 'cobbler reposync' command fails.
        """
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrCobbler Init")


        # Store the passed in values
        self._cobbler_ip = ip_address
        self._cobbler_port = port
        self._cobbler_username = username
        self._cobbler_password = password
        try:
            # Port may be empty/None, in which case the plain HTTP URL
            # (default port) is used.
            if self._cobbler_port:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + ":" +
                    self._cobbler_port + "/cobbler_api")
            else:
                self._server = xmlrpclib.Server(
                    "http://" +
                    self._cobbler_ip + "/cobbler_api")
            self._token = self._server.login(self._cobbler_username,
                                             self._cobbler_password)

            # Copy contrail centos repo to cobbler repos, so that target
            # systems can install and run puppet agent from kickstart.
            repo = self._server.find_repo({"name": _CONTRAIL_CENTOS_REPO})
            if repo:
                rid = self._server.get_repo_handle(
                    _CONTRAIL_CENTOS_REPO, self._token)
            else:
                rid = self._server.new_repo(self._token)
            self._server.modify_repo(rid, "arch", "x86_64", self._token)
            repo_dir = base_dir + "contrail-centos-repo"
            self._server.modify_repo(
                rid, "name", _CONTRAIL_CENTOS_REPO, self._token)
            self._server.modify_repo(rid, "mirror", repo_dir, self._token)
            self._server.modify_repo(rid, "keep_updated", True, self._token)
            self._server.modify_repo(rid, "priority", "99", self._token)
            self._server.modify_repo(rid, "rpm_list", [], self._token)
            self._server.modify_repo(rid, "yumopts", {}, self._token)
            self._server.modify_repo(rid, "mirror_locally", True, self._token)
            self._server.modify_repo(rid, "environment", {}, self._token)
            self._server.modify_repo(rid, "comment", "...", self._token)
            self._server.save_repo(rid, self._token)
            # Issue cobbler reposync for this repo
            cmd = "cobbler reposync --only=" + _CONTRAIL_CENTOS_REPO
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as e:
            msg = ("Cobbler Init: error %d when executing"
                   "\"%s\"" %(e.returncode, e.cmd))
            raise ServerMgrException(msg)
        except Exception as e:
            raise e
    # End of __init__

    # Function to check if cobbler token is valid or not, before calling any
    # XMLRPC calls that need a valid token. If token is not valid, the function
    # acquires a new token from cobbler.
    def _validate_token(self, token, resource):
        """Refresh self._token via a fresh login when `token` no longer
        grants access to `resource`."""
        valid = self._server.check_access_no_fail(token, resource)
        if not valid:
            self._token = self._server.login(
                self._cobbler_username, self._cobbler_password)
    # end _validate_token

    def create_distro(self, distro_name, image_type, path,
                      kernel_file, initrd_file, cobbler_ip_address):
        """Create a cobbler distro for the given image, if not present.

        Sets kernel/initrd paths and per-image-type attributes (centos/
        fedora ksmeta tree URL, ubuntu breed, esxi boot/template files).
        No-op when a distro with distro_name already exists.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token, "distro")
            # If distro already exists in cobbler, nothing to do.
            distro = self._server.find_distro({"name":  distro_name})
            if distro:
                return
            distro_id = self._server.new_distro(self._token)
            self._server.modify_distro(distro_id, 'name',
                                       distro_name, self._token)
            self._server.modify_distro(distro_id, 'kernel',
                                       path + kernel_file, self._token)
            self._server.modify_distro(distro_id, 'initrd',
                                       path + initrd_file, self._token)
            if ((image_type == 'centos') or (image_type == 'fedora')):
                # Anaconda-based installs fetch the install tree over HTTP.
                self._server.modify_distro(
                    distro_id, 'ksmeta',
                    'tree=http://' + cobbler_ip_address +
                    '/contrail/images/' + distro_name,
                    self._token)
            if (image_type == 'ubuntu'):
                self._server.modify_distro(distro_id, 'arch',
                                           'x86_64', self._token)
                self._server.modify_distro(distro_id, 'breed',
                                           'ubuntu', self._token)
                self._server.modify_distro(distro_id, 'os_version',
                                           'precise', self._token)
            elif ((image_type == 'esxi5.1') or
                  (image_type == 'esxi5.5')):
                if (image_type == 'esxi5.1'):
                    os_version = 'esxi51'
                else:
                    os_version = 'esxi55'
                self._server.modify_distro(
                    distro_id, 'ksmeta',
                    'tree=http://' + cobbler_ip_address +
                    '/contrail/images/' + distro_name,
                    self._token)
                self._server.modify_distro(
                    distro_id, 'arch', 'x86_64', self._token)
                self._server.modify_distro(
                    distro_id, 'breed', 'vmware', self._token)
                self._server.modify_distro(
                    distro_id, 'os_version', os_version, self._token)
                # ESXi needs its image payload and a boot.cfg template made
                # available to the target via cobbler's boot/template files.
                self._server.modify_distro(
                    distro_id, 'boot_files',
                    '$local_img_path/*.*=' + path + '/*.*',
                    self._token)
                self._server.modify_distro(
                    distro_id, 'template_files',
                    '/etc/cobbler/pxe/bootcfg_%s.template=' %(
                        os_version) +
                    '$local_img_path/cobbler-boot.cfg',
                    self._token)
            else:
                pass
            self._server.save_distro(distro_id, self._token)
        except Exception as e:
            raise e
    # End of create_distro

    def create_profile(self, profile_name,
                       distro_name, image_type, ks_file, kernel_options,
                        ks_meta):
        """Create a cobbler profile tied to distro_name, if not present.

        Attaches kickstart file, kernel options and ks_meta; for centos/
        fedora the contrail centos repo is added to the profile's repos.
        No-op when a profile with profile_name already exists.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token, "profile")
            # If profile exists, nothing to do, jus return.
            profile = self._server.find_profile({"name":  profile_name})
            if profile:
                return
            profile_id = self._server.new_profile(self._token)
            self._server.modify_profile(profile_id, 'name',
                                        profile_name, self._token)
            self._server.modify_profile(profile_id, "distro",
                                        distro_name, self._token)
            self._server.modify_profile(profile_id, "kickstart",
                                        ks_file, self._token)
            self._server.modify_profile(profile_id, "kernel_options",
                                        kernel_options, self._token)
            self._server.modify_profile(profile_id, "ks_meta",
                                        ks_meta, self._token)
            if ((image_type == "centos") or (image_type == "fedora")):
                repo_list = [_CONTRAIL_CENTOS_REPO]
                self._server.modify_profile(profile_id, "repos",
                                            repo_list, self._token)
            self._server.save_profile(profile_id, self._token)
        except Exception as e:
            raise e
    # End of create_profile

    def create_repo(self, repo_name, mirror):
        """Create or update a cobbler repo pointing at `mirror`, then run
        'cobbler reposync' for it.

        :raises ServerMgrException: when the reposync command fails.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token, "repo")
            repo = self._server.find_repo({"name": repo_name})
            if repo:
                rid = self._server.get_repo_handle(
                    repo_name, self._token)
            else:
                rid = self._server.new_repo(self._token)
            self._server.modify_repo(rid, "arch", "x86_64", self._token)
            self._server.modify_repo(
                rid, "name", repo_name, self._token)
            self._server.modify_repo(rid, "mirror", mirror, self._token)
            self._server.modify_repo(rid, "mirror_locally", True, self._token)
            self._server.save_repo(rid, self._token)
            # Issue cobbler reposync for this repo
            cmd = "cobbler reposync --only=" + repo_name
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as e:
            msg = ("create_repo: error %d when executing"
                   "\"%s\"" %(e.returncode, e.cmd))
            raise ServerMgrException(msg)
        except Exception as e:
            raise e
    # End of create_repo

    def create_system(self, system_name, profile_name, package_image_id,
                      mac, ip, subnet, gway, system_domain,
                      ifname, enc_passwd, server_license, esx_nicname,
                      power_type, power_user, power_pass, power_address,
                      base_image, server_ip, partition=None):
        """Create or update a cobbler system record for one server.

        Configures hostname, power management, interface (mac/ip/dns),
        ks_meta and, for ubuntu, kernel options.  For centos with a
        package image, a sub-profile carrying the package repo is created
        and used.  Netboot is left disabled; it is enabled separately when
        the server is power-cycled.

        NOTE(review): `subnet` and `gway` are accepted but not used below --
        presumably kept for interface compatibility with callers.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token, "system")
            system = self._server.find_system({"name":  system_name})
            if system:
                system_id = self._server.get_system_handle(
                    system_name, self._token)
            else:
                system_id = self._server.new_system(self._token)
                self._server.modify_system(system_id, 'name',
                                           system_name, self._token)
            self._server.modify_system(
                system_id, "hostname", system_name, self._token)
            self._server.modify_system(
                system_id, "power_type", power_type, self._token)
            self._server.modify_system(
                system_id, "power_user", power_user, self._token)
            self._server.modify_system(
                system_id, "power_pass", power_pass, self._token)
            self._server.modify_system(
                system_id, "power_address", power_address, self._token)
            # For centos, create a sub-profile that has the repo for
            # package_image_id also made available for this system.
            if ((base_image['type'] == "centos") and
                (package_image_id)):
                sub_profile_name = profile_name + "-" + package_image_id
                sub_profile = self._server.find_profile(
                    {"name":  sub_profile_name})
                if not sub_profile:
                    sub_profile_id = self._server.new_subprofile(self._token)
                    self._server.modify_profile(
                        sub_profile_id, 'name',
                        sub_profile_name, self._token)
                    self._server.modify_profile(
                        sub_profile_id, 'parent',
                        profile_name, self._token)
                    repos = [
                        package_image_id,
                        _CONTRAIL_CENTOS_REPO ]
                    self._server.modify_profile(
                        sub_profile_id, 'repos',
                        repos, self._token)
                    self._server.save_profile(
                        sub_profile_id, self._token)
                # end if sub_profile
            else:
                sub_profile_name = profile_name
            #end if
            self._server.modify_system(
                system_id, "profile", sub_profile_name, self._token)
            interface = {}
            if mac:
                interface['macaddress-%s' % (ifname)] = mac
            if ip:
                interface['ipaddress-%s' % (ifname)] = ip
            if system_domain:
                interface['dnsname-%s' %
                          (ifname)] = system_name + '.' + system_domain
            self._server.modify_system(system_id, 'modify_interface',
                                       interface, self._token)
            # Build the kickstart metadata string consumed by the
            # kickstart/preseed templates.
            ks_metadata = 'passwd=' + enc_passwd
            ks_metadata += ' ip_address=' + ip
            ks_metadata += ' system_name=' + system_name
            ks_metadata += ' system_domain=' + system_domain
            if partition:
                ks_metadata += ' partition=' + partition
            else:
                ks_metadata += ' partition=' + '/dev/sd?'
            if package_image_id:
                ks_metadata += ' contrail_repo_name=' + \
                    package_image_id
            if ((base_image['type'] == 'esxi5.1') or
                (base_image['type'] == 'esxi5.5')):
                ks_metadata += ' server_license=' + server_license
                ks_metadata += ' esx_nicname=' + esx_nicname

                # temporary patch to have kickstart work for esxi. ESXi seems
                # to take kickstart from profile instead of system. So need to copy
                # ks_meta parameters at profile level too. This is a hack that would
                # be removed later - TBD Abhay
                profile = self._server.find_profile({"name":  profile_name})
                if profile:
                    profile_id = self._server.get_profile_handle(
                        profile_name, self._token)
                    self._server.modify_profile(
                        profile_id, 'ksmeta', ks_metadata, self._token)
                # end hack workaround
            #end if



            self._server.modify_system(system_id, 'ksmeta',
                                       ks_metadata, self._token)

            if (base_image['type'] == "ubuntu"):
                # Ubuntu preseed reads these from the kernel command line.
                kernel_options = 'system_name=' + system_name
                kernel_options += ' system_domain=' + system_domain
                kernel_options += ' ip_address=' + ip
                kernel_options += ' server=' + server_ip
                if package_image_id:
                    kernel_options += ' contrail_repo_name=' + \
                        package_image_id
                self._server.modify_system(system_id, 'kernel_options',
                                           kernel_options, self._token)

            # Note : netboot is not enabled for the system yet. This is done
            # when API to power-cycle the server is called. For now set
            # net_enabled to False
            self._server.modify_system(
                system_id, 'netboot_enabled', False, self._token)
            self._server.save_system(system_id, self._token)
            #self._server.sync(self._token)
        except Exception as e:
            raise e
    # End of create_system

    def enable_system_netboot(self, system_name):
        """Enable PXE netboot for system_name (next boot reimages it).

        :raises Exception: when the system is not known to cobbler.
        """
        try:
            # Validate cobbler token
            self._validate_token(self._token, "system")
            system = self._server.find_system({"name":  system_name})
            if not system:
                raise Exception(
                    "cobbler error : System %s not found" % system_name)
            system_id = self._server.get_system_handle(
                system_name, self._token)
            self._server.modify_system(
                system_id, 'netboot_enabled', True, self._token)
            self._server.save_system(system_id, self._token)
            #Sync per every system is long
            #Do it at end
            #self._server.sync(self._token)
        except Exception as e:
            raise e
    # End of enable_system_netboot

    def reboot_system(self, reboot_system_list):
        """Kick off an asynchronous cobbler power-reboot task for the
        given list of systems."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "system")
            power = {
                "power" : "reboot",
                "systems" : reboot_system_list }
            self._smgr_log.log(self._smgr_log.DEBUG, "reboot_system list is %s" % reboot_system_list)
            self._smgr_log.log(self._smgr_log.DEBUG, "Reboot System Start")
            # background_power_system runs server-side; task_id identifies
            # the async cobbler task (not waited on here).
            task_id = self._server.background_power_system(power, self._token)
            self._smgr_log.log(self._smgr_log.DEBUG, "Reboot System End")

            # Alternate way using direct cobbler api, not needed, but commented
            # and kept for reference.
            # system = self._capi_handle.get_item(
            #     "system", system_name)
            # if not system:
            #     raise Exception(
            #         "cobbler error : System %s not found" % system_name)
            # else:
            #     self._capi_handle.reboot(system)
        except Exception as e:
            raise e
    # End of reboot_system

    def delete_distro(self, distro_name):
        """Best-effort removal of a cobbler distro; errors are ignored."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "distro")
            self._server.remove_distro(distro_name, self._token)
        except Exception as e:
            # Deliberately swallowed: distro may not exist.
            pass
    # End of delete_distro

    def delete_repo(self, repo_name):
        """Best-effort removal of a cobbler repo; errors are ignored."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "repo")
            self._server.remove_repo(repo_name, self._token)
        except Exception as e:
            # Deliberately swallowed: repo may not exist.
            pass
    # End of delete_repo

    def delete_profile(self, profile_name):
        """Best-effort removal of a cobbler profile; errors are ignored."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "profile")
            self._server.remove_profile(profile_name, self._token)
        except Exception as e:
            # Deliberately swallowed: profile may not exist.
            pass
    # End of delete_profile

    def delete_system(self, system_name):
        """Remove a cobbler system record if it exists; other XMLRPC
        errors propagate to the caller."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "system")
            system = self._server.find_system({"name":  system_name})
            if system:
                self._server.remove_system(system_name, self._token)
        except Exception as e:
            raise e
    # End of delete_system

    def sync(self):
        """Run a full cobbler sync (rebuilds PXE/DHCP config on the
        cobbler server)."""
        try:
            # Validate cobbler token
            self._validate_token(self._token, "system")
            self._server.sync(self._token)
        except Exception as e:
            raise e
class ServerMgrMonBasePlugin():
    """Base class for Server Manager monitoring/inventory plugins.

    Provides config parsing, sandesh initialization, SSH key generation and
    distribution, plus placeholder REST handlers that concrete
    monitoring/inventory plugins are expected to override.
    """
    # Class-level defaults; instances/subclasses overwrite most of these.
    val = 1
    freq = 300  # default polling frequency (seconds)
    _config_set = False
    _serverDb = None  # server DB handle, set via set_serverdb()
    _monitoring_log = None
    _collectors_ip = None
    _discovery_server = None
    _discovery_port = None
    _default_ipmi_username = None  # fallback IPMI creds, set via set_ipmi_defaults()
    _default_ipmi_password = None
    _provision_immediately_after_reimage = False
    # Log-level identifiers passed to self._smgr_log.log()
    DEBUG = "debug"
    INFO = "info"
    WARN = "warn"
    ERROR = "error"
    CRITICAL = "critical"

    def __init__(self):
        """Set up default monitoring/inventory config and the SM logger."""
        self.MonitoringCfg = {
            'monitoring_frequency': _DEF_MON_FREQ,
            'monitoring_plugin': _DEF_MONITORING_PLUGIN
        }
        self.InventoryCfg = {
            'inventory_plugin': _DEF_INVENTORY_PLUGIN
        }
        self._smgr_log = ServerMgrlogger()
        # Runtime state, filled in by parse_*_args() / initialize_features().
        self.monitoring_args = None
        self.inventory_args = None
        self.monitoring_config_set = False
        self.inventory_config_set = False
        self.server_monitoring_obj = None
        self.server_inventory_obj = None
        self.monitoring_gevent_thread_obj = None

    def set_serverdb(self, server_db):
        self._serverDb = server_db

    def set_ipmi_defaults(self, ipmi_username, ipmi_password):
        self._default_ipmi_username = ipmi_username
        self._default_ipmi_password = ipmi_password

    def parse_args(self, args_str, section):
        """Parse command-line/config arguments for one config section.

        Reads an optional -c/--config_file argument, loads that ini file
        (default _DEF_SMGR_CFG_FILE) and folds the keys of *section*
        ("MONITORING" or "INVENTORY") into the matching defaults dict, then
        parses the remaining argv against those defaults.

        :param args_str: argument list to parse.
        :param section: config section name, "MONITORING" or "INVENTORY".
        :return: argparse.Namespace with the merged values.
        """
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument(
            "-c", "--config_file",
            help="Specify config file with the parameter values.",
            metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str)

        if args.config_file:
            config_file = args.config_file
        else:
            config_file = _DEF_SMGR_CFG_FILE
        config = ConfigParser.SafeConfigParser()
        config.read([config_file])
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            # parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        if section == "MONITORING":
            # Read the section once instead of rebuilding the dict per key.
            section_items = dict(config.items("MONITORING"))
            for key, value in section_items.items():
                if key in self.MonitoringCfg:
                    self.MonitoringCfg[key] = value
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Configuration set for invalid parameter: %s" % key)
            self._smgr_log.log(self._smgr_log.DEBUG,
                               "Arguments read from monitoring config file %s" % self.MonitoringCfg)
            parser.set_defaults(**self.MonitoringCfg)
        elif section == "INVENTORY":
            section_items = dict(config.items("INVENTORY"))
            for key, value in section_items.items():
                if key in self.InventoryCfg:
                    self.InventoryCfg[key] = value
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Configuration set for invalid parameter: %s" % key)
            self._smgr_log.log(self._smgr_log.DEBUG,
                               "Arguments read from inventory config file %s" % self.InventoryCfg)
            parser.set_defaults(**self.InventoryCfg)
        return parser.parse_args(remaining_argv)

    def parse_monitoring_args(self, args_str, args, sm_args, _rev_tags_dict, base_obj):
        """Parse the MONITORING config section and build the monitoring object.

        When a "monitoring_plugin" of the form "<module>.<class>" is
        configured AND sm_args.collectors is set, the plugin class is
        imported and instantiated; otherwise *base_obj* (the no-op base
        implementation) is used.

        :param args_str: raw argument string forwarded to parse_args().
        :param args: parsed args carrying config_file.
        :param sm_args: server-manager args (listen ip/port, collectors, ...).
        :param _rev_tags_dict: reverse tag mapping passed through to the plugin.
        :param base_obj: fallback object when monitoring is not configured.
        :return: the monitoring plugin instance or base_obj.
        :raises ImportError: when the configured plugin module is missing.
        """
        config = ConfigParser.SafeConfigParser()
        config.read([args.config_file])
        try:
            if dict(config.items("MONITORING")).keys():
                # Handle parsing for monitoring
                monitoring_args = self.parse_args(args_str, "MONITORING")
                if monitoring_args:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Monitoring arguments read from config.")
                    self.monitoring_args = monitoring_args
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
        except ConfigParser.NoSectionError:
            self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
        if self.monitoring_args:
            try:
                if self.monitoring_args.monitoring_plugin:
                    # Plugin is specified as "<module>.<class>".
                    module_components = str(self.monitoring_args.monitoring_plugin).split('.')
                    monitoring_module = __import__(str(module_components[0]))
                    monitoring_class = getattr(monitoring_module, module_components[1])
                    # monitoring_config_set is only flipped when collectors
                    # are configured as well.
                    if sm_args.collectors:
                        self.server_monitoring_obj = monitoring_class(1, self.monitoring_args.monitoring_frequency,
                                                                      sm_args.listen_ip_addr,
                                                                      sm_args.listen_port, sm_args.collectors,
                                                                      sm_args.http_introspect_port, _rev_tags_dict)
                        self.monitoring_config_set = True
                else:
                    self._smgr_log.log(self._smgr_log.ERROR,
                                       "Analytics IP and Monitoring API misconfigured, monitoring aborted")
                    self.server_monitoring_obj = base_obj
            except ImportError as ie:
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Configured modules are missing. Server Manager will quit now.")
                self._smgr_log.log(self._smgr_log.ERROR, "Error: " + str(ie))
                raise ImportError
        else:
            self.server_monitoring_obj = base_obj
        return self.server_monitoring_obj

    def parse_inventory_args(self, args_str, args, sm_args, _rev_tags_dict, base_obj):
        """Parse the INVENTORY config section and build the inventory object.

        Mirror of parse_monitoring_args(): when an "inventory_plugin" of the
        form "<module>.<class>" is configured AND sm_args.collectors is set,
        the plugin class is imported and instantiated; otherwise *base_obj*
        is used.

        :param args_str: raw argument string forwarded to parse_args().
        :param args: parsed args carrying config_file.
        :param sm_args: server-manager args (listen ip/port, collectors, ...).
        :param _rev_tags_dict: reverse tag mapping passed through to the plugin.
        :param base_obj: fallback object when inventory is not configured.
        :return: the inventory plugin instance or base_obj.
        :raises ImportError: when the configured plugin module is missing.
        """
        config = ConfigParser.SafeConfigParser()
        config.read([args.config_file])
        try:
            if dict(config.items("INVENTORY")).keys():
                # Handle parsing for inventory
                inventory_args = self.parse_args(args_str, "INVENTORY")
                if inventory_args:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Inventory arguments read from config.")
                    self.inventory_args = inventory_args
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")
        except ConfigParser.NoSectionError:
            self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")

        if self.inventory_args:
            try:
                if self.inventory_args.inventory_plugin:
                    # Plugin is specified as "<module>.<class>".
                    module_components = str(self.inventory_args.inventory_plugin).split('.')
                    inventory_module = __import__(str(module_components[0]))
                    inventory_class = getattr(inventory_module, module_components[1])
                    if sm_args.collectors:
                        self.server_inventory_obj = inventory_class(sm_args.listen_ip_addr, sm_args.listen_port,
                                                                    sm_args.http_introspect_port, _rev_tags_dict)
                        self.inventory_config_set = True
                else:
                    # Fixed log-message typo ("Iventory" -> "Inventory").
                    self._smgr_log.log(self._smgr_log.ERROR,
                                       "Inventory API misconfigured, inventory aborted")
                    self.server_inventory_obj = base_obj
            except ImportError as ie:
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Configured modules are missing. Server Manager will quit now.")
                # Log the underlying error (as parse_monitoring_args does) and
                # re-raise the original exception rather than a bare ImportError.
                self._smgr_log.log(self._smgr_log.ERROR, "Error: " + str(ie))
                raise
        else:
            self.server_inventory_obj = base_obj
        return self.server_inventory_obj

    def validate_rest_api_args(self, request, rev_tags_dict):
        ret_data = {"msg": None, "type_msg": None}
        match_keys = list(['id', 'cluster_id', 'tag', 'where'])
        print_match_keys = list(['server_id', 'cluster_id', 'tag', 'where'])
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "Validating bottle arguments.")
        ret_data['status'] = 1
        query_args = parse_qs(urlparse(request.url).query,
                              keep_blank_values=True)
        if len(query_args) == 0:
            ret_data["type"] = ["all"]
            ret_data["status"] = True
            ret_data["match_key"] = None
            ret_data["match_value"] = None
        elif len(query_args) >= 1:
            select_value_list = None
            if "select" in query_args:
                select_value_list = query_args.get("select", None)[0]
                select_value_list = str(select_value_list).split(',')
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "Select value list=" + str(select_value_list))
                query_args.pop("select")
            if not select_value_list:
                ret_data["type"] = ["all"]
            else:
                ret_data["type"] = select_value_list
            match_key = match_value = None
            if query_args:
                match_key, match_value = query_args.popitem()
            if match_key and match_key not in match_keys:
                ret_data["status"] = False
                ret_data["msg"] = "Wrong Match Key Specified. " + "Choose one of the following keys: " + \
                                  str(['--{0}'.format(key) for key in print_match_keys]).strip('[]')
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Wrong Match Key")
            elif match_key and (match_value is None or match_value[0] == ''):
                ret_data["status"] = False
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "No macth value given")
                ret_data["msg"] = "No Match Value Specified.\n"
            else:
                ret_data["status"] = True
                if match_key:
                    ret_data["match_key"] = str(match_key)
                else:
                    ret_data["match_key"] = None
                if match_value:
                    ret_data["match_value"] = str(match_value[0])
                else:
                    ret_data["match_value"] = None
        return ret_data

    def process_server_tags(self, rev_tags_dict, match_value):
        if not match_value:
            return {}
        match_dict = {}
        tag_list = match_value.split(',')
        for x in tag_list:
            tag = x.strip().split('=')
            if tag[0] in rev_tags_dict:
                match_dict[rev_tags_dict[tag[0]]] = tag[1]
            else:
                self._smgr_log.log(self._smgr_log.ERROR, "Wrong tag specified in rest api request.")
                return {}
        return match_dict

    def sandesh_init(self, sm_args, mon_config_set, inv_config_set):
        """Initialize the sandesh generator for monitoring and/or inventory.

        Chooses the sandesh Module identity depending on which of the two
        features is configured (and whose support packages actually import),
        then calls sandesh_global.init_generator() against the configured
        collectors.  Skipped silently when no collectors are configured.

        :param sm_args: server-manager args (collectors, http_introspect_port,
            sandesh_log_level).
        :param mon_config_set: True when monitoring was configured.
        :param inv_config_set: True when inventory was configured.
        :raises ServerMgrException: wrapping any error during init.
        """
        # Inventory node module initialization part
        try:
            module = None
            port = None
            module_list = None
            self._smgr_log.log(self._smgr_log.INFO, "Initializing sandesh")
            # NOTE(review): collectors is a config-file string turned into a
            # list via eval() — must never come from untrusted input.
            collectors_ip_list = eval(sm_args.collectors)
            if collectors_ip_list:
                self._smgr_log.log(self._smgr_log.INFO, "Collector IPs from config: " + str(collectors_ip_list))
                monitoring = True   # (unused)
                inventory = True    # (unused)

                # Demote each feature whose support package fails to import,
                # then pick the Module identity for whatever remains.
                if mon_config_set and inv_config_set:
                    try:
                        __import__('contrail_sm_monitoring.monitoring')
                    except ImportError:
                        mon_config_set = False
                        pass
                    try:
                        __import__('inventory_daemon.server_inventory')
                    except ImportError:
                        inv_config_set = False
                        pass
                    module = Module.INVENTORY_AGENT
                    port = int(sm_args.http_introspect_port)
                    module_list = ['inventory_daemon.server_inventory', 'contrail_sm_monitoring.monitoring']
                elif inv_config_set:
                    try:
                        __import__('inventory_daemon.server_inventory')
                    except ImportError:
                        inv_config_set = False
                        pass
                    module = Module.INVENTORY_AGENT
                    port = int(sm_args.http_introspect_port)
                    module_list = ['inventory_daemon.server_inventory']
                elif mon_config_set:
                    try:
                        __import__('contrail_sm_monitoring.monitoring')
                    except ImportError:
                        mon_config_set = False
                        pass
                    module = Module.IPMI_STATS_MGR
                    port = int(sm_args.http_introspect_port)
                    module_list = ['contrail_sm_monitoring.monitoring']
                if mon_config_set or inv_config_set:
                    module_name = ModuleNames[module]
                    node_type = Module2NodeType[module]
                    node_type_name = NodeTypeNames[node_type]
                    instance_id = INSTANCE_ID_DEFAULT
                    sandesh_global.init_generator(
                        module_name,
                        socket.gethostname(),
                        node_type_name,
                        instance_id,
                        collectors_ip_list,
                        module_name,
                        port,
                        module_list)
                    sandesh_global.set_logging_params(level=sm_args.sandesh_log_level)
                else:
                    self._smgr_log.log(self._smgr_log.INFO, "Sandesh wasn't initialized")
            else:
                # No collectors configured: nothing to initialize.
                pass
        except Exception as e:
            raise ServerMgrException("Error during Sandesh Init: " + str(e))

    def call_subprocess(self, cmd):
        """Run *cmd* through a shell and return its stripped stdout, or None.

        Polls the child every 0.3s; a command still running after ~2 seconds
        is treated as hung: its pipes are closed, it is terminated and None
        is returned.  Any exception also yields None.

        :param cmd: shell command string.
        :return: stripped combined stdout/stderr output, or None on
            hang/failure.
        """
        p = None
        try:
            times = datetime.now()
            # NOTE(review): shell=True — cmd must never contain untrusted input.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True)
            while p.poll() is None:
                time.sleep(0.3)
                now = datetime.now()
                diff = now - times
                # ~2 second budget: anything slower is considered hung.
                if diff.seconds > 2:
                    if p and p.poll() != 0:
                        if p.stdout:
                            p.stdout.close()
                        if p.stderr:
                            p.stderr.close()
                        if p.stdin:
                            p.stdin.close()
                        if p:
                            p.terminate()
                    # Reap any finished children without blocking.
                    os.waitpid(-1, os.WNOHANG)
                    self._smgr_log.log(self._smgr_log.INFO, "command:" + cmd + " --> hanged")
                    return None
            result = p.communicate()[0].strip()
            return result
        except Exception as e:
            # Best-effort cleanup of the child before reporting failure.
            if p and p.poll() != 0:
                if p.stdout:
                    p.stdout.close()
                if p.stderr:
                    p.stderr.close()
                if p.stdin:
                    p.stdin.close()
                if p:
                    p.terminate()
            self._smgr_log.log(self._smgr_log.INFO, "Exception in call_subprocess: " + str(e))
            return None

    def copy_ssh_keys_to_server(self, ip_address, server_id):
        """Append this server's SSH public key to the target host's
        authorized_keys, retrying forever until it succeeds or the server
        disappears from the DB.

        :param ip_address: address used for the SSH connection.
        :param server_id: server DB id; its ssh_public_key is copied.
        :return: True on success, False when the server no longer exists.
        """
        self._smgr_log.log(self._smgr_log.DEBUG, "COPY-KEY: Server: " + str(server_id))
        tries = 0
        # Give a freshly (re)imaged server time to come up before the
        # first attempt.
        gevent.sleep(60)
        success = False
        while True:
            # Initialize per-iteration state up front so the except block
            # never hits an unbound local when an early statement raises.
            ssh = None
            source_file = None
            bytes_sent = 0
            try:
                tries = tries + 1
                # avoid overflow
                if tries > 10000:
                    tries = 10000

                # We keep trying infinitely for this, so if server is deleted
                # before this, we need to know about it and stop.
                servers = self._serverDb.get_server({"id": server_id}, detail=True)
                if not servers:
                    self._smgr_log.log(self._smgr_log.DEBUG, "COPY-KEY: Server: " + str(server_id) + " NOT FOUND")
                    return False

                server = servers[0]
                ssh_key = server["ssh_public_key"]
                source_file = "/tmp/" + str(server_id) + ".pub"
                # Try to connect first to verify the target node is up.
                ssh = ServerMgrSSHClient(self._serverDb)
                ssh.connect(ip_address, server_id, option="password")

                subprocess.call(['mkdir', '-p', '/tmp'])
                with open(source_file, 'w+') as content_file:
                    content_file.write(str(ssh_key))

                key_dest_file = "/root/" + str(server_id) + ".pub"
                dest_file = "/root/.ssh/authorized_keys"
                ssh.exec_command("mkdir -p /root/.ssh/")
                ssh.exec_command("touch " + str(key_dest_file))
                ssh.exec_command("touch " + str(dest_file))

                if os.path.exists(source_file):
                    # Copy public key to target, then append it to
                    # authorized_keys only if not already present.
                    bytes_sent = ssh.copy(source_file, key_dest_file)
                    cmd = "grep -q -f " + str(key_dest_file) + " " + str(dest_file) + ";" \
                          + " RETVAL=$? ; " \
                          + " if [[ $RETVAL -eq 1 ]]; then \
                        echo '' >> " + str(dest_file) + "; \
                        cat " + str(key_dest_file) + " >> " + str(dest_file) + ";\
                        echo '' >> " + str(dest_file) + "; fi; \
                      rm -f " + str(key_dest_file) + "; "

                    self._smgr_log.log(self._smgr_log.DEBUG, cmd)
                    ssh.exec_command(cmd)
                ssh.close()
                if os.path.exists(source_file):
                    os.remove(source_file)
                msg = "COPY-KEYS: %s bytes copied on %s: " % (str(bytes_sent), str(server_id))
                self._smgr_log.log(self._smgr_log.DEBUG, msg)
                success = True
                return success

            except Exception as e:
                msg = "COPY-KEYS: Host : %s Try: %d: ERROR Copying Keys: %s" % (str(server_id), tries, str(e))
                self._smgr_log.log(self._smgr_log.ERROR, msg)
                # Clean up whatever this attempt managed to create.
                if source_file and os.path.exists(source_file):
                    os.remove(source_file)
                if ssh:
                    ssh.close()
                # Back off harder after many failures.
                if tries >= 20:
                    sleep_time = 120
                else:
                    sleep_time = 30
                gevent.sleep(sleep_time)

    def create_store_copy_ssh_keys(self, server_id, server_ip, generate_keys = True):
        """Optionally generate a 2048-bit RSA keypair for *server_id*,
        persist it in the server DB, then copy the public key to the host.
        """
        self._smgr_log.log(self._smgr_log.DEBUG, "Generating : " + str(server_id) + " " + str(generate_keys))
        if generate_keys == True:
            # Create the keypair using paramiko/Pycrypto.
            self._smgr_log.log(self._smgr_log.DEBUG, "Generating & Copying keys for server: " + str(server_id))
            rsa_key = paramiko.RSAKey.generate(bits=2048)
            private_key_buf = StringIO.StringIO()
            rsa_key.write_private_key(private_key_buf)
            # Persist both halves of the keypair on the server record.
            self._serverDb.modify_server({
                'id': server_id,
                'ssh_public_key': "ssh-rsa " + str(rsa_key.get_base64()),
                'ssh_private_key': private_key_buf.getvalue()})
        # Copy the (new or pre-existing) public key to the target server.
        self.copy_ssh_keys_to_server(server_ip, server_id)


    def populate_server_data_lists(self, servers, ipmi_list, hostname_list,
                                   server_ip_list, ipmi_username_list, ipmi_password_list, feature):
        """Fill the supplied parallel lists (in place) with per-server
        IPMI/host data for the given feature ("monitoring" or "inventory").

        Servers missing ipmi_address, id, ip_address or a resolvable
        password are skipped entirely.
        """
        smutil = ServerMgrUtil()
        for server in servers:
            server = dict(server)
            server_password= smutil.get_password(server,self._serverDb)
            if 'parameters' in server:
                # NOTE(review): parameters is eval()'d from a DB string; also,
                # servers whose enable_monitoring/enable_inventory flag is
                # "true" are *skipped* here — confirm the intended polarity.
                server_parameters = eval(server['parameters'])
                if feature == "monitoring" and "enable_monitoring" in server_parameters \
                        and server_parameters["enable_monitoring"] in ["true", "True"]:
                    continue
                elif feature == "inventory" and "enable_inventory" in server_parameters \
                        and server_parameters["enable_inventory"] in ["true", "True"]:
                    continue
            if 'ipmi_address' in server and server['ipmi_address'] \
                    and 'id' in server and server['id'] \
                    and 'ip_address' in server and server['ip_address'] \
                    and server_password:
                ipmi_list.append(server['ipmi_address'])
                hostname_list.append(server['id'])
                server_ip_list.append(server['ip_address'])
                if 'ipmi_username' in server and server['ipmi_username'] \
                        and 'ipmi_password' in server and server['ipmi_password']:
                    ipmi_username_list.append(server['ipmi_username'])
                    ipmi_password_list.append(server['ipmi_password'])
                else:
                    # Fall back to the server-manager wide IPMI credentials.
                    ipmi_username_list.append(self._default_ipmi_username)
                    ipmi_password_list.append(self._default_ipmi_password)

    # Packages and sends a REST API call to the ServerManager node
    def copy_ssh_keys_to_servers(self, ip, port, payload, sm_args=None):
        """Copy SSH keys to the server named by payload["id"]; on success,
        optionally start its puppet agent and trigger inventory collection
        via the local /run_inventory REST endpoint.

        :param ip: server-manager REST address for the inventory call.
        :param port: server-manager REST port.
        :param payload: dict with at least "id" (server id).
        :param sm_args: forwarded to gevent_puppet_agent_action.
        :return: /run_inventory response text, or None.
        """
        servers = self._serverDb.get_server({"id": str(payload["id"])}, detail=True)
        server = servers[0]
        success = self.copy_ssh_keys_to_server(str(server["ip_address"]), str(server["id"]))

        self._smgr_log.log(self._smgr_log.DEBUG, "COPY-KEY: Host: " + server["id"] + " /Status: " + str(success))

        if success and self._provision_immediately_after_reimage == True:
            gevent.spawn(self.gevent_puppet_agent_action, server, self._serverDb, sm_args, "start")
        if success and self.inventory_config_set:
            try:
                url = "http://%s:%s/run_inventory" % (ip, port)
                payload = json.dumps(payload)
                headers = {'content-type': 'application/json'}
                resp = requests.post(url, headers=headers, timeout=5, data=payload)
                return resp.text
            except Exception as e:
                # "error" is the literal log-level string (same value as self.ERROR).
                self._smgr_log.log("error", "Error running inventory on  " + str(payload) + " : " + str(e))
                return None

    def get_list_name(self, lst):
        sname = ""
        for sattr in lst.keys():
            if sattr[0] not in ['@']:
                sname = sattr
        return sname

    def parse_sandesh_xml(self, inp, uve_name):
        """Recursively convert a sandesh XML dict (xmltodict style) into
        plain Python values.

        Dispatches on inp['@type']: 'slist'/'list' become lists,
        'sandesh'/'struct' become dicts, and scalar leaves are cast from
        inp['#text'].  Returns None for unrecognized input or on any
        parse error.
        """
        try:
            sname = ""
            if '@type' not in inp:
                return None
            if inp['@type'] == 'slist':
                # List of UVE sandesh messages under the "<uve_name>Uve" key.
                sname = str(uve_name) + "Uve"
                ret = []
                items = inp[sname]
                if not isinstance(items, list):
                    items = [items]
                lst = []
                for elem in items:
                    if not isinstance(elem, dict):
                        lst.append(elem)
                    else:
                        lst_elem = {}
                        for k, v in elem.items():
                            lst_elem[k] = self.parse_sandesh_xml(v, uve_name)
                        lst.append(lst_elem)
                # ret[sname] = lst
                ret = lst
                return ret
            elif inp['@type'] == 'sandesh':
                # Sandesh message payload lives under "data".
                sname = "data"
                ret = {}
                for k, v in inp[sname].items():
                    ret[k] = self.parse_sandesh_xml(v, uve_name)
                return ret
            elif inp['@type'] == 'struct':
                # Struct payload lives under its (only) non-'@' key.
                sname = self.get_list_name(inp)
                if (sname == ""):
                    self._smgr_log.log("error", "Error parsing sandesh xml dict : " + str('Struct Parse Error'))
                    return None
                ret = {}
                for k, v in inp[sname].items():
                    ret[k] = self.parse_sandesh_xml(v, uve_name)
                return ret
            elif (inp['@type'] == 'list'):
                sname = self.get_list_name(inp['list'])
                ret = []
                if (sname == ""):
                    # Empty list element — nothing to parse.
                    return ret
                items = inp['list'][sname]
                if not isinstance(items, list):
                    items = [items]
                lst = []
                for elem in items:
                    if not isinstance(elem, dict):
                        lst.append(elem)
                    else:
                        lst_elem = {}
                        for k, v in elem.items():
                            lst_elem[k] = self.parse_sandesh_xml(v, uve_name)
                        lst.append(lst_elem)
                # ret[sname] = lst
                ret = lst
                return ret
            else:
                # Scalar leaf: cast '#text' according to the declared type.
                if '#text' not in inp:
                    return None
                if inp['@type'] in ['i16', 'i32', 'i64', 'byte',
                                    'u64', 'u32', 'u16']:
                    return int(inp['#text'])
                elif inp['@type'] in ['float', 'double']:
                    return float(inp['#text'])
                elif inp['@type'] in ['bool']:
                    if inp['#text'] in ["false"]:
                        return False
                    elif inp['#text'] in ["true"]:
                        return True
                    else:
                        return inp['#text']
                else:
                    return inp['#text']
        except Exception as e:
            self._smgr_log.log("error", "Error parsing sandesh xml dict : " + str(e))
            return None

    def get_sandesh_url(self, ip, introspect_port, uve_name, server_id=None):
        if server_id:
            url = "http://%s:%s/Snh_SandeshUVECacheReq?tname=%s&key=%s" % \
                  (str(ip), str(introspect_port), uve_name, server_id)
        else:
            url = "http://%s:%s/Snh_SandeshUVECacheReq?x=%s" % \
                  (str(ip), str(introspect_port), uve_name)
        return url

    def initialize_features(self, sm_args, serverdb):
        """Wire up monitoring and inventory after configuration parsing.

        Initializes sandesh, hands the server DB and default IPMI
        credentials to whichever feature objects were configured, spawns
        the monitoring greenlet and kicks off an initial inventory pull.
        """
        self.sandesh_init(sm_args, self.monitoring_config_set, self.inventory_config_set)
        self.set_serverdb(serverdb)
        if self.monitoring_config_set:
            self.server_monitoring_obj.set_serverdb(serverdb)
            self.server_monitoring_obj.set_ipmi_defaults(sm_args.ipmi_username, sm_args.ipmi_password)
            # Monitoring runs as a long-lived greenlet.
            self.monitoring_gevent_thread_obj = gevent.spawn(self.server_monitoring_obj.run)
        else:
            self._smgr_log.log(self._smgr_log.ERROR, "Monitoring configuration not set. "
                                                     "You will be unable to get Monitor information of servers.")

        if self.inventory_config_set:
            self.server_inventory_obj.set_serverdb(serverdb)
            self.server_inventory_obj.set_ipmi_defaults(sm_args.ipmi_username, sm_args.ipmi_password)
            self.server_inventory_obj.add_inventory()
        else:
            self._smgr_log.log(self._smgr_log.ERROR, "Inventory configuration not set. "
                                                     "You will be unable to get Inventory information from servers.")

    def setup_keys(self, server_db=None, new_servers=None):
        if server_db is not None:
            servers = self._serverDb.get_server(None, detail=True)
        elif new_servers is not None:
            servers = new_servers

        for server in servers:
            ## If NO ssh keys are added then create new keys for all the servers, store them in DB, copy them to the target
            ## If keys are the deleted from the DB then create and copy them to the target
            if ('ssh_private_key' not in server or server['ssh_private_key'] is None or \
                  server['ssh_private_key'] == "")  \
                 and 'id' in server and 'ip_address' in server and server['id']:
                self._smgr_log.log(self._smgr_log.DEBUG, "SETUP-KEYS: 2 : " + str(server["id"]))
                gevent.spawn(self.create_store_copy_ssh_keys, server['id'], server['ip_address'])
            ## 
            elif 'ssh_private_key' in server and 'ssh_public_key' in server  \
                 and 'id' in server and 'ip_address' in server and server['id']:

                self._smgr_log.log(self._smgr_log.DEBUG, "SETUP-KEYS: 3 : " + str(server["id"]))
                gevent.spawn(self.create_store_copy_ssh_keys, 
                  server['id'], server['ip_address'], generate_keys = False)

            ## TODO: if keys are configured already or we are starting up, then
            ## try to add again.
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "SETUP-KEYS: ALREADY configured for Server: " + str(server["id"]))
                #self._smgr_log.log(self._smgr_log.DEBUG, "SETUP-KEYS: " +
                   #str(server["id"]) + "PUB: " + server['ssh_public_key'] +
#"PRIVATE : " + server['ssh_private_key'] )

        if self.inventory_config_set and new_servers:
                self.server_inventory_obj.handle_inventory_trigger("add", servers)


    def create_server_dict(self, servers):
        return_dict = dict()
        for server in servers:
            server = dict(server)
            if 'ipmi_username' not in server or not server['ipmi_username'] \
                    or 'ipmi_password' not in server or not server['ipmi_password']:
                server['ipmi_username'] = self._default_ipmi_username
                server['ipmi_password'] = self._default_ipmi_password
            return_dict[str(server['id'])] = server
        return return_dict

    def get_mon_conf_details(self):
        """Placeholder REST handler used when monitoring is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Monitoring Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def get_inv_conf_details(self):
        """Placeholder REST handler used when inventory is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Inventory Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def get_inventory_info(self):
        """Placeholder REST handler used when inventory is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Inventory Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def get_monitoring_info(self):
        """Placeholder REST handler used when monitoring is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Monitoring Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def get_monitoring_info_summary(self):
        """Placeholder REST handler used when monitoring is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Monitoring Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def run_inventory(self):
        """Placeholder REST handler used when inventory is not configured:
        always aborts the request with HTTP 404."""
        resp = self.return_error("Inventory Parameters haven't been configured.\n"
                                 "Reset the configuration correctly and restart Server Manager.\n")
        abort(404, resp)

    def handle_inventory_trigger(self, action=None, servers=None):
        self._smgr_log.log(self._smgr_log.INFO, "Inventory of added servers will not be read.")
        return "Inventory Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def add_inventory(self):
        """Placeholder: adding inventory is impossible without inventory config;
        log an error and return an explanatory message."""
        self._smgr_log.log(self._smgr_log.ERROR,
                           "Inventory Parameters haven't been configured.\n"
                           "Reset the configuration correctly to add inventory.\n")
        return ("Inventory Parameters haven't been configured.\n"
                "Reset the configuration correctly and restart Server Manager.\n")

    def cleanup(self, obj=None):
        """Placeholder: nothing to clean up while monitoring is unconfigured."""
        self._smgr_log.log(self._smgr_log.INFO,
                           "Monitoring Parameters haven't been configured.\n"
                           "No cleanup needed.\n")
        return ("Inventory Parameters haven't been configured.\n"
                "Reset the configuration correctly and restart Server Manager.\n")

    def return_error(self, msg, ret_code=ERR_GENERAL_ERROR, data=None):
        """Log *msg* at ERROR level and build the standard JSON error envelope
        (return_code / return_msg / return_data)."""
        self._smgr_log.log(self._smgr_log.ERROR, msg)
        payload = {
            'return_code': ret_code,
            'return_msg': msg,
            'return_data': data,
        }
        return json.dumps(payload, sort_keys=True, indent=4)

    # A place-holder run function that the Server Monitor defaults to in the absence of a configured
    # monitoring API layer to use.
    def run(self):
        """No-op monitor loop: just record that monitoring is disabled."""
        # BUG FIX: corrected "Environement" typo in the logged message.
        self._smgr_log.log(self._smgr_log.INFO,
                           "No monitoring API has been configured. Server Environment Info will not be monitored.")

   
    #Function to start/stop the puppet agent in the target servers
    def gevent_puppet_agent_action(self, server, serverDb, sm_args, action, access_method="key"):
        """Start or stop the puppet agent on `server` over SSH.

        Retries up to sm_args.puppet_agent_retry_count times, sleeping
        sm_args.puppet_agent_retry_poll_interval_seconds between attempts.
        action == "start" enables and restarts the agent; any other value
        disables and stops it.  access_method is passed to the SSH client.
        """
        success = False
        tries = 0
        # Give the target server time to settle before the first SSH attempt.
        gevent.sleep(30)
        self._smgr_log.log("debug", "Going to %s the puppet agent on the server %s"% (action, str(server['id'])))
        while not success and tries < int(sm_args.puppet_agent_retry_count):
            # BUG FIX: bind sshclient before the try block so the except
            # handler cannot hit a NameError when ServerMgrSSHClient() itself
            # raises on the first iteration.
            sshclient = None
            try:
                tries += 1
                sshclient = ServerMgrSSHClient(serverdb=serverDb)
                sshclient.connect(str(server['ip_address']), str(server['id']), access_method)
                # Detect the distro to pick the right service-management commands.
                op = sshclient.exec_command('python -c "import platform; print platform.linux_distribution()"')
                self._smgr_log.log("debug", "OP is %s" %op)
                os_type = 'centos' if 'centos' in op.lower() else 'ubuntu'
                if os_type == 'centos':
                    enable_puppet_svc_cmd = "chkconfig puppet on"
                    disable_puppet_svc_cmd = "chkconfig puppet off"
                else:
                    enable_puppet_svc_cmd = " sed -i 's/START=.*$/START=yes/' /etc/default/puppet && "\
                                            "/usr/bin/puppet resource service puppet ensure=running enable=true "
                    disable_puppet_svc_cmd = " sed -i 's/START=.*$/START=no/' /etc/default/puppet && " \
                                             "/usr/bin/puppet resource service puppet ensure=stopped enable=false "

                self._smgr_log.log("debug", "PUPPET START Command is %s" %enable_puppet_svc_cmd)
                if action == "start":
                    output = sshclient.exec_command(enable_puppet_svc_cmd)
                    self._smgr_log.log("debug", "OUTPUT1 is %s" %output)
                    output = sshclient.exec_command("puppet agent --enable")
                    self._smgr_log.log("debug", "OUTPUT2 is %s" %output)
                    output = sshclient.exec_command("service puppet restart")
                    self._smgr_log.log("debug", "OUTPUT3 is %s" %output)

                    self._smgr_log.log("debug", "Successfully started the puppet agent on the server " + str(server['id']))
                    self._provision_immediately_after_reimage = False
                else:
                    output = sshclient.exec_command(disable_puppet_svc_cmd)
                    output = sshclient.exec_command("puppet agent --disable")
                    output = sshclient.exec_command("service puppet stop")
                    self._smgr_log.log("debug", "Successfully stopped the puppet agent on the server " + str(server['id']))
                success = True
                sshclient.close()
            except Exception as e:
                if action == "start":
                    # If the server is mid-reimage, remember to kick off the
                    # provision as soon as the reimage completes.
                    servers = self._serverDb.get_server({"id": server['id']}, detail=True)
                    server_state = servers[0]['status']
                    if server_state == "reimage_started" or server_state == "restart_issued" \
                       or server_state == "reimage_completed" or server_state == "provision_issued":
                        self._provision_immediately_after_reimage = True
                if sshclient:
                    sshclient.close()
                self._smgr_log.log(self._smgr_log.ERROR, "Gevent SSH Connect Exception for server id: " + str(server['id']) + " Error : " + str(e))
            self._smgr_log.log(self._smgr_log.DEBUG, "Still trying to %s the puppet agent in the server %s, try %s" %(action, str(server["id"]), str(tries)))
            gevent.sleep(int(sm_args.puppet_agent_retry_poll_interval_seconds))
        if tries >= int(sm_args.puppet_agent_retry_count) and success is False:
            if action == "start":
                self._smgr_log.log(self._smgr_log.ERROR, "Starting the puppet agent failed on  " + str(server["id"]))
            else:
                self._smgr_log.log(self._smgr_log.ERROR, "Stopping the puppet agent failed on  " + str(server["id"]))
Example #45
0
 def __init__(self):
     """Create the Docker API client and the server-manager logger."""
     # Client() presumably talks to the local Docker daemon — confirm against
     # the docker-py import at the top of the file.
     self._docker_client = Client()
     self._smgr_log      = ServerMgrlogger()
Example #46
0
class SMAnsibleServer():
    '''
    Use bottle to provide REST interface for the server manager.
    '''
    def __init__(self, args_str=None):
        try:
            self._smgr_log = ServerMgrlogger()
        except:
            # NOTE(review): if logger creation fails, the log call below will
            # still raise AttributeError; kept for backward compatibility.
            print "Error Creating logger object"

        self._smgr_log.log(self._smgr_log.INFO, "Starting SM Ansible Server")
        # Per-host results from the most recent playbook run.
        self.host_run_results = defaultdict(dict)
        if not args_str:
            args_str = sys.argv[1:]
        self._parse_args(args_str)
        # Finished playbook threads are handed to the Joiner thread via joinq.
        self.joinq = Queue.Queue()
        self.joiner = Joiner(self.joinq)
        self.joiner.start()
        self._smgr_log.log(self._smgr_log.INFO, 'Initializing Bottle App')
        self.app = bottle.app()
        bottle.route('/start_provision', 'POST', self.start_provision)
        bottle.route('/run_playbook', 'POST', self.start_playbook)

    def _parse_args(self, args_str):
        '''
        Parse config-file and command-line options.
        Eg. python sm_ansible_server.py --config_file serverMgr.cfg
                                         --listen_port 8082
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument(
            "-c",
            "--config_file",
            help="Specify config file with the parameter values.",
            metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str)

        # Built-in defaults; overridden first by the config file, then by
        # explicit command-line options.
        serverCfg = {
            'ansible_srvr_ip': _WEB_HOST,
            'ansible_srvr_port': _ANSIBLE_SRVR_PORT,
            'docker_insecure_registries': _ANSIBLE_REGISTRY,
            'docker_registry': _ANSIBLE_REGISTRY,
            'docker_registry_insecure': _ANSIBLE_REGISTRY_INSECURE,
            'ansible_playbook': _ANSIBLE_PLAYBOOK
        }

        if args.config_file:
            config_file = args.config_file
        else:
            config_file = _DEF_ANSIBLE_SRVR_CFG_FILE
        config = ConfigParser.SafeConfigParser()
        # BUG FIX: read the resolved config_file (which falls back to the
        # default path) instead of args.config_file, which is None when -c
        # is not supplied — the default config file was never being read.
        config.read([config_file])
        self._smgr_config = config
        try:
            ansible_cfg = dict(config.items("ANSIBLE-SERVER"))
            for key in ansible_cfg.keys():
                serverCfg[key] = ansible_cfg[key]
        except ConfigParser.NoSectionError:
            msg = "Server Manager doesn't have a configuration set."
            self._smgr_log.log(self._smgr_log.ERROR, msg)

        # (Previously this message was logged twice on success; log it once.)
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "Arguments read form config file %s" % serverCfg)
        # Override with CLI options
        # Don't surpress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        parser.set_defaults(**serverCfg)

        parser.add_argument(
            "-i",
            "--ansible_srvr_ip",
            help="IP address to provide service on, default %s" % (_WEB_HOST))
        parser.add_argument("-p",
                            "--ansible_srvr_port",
                            help="Port to provide service on, default %s" %
                            (_ANSIBLE_SRVR_PORT))
        self._args = parser.parse_args(remaining_argv)
        self._args.config_file = args.config_file

    # end _parse_args

    def start_provision(self):
        """REST handler: reset per-host results for a new provisioning run."""
        self.host_run_results = defaultdict(dict)
        print "starting provision"
        return json.dumps({'status': 'Provision Started'})

    def start_playbook(self):
        """REST handler: launch a playbook thread for the posted entity and
        queue it for joining."""
        print "starting playbook"
        entity = bottle.request.json
        pb = ContrailAnsiblePlayBook(entity, self._args)
        pb.start()
        self.joinq.put(pb)
        # Return success. Actual status will be supplied when the pb thread
        # completes and the next status query is made
        bottle.response.headers['Content-Type'] = 'application/json'
        return json.dumps({'status': 'Provision in Progress'})
Example #47
0
 def __init__(self, q):
     """Initialize the Joiner with the queue it will consume.

     `q` presumably carries finished playbook threads to be joined —
     confirm against the caller that does joinq.put(pb).
     """
     super(Joiner, self).__init__()
     self._smgr_log = ServerMgrlogger()
     self.queue = q
class ServerMgrStatusThread(threading.Thread):

    # Shared handles; populated in __init__ and run().
    _smgr_log = None
    _status_serverDb = None
    _base_obj = None
    _smgr_puppet = None
    _smgr_main = None
    ''' Class to run function that keeps validating the cobbler token
        periodically (every 30 minutes) on a new thread. '''
    _pipe_start_app = None

    def __init__(self, timer, server, status_thread_config):
        """Store the status-thread configuration.

        `timer` and `server` are accepted but not used here.
        """
        threading.Thread.__init__(self)
        self._status_thread_config = status_thread_config
        self._smgr_puppet = status_thread_config['smgr_puppet']
        self._smgr_main = status_thread_config['smgr_main']

    def run(self):
        """Thread body: open the server DB and serve /server_status via bottle."""
        #create the logger
        try:
            self._smgr_log = ServerMgrlogger()
        except:
            print "Error Creating logger object"

        # Connect to the cluster-servers database
        try:
            self._status_serverDb = db(
                self._smgr_main._args.server_manager_base_dir +
                self._smgr_main._args.database_name)
        except:
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "Error Connecting to Server Database %s" %
                (self._smgr_main._args.server_manager_base_dir +
                 self._smgr_main._args.database_name))
            exit()

        #set the status related handlers
        status_bottle_app = Bottle()
        status_bottle_app.route('/server_status', 'POST',
                                self.put_server_status)
        status_bottle_app.route('/server_status', 'PUT',
                                self.put_server_status)
        self._base_obj = self._status_thread_config['base_obj']

        try:
            # Blocks for the lifetime of this thread, serving status updates.
            bottle.run(status_bottle_app,
                       host=self._status_thread_config['listen_ip'],
                       port=self._status_thread_config['listen_port'])
        except Exception as e:
            # cleanup gracefully
            exit()

    def put_server_status(self):
        """Bottle handler (POST/PUT /server_status): persist a server's new
        state and trigger follow-up work (ssh-key copy after reimage,
        provision sequencing, puppet-agent stop, status e-mail)."""
        print "put-status"
        #query_args = parse_qs(urlparse(bottle.request.url).query,
        #keep_blank_values=True)
        #match_key, match_value = query_args.popitem()
        server_id = request.query['server_id']
        server_state = request.query['state']
        body = request.body.read()
        server_data = {}
        server_data['id'] = server_id
        # "post_provision_completed" is stored as plain "provision_completed".
        if server_state == "post_provision_completed":
            server_data['status'] = "provision_completed"
        else:
            server_data['status'] = server_state
        try:
            time_str = strftime("%Y_%m_%d__%H_%M_%S", localtime())
            message = server_id + ' ' + server_state + time_str
            self._smgr_log.log(self._smgr_log.DEBUG,
                               "Server status Data %s" % server_data)
            servers = self._status_serverDb.modify_server(server_data)
            if server_state == "reimage_completed":
                # After a reimage, re-copy SSH keys to the server in the background.
                payload = dict()
                payload["id"] = server_id
                self._smgr_log.log(
                    self._smgr_log.DEBUG,
                    "Spawning Gevent for Id: %s" % payload["id"])
                if self._base_obj:
                    gevent.spawn(self._base_obj.copy_ssh_keys_to_servers,
                                 self._status_thread_config["listen_ip"],
                                 self._status_thread_config["listen_port"],
                                 payload, self._smgr_main._args)
            if server_state == "provision_started":
                self._smgr_main.update_provision_started_flag(
                    server_id, server_state)
            self._smgr_main.update_provision_role_sequence(
                server_id, server_state)
            if server_state == "post_provision_completed":
                server_state = "provision_completed"

            if server_state == "provision_completed":
                # Move the node to a no-op puppet environment and stop its
                # puppet agent in the background.
                domain = self._status_serverDb.get_server_domain(server_id)
                environment_name = 'TurningOffPuppetAgent__' + time_str
                if domain:
                    server_fqdn = server_id + "." + domain
                    self._smgr_puppet.update_node_map_file(
                        server_fqdn, environment_name)
                #Stop the puppet agent in the targer server
                servers = self._status_serverDb.get_server({"id": server_id},
                                                           detail=True)
                server = servers[0]
                gevent.spawn(self._base_obj.gevent_puppet_agent_action, server,
                             self._status_serverDb, self._smgr_main._args,
                             "stop")
            if server_state in email_events:
                self.send_status_mail(server_id, message, message)
        except Exception as e:
            #            self.log_trace()
            self._smgr_log.log(self._smgr_log.ERROR,
                               "Error adding to db %s" % repr(e))
            abort(404, repr(e))

    def get_email_list(self, email):
        """Parse a configured e-mail value into a list of addresses.

        Accepts a python-list-style string ("['a@x', 'b@y']") or a
        comma-separated string; returns [] for an empty value.
        """
        email_to = []
        if not email:
            return email_to
        if email.startswith('[') and email.endswith(']'):
            # NOTE(review): eval of a DB/config-sourced string — consider
            # ast.literal_eval instead.
            email_to = eval(email)
        else:
            email_to = [s.strip() for s in email.split(',')]
        return email_to

    # end get_email_list

    def send_status_mail(self, server_id, event, message):
        """E-mail `message` for `server_id` to the server's configured address,
        falling back to its cluster's address.

        Returns -1 when the server is unknown, 0 when no address is configured.
        """
        # Get server entry and find configured e-mail
        servers = self._status_serverDb.get_server({"id": server_id},
                                                   detail=True)
        if not servers:
            msg = "No server found with server_id " + server_id
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return -1
        server = servers[0]
        email_to = []
        if 'email' in server and server['email']:
            email_to = self.get_email_list(server['email'])
        else:
            # Get cluster entry to find configured e-mail
            if 'cluster_id' in server and server['cluster_id']:
                cluster_id = server['cluster_id']
                cluster = self._status_serverDb.get_cluster({"id": cluster_id},
                                                            detail=True)
                if cluster and 'email' in cluster[0] and cluster[0]['email']:
                    email_to = self.get_email_list(cluster[0]['email'])
                else:
                    self._smgr_log.log(
                        self._smgr_log.DEBUG,
                        "cluster or server doesn't configured for email")
                    return 0
            else:
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "server not associated with a cluster")
                return 0
        send_mail(event, message, '', email_to,
                  self._status_thread_config['listen_ip'], '25')
        msg = "An email is sent to " + ','.join(
            email_to) + " with content " + message
        self._smgr_log.log(self._smgr_log.DEBUG, msg)
class ServerMgrMonBasePlugin():
    # Default/placeholder state shared by the monitoring & inventory base plugin.
    val = 1
    freq = 300
    _config_set = False
    _serverDb = None
    _monitoring_log = None
    _collectors_ip = None
    _discovery_server = None
    _discovery_port = None
    # Fallback IPMI credentials applied to servers without their own.
    _default_ipmi_username = None
    _default_ipmi_password = None
    # Log-level names passed to the logger.
    DEBUG = "debug"
    INFO = "info"
    WARN = "warn"
    ERROR = "error"
    CRITICAL = "critical"

    def __init__(self):
        """Set up default monitoring/inventory configuration and runtime state."""
        self.MonitoringCfg = {
            'monitoring_frequency': _DEF_MON_FREQ,
            'monitoring_plugin': _DEF_MONITORING_PLUGIN,
        }
        self.InventoryCfg = {
            'inventory_plugin': _DEF_INVENTORY_PLUGIN,
        }
        self._smgr_log = ServerMgrlogger()
        # Populated later by parse_monitoring_args / parse_inventory_args.
        self.monitoring_args = None
        self.inventory_args = None
        self.monitoring_config_set = False
        self.inventory_config_set = False
        self.server_monitoring_obj = None
        self.server_inventory_obj = None
        self.monitoring_gevent_thread_obj = None

    def set_serverdb(self, server_db):
        self._serverDb = server_db

    def set_ipmi_defaults(self, ipmi_username, ipmi_password):
        self._default_ipmi_username = ipmi_username
        self._default_ipmi_password = ipmi_password

    def parse_args(self, args_str, section):
        """Parse arguments for the given config section ("MONITORING" or
        "INVENTORY"), merging config-file values over the section defaults."""
        # First pass only extracts an optional -c/--config_file argument.
        pre_parser = argparse.ArgumentParser(add_help=False)
        pre_parser.add_argument(
            "-c", "--config_file",
            help="Specify config file with the parameter values.",
            metavar="FILE")
        known_args, leftover_argv = pre_parser.parse_known_args(args_str)

        config_file = known_args.config_file or _DEF_SMGR_CFG_FILE
        config = ConfigParser.SafeConfigParser()
        config.read([config_file])
        parser = argparse.ArgumentParser(
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        if section == "MONITORING":
            monitoring_items = dict(config.items("MONITORING"))
            for key in monitoring_items.keys():
                if key in self.MonitoringCfg.keys():
                    self.MonitoringCfg[key] = monitoring_items[key]
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Configuration set for invalid parameter: %s" % key)
            self._smgr_log.log(self._smgr_log.DEBUG,
                               "Arguments read from monitoring config file %s" % self.MonitoringCfg)
            parser.set_defaults(**self.MonitoringCfg)
        elif section == "INVENTORY":
            inventory_items = dict(config.items("INVENTORY"))
            for key in inventory_items.keys():
                if key in self.InventoryCfg.keys():
                    self.InventoryCfg[key] = inventory_items[key]
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Configuration set for invalid parameter: %s" % key)
            self._smgr_log.log(self._smgr_log.DEBUG,
                               "Arguments read from inventory config file %s" % self.InventoryCfg)
            parser.set_defaults(**self.InventoryCfg)
        return parser.parse_args(leftover_argv)

    def parse_monitoring_args(self, args_str, args, sm_args, _rev_tags_dict, base_obj):
        """Read the MONITORING config section and instantiate the configured
        monitoring plugin.

        Falls back to `base_obj` (the no-op base plugin) when monitoring is
        unconfigured.  Returns the monitoring object; re-raises ImportError
        when the configured plugin module cannot be imported.
        """
        config = ConfigParser.SafeConfigParser()
        config.read([args.config_file])
        try:
            if dict(config.items("MONITORING")).keys():
                # Handle parsing for monitoring
                monitoring_args = self.parse_args(args_str, "MONITORING")
                if monitoring_args:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Monitoring arguments read from config.")
                    self.monitoring_args = monitoring_args
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
        except ConfigParser.NoSectionError:
            self._smgr_log.log(self._smgr_log.DEBUG, "No monitoring configuration set.")
        if self.monitoring_args:
            try:
                if self.monitoring_args.monitoring_plugin:
                    # Plugin is configured as "module.ClassName".
                    module_components = str(self.monitoring_args.monitoring_plugin).split('.')
                    monitoring_module = __import__(str(module_components[0]))
                    monitoring_class = getattr(monitoring_module, module_components[1])
                    if sm_args.collectors:
                        self.server_monitoring_obj = monitoring_class(1, self.monitoring_args.monitoring_frequency,
                                                                      sm_args.listen_ip_addr,
                                                                      sm_args.listen_port, sm_args.collectors,
                                                                      sm_args.http_introspect_port, _rev_tags_dict)
                        self.monitoring_config_set = True
                else:
                    self._smgr_log.log(self._smgr_log.ERROR,
                                       "Analytics IP and Monitoring API misconfigured, monitoring aborted")
                    self.server_monitoring_obj = base_obj
            except ImportError as ie:
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Configured modules are missing. Server Manager will quit now.")
                self._smgr_log.log(self._smgr_log.ERROR, "Error: " + str(ie))
                # BUG FIX: re-raise the original ImportError (with its message
                # and traceback) instead of raising a fresh, empty ImportError.
                raise
        else:
            self.server_monitoring_obj = base_obj
        return self.server_monitoring_obj

    def parse_inventory_args(self, args_str, args, sm_args, _rev_tags_dict, base_obj):
        """Read the INVENTORY config section and instantiate the configured
        inventory plugin.

        Falls back to `base_obj` (the no-op base plugin) when inventory is
        unconfigured.  Returns the inventory object; re-raises ImportError
        when the configured plugin module cannot be imported.
        """
        config = ConfigParser.SafeConfigParser()
        config.read([args.config_file])
        try:
            if dict(config.items("INVENTORY")).keys():
                # Handle parsing for inventory
                inventory_args = self.parse_args(args_str, "INVENTORY")
                if inventory_args:
                    self._smgr_log.log(self._smgr_log.DEBUG, "Inventory arguments read from config.")
                    self.inventory_args = inventory_args
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")
        except ConfigParser.NoSectionError:
            self._smgr_log.log(self._smgr_log.DEBUG, "No inventory configuration set.")

        if self.inventory_args:
            try:
                if self.inventory_args.inventory_plugin:
                    # Plugin is configured as "module.ClassName".
                    module_components = str(self.inventory_args.inventory_plugin).split('.')
                    inventory_module = __import__(str(module_components[0]))
                    inventory_class = getattr(inventory_module, module_components[1])
                    if sm_args.collectors:
                        self.server_inventory_obj = inventory_class(sm_args.listen_ip_addr, sm_args.listen_port,
                                                                    sm_args.http_introspect_port, _rev_tags_dict)
                        self.inventory_config_set = True
                else:
                    # BUG FIX: corrected "Iventory" typo in the log message.
                    self._smgr_log.log(self._smgr_log.ERROR,
                                       "Inventory API misconfigured, inventory aborted")
                    self.server_inventory_obj = base_obj
            except ImportError:
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Configured modules are missing. Server Manager will quit now.")
                # BUG FIX: re-raise the original ImportError (with its message
                # and traceback) instead of raising a fresh, empty ImportError.
                raise
        else:
            self.server_inventory_obj = base_obj
        return self.server_inventory_obj

    def validate_rest_api_args(self, request, rev_tags_dict):
        ret_data = {"msg": None, "type_msg": None}
        match_keys = list(['id', 'cluster_id', 'tag', 'where'])
        print_match_keys = list(['server_id', 'cluster_id', 'tag', 'where'])
        self._smgr_log.log(self._smgr_log.DEBUG,
                           "Validating bottle arguments.")
        ret_data['status'] = 1
        query_args = parse_qs(urlparse(request.url).query,
                              keep_blank_values=True)
        if len(query_args) == 0:
            ret_data["type"] = ["all"]
            ret_data["status"] = True
            ret_data["match_key"] = None
            ret_data["match_value"] = None
        elif len(query_args) >= 1:
            select_value_list = None
            if "select" in query_args:
                select_value_list = query_args.get("select", None)[0]
                select_value_list = str(select_value_list).split(',')
                self._smgr_log.log(self._smgr_log.DEBUG,
                                   "Select value list=" + str(select_value_list))
                query_args.pop("select")
            if not select_value_list:
                ret_data["type"] = ["all"]
            else:
                ret_data["type"] = select_value_list
            match_key = match_value = None
            if query_args:
                match_key, match_value = query_args.popitem()
            if match_key and match_key not in match_keys:
                ret_data["status"] = False
                ret_data["msg"] = "Wrong Match Key Specified. " + "Choose one of the following keys: " + \
                                  str(['--{0}'.format(key) for key in print_match_keys]).strip('[]')
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "Wrong Match Key")
            elif match_key and (match_value is None or match_value[0] == ''):
                ret_data["status"] = False
                self._smgr_log.log(self._smgr_log.ERROR,
                                   "No macth value given")
                ret_data["msg"] = "No Match Value Specified.\n"
            else:
                ret_data["status"] = True
                if match_key:
                    ret_data["match_key"] = str(match_key)
                else:
                    ret_data["match_key"] = None
                if match_value:
                    ret_data["match_value"] = str(match_value[0])
                else:
                    ret_data["match_value"] = None
        return ret_data

    def process_server_tags(self, rev_tags_dict, match_value):
        if not match_value:
            return {}
        match_dict = {}
        tag_list = match_value.split(',')
        for x in tag_list:
            tag = x.strip().split('=')
            if tag[0] in rev_tags_dict:
                match_dict[rev_tags_dict[tag[0]]] = tag[1]
            else:
                self._smgr_log.log(self._smgr_log.ERROR, "Wrong tag specified in rest api request.")
                return {}
        return match_dict

    def sandesh_init(self, sm_args, mon_config_set, inv_config_set):
        """Initialize the sandesh generator for whichever of the monitoring /
        inventory modules are configured AND importable.

        Picks the sandesh Module id, introspect port and module list, then
        calls sandesh_global.init_generator().  Raises ServerMgrException on
        any failure.
        """
        # Inventory node module initialization part
        try:
            module = None
            port = None
            module_list = None
            self._smgr_log.log(self._smgr_log.INFO, "Initializing sandesh")
            # NOTE(review): eval of a config string — assumed to hold a python
            # list literal of collector IPs; consider ast.literal_eval.
            collectors_ip_list = eval(sm_args.collectors)
            if collectors_ip_list:
                self._smgr_log.log(self._smgr_log.INFO, "Collector IPs from config: " + str(collectors_ip_list))
                # (unused locals, kept as-is)
                monitoring = True
                inventory = True

                if mon_config_set and inv_config_set:
                    # Both configured: probe both modules, dropping the flag
                    # for any that fails to import.
                    try:
                        __import__('contrail_sm_monitoring.monitoring')
                    except ImportError:
                        mon_config_set = False
                        pass
                    try:
                        __import__('inventory_daemon.server_inventory')
                    except ImportError:
                        inv_config_set = False
                        pass
                    module = Module.INVENTORY_AGENT
                    port = int(sm_args.http_introspect_port)
                    module_list = ['inventory_daemon.server_inventory', 'contrail_sm_monitoring.monitoring']
                elif inv_config_set:
                    try:
                        __import__('inventory_daemon.server_inventory')
                    except ImportError:
                        inv_config_set = False
                        pass
                    module = Module.INVENTORY_AGENT
                    port = int(sm_args.http_introspect_port)
                    module_list = ['inventory_daemon.server_inventory']
                elif mon_config_set:
                    try:
                        __import__('contrail_sm_monitoring.monitoring')
                    except ImportError:
                        mon_config_set = False
                        pass
                    module = Module.IPMI_STATS_MGR
                    port = int(sm_args.http_introspect_port)
                    module_list = ['contrail_sm_monitoring.monitoring']
                if mon_config_set or inv_config_set:
                    module_name = ModuleNames[module]
                    node_type = Module2NodeType[module]
                    node_type_name = NodeTypeNames[node_type]
                    instance_id = INSTANCE_ID_DEFAULT
                    sandesh_global.init_generator(
                        module_name,
                        socket.gethostname(),
                        node_type_name,
                        instance_id,
                        collectors_ip_list,
                        module_name,
                        port,
                        module_list)
                    sandesh_global.set_logging_params(level=sm_args.sandesh_log_level)
                else:
                    self._smgr_log.log(self._smgr_log.INFO, "Sandesh wasn't initialized")
            else:
                pass
        except Exception as e:
            raise ServerMgrException("Error during Sandesh Init: " + str(e))

    def call_subprocess(self, cmd):
        """Run *cmd* through a shell and return its stripped stdout, or None.

        The child is polled every 0.3s; once it has been running for more
        than ~2 seconds it is treated as hung: its pipes are closed, it is
        terminated, and None is returned.  Any exception is logged and also
        yields None.

        NOTE(review): the 2-second budget is very short -- confirm callers
        only pass quick commands.
        """
        p = None
        try:
            times = datetime.now()
            # shell=True: *cmd* is a full shell command line; stderr is
            # folded into stdout so the caller sees a single stream.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True)
            while p.poll() is None:
                time.sleep(0.3)
                now = datetime.now()
                diff = now - times
                if diff.seconds > 2:
                    # Considered hung: close pipes, then terminate the child.
                    if p and p.poll() != 0:
                        if p.stdout:
                            p.stdout.close()
                        if p.stderr:
                            p.stderr.close()
                        if p.stdin:
                            p.stdin.close()
                        if p:
                            p.terminate()
                    # Reap any already-exited child without blocking.
                    os.waitpid(-1, os.WNOHANG)
                    self._smgr_log.log(self._smgr_log.INFO, "command:" + cmd + " --> hanged")
                    return None
            result = p.communicate()[0].strip()
            return result
        except Exception as e:
            # Best-effort cleanup of the child before reporting failure.
            if p and p.poll() != 0:
                if p.stdout:
                    p.stdout.close()
                if p.stderr:
                    p.stderr.close()
                if p.stdin:
                    p.stdin.close()
                if p:
                    p.terminate()
            self._smgr_log.log(self._smgr_log.INFO, "Exception in call_subprocess: " + str(e))
            return None

    def create_store_copy_ssh_keys(self, server_id, server_ip):
        """Generate a 2048-bit RSA key pair for a server and install it.

        The public key is appended to /root/.ssh/authorized_keys on the
        target (reached with password auth) and both keys are stored in the
        server DB row.  Returns the paramiko key object on success, None on
        failure.  Note that even on failure the generated pair is written
        to the DB -- presumably so a retry can reuse it (confirm).
        """

        # Create the Keys using Pycrypto
        self._smgr_log.log(self._smgr_log.DEBUG, "Copying keys for server: " + str(server_id))
        ssh_key = paramiko.RSAKey.generate(bits=2048)
        ssh_private_key_obj = StringIO.StringIO()
        ssh_key.write_private_key(ssh_private_key_obj)
        source_file = ""
        bytes_sent = 0
        try:
            subprocess.call(['mkdir', '-p', '/tmp'])
            # Save Public key on Target Server
            source_file = "/tmp/" + str(server_id) + ".pub"
            with open("/tmp/" + str(server_id) + ".pub", 'w+') as content_file:
                content_file.write("ssh-rsa " + str(ssh_key.get_base64()))
                content_file.close()
            # Password auth is used here because the key being installed is
            # the one that will later enable key-based logins.
            ssh = ServerMgrSSHClient(self._serverDb)
            ssh.connect(server_ip, server_id, option="password")
            key_dest_file = "/root/" + str(server_id) + ".pub"
            dest_file = "/root/.ssh/authorized_keys"
            ssh.exec_command("mkdir -p /root/.ssh/")
            ssh.exec_command("touch " + str(key_dest_file))
            if os.path.exists(source_file):
                # Append (not overwrite) so existing authorized keys survive.
                bytes_sent = ssh.copy(source_file, key_dest_file)
                ssh.exec_command("echo '' >> " + str(dest_file))
                ssh.exec_command("cat " + str(key_dest_file) + " >> " + str(dest_file))
                ssh.exec_command("echo '' >> " + str(dest_file))
                ssh.exec_command("rm -rf " + str(key_dest_file))
            # Update Server table with ssh public and private keys
            update = {'id': server_id,
                      'ssh_public_key': "ssh-rsa " + str(ssh_key.get_base64()),
                      'ssh_private_key': ssh_private_key_obj.getvalue()}
            self._serverDb.modify_server(update)
            ssh.close()
            if os.path.exists(source_file):
                os.remove(source_file)
            self._smgr_log.log(self._smgr_log.DEBUG, "Bytes copied in ssh_key copy: " + str(bytes_sent))
            return ssh_key
        except Exception as e:
            self._smgr_log.log(self._smgr_log.ERROR, "Error Creating/Copying Keys: " + str(server_id) + " : " + str(e))
            if os.path.exists(source_file):
                os.remove(source_file)
            # Update Server table with ssh public and private keys
            update = {'id': server_id,
                      'ssh_public_key': "ssh-rsa " + str(ssh_key.get_base64()),
                      'ssh_private_key': ssh_private_key_obj.getvalue()}
            self._serverDb.modify_server(update)
            return None

    def populate_server_data_lists(self, servers, ipmi_list, hostname_list,
                                   server_ip_list, ipmi_username_list, ipmi_password_list, feature):
        """Append IPMI/host details of eligible servers to the given lists.

        Servers that already have *feature* ("monitoring" or "inventory")
        enabled in their parameters are skipped, as are servers missing any
        of ipmi_address/id/ip_address/password.  The six list arguments are
        mutated in place; missing IPMI credentials fall back to the
        configured defaults.
        """
        for row in servers:
            entry = dict(row)
            if 'parameters' in entry:
                # Parameters are stored as a python-literal string.
                params = eval(entry['parameters'])
                if feature in ("monitoring", "inventory"):
                    flag = "enable_" + feature
                    if flag in params and params[flag] in ["true", "True"]:
                        continue
            required = ('ipmi_address', 'id', 'ip_address', 'password')
            if not all(field in entry and entry[field] for field in required):
                continue
            ipmi_list.append(entry['ipmi_address'])
            hostname_list.append(entry['id'])
            server_ip_list.append(entry['ip_address'])
            if entry.get('ipmi_username') and entry.get('ipmi_password'):
                ipmi_username_list.append(entry['ipmi_username'])
                ipmi_password_list.append(entry['ipmi_password'])
            else:
                ipmi_username_list.append(self._default_ipmi_username)
                ipmi_password_list.append(self._default_ipmi_password)

    # Packages and sends a REST API call to the ServerManager node
    def reimage_run_inventory(self, ip, port, payload):
        """After a reimage, re-install SSH keys on the server, then trigger inventory.

        Retries the key copy up to 10 times, 30s apart, after an initial 60s
        grace period for the server to boot.  On success, and when inventory
        is configured, POSTs *payload* to the run_inventory endpoint at
        ip:port and returns the response text; returns an explanatory string
        when neither monitoring nor inventory is configured, otherwise None.
        """
        success = False
        source_file = ""
        sshclient = ServerMgrSSHClient(serverdb=self._serverDb)
        self._smgr_log.log("debug", "Running Reimage Inventory on  " + str(payload))
        if not self.inventory_config_set and not self.monitoring_config_set:
            return "No Inventory or Monitoring Configured"
        tries = 0
        # Give the freshly reimaged server time to come back up.
        gevent.sleep(60)
        while not success and tries < 10:
            try:
                tries += 1
                server = self._serverDb.get_server({"id": str(payload["id"])}, detail=True)
                if server and len(server) == 1:
                    server = server[0]
                    # Drop the stale host key left over from before the reimage.
                    subprocess.call(['ssh-keygen', '-f', '/root/.ssh/known_hosts', '-R', str(server["ip_address"])])
                    subprocess.call(['mkdir', '-p', '/tmp'])
                    sshclient.connect(str(server["ip_address"]), str(server["id"]), "password")
                    match_dict = dict()
                    match_dict["id"] = str(payload["id"])
                    self._smgr_log.log(self._smgr_log.DEBUG, "Running inventory on " + str(payload["id"]) +
                                       ", try " + str(tries))
                    ssh_public_ket_str = str(server["ssh_public_key"])
                    with open("/tmp/" + str(payload["id"]) + ".pub", 'w+') as content_file:
                        content_file.write(ssh_public_ket_str)
                        content_file.close()
                    source_file = "/tmp/" + str(payload["id"]) + ".pub"
                    dest_file = "/root/.ssh/authorized_keys"
                    if os.path.exists(source_file):
                        sshclient.exec_command('mkdir -p /root/.ssh/')
                        sshclient.exec_command('touch /root/.ssh/authorized_keys')
                        # NOTE(review): copy() replaces authorized_keys with just
                        # this key rather than appending -- confirm intended.
                        bytes_sent = sshclient.copy(source_file, dest_file)
                        sshclient.exec_command("echo '' >> " + str(dest_file))
                    sshclient.close()
                    self._smgr_log.log(self._smgr_log.DEBUG, "SSH Keys copied on  " + str(payload["id"]) +
                                       ", try " + str(tries))
                    if os.path.exists(source_file):
                        os.remove(source_file)
                    success = True
                else:
                    self._smgr_log.log(self._smgr_log.ERROR, "Server Matching Server Id:  " + str(payload["id"]) +
                                       " not found. SSH Keys will not be copied. ")
                    # Force the retry loop to stop: the server row is missing.
                    tries = 10
                    if os.path.exists(source_file):
                        os.remove(source_file)
                    if sshclient:
                        sshclient.close()
                    success = False
            except Exception as e:
                # Connection failures are expected while the server boots:
                # clean up and retry after a pause.  NOTE(review): the
                # exception itself is never logged -- consider logging it.
                if os.path.exists(source_file):
                    os.remove(source_file)
                if sshclient:
                    sshclient.close()
                gevent.sleep(30)
        if tries >= 10 and success is False:
            self._smgr_log.log(self._smgr_log.ERROR, "SSH Key copy failed on  " + str(payload["id"]))
        if success and self.inventory_config_set:
            try:
                url = "http://%s:%s/run_inventory" % (ip, port)
                # payload is re-bound to its JSON serialization from here on.
                payload = json.dumps(payload)
                headers = {'content-type': 'application/json'}
                resp = requests.post(url, headers=headers, timeout=5, data=payload)
                return resp.text
            except Exception as e:
                self._smgr_log.log("error", "Error running inventory on  " + str(payload) + " : " + str(e))
                return None

    def get_list_name(self, lst):
        """Return the last key of *lst* that is not an '@'-attribute key.

        Sandesh XML dicts mix payload keys with '@'-prefixed attribute
        keys; the payload key names the contained list/struct.  Returns ""
        when every key is an attribute key.
        """
        name = ""
        for key in lst.keys():
            if key[0] != '@':
                name = key
        return name

    def parse_sandesh_xml(self, inp, uve_name):
        try:
            sname = ""
            # pdb.set_trace()
            if '@type' not in inp:
                return None
            if inp['@type'] == 'slist':
                sname = str(uve_name) + "Uve"
                ret = []
                items = inp[sname]
                if not isinstance(items, list):
                    items = [items]
                lst = []
                for elem in items:
                    if not isinstance(elem, dict):
                        lst.append(elem)
                    else:
                        lst_elem = {}
                        for k, v in elem.items():
                            lst_elem[k] = self.parse_sandesh_xml(v, uve_name)
                        lst.append(lst_elem)
                # ret[sname] = lst
                ret = lst
                return ret
            elif inp['@type'] == 'sandesh':
                sname = "data"
                ret = {}
                for k, v in inp[sname].items():
                    ret[k] = self.parse_sandesh_xml(v, uve_name)
                return ret
            elif inp['@type'] == 'struct':
                sname = self.get_list_name(inp)
                if (sname == ""):
                    self._smgr_log.log("error", "Error parsing sandesh xml dict : " + str('Struct Parse Error'))
                    return None
                ret = {}
                for k, v in inp[sname].items():
                    ret[k] = self.parse_sandesh_xml(v, uve_name)
                return ret
            elif (inp['@type'] == 'list'):
                sname = self.get_list_name(inp['list'])
                ret = []
                if (sname == ""):
                    return ret
                items = inp['list'][sname]
                if not isinstance(items, list):
                    items = [items]
                lst = []
                for elem in items:
                    if not isinstance(elem, dict):
                        lst.append(elem)
                    else:
                        lst_elem = {}
                        for k, v in elem.items():
                            lst_elem[k] = self.parse_sandesh_xml(v, uve_name)
                        lst.append(lst_elem)
                # ret[sname] = lst
                ret = lst
                return ret
            else:
                if '#text' not in inp:
                    return None
                if inp['@type'] in ['i16', 'i32', 'i64', 'byte',
                                    'u64', 'u32', 'u16']:
                    return int(inp['#text'])
                elif inp['@type'] in ['float', 'double']:
                    return float(inp['#text'])
                elif inp['@type'] in ['bool']:
                    if inp['#text'] in ["false"]:
                        return False
                    elif inp['#text'] in ["true"]:
                        return True
                    else:
                        return inp['#text']
                else:
                    return inp['#text']
        except Exception as e:
            self._smgr_log.log("error", "Error parsing sandesh xml dict : " + str(e))
            return None

    def get_sandesh_url(self, ip, introspect_port, uve_name, server_id=None):
        """Build the sandesh UVE-cache introspect URL for *uve_name*.

        With *server_id* the URL queries a single key (tname/key form);
        without it the whole cache is requested (x= form).
        """
        base = "http://{0}:{1}/Snh_SandeshUVECacheReq".format(ip, introspect_port)
        if server_id:
            return base + "?tname={0}&key={1}".format(uve_name, server_id)
        return base + "?x={0}".format(uve_name)

    def initialize_features(self, sm_args, serverdb):
        """Wire up monitoring and inventory sub-systems at startup.

        Initializes sandesh, attaches *serverdb*, then starts the monitoring
        greenlet and/or the initial inventory pull depending on which
        configs are set.  When inventory is unconfigured, SSH key
        distribution is still spawned in the background (see setup_keys).
        """
        self.sandesh_init(sm_args, self.monitoring_config_set, self.inventory_config_set)
        self.set_serverdb(serverdb)
        if self.monitoring_config_set:
            self.server_monitoring_obj.set_serverdb(serverdb)
            self.server_monitoring_obj.set_ipmi_defaults(sm_args.ipmi_username, sm_args.ipmi_password)
            # The monitoring loop runs as a background greenlet.
            self.monitoring_gevent_thread_obj = gevent.spawn(self.server_monitoring_obj.run)
        else:
            self._smgr_log.log(self._smgr_log.ERROR, "Monitoring configuration not set. "
                                                     "You will be unable to get Monitor information of servers.")

        if self.inventory_config_set:
            self.server_inventory_obj.set_serverdb(serverdb)
            self.server_inventory_obj.set_ipmi_defaults(sm_args.ipmi_username, sm_args.ipmi_password)
            self.server_inventory_obj.add_inventory()
        else:
            # No inventory: still ensure servers get SSH keys, asynchronously.
            gevent.spawn(self.setup_keys, serverdb)
            self._smgr_log.log(self._smgr_log.ERROR, "Inventory configuration not set. "
                                                     "You will be unable to get Inventory information from servers.")

    def setup_keys(self, server_db):
        """Ensure every known server has a stored SSH key pair.

        Walks all servers in the DB and generates/copies keys for any
        server whose ssh_private_key is absent or None.  Servers without
        an id or ip_address entry are skipped.  The *server_db* argument
        is unused (kept for interface compatibility -- the instance's
        self._serverDb is read instead, matching original behavior).
        """
        servers = self._serverDb.get_server(None, detail=True)
        for server in servers:
            # Missing key and explicit None both mean "no key yet".  The
            # original elif dereferenced server['ssh_private_key'] without a
            # membership check, which could raise KeyError; checking
            # membership first fixes that and merges the duplicate branches.
            has_key = 'ssh_private_key' in server and \
                server['ssh_private_key'] is not None
            if not has_key and 'id' in server and 'ip_address' in server \
                    and server['id']:
                self.create_store_copy_ssh_keys(server['id'], server['ip_address'])

    def create_server_dict(self, servers):
        """Index *servers* by id, filling in default IPMI credentials.

        Each row is copied to a plain dict; rows missing either IPMI
        credential get both defaults.  Returns {server_id: server_dict}.
        """
        servers_by_id = {}
        for row in servers:
            entry = dict(row)
            missing_user = 'ipmi_username' not in entry or not entry['ipmi_username']
            missing_pass = 'ipmi_password' not in entry or not entry['ipmi_password']
            if missing_user or missing_pass:
                entry['ipmi_username'] = self._default_ipmi_username
                entry['ipmi_password'] = self._default_ipmi_password
            servers_by_id[str(entry['id'])] = entry
        self._smgr_log.log(self._smgr_log.DEBUG, "Created server dictionary.")
        return servers_by_id

    def get_mon_conf_details(self):
        """Placeholder reply when monitoring is unconfigured.

        Fixed: the @staticmethod decorator was removed -- the method
        declares *self*, so instance calls raised TypeError under
        @staticmethod.  Class-level calls with an explicit instance
        continue to work.
        """
        return "Monitoring Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def get_inv_conf_details(self):
        """Placeholder reply when inventory is unconfigured.

        Fixed: @staticmethod removed (it declared *self*, breaking
        instance calls with TypeError).
        """
        return "Inventory Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def get_inventory_info(self):
        """Placeholder inventory query when inventory is unconfigured.

        Fixed: @staticmethod removed (it declared *self*, breaking
        instance calls with TypeError).
        """
        return "Inventory Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def get_monitoring_info(self):
        """Placeholder monitoring query when monitoring is unconfigured.

        Fixed: @staticmethod removed (it declared *self*, breaking
        instance calls with TypeError).
        """
        return "Monitoring Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def get_monitoring_info_summary(self):
        """Placeholder monitoring summary when monitoring is unconfigured.

        Fixed: @staticmethod removed (it declared *self*, breaking
        instance calls with TypeError).
        """
        return "Monitoring Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def run_inventory(self):
        """Placeholder run_inventory when inventory is unconfigured.

        Fixed: @staticmethod removed (it declared *self*, breaking
        instance calls with TypeError).
        """
        return "Inventory Parameters haven't been configured.\n" \
               "Reset the configuration correctly and restart Server Manager.\n"

    def handle_inventory_trigger(self, action=None, servers=None):
        """Placeholder inventory trigger used when inventory is unconfigured.

        Logs that nothing will be read and returns the standard
        "not configured" message; *action* and *servers* are ignored.
        """
        self._smgr_log.log(self._smgr_log.INFO, "Inventory of added servers will not be read.")
        message = ("Inventory Parameters haven't been configured.\n"
                   "Reset the configuration correctly and restart Server Manager.\n")
        return message

    def add_inventory(self):
        """Placeholder add_inventory used when inventory is unconfigured."""
        log_msg = ("Inventory Parameters haven't been configured.\n"
                   "Reset the configuration correctly to add inventory.\n")
        self._smgr_log.log(self._smgr_log.ERROR, log_msg)
        return ("Inventory Parameters haven't been configured.\n"
                "Reset the configuration correctly and restart Server Manager.\n")

    def cleanup(self, obj=None):
        """Placeholder cleanup used when monitoring is unconfigured; *obj* ignored."""
        log_msg = ("Monitoring Parameters haven't been configured.\n"
                   "No cleanup needed.\n")
        self._smgr_log.log(self._smgr_log.INFO, log_msg)
        return ("Inventory Parameters haven't been configured.\n"
                "Reset the configuration correctly and restart Server Manager.\n")

    # Place-holder run loop the Server Monitor falls back to when no
    # monitoring API layer has been configured.
    def run(self):
        """Log that monitoring is disabled; performs no monitoring."""
        message = ("No monitoring API has been configured. "
                   "Server Environement Info will not be monitored.")
        self._smgr_log.log(self._smgr_log.INFO, message)
class SM_Docker():
    '''
    This class deals with all things docker that server manager needs
    '''

    _docker_client = None

    def __init__(self):
        # 240s timeout: loading/pushing multi-GB image tarballs is slow.
        self._docker_client = Client(timeout=240)
        self._smgr_log = ServerMgrlogger()

    def new_image(self, pre, post):
        """Return the first image in *post* whose 'Id' is absent from *pre*.

        *pre* and *post* are lists of docker image dicts.  Returns None
        when *post* contains no new image.
        """
        pre_ids = set(img['Id'] for img in pre)
        for img in post:
            if img['Id'] not in pre_ids:
                return img
        return None

    def get_image_id(self, image):
        """Extract the image id from a saved docker image tarball.

        Untars *image* into /tmp/contrail_docker, reads manifest.json and
        derives the id from the 'Config' entry ("<id>.json").  Returns
        {'Id': <id>} or None on any failure (which is logged).
        """
        try:
            new_img_id = {}
            tmpdir = "/tmp/contrail_docker"
            cmd = ("mkdir -p %s" % tmpdir)
            subprocess.check_call(cmd, shell=True)

            cmd = ("tar xvzf %s -C %s > /dev/null" % (image, tmpdir))
            subprocess.check_call(cmd, shell=True)

            manifest_file = tmpdir + "/manifest.json"
            if not os.path.isfile(manifest_file):
                self._smgr_log.log(
                    self._smgr_log.ERROR,
                    "Could not determine image_id in %s" % image)
                return None
            # 'with' fixes the original file-handle leak on this path.
            with open(manifest_file, 'r') as f:
                dt = json.load(f)
            cfg = re.split(r'\.', dt[0]['Config'])
            new_img_id['Id'] = str(cfg[0])

            cmd = ("rm -rf %s" % tmpdir)
            subprocess.check_call(cmd, shell=True)
            self._smgr_log.log(
                self._smgr_log.DEBUG,
                "image_id for %s is %s" % (image, new_img_id['Id']))
            return new_img_id
        except Exception as e:
            msg = "Unable to determine image_id for %s (%s)" % (image, e)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return None

    def load_containers(self, image):
        """docker-load the tarball *image*; return [success, image_id_dict]."""
        try:
            imageid = self.get_image_id(image)
            if imageid is None:
                return [False, None]

            # 'with' guarantees the tarball is closed even if load fails.
            with open(image, 'r') as f:
                self._docker_client.load_image(f)
            msg = "docker loaded image %s" % (image)
            self._smgr_log.log(self._smgr_log.INFO, msg)

            return [True, imageid]
        except Exception as e:
            msg = "docker load failed for image %s: %s" % (image, e)
            self._smgr_log.log(self._smgr_log.INFO, msg)
            return [False, None]

    def tag_containers(self, image, repo, tag):
        """Tag *image* as repo:tag.  Returns True on success, the error message otherwise."""
        try:
            self._docker_client.tag(image, repo, tag)
            return True
        except Exception as e:
            # Fixed: the original format string had two placeholders for
            # three arguments, raising TypeError inside this handler.
            msg = "tag container failed for %s:%s: %s" % (repo, tag, e)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return msg

    def remove_containers(self, image):
        """Best-effort removal of *image*; failures are deliberately ignored."""
        try:
            self._docker_client.remove_image(image, force=True)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.
            pass

    def push_containers(self, image):
        """Push *image* to its registry, logging progress every 20 percent.

        Returns False on failure; falls off the end (None) on success,
        matching the original behavior.
        """
        try:
            stream = self._docker_client.push(image, stream=True)
        except Exception as e:
            msg = "docker push failed for image %s: %s" % (image, e)
            #raise ServerMgrException(msg, ERR_OPR_ERROR)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return False

        progress = 0
        for line in stream:
            if "connection refused" in line:
                msg = "docker push failed for image %s: %s" % (image, line)
                self._smgr_log.log(self._smgr_log.ERROR, msg)
                return False

            # NOTE(review): eval() on daemon output is fragile -- the stream
            # lines are JSON and json.loads would be safer; eval is kept to
            # avoid changing behavior here.
            s = eval(line)
            #NOTE: example line is:
            # {"status":"Pushing","progressDetail":{"current":1536,
            #  "total":2913},"progress":"[====\u003e # ]
            # If docker python API changes the format of this, the next
            # assignment will be broken.
            try:
                current = s['progressDetail']['current']
                total = s['progressDetail']['total']
                cur_progress = int(round(
                    (float(current) / float(total)) * 100))
                # Log every 20% of progress
                if (cur_progress >= progress + 20):
                    progress = cur_progress
                    self._smgr_log.log(self._smgr_log.INFO, line)
            except KeyError:
                # Lines without progressDetail (status-only) are skipped.
                continue
Example #51
0
 def __init__(self):
     self._smgr_log = ServerMgrlogger()
     self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrValidations Init")
 def __init__(self):
     self._docker_client = Client(timeout=240)
     self._smgr_log = ServerMgrlogger()
class ServerMgrDb:

    _cluster_table_cols = []
    _server_table_cols = []
    _image_table_cols = []
    _status_table_cols = []
    _server_tags_table_cols = []

    # Keep list of table columns
    def _get_table_columns(self):
        """Cache each table's column names on the instance.

        Issues a dummy-keyed SELECT against every table purely to obtain
        cursor.description, from which the column names are read.
        """
        try:
            with self._con:
                cursor = self._con.cursor()

                def _columns(table, key_column):
                    # The dummy key matches no rows but still populates
                    # cursor.description with the table's columns.
                    cursor.execute(
                        "SELECT * FROM " + table +
                        " WHERE " + key_column + "=?", (_DUMMY_STR,))
                    return [col[0] for col in cursor.description]

                self._server_table_cols = _columns(server_table, "id")
                self._server_tags_table_cols = _columns(server_tags_table, "tag_id")
                self._image_table_cols = _columns(image_table, "id")
                self._cluster_table_cols = _columns(cluster_table, "id")
                self._status_table_cols = _columns(server_status_table, "id")
        except Exception:
            # Fixed: bare raise preserves the original traceback
            # (was 'raise e', which resets it in python 2).
            raise
    # end _get_table_columns

    def _add_table_column(self, cursor, table, column, column_type):
        try:
            cmd = "ALTER TABLE " + table + " ADD COLUMN " + column + " " + column_type
            cursor.execute(cmd)
        except lite.OperationalError:
            pass
    # end _add_table_column

    def log_and_raise_exception(self, msg, err_code=ERR_OPR_ERROR):
        """Log *msg* at ERROR level, then raise ServerMgrException(msg, err_code)."""
        self._smgr_log.log(self._smgr_log.ERROR, msg)
        raise ServerMgrException(msg, err_code)

    def __init__(self, db_file_name=def_server_db_file):
        """Open (creating if necessary) the server-manager sqlite database.

        Creates all tables on first run, adds columns introduced by later
        releases, caches table-column lists and backfills missing storage
        UUIDs on existing clusters.
        """
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               image_table + """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table.
                # Fixed: tag6 was declared "TAXT" (typo for TEXT), which gave
                # the column NUMERIC affinity; affects only newly created DBs
                # since this runs under IF NOT EXISTS.
                cursor.execute(
                    "CREATE TABLE IF NOT EXISTS " + server_table +
                    """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         UNIQUE (id))""")
                # Create server tags table
                cursor.execute(
                    "CREATE TABLE IF NOT EXISTS " + server_tags_table +
                    """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
                # Columns added after the initial schema (no-ops when present).
                self._add_table_column(cursor, image_table, "category", "TEXT")
                self._add_table_column(cursor, cluster_table, "base_image_id", "TEXT")
                self._add_table_column(cursor, cluster_table, "package_image_id", "TEXT")
                self._add_table_column(cursor, cluster_table, "provisioned_id", "TEXT")
                self._add_table_column(cursor, server_table, "reimaged_id", "TEXT")
                self._add_table_column(cursor, server_table, "provisioned_id", "TEXT")

            self._get_table_columns()
            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # Backfill storage UUIDs missing from pre-existing clusters.
            cluster_list = self._get_items(cluster_table, None,
                                       None, True, None)
            for cluster in cluster_list:
                # parameters is a python-literal string; eval it once
                # (the original eval'd it twice for the two checks).
                cluster_params = eval(cluster['parameters'])
                if 'storage_fsid' not in cluster_params or \
                        'storage_virsh_uuid' not in cluster_params:
                    self.update_cluster_uuids(cluster)
        except Exception:
            # Fixed: the original 'except e:' referenced an undefined name
            # and raised NameError, masking the real initialization error.
            raise
    # End of __init__

    def delete_tables(self):
        """Delete every row from all server-manager tables (schema is kept)."""
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.executescript("""
                DELETE FROM """ + cluster_table + """;
                DELETE FROM """ + server_table + """;
                DELETE FROM """ + server_tags_table + """;
                DELETE FROM """ + server_status_table + """;
                DELETE FROM """ + image_table + ";")
        except Exception:
            # Fixed: the original 'except: raise e' raised NameError because
            # 'e' was never bound; re-raise the actual exception instead.
            raise
    # End of delete_tables

    def get_server_id(self, server_mac):
        """Return the server id for *server_mac*, or None.

        The MAC is normalized to colon-separated form before the lookup.
        Returns None on no match or on any error (best-effort lookup).
        """
        try:
            if server_mac:
                server_mac = str(EUI(server_mac)).replace("-", ":")
            with self._con:
                cursor = self._con.cursor()
                cursor.execute("SELECT id FROM " +
                               server_table + " WHERE mac_address=?",
                              (server_mac,))
                row = cursor.fetchone()
                return row[0] if row else None
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.
            return None
    # end get_server_id

    # Below function returns value corresponding to tag_id from
    # server_tags_table
    def get_server_tag(self, tag_id):
        """Return the tag value stored for *tag_id*, or None.

        Returns None on no match or on any error (best-effort lookup).
        """
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.execute("SELECT value FROM " +
                               server_tags_table + " WHERE tag_id=?",
                              (tag_id,))
                row = cursor.fetchone()
                return row[0] if row else None
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.
            return None
    # end get_server_tag

    def get_server_mac(self, id):
        """Return the mac_address for the server with the given *id*, or None.

        (Parameter name *id* shadows the builtin but is kept for backward
        compatibility.)  Returns None on no match or on any error.
        """
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.execute("SELECT mac_address FROM " +
                               server_table + " WHERE id=?",
                              (id,))
                row = cursor.fetchone()
                return row[0] if row else None
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.
            return None

    def _add_row(self, table_name, dict):
        try:
            keys, values = zip(*dict.items())
            insert_str = "INSERT OR IGNORE INTO %s (%s) values (%s)" \
                % (table_name,
                   (",".join(keys)),
                   (",".join('?' * len(keys))))
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(insert_str, values)
        except Exception as e:
            raise e
    # end _add_row

    # Generic function to delete rows matching given criteria
    # from given table.
    # Match dict is dictionary of columns and values to match for.
    # unmatch dict is not of dictionaty of columns and values to match for.
    def _delete_row(self, table_name,
                    match_dict=None, unmatch_dict=None):
        try:
            delete_str = "DELETE FROM %s" %(table_name)
            # form a string to provide to where match clause
            match_list = []
            where = None
            if match_dict:
                where = match_dict.get("where", None)

            if where:
                delete_str += " WHERE " + where
            else:
                if match_dict:
                    match_list = ["%s = \'%s\'" %(
                            k,v) for k,v in match_dict.iteritems()]
                if unmatch_dict:
                    match_list += ["%s != \'%s\'" %(
                            k,v) for k,v in unmatch_dict.iteritems()]
                if match_list:
                    match_str = " and ".join(match_list)
                    delete_str+= " WHERE " + match_str

            with self._con:
                cursor = self._con.cursor()
                cursor.execute(delete_str)
        except Exception as e:
            raise e
    # End _delete_row

    def _modify_row(self, table_name, dict,
                    match_dict=None, unmatch_dict=None):
        try:
            keys, values = zip(*dict.items())
            modify_str = "UPDATE %s SET " % (table_name)
            update_list = ",".join(key + "=?" for key in keys)
            modify_str += update_list
            match_list = []
            if match_dict:
                match_list = ["%s = ?" %(
                    k) for k in match_dict.iterkeys()]
                match_values = [v for v in match_dict.itervalues()]
            if unmatch_dict:
                match_list += ["%s != ?" %(
                    k) for k in unmatch_dict.iterkeys()]
                match_values += [v for v in unmatch_dict.itervalues()]
            if match_list:
                match_str = " and ".join(match_list)
                match_values_str = ",".join(match_values)
                modify_str += " WHERE " + match_str
                values += (match_values_str,)
            with self._con:
                cursor = self._con.cursor()
                cursor.execute(modify_str, values)
        except Exception as e:
            raise e

    def _get_items(
        self, table_name, match_dict=None,
        unmatch_dict=None, detail=False, always_fields=None):
        try:
            with self._con:
                cursor = self._con.cursor()
                if detail:
                    sel_cols = "*"
                else:
                    sel_cols = ",".join(always_fields)
                select_str = "SELECT %s FROM %s" % (sel_cols, table_name)
                # form a string to provide to where match clause
                match_list = []
                where = None
                if match_dict:
                    where = match_dict.get("where", None)
                if where:
                    select_str += " WHERE " + where
                else:
                    if match_dict:
                        match_list = ["%s = \'%s\'" %(
                                k,v) for k,v in match_dict.iteritems()]
                    if unmatch_dict:
                        match_list += ["%s != \'%s\'" %(
                                k,v) for k,v in unmatch_dict.iteritems()]
                    if match_list:
                        match_str = " and ".join(match_list)
                        select_str+= " WHERE " + match_str
                cursor.execute(select_str)
            rows = [x for x in cursor]
            cols = [x[0] for x in cursor.description]
            items = []
            for row in rows:
                item = {}
                for prop, val in zip(cols, row):
                    item[prop] = val
                items.append(item)
            return items
        except Exception as e:
            raise e
    # End _get_items

    def add_cluster(self, cluster_data):
        """Insert a new cluster row; the compound "parameters" and
        "email" fields are flattened to their text representation
        before storage."""
        try:
            params = cluster_data.pop("parameters", None)
            if params is not None:
                cluster_data['parameters'] = str(params)
            mail_list = cluster_data.pop("email", None)
            if mail_list is not None:
                cluster_data['email'] = str(mail_list)
            self._add_row(cluster_table, cluster_data)
        except Exception as e:
            raise e
    # End of add_cluster

    def add_server(self, server_data):
        """Insert a new server row.

        Normalizes the MAC address, validates the referenced cluster,
        maps user-visible tags onto their tag columns and stringifies
        the compound fields (roles, interfaces, email, parameters)
        before insertion.  Returns 0 on success.
        """
        try:
            if 'mac_address' in server_data:
                # Normalize MAC to aa:bb:cc:... form.
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            roles = server_data.pop("roles", None)
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Reject servers that reference a non-existent cluster.
                self.check_obj(
                    "cluster", {"id" : cluster_id})
            if roles is not None:
                server_data['roles'] = str(roles)
            # Flatten the optional interface definitions to text columns.
            for src_key, dst_key in (("control_data_network", "intf_control"),
                                     ("bond_interface", "intf_bond")):
                value = server_data.pop(src_key, None)
                if value:
                    server_data[dst_key] = str(value)
            mail_list = server_data.pop("email", None)
            if mail_list:
                server_data['email'] = str(mail_list)
            # Translate user-visible tag names into their tag columns.
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v, k) for k, v in tags_dict.iteritems())
                for tag_name, tag_value in server_tags.iteritems():
                    server_data[rev_tags_dict[tag_name]] = tag_value
            params = server_data.pop("parameters", None)
            if params is not None:
                server_data['parameters'] = str(params)
            self._add_row(server_table, server_data)
        except Exception as e:
            raise e
        return 0
    # End of add_server

    # This function for adding server tag is slightly different
    # compared with add function for other tables. The tag_data
    # contains tag information for all tags.
    # This function is always called with complete list of tags
    # so, clear the table first.
    def add_server_tags(self, tag_data):
        """Replace the entire server-tags table.

        Always called with the complete tag set, so existing rows are
        cleared before the new tag_id/value pairs are inserted.
        """
        try:
            with self._con:
                cursor = self._con.cursor()
                cursor.executescript("""
                DELETE FROM """ + server_tags_table + ";")
            for tag_id, tag_value in tag_data.iteritems():
                self._add_row(server_tags_table,
                              {'tag_id' : tag_id,
                               'value' : tag_value})
        except Exception as e:
            raise e
    # End of add_server_tags

    def server_discovery(self, action, entity):
        """Handle a discovery event for a server identified by MAC.

        "add" refreshes an already-known server (e.g. a new DHCP IP) or
        inserts it flagged as discovered; "delete" removes it only when
        it was originally discovered (not manually added).  Best-effort:
        any error is swallowed deliberately.
        """
        try:
            if 'mac_address' in entity:
                entity['mac_address'] = str(EUI(entity['mac_address'])).replace("-", ":")
            mac_address = entity.get("mac_address", None)
            verb = action.lower()
            if verb == "add":
                known = self._get_items(
                    server_table, {"mac_address" : mac_address}, detail=True)
                if known:
                    # Already present: refresh the row in place.
                    self._modify_row(
                        server_table, entity,
                        {"mac_address": mac_address}, {})
                    return
                entity['discovered'] = "true"
                entity['status'] = "server_discovered"
                self._add_row(server_table, entity)
            elif verb == "delete":
                known = self.get_server({"mac_address" : mac_address}, detail=True)
                if known and known[0]['discovered'] == "true":
                    self._delete_row(server_table,
                                     {"mac_address" : mac_address})
        except Exception as e:
            # Discovery is best-effort; errors are intentionally ignored.
            return
    # End of server_discovery

    def add_image(self, image_data):
        """Insert a new image row; the "parameters" dict is flattened to
        its text representation before storage."""
        try:
            params = image_data.pop("parameters", None)
            if params is not None:
                image_data['parameters'] = str(params)
            self._add_row(image_table, image_data)
        except Exception as e:
            raise e
    # End of add_image

    def delete_cluster(self, match_dict=None, unmatch_dict=None):
        """Delete matching cluster rows.

        Verifies the cluster exists and refuses to delete while servers
        still reference it.
        """
        try:
            self.check_obj("cluster", match_dict, unmatch_dict)
            cluster_id = match_dict.get("id", None)
            members = None
            if cluster_id:
                members = self.get_server({'cluster_id' : cluster_id}, detail=True)
            if members:
                msg = ("Servers are present in this cluster, "
                        "remove cluster association, prior to cluster delete.")
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            self._delete_row(cluster_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e
    # End of delete_cluster

    def check_obj(self, type,
                  match_dict=None, unmatch_dict=None, raise_exception=True):
        """Verify that at least one object of the given type ("server",
        "cluster" or "image") matches the criteria.

        Returns True when found.  When nothing matches (or the type is
        unknown), logs and raises when raise_exception is set, else
        returns False.

        Note: the parameter name "type" shadows the builtin but is kept
        for interface compatibility with existing callers.
        """
        cb = None
        if type == "server":
            cb = self.get_server
        elif type == "cluster":
            cb = self.get_cluster
        elif type == "image":
            cb = self.get_image
        # BUG FIX: db_obj used to be referenced while unbound (NameError)
        # whenever type was not one of the known object kinds; an unknown
        # type is now simply treated as "not found".
        db_obj = cb(match_dict, unmatch_dict, detail=False) if cb else None

        if not db_obj:
            msg = "%s not found" % (type)
            if raise_exception:
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)
            return False
        return True
    #end of check_obj

    def delete_server(self, match_dict=None, unmatch_dict=None):
        """Delete matching server rows.

        Any MAC address criteria are normalized to aa:bb:cc:... form
        first, and the server's existence is verified before deletion.
        """
        try:
            for criteria in (match_dict, unmatch_dict):
                if criteria and criteria.get("mac_address", None):
                    criteria["mac_address"] = str(
                        EUI(criteria["mac_address"])).replace("-", ":")
            self.check_obj("server", match_dict, unmatch_dict)
            self._delete_row(server_table,
                             match_dict, unmatch_dict)
        except Exception as e:
            raise e
    # End of delete_server

    def delete_server_tag(self, match_dict=None, unmatch_dict=None):
        """Delete matching rows from the server-tags table."""
        try:
            self._delete_row(server_tags_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e
    # End of delete_server_tag

    def delete_image(self, match_dict=None, unmatch_dict=None):
        """Delete matching image rows after verifying at least one
        matching image exists."""
        try:
            self.check_obj("image", match_dict, unmatch_dict)
            self._delete_row(image_table, match_dict, unmatch_dict)
        except Exception as e:
            raise e
    # End of delete_image

    def modify_cluster(self, cluster_data):
        """Update an existing cluster row, keyed by cluster id.

        The incoming "parameters" dict is merged over the stored one
        (a literal '""' value clears a field), a uuid parameter is
        backfilled when missing, and the compound parameters/email
        fields are stored as text.  Raises when the id is missing or
        unknown.
        """
        try:
            cluster_id = cluster_data.get('id', None)
            if not cluster_id:
                raise Exception("No cluster id specified")
            self.check_obj("cluster", {"id" : cluster_id})
            db_cluster = self.get_cluster(
                {"id" : cluster_id}, detail=True)
            if not db_cluster:
                msg = "%s is not valid" % cluster_id
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)

            # Stored parameters are a Python-literal string; eval() turns
            # them back into a dict (trusted, locally-written data).
            db_cluster_params_str = db_cluster[0] ['parameters']
            db_cluster_params = {}
            if db_cluster_params_str:
                db_cluster_params = eval(db_cluster_params_str)
            if 'uuid' not in db_cluster_params:
                # NOTE(review): assumes cluster_data always carries a
                # "parameters" dict here; a request without one would
                # raise KeyError -- confirm against callers.
                str_uuid = str(uuid.uuid4())
                cluster_data["parameters"].update({"uuid":str_uuid})
            # Merge incoming parameters over the stored ones; the literal
            # '""' means "clear this value".
            cluster_params = cluster_data.pop("parameters", {})
            for k,v in cluster_params.iteritems():
                if v == '""':
                    v = ''
                db_cluster_params[k] = v
            cluster_params = db_cluster_params
            if cluster_params is not None:
                cluster_data['parameters'] = str(cluster_params)

            # Store email list as text field
            email = cluster_data.pop("email", None)
            if email is not None:
                cluster_data['email'] = str(email)
            self._modify_row(
                cluster_table, cluster_data,
                {'id' : cluster_id}, {})
        except Exception as e:
            raise e
    # End of modify_cluster

    def modify_image(self, image_data):
        """Update an existing image row, keyed by image id.

        The path and type fields are immutable; attempts to change
        either are rejected.  The "parameters" dict is stored as text.
        """
        try:
            image_id = image_data.get('id', None)
            if not image_id:
                raise Exception("No image id specified")
            #Reject if non mutable field changes
            db_image = self.get_image(
                {'id' : image_data['id']},
                detail=True)
            stored = db_image[0]
            if image_data['path'] != stored['path']:
                self.log_and_raise_exception(
                    'Image path cannnot be modified', ERR_OPR_ERROR)
            if image_data['type'] != stored['type']:
                self.log_and_raise_exception(
                    'Image type cannnot be modified', ERR_OPR_ERROR)
            params = image_data.pop("parameters", None)
            if params is not None:
                image_data['parameters'] = str(params)
            self._modify_row(
                image_table, image_data,
                {'id' : image_id}, {})
        except Exception as e:
            raise e
    # End of modify_image

    def modify_server(self, server_data):
        """Update an existing server row.

        The row is located by mac_address when present, otherwise by id.
        Merges the incoming "parameters" dict over the stored one, maps
        user-visible tags onto their tag columns and stringifies the
        compound fields.  Returns the pre-modification row list, or the
        falsy lookup result when the server does not exist.
        """
        db_server = None
        if 'mac_address' in server_data.keys() and \
                 server_data['mac_address'] != None:
            db_server = self.get_server(
                {'mac_address' : server_data['mac_address']},
                detail=True)
        elif 'id' in server_data.keys() and server_data['id'] != None:
            db_server = self.get_server(
                {'id': server_data['id']},
                detail=True)
        if not db_server:
            return db_server
        try:
            cluster_id = server_data.get('cluster_id', None)
            if cluster_id:
                # Reject moves to a non-existent cluster.
                self.check_obj("cluster", {"id" : cluster_id})

            if 'mac_address' in server_data:
                # Normalize MAC to aa:bb:cc:... form.
                server_data['mac_address'] = str(
                    EUI(server_data['mac_address'])).replace("-", ":")
            server_mac = server_data.get('mac_address', None)
            if not server_mac:
                server_id = server_data.get('id', None)
                if not server_id:
                    msg = ("No server MAC or id specified")
                    self.log_and_raise_exception(msg, ERR_OPR_ERROR)
                else:
                    server_mac = self.get_server_mac(server_id)
            #Check if object exists
            # NOTE(review): 'server_mac' is never a key of server_data
            # (the key in use is 'mac_address'), so this existence /
            # immutable-MAC check looks like dead code -- confirm before
            # relying on it.
            if 'id' in server_data.keys() and \
                    'server_mac' in server_data.keys():
                self.check_obj('server',
                               {'id' : server_data['id']})
                #Reject if primary key values change
                if server_data['mac_address'] != db_server[0]['mac_address']:
                    msg = ('MAC address cannnot be modified', ERR_OPR_ERROR)
                    self.log_and_raise_exception(msg, ERR_OPR_ERROR)


            # Store roles list as a text field
            roles = server_data.pop("roles", None)
            if roles is not None:
                server_data['roles'] = str(roles)
            intf_control = server_data.pop("control_data_network", None)
            if intf_control:
                server_data['intf_control'] = str(intf_control)
            intf_bond = server_data.pop("bond_interface", None)
            if intf_bond:
                server_data['intf_bond'] = str(intf_bond)
            # store tags if any: translate user-visible tag names into
            # their tag columns.
            server_tags = server_data.pop("tag", None)
            if server_tags is not None:
                tags_dict = self.get_server_tags(detail=True)
                rev_tags_dict = dict((v,k) for k,v in tags_dict.iteritems())
                for k,v in server_tags.iteritems():
                    server_data[rev_tags_dict[k]] = v
            # Merge incoming parameters over the stored ones.  The stored
            # value is a Python-literal string restored via eval()
            # (trusted, locally-written data); a '""' value clears the
            # field.
            server_params = server_data.pop("parameters", None)
            db_server_params = {}
            if len(db_server) == 0:
                msg = ('DB server not found', ERR_OPR_ERROR)
                self.log_and_raise_exception(msg, ERR_OPR_ERROR)

            db_server_params_str = db_server[0] ['parameters']
            if db_server_params_str:
                db_server_params = eval(db_server_params_str)
                if server_params:
                    for k,v in server_params.iteritems():
                        if v == '""':
                            v = ''
                        db_server_params[k] = v
            server_data['parameters'] = str(db_server_params)

            # Store email list as text field
            email = server_data.pop("email", None)
            if email is not None:
                server_data['email'] = str(email)
            self._modify_row(
                server_table, server_data,
                {'mac_address' : server_mac}, {})
            return db_server
        except Exception as e:
            raise e
    # End of modify_server

    # This function for modifying server tag is slightly different
    # compared with modify function for other tables. The tag_data
    # contains tag information for all tags.
    def modify_server_tags(self, tag_data):
        """Update rows of the server-tags table; tag_data carries the
        value for every tag, keyed by tag id."""
        try:
            for tag_id, tag_value in tag_data.iteritems():
                self._modify_row(
                    server_tags_table,
                    {'tag_id' : tag_id, 'value' : tag_value},
                    {'tag_id' : tag_id}, {})
        except Exception as e:
            raise e
    # End of modify_server_tags

    def get_image(self, match_dict=None, unmatch_dict=None,
                  detail=False, field_list=None):
        """Fetch image rows; summary mode (detail=False) returns only
        field_list columns, defaulting to ["id"]."""
        try:
            fields = field_list if field_list else ["id"]
            images = self._get_items(
                image_table, match_dict,
                unmatch_dict, detail, fields)
        except Exception as e:
            raise e
        return images
    # End of get_image

    def get_server_tags(self, match_dict=None, unmatch_dict=None,
                  detail=True):
        """Return the server tags as a {tag_id: value} mapping.

        Note: the detail argument is accepted for interface symmetry but
        full rows are always fetched.
        """
        try:
            rows = self._get_items(
                server_tags_table, match_dict,
                unmatch_dict, True, ["tag_id"])
            tag_dict = dict((row['tag_id'], row['value']) for row in rows)
        except Exception as e:
            raise e
        return tag_dict
    # End of get_server_tags

    def get_status(self, match_key=None, match_value=None,
                  detail=False):
        """Fetch server-status rows where match_key equals match_value.

        Summary mode (detail=False) returns only the "id" column.
        """
        # BUG FIX: _get_items takes "always_fields" (plural); the old
        # keyword "always_field" raised TypeError on every call.
        status = self._get_items(
            server_status_table, {match_key : match_value},
            detail=detail, always_fields=["id"])
        return status
    # End of get_status

    def put_status(self, server_data):
        """Upsert the status row for a server, keyed by server id:
        update when a row exists, insert otherwise."""
        try:
            server_id = server_data.get('id', None)
            if not server_id:
                raise Exception("No server id specified")
            existing = self._get_items(
                server_status_table, {"id" : server_id}, detail=True)
            if existing:
                self._modify_row(
                    server_status_table, server_data,
                    {'id' : server_id}, {})
            else:
                self._add_row(server_status_table, server_data)
        except Exception as e:
            raise e
    # End of put_status


    def get_server(self, match_dict=None, unmatch_dict=None,
                   detail=False, field_list=None):
        """Fetch server rows.

        A mac_address criterion is normalized to aa:bb:cc:... form
        first.  Summary mode (detail=False) returns only field_list,
        defaulting to id, MAC and IP.
        """
        try:
            if match_dict and match_dict.get("mac_address", None):
                match_dict["mac_address"] = str(
                    EUI(match_dict["mac_address"])).replace("-", ":")
            fields = field_list if field_list \
                else ["id", "mac_address", "ip_address"]
            servers = self._get_items(
                server_table, match_dict,
                unmatch_dict, detail, fields)
        except Exception as e:
            raise e
        return servers
    # End of get_server

    def get_cluster(self, match_dict=None,
                unmatch_dict=None, detail=False, field_list=None):
        """Fetch cluster rows; summary mode (detail=False) returns only
        field_list columns, defaulting to ["id"]."""
        try:
            fields = field_list if field_list else ["id"]
            cluster = self._get_items(
                cluster_table, match_dict,
                unmatch_dict, detail, fields)
        except Exception as e:
            raise e
        return cluster
    # End of get_cluster

    # If any UUIDs are missing from an existing Cluster, we add them during ServerManager DB init
    def update_cluster_uuids(self, cluster):
        """Backfill uuid / storage_fsid / storage_virsh_uuid into a
        cluster's parameters during DB init, then persist the row."""
        try:
            params_str = cluster['parameters']
            # Stored parameters are a Python-literal string; eval()
            # restores the dict (trusted, locally-written data).
            params = eval(params_str) if params_str else {}
            for key in ('uuid', 'storage_fsid', 'storage_virsh_uuid'):
                if key not in params:
                    params.update({key: str(uuid.uuid4())})
        except Exception as e:
            raise e

        cluster['parameters'] = str(params)
        self._modify_row(
            cluster_table, cluster,
            {'id' : cluster['id']}, {})
 def __init__(self, q):
     """Store the work queue and set up the server-manager logger.

     NOTE(review): this fragment is indented differently from the rest
     of the file -- presumably pasted from another module; confirm its
     enclosing Joiner class definition.
     """
     super(Joiner, self).__init__()
     self._smgr_log = ServerMgrlogger()
     self.queue = q
class ServerMgrPuppet:
    _node_env_map_file = "puppet/node_mapping.json"

    def __init__(self, smgr_base_dir, puppet_dir):
        """Remember the server-manager base and puppet directories and
        create the puppet directory's parent when it is missing."""
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrPuppet Init")

        self.smgr_base_dir = smgr_base_dir
        self.puppet_directory = puppet_dir
        parent_dir = os.path.dirname(puppet_dir)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
    # end __init__

    def storage_get_control_network_mask(self, provision_params,
        server, cluster):
        """Return the control-network CIDR (quoted, e.g. '"10.0.0.0/24"')
        for the storage puppet templates.

        Picks an openstack node IP -- preferring a configured external
        openstack IP, then the first 'openstack' role member other than
        this server, then this server itself -- and derives the network
        from the node's control-interface config or the subnet mask.
        """
        role_ips_dict = provision_params['roles']
        cluster_params = cluster.get('parameters', {})
        server_params = server.get('parameters', {})
        #openstack_ip = cluster_params.get("internal_vip", None)
        cluster_openstack_prov_params = (
            cluster_params.get("provision", {})).get("openstack", {})
        configured_external_openstack_ip = cluster_openstack_prov_params.get("external_openstack_ip", None)
        openstack_ip = ''
        self_ip = server.get("ip_address", "")
        if configured_external_openstack_ip:
            openstack_ip = configured_external_openstack_ip
        elif 'openstack' in role_ips_dict and len(role_ips_dict['openstack']) and self_ip not in role_ips_dict['openstack']:
            openstack_ip = role_ips_dict['openstack'][0]
        else:
            openstack_ip = self_ip

        subnet_mask = server.get("subnet_mask", "")
        if not subnet_mask:
            subnet_mask = cluster_params.get("subnet_mask", "255.255.255.0")
        subnet_address = ""
        intf_control = {}
        subnet_address = str(IPNetwork(
            openstack_ip + "/" + subnet_mask).network)

        # Externally-configured openstack IP: return its network directly.
        if openstack_ip == configured_external_openstack_ip:
            return '"' + str(IPNetwork(subnet_address).network) + '/' + str(IPNetwork(subnet_address).prefixlen) + '"'

        self._smgr_log.log(self._smgr_log.DEBUG, "control-net : %s" % str( provision_params['control_net']))
        # control_net entries are stored as Python-literal strings; eval()
        # restores the {interface: config} dict (trusted data).
        if provision_params['control_net'] [openstack_ip]:
            intf_control = eval(provision_params['control_net'] [openstack_ip])
            self._smgr_log.log(self._smgr_log.DEBUG, "openstack-control-net : %s" % str(intf_control ))

        # NOTE(review): this loop returns on the FIRST interface entry,
        # whichever branch applies -- looks intentional (a single control
        # interface expected), but confirm for multi-interface configs.
        for intf,values in intf_control.items():
            if intf:
                self._smgr_log.log(self._smgr_log.DEBUG, "ip_address : %s" % values['ip_address'])
                return '"' + str(IPNetwork(values['ip_address']).network) + '/'+ str(IPNetwork(values['ip_address']).prefixlen) + '"'
            else:
                self._smgr_log.log(self._smgr_log.DEBUG, "server_ip : %s" % values['server_ip'])
                return '"' + str(IPNetwork(provision_params['server_ip']).network) + '/'+ str(IPNetwork(provision_params['server_ip']).prefixlen) + '"'

        # No control-interface config: fall back to the derived subnet.
        return '"' + str(IPNetwork(subnet_address).network) + '/'+ str(IPNetwork(subnet_address).prefixlen) + '"'

    def delete_node_entry(self, site_file, server_fqdn):
        """Remove the puppet "node '<fqdn>' { ... }" block for
        server_fqdn from site_file, leaving every other entry intact.

        The file is streamed to a temporary copy; while inside the
        matched node block, lines are dropped and brace depth is tracked
        until the block closes.  The temp copy then replaces site_file
        (mode 0644).
        """
        tempfd, temp_file = tempfile.mkstemp()
        fh = os.fdopen(tempfd, "w")
        node_found = False
        brace_count = 0
        with open(site_file, "r") as site_fh:
            for line in site_fh:
                tokens = line.strip().split()
                # A node header looks like: node 'fqdn' { ...; compare the
                # quoted name in the second token against server_fqdn.
                if len(tokens) >= 2 and tokens[0] == "node":
                    quoted = re.findall(r"['\"](.*?)['\"]", tokens[1])
                    # BUG FIX: guard against unquoted node names (e.g.
                    # "node default {"), which used to raise IndexError.
                    if quoted and quoted[0] == server_fqdn:
                        node_found = True
                if not node_found:
                    fh.write(line)
                else:
                    # Skip comment lines inside the block.  BUG FIX: the
                    # tokens guard also protects blank lines, which used
                    # to raise IndexError on tokens[0].
                    if tokens and tokens[0].startswith("#"):
                        continue
                    # Skip lines until the node block's closing brace.
                    if "{" in line:
                        brace_count += 1
                    if "}" in line:
                        brace_count -= 1
                    if brace_count == 0:
                        node_found = False
        fh.close()
        shutil.copy(temp_file, site_file)
        os.chmod(site_file, 0o644)
        os.remove(temp_file)
    # end def delete_node_entry

    def add_node_entry(
        self, site_file, server_fqdn,
        server, cluster, cluster_servers, puppet_version):
        cluster_params = cluster.get('parameters', {})
        data = ''
        data += "node \'%s\' {\n" %server_fqdn
        # Add Stage relationships
        data += '    stage{ \'first\': }\n'
        data += '    stage{ \'last\': }\n'
        data += '    stage{ \'compute\': }\n'
        data += '    stage{ \'pre\': }\n'
        data += '    stage{ \'post\': }\n'
        if 'tsn' in server['roles']:
            data += '    stage{ \'tsn\': }\n'
        if 'toragent' in server['roles']:
            data += '    stage{ \'toragent\': }\n'
        if 'storage-compute' in server['roles'] or 'storage-master' in server['roles']:
            data += '    stage{ \'storage\': }\n'
        data += '    Stage[\'pre\']->Stage[\'first\']->Stage[\'main\']->Stage[\'last\']->Stage[\'compute\']->'
        if 'tsn' in server['roles']:
            data += 'Stage[\'tsn\']->'
        if 'toragent' in server['roles']:
            data += 'Stage[\'toragent\']->'
        if 'storage-compute' in server['roles'] or 'storage-master' in server['roles']:
            data += 'Stage[\'storage\']->'
        data += 'Stage[\'post\']\n'

        # Add pre role
        data += '    class { \'::contrail::provision_start\' : state => \'provision_started\', stage => \'pre\' }\n'
        # Add common role
        data += '    class { \'::sysctl::base\' : stage => \'first\' }\n'
        data += '    class { \'::apt\' : stage => \'first\' }\n'
        data += '    class { \'::contrail::profile::common\' : stage => \'first\' }\n'
        #Include all roles manifest,Each manifest will execute only if that host
        #is configured to have a role.
        #Uninstall manifest will get executed when host_roles doesnt have that
        #role and contrail_roles[] facts has that role.
        #This implies that a role which is not configured is present on
        #the target and uninstall manifest will get executed.

        # Add keepalived (This class is no-op if vip is not configured.)
        data += '    include ::contrail::profile::keepalived\n'
        # Add haproxy (for config node)
        data += '    include ::contrail::profile::haproxy\n'
        # Add database role.
        data += '    include ::contrail::profile::database\n'
        # Add webui role.
        data += '    include ::contrail::profile::webui\n'
        # Add openstack role.
        data += '    include ::contrail::profile::openstack_controller\n'
        # Add ha_config role.
        data += '    include ::contrail::ha_config\n'
        # Add config provision role.
        data += '    include ::contrail::profile::config\n'
        # Add controller role.
        data += '    include ::contrail::profile::controller\n'
        # Add collector role.
        data += '    include ::contrail::profile::collector\n'
        # Add config provision role.
        if ((puppet_version < 3.0) and ('config' in server['roles'])):
            data += '    class { \'::contrail::profile::provision\' : stage => \'last\' }\n'
        # Add compute role
        data += '    class { \'::contrail::profile::compute\' : stage => \'compute\' }\n'

        # Add Tsn Role
        if 'tsn' in server['roles']:
            data += '    class { \'::contrail::profile::tsn\' :  stage => \'tsn\' }\n'
        # Add Toragent Role
        if 'toragent' in server['roles']:
            data += '    class { \'::contrail::profile::toragent\' :  stage => \'toragent\' }\n'
        # Add Storage Role
        if 'storage-compute' in server['roles'] or 'storage-master' in server['roles']:
            data += '    class { \'::contrail::profile::storage\' :  stage => \'storage\' }\n'
        # Add post role
        data += '    class { \'::contrail::provision_complete\' : state => \'post_provision_completed\', stage => \'post\' }\n'

        data += "}\n"
        with open(site_file, "a") as site_fh:
            site_fh.write(data)
        os.chmod(site_file, 0644)
        # end with
    # end def add_node_entry

    def add_node_entry_new(
        self, site_file, server_fqdn):
        """Append a minimal puppet node entry (the contrail_all class)
        for server_fqdn to the site manifest and make the manifest
        world-readable."""
        entry = "node \'%s\' {\n" % server_fqdn
        entry += "   class { '::contrail::contrail_all': }\n"
        entry += "}\n"
        with open(site_file, "a") as site_fh:
            site_fh.write(entry)
        os.chmod(site_file, 0o644)
    # end def add_node_entry_new

    def initiate_esx_contrail_vm(self, server, esx_server):
        """Build the parameter dict for the ContrailVM helper and kick
        off creation of the ContrailVM guest on the given ESX host."""
        self._smgr_log.log(self._smgr_log.DEBUG, "esx_server")
        #call scripts to provision esx
        server_params = server.get("parameters", {})
        vm_params = {}
        vm_params['vm'] = "ContrailVM"
        vm_params['vmdk'] = "ContrailVM"
        vm_params['datastore'] = server_params.get('datastore', "/vmfs/volumes/datastore1")
        vm_params['eth0_mac'] = server.get('mac_address', '')
        vm_params['eth0_ip'] = server.get('ip_address', '')
        vm_params['eth0_pg'] = server_params.get('esx_fab_port_group', '')
        vm_params['eth0_vswitch'] = server_params.get('esx_fab_vswitch', '')
        vm_params['eth0_vlan'] = None
        vm_params['eth1_vswitch'] = server_params.get('esx_vm_vswitch', '')
        vm_params['eth1_pg'] = server_params.get('esx_vm_port_group', '')
        vm_params['eth1_vlan'] = "4095"
        vm_params['uplink_nic'] = server_params.get('esx_uplink_nic', '')
        vm_params['uplink_vswitch'] = server_params.get('esx_fab_vswitch', '')
        vm_params['server'] = esx_server.get('esx_ip', '')
        vm_params['username'] = '******'
        vm_params['password'] = esx_server.get('esx_password', '')
        vm_params['thindisk'] =  server_params.get('esx_vmdk', '')
        vm_params['smgr_ip'] = server_params.get('smgr_ip', '')
        vm_params['domain'] =  server_params.get('domain', '')
        vm_params['vm_password'] = server_params.get('password', '')
        vm_params['vm_server'] = server_params.get('id', '')
        vm_params['vm_deb'] = server_params.get('vm_deb', '')
        out = ContrailVM(vm_params)
        # BUG FIX: the old log call used "ContrilVM:" % (out) -- a format
        # string with no placeholder, raising TypeError at runtime.
        self._smgr_log.log(self._smgr_log.DEBUG, "ContrailVM: %s" % (out,))
    # end initiate_esx_contrail_vm

    def generate_tor_certs(self, switch_info, server_id, domain):
        """Create a self-signed cert/key pair for a TOR agent.

        Files are written under /etc/contrail_smgr/puppet/ssl/tor/ named
        by the agent id; existing cert+key pairs are left untouched.
        Raises subprocess.CalledProcessError if openssl fails.
        """
        tor_name = switch_info['name']
        tor_agent_id = switch_info['agent_id']
        tor_vendor_name = switch_info['vendor_name']
        tor_server_fqdn = server_id + '.' + domain
        contrail_module_path = '/etc/contrail_smgr/puppet/ssl/tor/'
        tor_cert_file = contrail_module_path + 'tor.' + str(tor_agent_id) + '.cert.pem'
        tor_key_file = contrail_module_path + 'tor.' + str(tor_agent_id) + '.privkey.pem'

        self._smgr_log.log(self._smgr_log.DEBUG, 'module path => %s' % contrail_module_path)
        if os.path.exists(tor_cert_file) and os.path.exists(tor_key_file):
            self._smgr_log.log(self._smgr_log.DEBUG, 'cert exists for %s host %s' % (tor_name, tor_server_fqdn))
            return
        # SECURITY FIX: build the openssl invocation as an argument list
        # and run it without a shell, so metacharacters in switch / vendor
        # names cannot be interpreted by the shell (the original
        # concatenated a shell command string and used shell=True).
        subject = '/C=US/ST=Global/L=' + tor_name + '/O=' \
            + tor_vendor_name + '/CN=' + tor_server_fqdn
        cert_cmd = ['openssl', 'req', '-new', '-x509', '-days', '3650',
                    '-sha256', '-newkey', 'rsa:4096', '-nodes', '-text',
                    '-subj', subject,
                    '-keyout', tor_key_file, '-out', tor_cert_file]

        if not os.path.exists(contrail_module_path):
            os.makedirs(contrail_module_path)
        self._smgr_log.log(self._smgr_log.DEBUG, 'ssl_cmd => %s' % ' '.join(cert_cmd))

        subprocess.check_call(cert_cmd)

    # Function to change key name from new param key name to pre-3.0 puppet hiera names. 
    def xlate_key_to_pre_3_0(self, long_key, key):
        """Map a new-style hiera key to its pre-3.0 puppet name.

        Returns the legacy name for *long_key* when one exists, otherwise
        falls back to the bare *key* unchanged.
        """
        legacy_names = {
            "contrail::analytics::analytics_ip_list": "collector_ip_list",
            "contrail::analytics::analytics_name_list": "collector_name_list",
            "contrail::analytics::data_ttl": "analytics_data_ttl",
            "contrail::analytics::config_audit_ttl": "analytics_config_audit_ttl",
            "contrail::analytics::statistics_ttl": "analytics_statistics_ttl",
            "contrail::analytics::flow_ttl": "analytics_flow_ttl",
            "contrail::analytics::syslog_port": "analytics_syslog_port",
            "contrail::analytics::directory": "database_dir",
            "contrail::analytics::data_directory": "analytics_data_dir",
            "contrail::analytics::ssd_data_directory": "ssd_data_dir",
            "contrail::database::directory": "database_dir",
            "contrail::database::minimum_diskGB": "database_minimum_diskGB",
            "contrail::database::initial_token": "database_initial_token",
            "contrail::database::ip_port": "database_ip_port",
            "openstack::keystone::admin_password": "keystone_admin_password",
            "openstack::keystone::admin_user": "keystone_admin_user",
            "openstack::keystone::admin_tenant": "keystone_admin_tenant",
            "openstack::keystone::service_tenant": "keystone_service_tenant",
            "openstack::keystone::admin_token": "keystone_service_token",
            "openstack::keystone::auth_protocol": "keystone_auth_protocol",
            "openstack::keystone::auth_port": "keystone_auth_port",
            "openstack::keystone::insecure_flag": "keystone_insecure_flag",
            "openstack::region": "keystone_region_name",
            "contrail::ha::haproxy_enable": "haproxy_flag",
            "openstack::neutron::port": "quantum_port",
            "openstack::neutron::service_protocol": "neutron_service_protocol",
            "openstack::amqp::server_ip": "amqp_server_ip",
            "contrail::config::zookeeper_ip_port": "zk_ip_port",
            "contrail::config::healthcheck_interval": "hc_interval",
            "contrail::vmware::ip": "vmware_ip",
            "contrail::vmware::username": "vmware_username",
            "contrail::vmware::password": "vmware_password",
            "contrail::vmware::vswitch": "vmware_vswitch",
            "openstack::mysql::root_password": "mysql_root_password",
            "contrail::control::encapsulation_priority": "encap_priority",
            "contrail::vgw::public_subnet": "vgw_public_subnet",
            "contrail::vgw::public_vn_name": "vgw_public_vn_name",
            "contrail::vgw::public_interface": "vgw_public_interface",
            "contrail::vgw::public_gateway_routes": "vgw_public_gateway_routes",
            "contrail::storage::storage_name_list": "storage_hostnames",
        }
        if long_key in legacy_names:
            return legacy_names[long_key]
        return key
    # end of function to xlate key to pre_3_0

    def add_params_from_dict(self, in_dict, package, prefix=''):
        """Recursively flatten *in_dict* into '::'-joined hiera keys.

        Nested dicts are descended into unless they carry a truthy
        "literal" marker, in which case the dict itself is stored as the
        value. Returns a flat {key: value} dict; returns {} when in_dict
        is not a dict.
        """
        out_dict = {}
        package_params = package.get("parameters", {})
        if not(isinstance(in_dict, dict)):
            return out_dict
        # Py2 iteritems(); each nested level extends the '::' prefix.
        for key, value in in_dict.iteritems():
            new_prefix = str("::".join(x for x in (prefix, key) if x))
            # NOTE: pop() deliberately MUTATES the caller's dict — the
            # "literal" marker is stripped so it never appears in the
            # emitted hiera data. Callers must not rely on it afterwards.
            if (isinstance(value, dict) and
                (not value.pop("literal", False))):
                out_dict.update(self.add_params_from_dict(
                    value, package, new_prefix))
            else:
                # For pre3.0 contrail, we need to generate hiera data
                # in contrail::params::... format too. This code should
                # be removed when we stop supporting old format contrail (pre-3.0)
                if (package_params.get('puppet_version', 0.0) < 3.0):
                    out_dict["contrail::params::" + self.xlate_key_to_pre_3_0(new_prefix, key)] = value
                out_dict[new_prefix] = value
        return out_dict
    # end add_params_from_dict

    def add_cluster_provisioning_params(self, cluster, package):
        """Flatten the cluster's "provision" parameter tree into hiera keys."""
        provision_tree = cluster.get("parameters", {}).get("provision", {})
        return self.add_params_from_dict(provision_tree, package)
    # end of add_cluster_provisioning_params

    def add_server_provisioning_params(self, server, package):
        """Flatten the server's "provision" parameter tree into hiera keys."""
        provision_tree = server.get("parameters", {}).get("provision", {})
        return self.add_params_from_dict(provision_tree, package)
    # end of add_server_provisioning_params

    def add_package_provisioning_params(self, package):
        """Flatten the package's "provision" parameter tree into hiera keys."""
        provision_tree = package.get("parameters", {}).get("provision", {})
        return self.add_params_from_dict(provision_tree, package)
    # end of add_package_provisioning_params

    def add_cluster_calculated_params(self, cluster, package):
        """Flatten the cluster's derived "calc_params" into hiera keys."""
        return self.add_params_from_dict(cluster.get("calc_params", {}), package)
    # end of add_cluster_calculated_params

    def add_server_calculated_params(self, server, package):
        """Flatten the server's derived "calc_params" into hiera keys."""
        return self.add_params_from_dict(server.get("calc_params", {}), package)
    # end of add_server_calculated_params

    def add_package_calculated_params(self, package):
        """Flatten the package's derived "calc_params" into hiera keys."""
        return self.add_params_from_dict(package.get("calc_params", {}), package)
    # end of add_package_calculated_params

    def add_sequencing_params(self, cluster, package):
        """Build the role-sequencing enable flags for hiera.

        Flags are emitted only when the package supports sequenced
        provisioning AND the cluster has not opted out; all per-role
        flags start False and are flipped on later as provisioning
        progresses (see modify_server_hiera_data).
        """
        cluster_params = cluster.get('parameters', {})
        package_params = package.get('parameters', {})
        available = package_params.get('sequence_provisioning_available', False)
        # Sequenced provisioning defaults to on for the cluster.
        requested = cluster_params.get('sequence_provisioning', True)
        # 3.0+ puppet uses the "sequencing" namespace, older uses "params".
        if package_params.get('puppet_version', 0.0) >= 3.0:
            key = "sequencing"
        else:
            key = "params"
        sequencing_params = {}
        if available and requested:
            flags = {
                'enable_post_provision': False,
                'enable_pre_exec_vnc_galera': False,
                'enable_post_exec_vnc_galera': False,
                'enable_keepalived': False,
                'enable_haproxy': False,
                'enable_sequence_provisioning': True,
                'enable_provision_started': True,
                'enable_storage_master': False,
                'enable_storage_compute': False,
            }
            for role in ('global_controller', 'loadbalancer', 'database',
                         'config', 'openstack', 'control', 'collector',
                         'webui', 'compute', 'tsn', 'toragent'):
                flags['enable_' + role] = False
            sequencing_params = {'contrail': {key: flags}}
        return self.add_params_from_dict(sequencing_params, package)
    # end add_sequencing_params

    def build_contrail_hiera_file_new(
        self, hiera_filename, server,
        cluster, cluster_servers, package):
        """Assemble and write the per-server contrail hiera YAML file.

        Merges calculated, provisioning and sequencing parameters from
        cluster, server and package; later update() calls override
        earlier ones, so precedence is (lowest first): calculated params,
        explicit "provision" params, sequencing flags.
        """
        # Removed unused locals cluster_params / server_params that the
        # original computed and never read.
        hiera_params = {}
        hiera_params.update(self.add_cluster_calculated_params(cluster, package))
        hiera_params.update(self.add_server_calculated_params(server, package))
        hiera_params.update(self.add_package_calculated_params(package))
        hiera_params.update(self.add_cluster_provisioning_params(cluster, package))
        hiera_params.update(self.add_server_provisioning_params(server, package))
        hiera_params.update(self.add_package_provisioning_params(package))
        hiera_params.update(self.add_sequencing_params(
            cluster, package))
        # Dump the hiera_params in yaml file.
        data = yaml.dump(hiera_params, default_style='\'', indent=4)
        with open(hiera_filename, "w") as hiera_fh:
            hiera_fh.write(data)
    # end def build_contrail_hiera_file_new

    #generate random string
    def random_string(self, string_length=10):
        """Return a random uppercase-hex string of *string_length* chars."""
        # uuid4().hex is the dash-free lowercase form of the UUID, so a
        # single upper() + slice replaces the str/upper/replace chain.
        return uuid.uuid4().hex.upper()[:string_length]

    def build_hiera_files(
        self, hieradata_dir, provision_params,
        server, cluster, cluster_servers, package, serverDb):
        """Create hiera data files for *server* under *hieradata_dir*.

        Builds <fqdn>-contrail.yaml (new-style clusters only, i.e. those
        with a "provision" key in cluster parameters) and seeds a shared
        contrail-defaults.yaml the first time if a system-wide template
        exists.
        """
        # Removed unused local server_params that the original computed
        # and never read.
        cluster_params = cluster.get("parameters", {})
        # The server's own domain wins; fall back to the cluster domain.
        domain = server.get('domain', '')
        if not domain:
            domain = cluster_params.get('domain', '')
        server_fqdn = server['host_name'] + "." + domain
        contrail_hiera_file = hieradata_dir + server_fqdn + \
            "-contrail.yaml"
        # if cluster parameters has provision key, use new way of building Hiera file, else
        # continue with old way.
        if ("provision" in cluster_params):
            self.build_contrail_hiera_file_new(
                contrail_hiera_file, server,
                cluster, cluster_servers, package)
        # Check and add contrail-defaults.yaml
        contrail_defaults_file = hieradata_dir + "contrail-defaults.yaml"
        contrail_defaults_source = "/etc/contrail_smgr/contrail-defaults.yaml"
        if not os.path.exists(contrail_defaults_file) and os.path.exists(contrail_defaults_source):
            shutil.copy(contrail_defaults_source, contrail_defaults_file)

    # end def build_hieradata_files

    def modify_server_hiera_data(self, server_id, hiera_file, role_steps_list,
                                 enable=True):
        """Set sequencing enable flags for *server_id* in its hiera file.

        role_steps_list is a list of (server_id, role_step) tuples; for
        each tuple matching this server the corresponding
        'contrail::sequencing::enable_<step>' (or legacy
        'contrail::params::enable_<step>') key is set to *enable*.
        Silently returns if the file is missing/unreadable or empty.
        """
        if not server_id or not hiera_file or not role_steps_list:
            return
        try:
            # Context manager closes the handle even on a read/parse
            # failure (the original leaked the fd on error paths).
            with open(hiera_file, 'r') as hiera_data_fp:
                # safe_load: hiera files contain plain scalars/maps only;
                # the full loader can construct arbitrary objects.
                hiera_data_dict = yaml.safe_load(hiera_data_fp)
        except (IOError, OSError):
            # Same best-effort behavior as before, but no longer hides
            # unrelated programming errors behind a bare except.
            return
        if not hiera_data_dict:
            return
        for role_step_tuple in role_steps_list:
            if server_id == role_step_tuple[0]:
                role_step = role_step_tuple[1].replace('-', '_')
                key = 'contrail::sequencing::enable_' + role_step
                if key not in hiera_data_dict:
                    key = 'contrail::params::enable_' + role_step
                hiera_data_dict[key] = enable
        data = yaml.dump(hiera_data_dict, default_style='\'', indent=4)
        with open(hiera_file, "w") as hiera_fh:
            hiera_fh.write(data)
    # end modify_server_hiera_data

    def new_provision_server(
        self, provision_params, server, cluster, cluster_servers, package, serverDb):
        """Create puppet artifacts needed to provision *server*.

        Builds hiera data, (re)creates the node entry in the
        environment's site.pp, and records the server->environment
        mapping in node_mapping.json.
        """
        server_params = server.get("parameters", {})
        cluster_params = cluster.get("parameters", {})
        package_params = package.get("parameters", {})
        domain = server.get('domain', '')
        if not domain:
            domain = cluster_params.get('domain', '')
        server_fqdn = server['host_name'] + "." + domain
        env_name = package_params.get('puppet_manifest_version', "")
        env_name = env_name.replace('-', '_')
        site_file = self.puppet_directory + "environments/" + \
            env_name + "/manifests/site.pp"
        hieradata_dir = self.puppet_directory + "environments/" + \
            env_name + "/hieradata/"
        # Start contail VM if running compute on esx_server.
        # NOTE(review): eval() on a DB-sourced string; presumably the
        # 'roles' column stores a repr() of a list, in which case
        # ast.literal_eval would be a safer drop-in — confirm the stored
        # format before switching.
        if 'compute' in eval(server['roles']):
            esx_server_id = server_params.get('esx_server', None)
            if esx_server_id:
                esx_servers = serverDb.get_server(
                    {'id' : esx_server_id}, detail=True)
                # BUG FIX: guard against an empty query result; the
                # original indexed [0] unconditionally and raised
                # IndexError when no matching server existed.
                esx_server = esx_servers[0] if esx_servers else None
                if esx_server:
                    self.initiate_esx_contrail_vm(server, esx_server)
        # Build Hiera data for the server
        self.build_hiera_files(
            hieradata_dir, provision_params,
            server, cluster, cluster_servers, package, serverDb)
        # Create an entry for this node in site.pp.
        # First, delete any existing entry and then add a new one.
        self.delete_node_entry(site_file, server_fqdn)
        # Now add a new node entry
        puppet_version = package_params.get("puppet_version", 0.0)
        if (puppet_version >= 3.0):
            self.add_node_entry_new(
                site_file, server_fqdn)
        else:
            self.add_node_entry(
                site_file, server_fqdn, server,
                cluster, cluster_servers, puppet_version)

        # Add entry for the server to environment mapping in
        # node_mapping.json file.
        self.update_node_map_file(server_fqdn, env_name)
    # end def new_provision_server

    # Function to remove puppet files and entries created when provisioning the server. This is called
    # when server is being reimaged. We do not want old provisioning data to be retained.
    def new_unprovision_server(self, server_id, server_domain):
        """Best-effort removal of the server's puppet provisioning state.

        Drops the node->environment mapping, the site.pp node entry and
        the server's hiera YAML files. No-op when the server has no
        recorded environment.
        """
        # BUG FIX: normalized the tab-indented lines of the original
        # (TabError under Python 3) and dropped the unused local
        # node_env_dict.
        server_fqdn = server_id + "." + server_domain
        # Remove node to environment mapping from node_mapping.json file.
        env_name = self.update_node_map_file(server_fqdn, None)
        if env_name is None:
            return
        # Remove server node entry from site.pp (best effort — the
        # environment may already have been cleaned up).
        site_file = self.puppet_directory + "environments/" + \
            env_name + "/manifests/site.pp"
        try:
            self.delete_node_entry(site_file, server_fqdn)
        except Exception:
            # Deliberate best-effort; failure to edit site.pp must not
            # block unprovisioning.
            pass
        # Remove Hiera Data files for the server. Each file is removed
        # independently so a missing contrail file no longer prevents
        # removal of the openstack one (the original shared one try).
        hiera_datadir = self.puppet_directory + "environments/" + \
            env_name + "/hieradata/"
        try:
            os.remove(hiera_datadir + server_fqdn + "-contrail.yaml")
        except OSError:
            pass
        try:
            os.remove(hiera_datadir + server_fqdn + "-openstack.yaml")
        except OSError:
            pass
    # end new_unprovision_server()

    # env_name empty string or None is to remove the entry from the map file.
    # env_name value specified will be updated to the map file.
    # env_name could be valid one or invalid manifest.
    #        invalid valid manifest is used to turn off the agent puppet run
    # server_fqdn is required for both update and delete of an entry
    def update_node_map_file(self, server_fqdn, env_name):
        """Add, update or remove a server->environment mapping entry.

        Returns the env_name written (or popped on removal), or None on
        bad input / file errors / removal of a non-existent entry.
        """
        if not server_fqdn:
            return None

        node_env_map_file = self.smgr_base_dir+self._node_env_map_file

        try:
            with open(node_env_map_file, "r") as env_file:
                node_env_dict = json.load(env_file)
            # end with
        except (IOError, OSError, ValueError):
            # Narrowed from a bare except: IOError/OSError for a missing
            # or unreadable file, ValueError for malformed JSON.
            msg = "Not able to open environment map file %s" % (node_env_map_file)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return None

        if env_name:
            node_env_dict[server_fqdn] = env_name
            msg = "Add/Modify map file with env_name %s for server %s" % (env_name, server_fqdn)
            self._smgr_log.log(self._smgr_log.DEBUG, msg)
        else:
            env_name = node_env_dict.pop(server_fqdn, None)
            msg = "Remove server from map file for server %s" % (server_fqdn)
            self._smgr_log.log(self._smgr_log.DEBUG, msg)
            if not env_name:
                # Nothing to remove; skip the rewrite below.
                return env_name

        try:
            with open(node_env_map_file, "w") as env_file:
                json.dump(node_env_dict, env_file, sort_keys = True,
                          indent = 4)
            # end with
        except (IOError, OSError):
            msg = "Not able to open environment map file %s for update" % (node_env_map_file)
            self._smgr_log.log(self._smgr_log.ERROR, msg)
            return None
        return env_name
    # end update_node_map_file

    def is_new_provisioning(self, puppet_manifest_version):
        """Return True when a puppet environment directory exists for this
        manifest version (dashes map to underscores in the env name)."""
        environment = puppet_manifest_version.replace('-', '_')
        if not environment:
            return False
        return os.path.isdir("/etc/puppet/environments/" + environment)
    # end is_new_provisioning

    def provision_server(
        self, provision_params, server,
        cluster, cluster_servers, package,
        serverDb):
        """Entry point for provisioning one server.

        Dispatches to the new environment-based puppet flow when an
        environment directory exists for the package's manifest version;
        otherwise only logs that old-style manifests are unsupported.
        """
        # The new way to create necessary puppet manifest files and parameters data.
        # The existing method is kept till the new method is well tested and confirmed
        # to be working.
        self._sm_prov_log = ServerMgrProvlogger(cluster['id'])
        package_params = package.get("parameters", {})
        puppet_manifest_version = package_params.get(
            'puppet_manifest_version', "")
        environment = puppet_manifest_version.replace('-', '_')
        if self.is_new_provisioning(puppet_manifest_version):
            self.new_provision_server(
                provision_params, server,
                cluster, cluster_servers, package, serverDb)
            return
        # Old puppet manifests are not supported anymore; emit the same
        # notices to both the per-cluster and the main logger.
        for msg in (
                "No environment for version found AND this version does not support old contrail puppet manifest (2.0 and before)",
                "Use server manager version 2.21 or earlier if you have old style contrail puppet manifests"):
            self._sm_prov_log.log("debug", msg)
            self._smgr_log.log(self._smgr_log.DEBUG, msg)
class SMAnsibleServer():
    '''
    Use bottle to provide REST interface for the server manager.
    '''
    def __init__(self, args_str=None):
        """Set up logging, parse arguments and register bottle routes."""
        try:
            self._smgr_log = ServerMgrlogger()
        except Exception:
            # NOTE(review): if logger creation fails the next statement
            # still dereferences self._smgr_log and raises AttributeError;
            # presumably startup should abort here — confirm.
            print "Error Creating logger object"

        self._smgr_log.log(self._smgr_log.INFO, "Starting SM Ansible Server")
        if not args_str:
            args_str = sys.argv[1:]
        self._parse_args(args_str)
        # Completed playbook threads are handed to a Joiner thread so
        # they get reaped asynchronously.
        self.joinq = Queue.Queue()
        self.joiner = Joiner(self.joinq)
        self.joiner.start()
        self._smgr_log.log(self._smgr_log.INFO, 'Initializing Bottle App')
        self.app = bottle.app()
        bottle.route('/run_ansible_playbooks', 'POST',
                self.start_ansible_playbooks)

    def _parse_args(self, args_str):
        '''
        Eg. python sm_ansible_server.py --config_file serverMgr.cfg
                                         --listen_port 8082
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument(
            "-c", "--config_file",
            help="Specify config file with the parameter values.",
            metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str)

        # Built-in defaults, overridden by the config file, overridden
        # in turn by explicit CLI options below.
        serverCfg = {
            'ansible_srvr_ip': _WEB_HOST,
            'ansible_srvr_port': _ANSIBLE_SRVR_PORT,
            'docker_insecure_registries': _ANSIBLE_REGISTRY,
            'docker_registry': _ANSIBLE_REGISTRY,
            'ansible_playbook': ""
        }

        if args.config_file:
            config_file = args.config_file
        else:
            config_file = _DEF_ANSIBLE_SRVR_CFG_FILE
        config = ConfigParser.SafeConfigParser()
        # BUG FIX: read the resolved config_file; the original passed
        # args.config_file here, so the default configuration file was
        # never read when -c was not supplied.
        config.read([config_file])
        self._smgr_config = config
        try:
            for key in dict(config.items("ANSIBLE-SERVER")).keys():
                serverCfg[key] = dict(config.items("ANSIBLE-SERVER"))[key]
        except ConfigParser.NoSectionError:
            msg = "Server Manager doesn't have a configuration set."
            self._smgr_log.log(self._smgr_log.ERROR, msg)

        self._smgr_log.log(self._smgr_log.DEBUG, "Arguments read form config file %s" % serverCfg)
        # Override with CLI options
        # Don't surpress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        parser.set_defaults(**serverCfg)

        parser.add_argument(
            "-i", "--ansible_srvr_ip",
            help="IP address to provide service on, default %s" % (_WEB_HOST))
        parser.add_argument(
            "-p", "--ansible_srvr_port",
            help="Port to provide service on, default %s" % (_ANSIBLE_SRVR_PORT))
        self._args = parser.parse_args(remaining_argv)
        self._args.config_file = args.config_file
    # end _parse_args

    def start_ansible_playbooks(self):
        """POST /run_ansible_playbooks: launch playbooks asynchronously."""
        entity = bottle.request.json
        pb = ContrailAnsiblePlaybooks(entity, self._args)
        pb.start()
        self.joinq.put(pb)
        # Return success. Actual status will be supplied when the pb thread
        # completes and the next status query is made
        bottle.response.headers['Content-Type'] = 'application/json'
        return json.dumps({'status': 'Provision in Progress'})
    def __init__(self, db_file_name=def_server_db_file):
        """Open (or create) the server-manager sqlite DB and ensure schema.

        Creates all tables if missing, applies additive column
        migrations, then backfills missing storage UUIDs on existing
        clusters and refreshes the image table.
        """
        try:
            self._smgr_log = ServerMgrlogger()
            self._con = lite.connect(db_file_name)
            with self._con:
                cursor = self._con.cursor()
                # Create cluster table.
                cursor.execute("CREATE TABLE IF NOT EXISTS " + cluster_table +
                               """ (id TEXT PRIMARY KEY,
                                    parameters TEXT,
                                    email TEXT)""")
                # Create image table
                cursor.execute("CREATE TABLE IF NOT EXISTS " + image_table +
                               """ (id TEXT PRIMARY KEY,
                    version TEXT, type TEXT, path TEXT,
                    parameters TEXT)""")
                # Create status table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_status_table + """ (id TEXT PRIMARY KEY,
                            server_status TEXT)""")
                # Create server table
                # BUG FIX: "tag6 TAXT" typo corrected to TEXT — the
                # misspelled type name loses TEXT column affinity in
                # sqlite (only affects freshly created databases).
                cursor.execute("CREATE TABLE IF NOT EXISTS " + server_table +
                               """ (mac_address TEXT PRIMARY KEY NOT NULL,
                         id TEXT, host_name TEXT, static_ip varchar default 'N',
                         ip_address TEXT, subnet_mask TEXT, gateway TEXT, domain TEXT,
                         cluster_id TEXT,  base_image_id TEXT,
                         package_image_id TEXT, password TEXT,
                         last_update TEXT, discovered varchar default 'false',
                         parameters TEXT, roles TEXT, ipmi_username TEXT,
                         ipmi_password TEXT, ipmi_address TEXT,
                         ipmi_type TEXT, intf_control TEXT,
                         intf_data TEXT, intf_bond TEXT,
                         email TEXT, status TEXT,
                         tag1 TEXT, tag2 TEXT, tag3 TEXT,
                         tag4 TEXT, tag5 TEXT, tag6 TEXT, tag7 TEXT,
                         network TEXT, contrail TEXT, top_of_rack TEXT,
                         UNIQUE (id))""")
                # Create inventory table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               inventory_table +
                               """ (fru_description TEXT PRIMARY KEY NOT NULL,
                         id TEXT, board_serial_number TEXT, chassis_type TEXT,
                         chassis_serial_number TEXT, board_mfg_date TEXT,
                         board_manufacturer TEXT, board_product_name TEXT,
                         board_part_number TEXT, product_manfacturer TEXT,
                         product_name TEXT, product_part_number TEXT,
                         UNIQUE (fru_description))""")
                # Create server tags table
                cursor.execute("CREATE TABLE IF NOT EXISTS " +
                               server_tags_table +
                               """ (tag_id TEXT PRIMARY KEY NOT NULL,
                         value TEXT,
                         UNIQUE (tag_id),
                         UNIQUE (value))""")
                # Additive migrations: each call is a no-op when the
                # column already exists.
                # Add columns for image_table
                self._add_table_column(cursor, image_table, "category", "TEXT")
                # Add columns for cluster_table
                self._add_table_column(cursor, cluster_table, "base_image_id",
                                       "TEXT")
                self._add_table_column(cursor, cluster_table,
                                       "package_image_id", "TEXT")
                self._add_table_column(cursor, cluster_table, "provisioned_id",
                                       "TEXT")
                self._add_table_column(cursor, cluster_table,
                                       "provision_role_sequence", "TEXT")
                # Add columns for server_table
                self._add_table_column(cursor, server_table, "reimaged_id",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "provisioned_id",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "network", "TEXT")
                self._add_table_column(cursor, server_table, "top_of_rack",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "contrail",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ssh_public_key",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ssh_private_key",
                                       "TEXT")
                self._add_table_column(cursor, server_table, "ipmi_interface",
                                       "TEXT")

            self._smgr_log.log(self._smgr_log.DEBUG, "Created tables")

            # During init, we check if any of the Cluster in DB are missing any Storage Parameters (Generated UUIDs)
            # NOTE(review): eval() on DB-stored parameters; presumably a
            # repr() of a dict — ast.literal_eval would be safer if so.
            cluster_list = self._get_items(cluster_table, None, None, True,
                                           None)
            for cluster in cluster_list:
                # Check if storage parameters are present in Cluster, else generate them
                if 'storage_fsid' not in set(
                        eval(cluster['parameters']
                             )) or 'storage_virsh_uuid' not in set(
                                 eval(cluster['parameters'])):
                    self.update_cluster_uuids(cluster)

            self.update_image_table()
        except Exception:
            # BUG FIX: the original clause read "except e: raise e", which
            # raises NameError at handling time (e is undefined there) and
            # masks the real error. A bare re-raise preserves the original
            # exception and traceback.
            raise
class ServerMgrValidations:
    """Validation helpers for Server Manager configuration.

    Provides syntactic validation of top-of-rack (TOR) switch stanzas
    and semantic validation of virtual-IP (VIP) settings for single-
    and multi-interface HA clusters.

    All ``is_valid_*`` helpers return a ``(status, message)`` tuple
    where status 0 means valid and 1 means invalid.
    """

    def validate_tor_config(self, input_data):
        """Validate the 'top_of_rack' stanza of a server config.

        Checks presence, non-emptiness, type-correctness and uniqueness
        (id / ip_address / switch_name) of every switch entry.

        Returns:
            (0, "") on success, or (1, message) describing the first
            validation failure found.
        """
        if 'top_of_rack' not in input_data:
            return (1, "top_of_rack configuration not found")
        tor_config = input_data['top_of_rack']
        switch_list = tor_config.get('switches', "")
        if not switch_list:
            return (1, "switches config not found")
        num_switch = len(switch_list)
        self._smgr_log.log(self._smgr_log.DEBUG, "validate input_data switch_len => %s" %(num_switch))
        if num_switch > 128:
            self._smgr_log.log(self._smgr_log.DEBUG, "validate input_data switch_len => %s" %(num_switch))
            return (1, "More than 128 switches are not supported")

        ## Mandatory params and the validator applied to each; a dict
        ## value means the param must be one of the listed fixed choices.
        required_params = {'id': 'positive_number',
                           'ip_address': 'ip_address',
                           'tunnel_ip_address': 'ip_address',
                           'switch_name': 'hostname',
                           'type': {'fixed': ['ovs']},
                           'ovs_protocol': {'fixed': ['tcp', 'pssl']},
                           'ovs_port': 'port',
                           'http_server_port': 'port',
                           'vendor_name': 'string'}
        ## Track uniqueness of IDs, IP addresses and switch names.
        id_set = set()
        ip_address_set = set()
        hostname_set = set()
        for switch in switch_list:
            ## 'id' is checked up front so later error messages can
            ## reference it.
            if 'id' in switch:
                if not switch['id'] or switch['id'] == "":
                    return (1, "param 'id' is empty for a switch config")
            else:
                return (1, "param 'id' not found for a switch")

            for param in required_params:
                if param not in switch:
                    msg = "param '%s' not found for switch with 'id' as %s" \
                          %(param, switch['id'])
                    return (1, msg)
                if not switch[param] or switch[param] == "":
                    msg = "param '%s' is empty for %s" %(param, switch['id'])
                    return (1, msg)

                ## BUGFIX: status/msg previously leaked from the prior
                ## parameter (and were unbound on the first one) when the
                ## validator was 'string' or a satisfied fixed-choice
                ## dict; reset them for every parameter.
                status = 0
                msg = ""
                validator = required_params[param]
                self._smgr_log.log(self._smgr_log.DEBUG, "validate switch-config => %s" %(validator))
                if validator == 'positive_number':
                    status, msg = self.is_valid_number(switch[param])
                elif validator == 'ip_address':
                    status, msg = self.is_valid_ip_address(switch[param])
                elif validator == 'hostname':
                    status, msg = self.is_valid_hostname(switch[param])
                elif validator == 'port':
                    status, msg = self.is_valid_port(switch[param])
                elif validator == 'string':
                    ## Any non-empty string is acceptable; emptiness was
                    ## already rejected above.
                    self._smgr_log.log(self._smgr_log.DEBUG, "validate string => %s" %(switch[param]))
                elif type(validator) == dict:
                    if 'fixed' in validator and switch[param] not in validator['fixed']:
                        msg = "param %s for switch %s has invalid value" %(param, switch['id'])
                        status = 1
                else:
                    self._smgr_log.log(self._smgr_log.DEBUG, "invalid type for => %s" %(validator))
                    msg = "param %s has invalid type for validation" %(validator)
                    return (1, msg)

                if status != 0:
                    msg = "switch config %s has invalid value '%s' for %s" %(switch['id'], switch[param], param)
                    ## BUGFIX: previously returned the string "1" here,
                    ## inconsistent with every other status code.
                    return (1, msg)

            ## Reject duplicate ids / IP addresses / switch names.
            if switch['id'] in id_set:
                msg = "switch id %s is duplicate" %(switch['id'])
                return (1, msg)
            id_set.add(switch['id'])

            if switch['ip_address'] in ip_address_set:
                msg = "switch %s has duplicate ip_address" %(switch['id'])
                return (1, msg)
            ip_address_set.add(switch['ip_address'])

            if switch['switch_name'] in hostname_set:
                msg = "switch id %s has duplicate hostname" %(switch['id'])
                return (1, msg)
            hostname_set.add(switch['switch_name'])

        return (0, "")

    def is_valid_hostname(self, hostname):
        """Validate an RFC-952/1123-style hostname.

        Each dot-separated label must be 1-63 characters of letters,
        digits or hyphens, not starting or ending with a hyphen; total
        length must not exceed 255.
        """
        ## BUGFIX: an empty hostname previously raised IndexError on
        ## hostname[-1]; reject it explicitly.
        if not hostname:
            return (1, "hostname is empty")
        if len(hostname) > 255:
            return (1, "hostname length is more than 255")

        if hostname[-1] == ".":
            hostname = hostname[:-1]  # strip exactly one trailing dot, if present

        allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
        regex_status = all(allowed.match(x) for x in hostname.split("."))
        status = 0 if regex_status else 1
        self._smgr_log.log(self._smgr_log.DEBUG, "validate hostname=> %s , %s" %(hostname, status))
        return (status, "")

    def is_valid_ip_address(self, ip_address):
        """Validate a dotted-quad IPv4 address (each octet 0-255)."""
        self._smgr_log.log(self._smgr_log.DEBUG, "validate ip_address => %s" %(ip_address))
        msg = ""
        ## BUGFIX: the pattern was \b-anchored, so strings with trailing
        ## garbage such as "1.2.3.4.5" were accepted; anchor both ends.
        pattern = r"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        if re.match(pattern, ip_address):
            status = 0
        else:
            status = 1
            msg = "Invalid IP Address"

        return (status, msg)

    def is_valid_port(self, port_number):
        """Validate a TCP/UDP port: a number in the range 1-65535."""
        self._smgr_log.log(self._smgr_log.DEBUG, "validate port => %s" %(port_number))
        status, msg = self.is_valid_number(port_number)
        if status == 0:
            ## check for range of port number
            if int(port_number) > 65535 or int(port_number) < 1:
                msg = "port %s has invalid range" %(port_number)
                status = 1

        return (status, msg)

    def is_valid_protocol(self, protocol):
        """Placeholder protocol validator; accepts everything."""
        self._smgr_log.log(self._smgr_log.DEBUG, "validate protocol => %s" %(protocol))
        return (0, "")

    def is_valid_number(self, number):
        """Validate a non-negative integer given as a string or int."""
        self._smgr_log.log(self._smgr_log.DEBUG, "validate valid number => %s" %(number))
        ## str() coercion keeps this working when callers pass an int
        ## (int has no .isdigit()); string behavior is unchanged.
        if str(number).isdigit():
            return (0, "")
        else:
            return (1, "invalid number")

    # The following logic determines the interface type of servers.
    # Returns "SINGLE_INTERFACE" if all servers are single-interface,
    # "MULTI_INTERFACE" if all are multi-interface, and None for a mix
    # (or if any server lacks an interface_name).
    def get_server_interface_type(self, servers_dict):
        all_single = False
        all_multi = False
        for server in servers_dict:
            ## NOTE: 'parameters'/'contrail' are stored in the DB as
            ## Python-literal strings; eval() on them is pre-existing
            ## behavior — consider ast.literal_eval if the data can be
            ## influenced externally.
            param = eval(server['parameters'])
            contrail = eval(server['contrail'])
            if param['interface_name'] and contrail:
                all_multi = True
            elif param['interface_name']:
                all_single = True
            else:
                return None
        # If you have a mix of single and multi interface servers then return None
        if all_multi and all_single:
            return None
        if all_multi:
            return "MULTI_INTERFACE"
        return "SINGLE_INTERFACE"

    # This function returns the lists of servers with specific roles
    # assigned to them.
    def get_servers_roles_list(self, servers):
        """Partition servers by role.

        Returns (openstack_config_list, config_only_list,
        openstack_only_list); servers with neither role are omitted.
        """
        openstack_only_list = []
        config_only_list = []
        openstack_config_list = []
        for server in servers:
            roles_list = server['roles']
            # Check if the server has both config and openstack role assigned to it
            if 'config' in roles_list and 'openstack' in roles_list:
                openstack_config_list.append(server)
            # Check if the server has config role assigned to it
            elif 'config' in roles_list:
                config_only_list.append(server)
            # Check if the server has openstack role assigned to it
            elif 'openstack' in roles_list:
                openstack_only_list.append(server)
        return (openstack_config_list, config_only_list, openstack_only_list)

    # Function to get the vips defined for a cluster
    def get_vips_in_cluster(self, cluster):
        """Return (internal_vip, external_vip, contrail_internal_vip,
        contrail_external_vip) from a cluster record, supporting both the
        new 'provision' parameter layout and the legacy flat layout.
        Missing values are returned as None.
        """
        ## NOTE: cluster['parameters'] is a Python-literal string from
        ## the DB; eval() is pre-existing behavior here.
        cluster_params = eval(cluster['parameters'])
        cluster_provision_params = cluster_params.get("provision", {})
        if cluster_provision_params:
            openstack_params = cluster_provision_params.get("openstack", {})
            ha_params = openstack_params.get("ha", {})
            internal_vip = ha_params.get('internal_vip', None)
            external_vip = ha_params.get('external_vip', None)
            contrail_params = cluster_provision_params.get("contrail", {})
            contrail_ha_params = contrail_params.get("ha", {})
            contrail_internal_vip = contrail_ha_params.get('contrail_internal_vip', None)
            contrail_external_vip = contrail_ha_params.get('contrail_external_vip', None)
        else:
            ## Legacy layout: VIPs live directly under 'parameters'.
            internal_vip = cluster_params.get('internal_vip', None)
            external_vip = cluster_params.get('external_vip', None)
            contrail_internal_vip = cluster_params.get('contrail_internal_vip', None)
            contrail_external_vip = cluster_params.get('contrail_external_vip', None)
        return (internal_vip, external_vip, contrail_internal_vip, contrail_external_vip)

    # Function to validate ext lb params
    def validate_external_lb_params(self, cluster):
        """Check that an external loadbalancer stanza is complete.

        Returns an error message string, or None when valid.
        """
        cl_params = cluster['parameters']['provision']['contrail']
        lb_params = cl_params.get('loadbalancer', None)
        if not lb_params:
            return "cluster does not contain loadbalancer in "\
                    "provision:contrail stanza"
        lb_method = lb_params.get('loadbalancer_method', None)
        if not lb_method:
            return "cluster does not contain loadbalancer method in "\
                   "provision:contrail:loadbalancer stanza"
        return None

    # Function to validate vip configuration for a multi interface server
    def validate_multi_interface_vip(self, cluster, servers):
        """Raise Exception when the VIP configuration is invalid for a
        cluster of multi-interface servers; return None when valid or
        when no HA (no VIPs) is configured.
        """
        # Get the list of servers with specific roles
        openstack_config_list, config_only_list, openstack_only_list = self.get_servers_roles_list(servers)
        # Get the values of all vips in a cluster
        internal_vip, external_vip, contrail_internal_vip, contrail_external_vip = self.get_vips_in_cluster(cluster)
        # If no vips are configured then it means no HA is configured. Just skip the validation
        if internal_vip is None and external_vip is None and contrail_internal_vip is None and contrail_external_vip is None:
            return
        # Validation for nodes configured for both contrail and openstack HA
        if len(openstack_config_list) > 1:
            # Both internal and external vip's have to be configured
            if not internal_vip or not external_vip:
                raise Exception("Both internal and external vips need to be configured")
            # If internal and external vips are specified they should not be equal
            if internal_vip and external_vip and internal_vip == external_vip:
                raise Exception("internal and external vips cannot be the same")
            # If contrail internal and external vips are specified they should equal the internal and external vips
            if contrail_internal_vip and contrail_external_vip and contrail_internal_vip != internal_vip and contrail_external_vip != external_vip:
                raise Exception("If contrail internal and external vips are configured they need to be same as the internal and external vips")
            return
        # Validation for nodes configured only for contrail HA
        if len(config_only_list) > 1:
            # Both contrail internal and external vips have to be configured
            if not contrail_internal_vip or not contrail_external_vip:
                raise Exception("contrail_internal_vip and contrail_external_vip have to be configured")
            # Contrail internal and external vip cannot be the same
            if contrail_internal_vip and contrail_external_vip and contrail_internal_vip == contrail_external_vip:
                raise Exception("contrail_internal_vip and contrail_external_vip cannot be same")
            return
        # Validation for nodes configured only for Openstack HA
        if len(openstack_only_list) > 1:
            # Both the internal and external vips have to be configured
            if not internal_vip or not external_vip:
                raise Exception("Both internal and external vips have to be configured")
            # internal and external vips cannot be the same
            if internal_vip and external_vip and internal_vip == external_vip:
                raise Exception("internal and external vips cannot be the same")
            return

    # Function to validate vip configuration for a single interface server
    def validate_single_interface_vip(self, cluster, servers):
        """Raise Exception when the VIP configuration is invalid for a
        cluster of single-interface servers; return None when valid or
        when no HA (no VIPs) is configured. On a single interface all
        configured VIPs must collapse to one address.
        """
        # Get the list of servers with specific roles
        openstack_config_list, config_only_list, openstack_only_list = self.get_servers_roles_list(servers)
        # Get the values of all vips in a cluster
        internal_vip, external_vip, contrail_internal_vip, contrail_external_vip = self.get_vips_in_cluster(cluster)
        # If no vips are configured then it means no HA is configured. Just skip the validation
        if internal_vip is None and external_vip is None and contrail_internal_vip is None and contrail_external_vip is None:
            return
        # Validation for nodes configured for both contrail and openstack HA
        if len(openstack_config_list) > 1:
            # internal vip has to be configured
            if not internal_vip:
                raise Exception("internal vip has to be configured. external vip or contrail external vip cannot be configured")
            # external and internal vip if configured, has to be the same
            if external_vip and external_vip != internal_vip:
                raise Exception("internal vip and external vip have to be the same")
            # contrail external vip and internal vip if configured, has to be the same
            if contrail_external_vip and contrail_external_vip != internal_vip:
                raise Exception("internal vip and contrail external vip have to be the same")
            # contrail internal vip and internal vip if configured, has to be the same
            if contrail_internal_vip and contrail_internal_vip != internal_vip:
                raise Exception("internal vip and contrail internal vip have to be the same")
            return
        # Validation for nodes configured only for contrail HA
        if len(config_only_list) > 1:
            # contrail internal vip has to be configured
            if not contrail_internal_vip:
                raise Exception("Only contrail internal vip can be configured")
            # If contrail external vip is configured it has to be the same as the contrail internal vip
            if contrail_external_vip and contrail_external_vip != contrail_internal_vip:
                raise Exception("contrail internal vip and contrail external vip have to be the same")
            return
        # Validation for nodes configured only for Openstack HA
        if len(openstack_only_list) > 1:
            # internal vip has to be configured
            if not internal_vip:
                raise Exception("Only internal vip can be configured")
            # If external vip is configured it has to be the same as internal vip
            ## BUGFIX: message previously said "contrail internal vip and
            ## contrail external vip" — copy/paste from the branch above;
            ## this branch compares internal_vip with external_vip.
            if external_vip and external_vip != internal_vip:
                raise Exception("internal vip and external vip have to be the same")
            return

    # Function to do the configuration validation of vips
    def validate_vips(self, cluster_id, serverDb):
        """Validate the VIP configuration of the cluster identified by
        cluster_id, dispatching on the servers' interface type.

        Raises whatever exception the per-interface-type validator
        raises; returns None when valid or not applicable.
        """
        ## The previous 'except Exception as e: raise e' wrapper was a
        ## no-op and has been removed; exceptions propagate unchanged.
        # Get the cluster given the cluster id
        cluster_list = serverDb.get_cluster({"id": cluster_id}, detail=True)
        # Since we are getting the cluster given an id only one cluster will be there in the list
        cluster = cluster_list[0]
        match_dict = {"cluster_id": cluster['id']}
        # Get the list of servers belonging to that cluster
        servers = serverDb.get_server(match_dict, detail=True)
        # Find out what type of interface do the servers have
        interface_type = self.get_server_interface_type(servers)
        if interface_type == 'MULTI_INTERFACE':
            self.validate_multi_interface_vip(cluster, servers)
        elif interface_type == 'SINGLE_INTERFACE':
            self.validate_single_interface_vip(cluster, servers)
        return

    def __init__(self):
        # Logger used by every validation helper below.
        self._smgr_log = ServerMgrlogger()
        self._smgr_log.log(self._smgr_log.DEBUG, "ServerMgrValidations Init")