def client(self, ip):
    """Connect to the remote listening server, receive one serialized
    configuration packet and return it as a dictionary.

    Retries the connect up to 5 times at 2-minute intervals while the
    listening server is not yet ready; any other failure aborts the
    installation via exit.open().
    """
    #Create a TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    retry = True
    count = 0
    #Initiate connection
    while retry:
        try:
            retry = False
            sock.connect((ip, self.port))
        #BUGFIX: socket.timeout is a subclass of socket.error, so this
        #branch was unreachable when listed after socket.error.
        except socket.timeout:
            exit.open("could not connect to remote server")
        except socket.error:
            if count < 5:
                retry = True
                count += 1
                logger.subsection("listening server not ready. retrying in 2 mins...")
                time.sleep(120)
        except:
            exit.open("a major connection error has occured. check your address and retry installation.")
    #Receive data from server
    #NOTE(review): self.port doubles as the recv buffer size here —
    #presumably large enough for the packet; confirm against the sender.
    self.data = sock.recv(self.port)
    sock.close()
    #Convert serialized object to dictionary
    self.hash_data = loads(self.data)
    try:
        #BUGFIX: was `hash_data['TYPE']` (undefined name), which always
        #raised NameError and fell into the failure branch below.
        if self.hash_data['TYPE'] != self.data_type:
            exit.open("primary and secondary server out of sync, restart installation")
    #Exception (not bare except) so the SystemExit raised by the
    #out-of-sync exit.open() above is not swallowed and re-reported.
    except Exception:
        exit.open("failed to read remote data configuration restart installation")
    return self.hash_data
def sqlfail(self, query, fail):
    """Log a failed SQL query together with its exception, then abort
    the process with status 2."""
    for message in ("SQL query failed.",
                    "Query was :",
                    query,
                    "Exception was :",
                    str(fail)):
        logger.subsection(message)
    exit(2)
def insert_db(self, table, get_dict):
    """Insert one row into *table*; keys of *get_dict* are column names
    and the values are the row values.

    Aborts via exit(2) when the database file or table is missing; SQL
    failures are reported through self.sqlfail().
    """
    if not path.exists(self.db_path):
        #BUGFIX: message referenced the undefined name `db_path`
        logger.subsection("Cannot access database at " + self.db_path)
        exit(2)
    if table not in self.get_tables():
        logger.subsection(table + " does not exist in database")
        exit(2)
    conn = sqlite3.connect(self.db_path)
    c = conn.cursor()
    #FIX: parameterized INSERT — values are bound by sqlite3 instead of
    #being spliced into the SQL string (no quoting/injection problems).
    keys = list(get_dict.keys())
    query = ("INSERT INTO " + table + " (" + ",".join(keys) +
             ") VALUES (" + ",".join(["?"] * len(keys)) + ")")
    try:
        c.execute(query, [get_dict[k] for k in keys])
    except Exception as fail:
        self.sqlfail(query, fail)
    conn.commit()
    c.close()
def insert_db(self, table, get_dict):
    """Insert the key/value pairs of *get_dict* as one row of *table*.

    Exits with status 2 when the database file or the target table does
    not exist; query failures go through self.sqlfail().
    """
    if not path.exists(self.db_path):
        #BUGFIX: the original logged the undefined name `db_path`
        logger.subsection("Cannot access database at " + self.db_path)
        exit(2)
    existing_tables = self.get_tables()
    if table not in existing_tables:
        logger.subsection(table + " does not exist in database")
        exit(2)
    conn = sqlite3.connect(self.db_path)
    cursor = conn.cursor()
    #FIX: use bound parameters rather than quoting values into the SQL
    #text by hand — safe against embedded quotes / SQL injection.
    columns = list(get_dict.keys())
    placeholders = ", ".join("?" for _ in columns)
    query = "INSERT INTO %s (%s) VALUES (%s)" % (
        table, ", ".join(columns), placeholders)
    try:
        cursor.execute(query, [get_dict[name] for name in columns])
    except Exception as fail:
        self.sqlfail(query, fail)
    conn.commit()
    cursor.close()
def priConfig(self):
    """Run every primary-server configuration step in order and return
    the collected configuration dictionary."""
    for step in (self.dataConfig, self.netConfig,
                 self.serviceConfig, self.hostnameConfig):
        step()
    logger.subsection("finished generating configuration...")
    return self.conf_values
def clone(self): """retrieve all necessary data from db""" database = ddriver.DbDriver() primary_conf = database.select_db('Primary_Configuration') secondary_conf = database.select_db('Secondary_Configuration') general_conf = database.select_db('General_Configuration') """now set all variables necessary""" imager_config = [] imager_config.append("GROUP_NAME:ha_group\n") imager_config.append("HA_ETH:"+primary_conf[0]["NIC_INFO"]+"\n") imager_config.append("IMAGE_DIR:/usr/share/haoscar/images\n") imager_config.append("IMAGE_NAME:ha_image\n") imager_config.append("MASK:"+general_conf[0]["MASK"]+"\n") imager_config.append("PRIMARY_HOSTNAME:"+primary_conf[0]["HOSTNAME"]+"\n") imager_config.append("PRIMARY_IP:"+primary_conf[0]["IP_ADDR"]+"\n") imager_config.append("SECONDARY_HOSTNAME:"\ +secondary_conf[0]["HOSTNAME"]+"\n") imager_config.append("SECONDARY_IP:"+secondary_conf[0]["IP_ADDR"]+"\n") imager_config.append("SUBNET:"+general_conf[0]["SUBNET"]+"\n") #Now we do some writing FILE = open("/usr/share/haoscar/sysimager.conf","w") for config in imager_config: FILE.writelines(config) FILE.close() logger.subsection("finished configuring sysimager.conf") logger.subsection("starting cloning process") #print commands.getoutput("echo \"I am in `pwd`\"") print commands.getoutput("haoscar-system-clone.sh")
def clone(self): """retrieve all necessary data from db""" database = ddriver.DbDriver() primary_conf = database.select_db('Primary_Configuration') secondary_conf = database.select_db('Secondary_Configuration') general_conf = database.select_db('General_Configuration') """now set all variables necessary""" imager_config = [] imager_config.append("GROUP_NAME:ha_group\n") imager_config.append("HA_ETH:" + primary_conf[0]["NIC_INFO"] + "\n") imager_config.append("IMAGE_DIR:/usr/share/haoscar/images\n") imager_config.append("IMAGE_NAME:ha_image\n") imager_config.append("MASK:" + general_conf[0]["MASK"] + "\n") imager_config.append("PRIMARY_HOSTNAME:" + primary_conf[0]["HOSTNAME"] + "\n") imager_config.append("PRIMARY_IP:" + primary_conf[0]["IP_ADDR"] + "\n") imager_config.append("SECONDARY_HOSTNAME:"\ +secondary_conf[0]["HOSTNAME"]+"\n") imager_config.append("SECONDARY_IP:" + secondary_conf[0]["IP_ADDR"] + "\n") imager_config.append("SUBNET:" + general_conf[0]["SUBNET"] + "\n") #Now we do some writing FILE = open("/usr/share/haoscar/sysimager.conf", "w") for config in imager_config: FILE.writelines(config) FILE.close() logger.subsection("finished configuring sysimager.conf") logger.subsection("starting cloning process") #print commands.getoutput("echo \"I am in `pwd`\"") print commands.getoutput("haoscar-system-clone.sh")
def getAllModules():
    """Return every row of the Gather_Modules table, or an empty list
    when the lookup fails (the failure is logged)."""
    database_driver = ddriver.DbDriver()
    try:
        return database_driver.select_db("Gather_Modules")
    except:
        logger.subsection("failed to retrieve modules")
        return []
def sanityCheck():
    """Run the environment sanity check; return 0 when sane, 1 when any
    errors were reported."""
    logger.subsection("checking to make sure system environment is sane")
    err = scheck.initialize()
    if not err:
        logger.subsection("yup, system is sane")
        return 0
    #One log line per reported problem, then abort the install
    logger.subsection("the following errors resulted from sanity check:")
    for message in err:
        logger.subsection(message)
    logger.subsection("installation cannot continue")
    return 1
def dataConfig(self):
    """Interactively collect the user-data directories to replicate.

    Records a ';'-separated list of validated paths in
    self.conf_values['DATA_DIR'] and the sync mechanism (currently
    always RSYNC) in self.conf_values['DATA_SYNC'].
    """
    #By default, attempt to replicate the home dir if it exists.
    if os.path.isdir('/home'):
        logger.subsection("added home directory '/home' for replication :)")
        self.paths.append("/home")
        self.str_value = raw_input("Enter any other directories that may contain user data seperated by commas [e.g /data,/repos]: ")
    else:
        logger.subsection("could not find home partition for synchronization")
        self.str_value = raw_input("Enter paths to your user data directories seperated by commas [e.g /data,/repos]: ")
    #Do basic error checking to make sure that is a valid directory
    logger.subsection("is " + self.str_value + " a valid directory[ies]?")
    #NOTE: paths containing commas are not supported; entries only get
    #leading/trailing whitespace stripped.
    #BUGFIX: replaced the identity test `is not ''` (which only worked
    #through CPython string interning) with a plain emptiness check.
    if self.str_value.strip():
        #rejects any "empty" path list
        self.paths = self.paths + self.str_value.split(',')
    for entry in self.paths:
        entry = entry.strip()
        if os.path.exists(entry):
            logger.subsection(entry + " is a valid path")
            self.validated_paths += entry + ";"
    self.conf_values['DATA_DIR'] = self.validated_paths
    #For planned future support of other synchronization mechanisms
    #like DRBD, CSYNC ...
    self.conf_values['DATA_SYNC'] = "RSYNC"
def __load_module(module):
    """Import the gather-module file *module* (relative to `directory`)
    and return the module object, or None when the import fails.

    The md5 of the full path is used as the registered module name so
    equally-named files cannot collide in sys.modules.
    """
    code_path = directory + module
    try:
        #FIX: removed the unused code_dir/code_file locals; bind fin to
        #None so the finally block is safe when open() itself fails.
        fin = None
        try:
            fin = open(code_path, 'rb')
            return imp.load_source(hashlib.md5(code_path).hexdigest(),
                                   code_path, fin)
        finally:
            #best-effort close
            try:
                fin.close()
            except:
                pass
    except ImportError:
        logger.subsection("failed to import " + module)
def __load_module(module): code_path = directory + module try: try: code_dir = os.path.dirname(code_path) code_file = os.path.basename(code_path) fin = open(code_path, 'rb') return imp.load_source( hashlib.md5(code_path).hexdigest(), code_path, fin) finally: try: fin.close() except: pass except ImportError, x: logger.subsection("failed to import " + module)
def get_tables(self):
    """Return the name of every table present in the sqlite database."""
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database file at " + self.db_path)
        exit(2)
    conn = sqlite3.connect(self.db_path)
    c = conn.cursor()
    query = "select name from sqlite_master where type='table'"
    try:
        c.execute(query)
    except Exception as fail:
        self.sqlfail(query, fail)
    names = [row[0] for row in c]
    c.close()
    return names
def get_tables(self):
    """List all table names found in the database file."""
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database file at " + self.db_path)
        exit(2)
    cursor = sqlite3.connect(self.db_path).cursor()
    query = "select name from sqlite_master where type='table'"
    try:
        cursor.execute(query)
    except Exception as fail:
        self.sqlfail(query, fail)
    result = []
    for (name,) in cursor:
        result.append(name)
    cursor.close()
    return result
def configure(secondary=False, configuration=None):
    """Write /etc/rsyncd.conf for the primary server: global daemon
    settings plus one rsync module per replicated directory, restricted
    to the secondary server's IP.  A pre-existing file is moved to
    /etc/rsync.bak first.  Returns 0.

    NOTE: only the primary-server path is implemented; *secondary* and
    *configuration* are reserved for the secondary-server setup.
    """
    # Does a file pre-exist?
    rsync_conf_path = "/etc/rsyncd.conf"
    if path.isfile(rsync_conf_path):
        #FIX: the log message previously contained an accidental
        #line-continuation inside the string literal, garbling it.
        logger.subsection("removing previous rsync configuration file to rsyncd.bak")
        system("mv /etc/rsyncd.conf /etc/rsync.bak")
    logger.subsection("creating new rsync config file")
    rsync_conf.append(init_comment + "\n")
    ddriver = database.DbDriver()
    # Setup details for primary server
    if secondary is False:
        # Global rsync daemon settings
        rsync_conf.append("motd file = /etc/rsyncd.motd\n")
        rsync_conf.append("log file = /var/log/rsyncd.log\n")
        rsync_conf.append("pid file = /var/run/haoscar_rsyncd.pid\n")
        rsync_conf.append("lock file = /var/run/rsync.lock\n")
        # One rsync module per directory recorded in the database
        directory = ddriver.select_db("General_Configuration")
        ip = ddriver.select_db("Secondary_Configuration")
        sync_directory = directory[0]["DATA_DIR"]
        secondary_ip = ip[0]["IP_ADDR"]
        # DATA_DIR is ';'-separated with a trailing separator, so the
        # final empty element is discarded.  (FIX: removed unused count)
        sync_directory = sync_directory.split(";")
        sync_directory.pop()
        for key in sync_directory:
            rsync_conf.append("[" + key + "]\n")
            rsync_conf.append("path = " + key + "\n")
            rsync_conf.append("read only = no\n")
            rsync_conf.append("list = no\n")
            rsync_conf.append("hosts allow = " + secondary_ip + "\n")
            rsync_conf.append("hosts deny = *\n")
    # FIX: guarantee the file handle is closed even if a write fails
    fp = open(rsync_conf_path, "w")
    try:
        fp.writelines(rsync_conf)
    finally:
        fp.close()
    return 0
def configure(secondary=False, configuration=None):
    """Generate /etc/rsyncd.conf for the primary server from the
    directories and secondary IP stored in the database; returns 0."""
    rsync_conf_path = "/etc/rsyncd.conf"
    #Back up any configuration file that already exists
    if (path.isfile(rsync_conf_path)):
        logger.subsection("removing previous rsync configuration file to\ rsyncd.bak")
        system("mv /etc/rsyncd.conf /etc/rsync.bak")
    logger.subsection("creating new rsync config file")
    rsync_conf.append(init_comment + "\n")
    ddriver = database.DbDriver()
    #Primary-server configuration only; the secondary path is unused
    if (secondary is False):
        #Global daemon settings first
        for setting in ("motd file = /etc/rsyncd.motd\n",
                        "log file = /var/log/rsyncd.log\n",
                        "pid file = /var/run/haoscar_rsyncd.pid\n",
                        "lock file = /var/run/rsync.lock\n"):
            rsync_conf.append(setting)
        #Fetch the replicated directories and the secondary's address
        directory = ddriver.select_db('General_Configuration')
        ip = ddriver.select_db('Secondary_Configuration')
        sync_directory = directory[0]["DATA_DIR"]
        secondary_ip = ip[0]["IP_ADDR"]
        count = 0
        #Trailing ';' leaves an empty final element — drop it
        sync_directory = sync_directory.split(';')
        sync_directory.pop()
        for key in sync_directory:
            rsync_conf.append("[" + key + "]\n")
            rsync_conf.append("path = " + key + "\n")
            rsync_conf.append("read only = no\n")
            rsync_conf.append("list = no\n")
            rsync_conf.append("hosts allow = " + secondary_ip + "\n")
            rsync_conf.append("hosts deny = *\n")
    fp = open(rsync_conf_path, "w")
    fp.writelines(rsync_conf)
    fp.close()
    return 0
def getActiveModules():
    """Return every Gather_Modules row whose STATE flag is not "0"."""
    active_modules = []
    database_driver = ddriver.DbDriver()
    #retrieve all modules, keeping only those whose state is enabled
    try:
        all_modules = database_driver.select_db("Gather_Modules")
        try:
            for module in all_modules:
                if module["STATE"] != "0":
                    active_modules.append(module)
        except:
            logger.subsection("an error occured when processing module state")
    except:
        exit.open("fatal error, failed to load gather modules!")
    return active_modules
def truncate_db(self, table):
    """Remove every row from *table*, keeping the table itself."""
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database")
        exit(2)
    existing_tables = self.get_tables()
    if table not in existing_tables:
        logger.subsection(table + " does not exist in database")
        exit(2)
    connection = sqlite3.connect(self.db_path)
    cursor = connection.cursor()
    statement = "delete from " + table
    try:
        cursor.execute(statement)
    except Exception as fail:
        self.sqlfail(statement, fail)
    connection.commit()
    cursor.close()
def truncate_db(self, table):
    """Empty *table*; abort when the db file or the table is missing."""
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database")
        exit(2)
    if table not in self.get_tables():
        logger.subsection(table + " does not exist in database")
        exit(2)
    db = sqlite3.connect(self.db_path)
    cur = db.cursor()
    sql = "delete from " + table
    try:
        cur.execute(sql)
    except Exception as fail:
        self.sqlfail(sql, fail)
    db.commit()
    cur.close()
def reset():
    """Reload every gather module from `directory` into the database.

    Clears the gather table, imports each module file and records its
    configuration (plus FULL_PATH and STATE=1) in both the gather and
    active-gather tables.  Returns False when the module directory is
    missing, True otherwise.
    """
    database_driver = ddriver.DbDriver()
    #First, make sure the gather-module directory exists at all
    if not os.path.exists(directory):
        logger.subsection("could not find any modules in our gather directory")
        return False
    #Remove stale byte-compiled files so only sources are considered
    commands.getoutput("rm " + directory + "*.pyc")
    files = os.listdir(directory)
    #Clear all previous entries in database
    database_driver.truncate_db(gather_table)
    #Register every module found in the directory
    #FIX: renamed loop variable from `file`, which shadowed the builtin
    for fname in files:
        module = __load_module(fname)
        try:
            dict_config = module.open()
            dict_config["FULL_PATH"] = directory + fname
            dict_config["STATE"] = "1"
            database_driver.insert_db(gather_table, dict_config)
            database_driver.insert_db(gather_active_table, dict_config)
            logger.subsection("loaded module: " + fname)
        except:
            logger.subsection("module " + fname + " could not be loaded")
    return True
def reset():
    """Rebuild the gather-module tables from the module directory;
    returns False when the directory is absent, else True."""
    dict_config = dict()
    database_driver = ddriver.DbDriver()
    #Bail out early when there is no module directory to scan
    if not os.path.exists(directory):
        logger.subsection("could not find any modules in our gather directory")
        return False
    #Drop compiled leftovers, then list the module sources
    commands.getoutput("rm " + directory + "*.pyc")
    entries = os.listdir(directory)
    #Start from a clean gather table
    database_driver.truncate_db(gather_table)
    #Import each module and persist its reported configuration
    for entry in entries:
        module = __load_module(entry)
        try:
            dict_config = module.open()
            dict_config["FULL_PATH"] = directory + entry
            dict_config["STATE"] = "1"
            database_driver.insert_db(gather_table, dict_config)
            database_driver.insert_db(gather_active_table, dict_config)
            logger.subsection("loaded module: " + entry)
        except:
            logger.subsection("module " + entry + " could not be loaded")
    return True
def configure():
    """Generate the monit control files: enable monit at startup via
    /etc/default/monit and write the assembled component rules to
    /etc/monit/monitrc.  Returns 0 on success."""
    rules.append(init_comment)
    #Enable monit at boot.  FIX: close the handle even if write() fails.
    FILE = open("/etc/default/monit", "w")
    try:
        FILE.write(init_comment + "\nstartup=1\n")
    finally:
        FILE.close()
    #TODO: Make this part automatic.
    #Configure each component for Mon-IT
    #Apache Config:
    logger.subsection("adding apache config")
    rules.append("\n")
    rules.append(apache.configure())
    #Sshd Config:
    logger.subsection("adding sshd config")
    rules.append("\n")
    rules.append(ssh.configure())
    #Syslog Config:
    logger.subsection("adding syslog config")
    rules.append("\n")
    rules.append(syslog.configure())
    #TODO: Back up previous version of monitrc
    #We can now write out to config file
    #FIX: previously the file was never closed when a write raised
    #IOError; the finally block guarantees the close.
    FILE = open("/etc/monit/monitrc", "w")
    try:
        for line in rules:
            FILE.write(line)
    except IOError:
        exit.open("could not write monit rules")
    finally:
        FILE.close()
    return 0
def configure():
    """Write the monit startup default and assemble monitrc from each
    monitored component's rule snippet; returns 0."""
    rules.append(init_comment)
    FILE = open("/etc/default/monit", "w")
    FILE.write(init_comment + "\nstartup=1\n")
    FILE.close()
    #TODO: Make this part automatic.
    #Pull in the rule snippet contributed by each component
    logger.subsection("adding apache config")
    rules.extend(["\n", apache.configure()])
    logger.subsection("adding sshd config")
    rules.extend(["\n", ssh.configure()])
    logger.subsection("adding syslog config")
    rules.extend(["\n", syslog.configure()])
    #TODO: Back up previous version of monitrc
    #Flush the accumulated rules out to the monit control file
    FILE = open("/etc/monit/monitrc", "w")
    try:
        for line in rules:
            FILE.write(line)
        FILE.close()
    except IOError:
        exit.open("could not write monit rules")
    return 0
def client(self, ip):
    """Fetch one serialized configuration packet from the remote
    listening server at *ip*:self.port and return it as a dict.

    The connect is retried up to 5 times (2-minute pause) while the
    server is not yet listening; other failures abort the install.
    """
    #Create a TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    retry = True
    count = 0
    #Initiate connection
    while retry:
        try:
            retry = False
            sock.connect((ip, self.port))
        #BUGFIX: listed before socket.error — socket.timeout subclasses
        #socket.error, so this handler was previously unreachable.
        except socket.timeout:
            exit.open("could not connect to remote server")
        except socket.error:
            if count < 5:
                retry = True
                count += 1
                logger.subsection(
                    "listening server not ready. retrying in 2 mins...")
                time.sleep(120)
        except:
            exit.open(
                "a major connection error has occured. check your address and retry installation."
            )
    #Receive data from server
    #NOTE(review): self.port is reused as the recv buffer size — verify
    #it is large enough for the serialized packet.
    self.data = sock.recv(self.port)
    sock.close()
    #Convert serialized object to dictionary
    self.hash_data = loads(self.data)
    try:
        #BUGFIX: was the undefined name `hash_data`, which always raised
        #NameError and reported a bogus "failed to read" error.
        if self.hash_data['TYPE'] != self.data_type:
            exit.open(
                "primary and secondary server out of sync, restart installation"
            )
    #Exception, not bare except: must not swallow the SystemExit raised
    #by the out-of-sync exit.open() just above.
    except Exception:
        exit.open(
            "failed to read remote data configuration restart installation"
        )
    return self.hash_data
def create_database(self):
    """(Re)create the sqlite database from the SQL schema file.

    Any pre-existing database file is deleted first; aborts via exit(2)
    when the schema file cannot be found.
    """
    # Delete sqlite database file if it already exists
    try:
        unlink(self.db_path)
    except OSError:
        pass
    if not path.exists(self.schema_path):
        logger.subsection("Cannot access database schema file")
        exit(2)
    # Create database creation query from schema file
    conn = sqlite3.connect(self.db_path)
    c = conn.cursor()
    # FIX: read the schema in one call and close the file handle (the
    # original left it open).
    f = open(self.schema_path)
    try:
        query = f.read()
    finally:
        f.close()
    try:
        c.executescript(query)
    except Exception as fail:
        self.sqlfail(query, fail)
    conn.commit()
    c.close()
def create_database(self):
    """Build a fresh sqlite database by executing the schema script."""
    # Remove any stale database file before rebuilding
    try:
        unlink(self.db_path)
    except OSError:
        pass
    if not path.exists(self.schema_path):
        logger.subsection("Cannot access database schema file")
        exit(2)
    # Concatenate the schema file into one executable script
    conn = sqlite3.connect(self.db_path)
    cursor = conn.cursor()
    schema_file = open(self.schema_path)
    script = "".join(schema_file)
    try:
        cursor.executescript(script)
    except Exception as fail:
        self.sqlfail(script, fail)
    conn.commit()
    cursor.close()
def open(message=None):
    """Remove the installer lock file and terminate the process.

    When *message* is given it is logged as the failure reason first.
    """
    if message is not None:
        logger.subsection("your installation failed with message:")
        logger.subsection(message)
        os.chdir(os.getenv("HAS_HOME"))
        os.system("rm -rf .has_lock_file")
    else:
        os.chdir(os.getenv("HAS_HOME"))
        logger.subsection("cleaning up environment")
        os.system("rm -rf .has_lock_file")
    #TODO: Call Uniinstallation routines
    sys.exit(0)
def select_db(self, table):
    """Return every row of *table* as a list of dicts keyed by column
    name (an empty table yields []).

    Aborts via exit(2) when the database file or table is missing or
    *table* is not a string.
    """
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database file at " + self.db_path)
        exit(2)
    if table not in self.get_tables():
        logger.subsection(table + " does not exist in database")
        exit(2)
    if type(table) != str:
        logger.subsection(table + " must of type String")
        exit(2)
    conn = sqlite3.connect(self.db_path)
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    query = "select * from " + table
    try:
        c.execute(query)
    except Exception as fail:
        self.sqlfail(query, fail)
    rows = c.fetchall()
    # FIX: build the dicts straight from the sqlite3.Row objects.  The
    # original re-executed the query just to read the column names and
    # crashed (None.keys()) whenever the table was empty.
    return [dict(zip(row.keys(), tuple(row))) for row in rows]
def select_db(self, table):
    """Fetch all rows of *table*, each as a column-name -> value dict."""
    if not path.exists(self.db_path):
        logger.subsection("Cannot access database file at " + self.db_path)
        exit(2)
    existing_tables = self.get_tables()
    if table not in existing_tables:
        logger.subsection(table + " does not exist in database")
        exit(2)
    if type(table) != str:
        logger.subsection(table + " must of type String")
        exit(2)
    conn = sqlite3.connect(self.db_path)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    query = "select * from " + table
    try:
        cursor.execute(query)
    except Exception as fail:
        self.sqlfail(query, fail)
    rows = cursor.fetchall()
    #Re-run the query to grab one row just for its column names
    cursor.execute(query)
    keyz = cursor.fetchone().keys()
    result = []
    for row in rows:
        entry = {}
        for position, key in enumerate(keyz):
            entry[key] = row[position]
        result.append(entry)
    return result
def databaseSetup():
    """Create the HA-OSCAR database from scratch."""
    logger.subsection("initializing database")
    #Create database
    ddriver.DbDriver().create_database()
    logger.subsection("database setup completed sucessfully")
            #tail of client(): the received packet could not be parsed
            exit.open("failed to read remote data configuration restart installation")
        return self.hash_data

    #@des: The server is responsible for listening for connections from a
    #      client and sending data. In the event of
    #      an error, it resends then quits
    #@param: hash_data, data packet to be sent
    #@param: ip, external ip address of localhost
    def server(self, hash_data, ip):
        hash_data['TYPE'] = self.data_type #Control flag
        #We serialize the data to be sent
        self.data = dumps(hash_data)
        #Create server and bind to ourselves; a single request is
        #handled, then the socket is torn down.
        try:
            server = SocketServer.TCPServer((ip, self.port), RemoteSystem)
            server.handle_request()
        except socket.error, err:
            exit.open("address already in use!")
        except:
            exit.open("some un-anticipated error occured!")
        finally:
            #`server` may be unbound if TCPServer() itself failed, so
            #the close is best-effort.
            try:
                server.socket.close()
            except:
                pass
            logger.subsection("connection closed")

    def handle(self):
        #Called by SocketServer for each incoming request: push the
        #serialized configuration packet to the client.
        self.request.send(self.data)
    #tail of the enclosing function: insert any module not already
    #present in the active table, then report success
    for index in xrange(len(all_modules)):
        if (all_modules[index]["NAME"] not in module_names):
            database_driver.insert_db(gather_active_table, all_modules[index])
    return True

def addActiveModule():
    #TODO: not yet implemented
    pass

def __load_module(module):
    """Import the gather module file *module* (relative to `directory`)
    and return the module object; import failures are logged and yield
    None."""
    code_path = directory + module
    try:
        try:
            code_dir = os.path.dirname(code_path)
            code_file = os.path.basename(code_path)
            fin = open(code_path, 'rb')
            #module registered under the md5 of its path so equal
            #basenames cannot collide in sys.modules
            return imp.load_source(
                hashlib.md5(code_path).hexdigest(), code_path, fin)
        finally:
            #best-effort close; fin is unbound when open() failed
            try:
                fin.close()
            except:
                pass
    except ImportError, x:
        logger.subsection("failed to import " + module)
    except:
        logger.subsection("an unknown import module error has occured.")
            #tail of client(): close of the failure exit.open() call
            )
        return self.hash_data

    #@des: The server is responsible for listening for connections from a
    #      client and sending data. In the event of
    #      an error, it resends then quits
    #@param: hash_data, data packet to be sent
    #@param: ip, external ip address of localhost
    def server(self, hash_data, ip):
        hash_data['TYPE'] = self.data_type #Control flag
        #We serialize the data to be sent
        self.data = dumps(hash_data)
        #Create server and bind to ourselves; one request is served,
        #then the socket is torn down.
        try:
            server = SocketServer.TCPServer((ip, self.port), RemoteSystem)
            server.handle_request()
        except socket.error, err:
            exit.open("address already in use!")
        except:
            exit.open("some un-anticipated error occured!")
        finally:
            #`server` may be unbound when TCPServer() itself failed, so
            #the close is best-effort.
            try:
                server.socket.close()
            except:
                pass
            logger.subsection("connection closed")

    def handle(self):
        #SocketServer callback: send the serialized packet to the peer.
        self.request.send(self.data)
def configure():
    """Generate the heartbeat configuration triplet: /etc/ha.d/authkeys,
    /etc/ha.d/ha.cf and /etc/ha.d/haresources, driven by the primary and
    secondary server records in the HA-OSCAR database.

    Returns 0 on success, 1 when interface or IP information cannot be
    retrieved from the database.
    """
    #check if the auth already exists
    auth = "/etc/ha.d/authkeys"
    if path.isfile(auth):
        logger.subsection("authentication configuration already exists, skipping")
    else:
        auth_value = [init_comment]
        logger.subsection("creating authentication file")
        auth_passwd = getpass.getpass("enter heartbeat authentication passwd: ")
        auth_value.append("\nauth 2\n2 sha1 ")
        auth_value.append(auth_passwd + "\n")
        FILE = open(auth, "w+")
        FILE.writelines(auth_value)
        #FIX: close (flush) the file before tightening its permissions;
        #the original never closed this handle.
        FILE.close()
        system("chmod 600 /etc/ha.d/authkeys")
    hacf = "/etc/ha.d/ha.cf"
    if path.isfile(hacf):
        logger.subsection("ha.cf file already exists, re-writing")
    hacf_value = [init_comment]
    logger.subsection("auto generating heartbeat configuration file")
    hacf_value.append(hacf_config)
    #Interface data comes from the HA-OSCAR database (the old
    #haoscar.conf flat file is no longer consulted).
    ddriver = database_driver.DbDriver()
    primary_conf = ddriver.select_db('Primary_Configuration')
    secondary_conf = ddriver.select_db('Secondary_Configuration')
    nic_info = primary_conf[0]["NIC_INFO"]
    if len(nic_info):
        logger.subsection("using interface " + nic_info)
        hacf_value.append("\nbcast " + nic_info)
        hacf_value.append("\nauto_failback on\n")
        hacf_value.append("node " + commands.getoutput("uname -n") + "\n")
        if secondary_conf[0]['HOSTNAME']:
            hacf_value.append("node " + secondary_conf[0]['HOSTNAME'] + "\n")
        FILE = open(hacf, "w")
        FILE.writelines(hacf_value)
        FILE.close()
    else:
        logger.subsection("a fatal error has occured: could not retreive interface info")
        return 1
    haresources = "/etc/ha.d/haresources"
    if path.isfile(haresources):
        logger.subsection("haresource configuration exists, skipping")
    else:
        logger.subsection("writing haresource configuration")
        ip_addr = primary_conf[0]['IP_ADDR']
        if len(ip_addr):
            haresource = [commands.getoutput("uname -n") + " " + ip_addr]
            FILE = open("/etc/ha.d/haresources", "w")
            FILE.writelines(haresource)
            #FIX: flush the haresources file to disk before returning
            FILE.close()
        else:
            logger.subsection("a fatal error has occured, could not retrieve ip information")
            return 1
    #If we have not yet died at this point we can assume the
    #configuration was a success
    return 0
def netConfig(self): max_possible = 128 #Max no of interfaces bytes = max_possible * 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) names = array.array('B', '\0' * bytes) outbytes = struct.unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, #SIOCGIFCONF struct.pack('iL',bytes, names.buffer_info()[0] )))[0] namestr = names.tostring() self.interface_list = [namestr[i:i+32].split('\0',1)[0] for i in range(0, outbytes, 32)] #TODO: Fix parsing if (len(self.interface_list) == 1): logger.subsection("detected only one interface: "+self.interface_list[0]) logger.subsection("adding to config file") self.conf_values['NIC_INFO'] = self.interface_list[0] else: temp = "" for i in self.interface_list: temp = temp + i + ", " temp = temp.strip(', ') #cleans off tailing comma logger.subsection("detected multiple active interfaces: "+temp) self.str_value = raw_input("Select a network interface from the options above: ") self.str_value = self.str_value.strip() cmd_result = commands.getoutput("ifconfig "+self.str_value) if ('error fetching' in cmd_result or self.str_value is ""): logger.subsection("invalid device specified, skipping for now") self.conf_values['NIC_INFO'] = "" self.conf_values['IP_ADDR'] = "" else: logger.subsection("adding interface to config file, proceeding...") self.conf_values['NIC_INFO'] =self.str_value logger.subsection("adding ip address of associated interface...") self.ip_addr = socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, struct.pack('256s', self.str_value[:15]) )[20:24]) self.conf_values['IP_ADDR']=self.ip_addr logger.subsection("setting up netmask and subnet...") maskline = commands.getoutput("echo \""+cmd_result+"\" | grep Mask:") self.conf_values['MASK'] = maskline.partition('Mask:')[2] #retrieves the proper netmask masksub = self.conf_values['MASK'].partition('.') ipsub = self.conf_values['IP_ADDR'].partition('.') self.conf_values['SUBNET'] = '' for i in range(2): self.conf_values['SUBNET'] += str(int(masksub[0]) & int(ipsub[0])) + '.' 
#Gets next section masksub = masksub[2].partition('.') ipsub = ipsub[2].partition('.') self.conf_values['SUBNET'] += str(int(masksub[0]) & int(ipsub[0])) + '.' #2nd to last self.conf_values['SUBNET'] += str(int(masksub[2]) & int(ipsub[2])) #Gets the last section
def configureEnvironment():
    """Persist the HAS_HOME environment variable (set to the current
    directory) for future shells: /etc/profile.d on RedHat-style
    systems, /root/.bashrc on Debian-style systems."""
    if os.path.isfile("/etc/redhat-release"):
        logger.subsection("Local system is a redhat based distro")
        logger.subsection("Setting HAS_HOME directory variable to current dir")
        if not os.path.isfile("/etc/profile.d/has_home.csh"):
            #csh shells
            cwd = os.getcwd()
            cmd4rpm = "setenv HAS_HOME " + cwd
            FILE = open("/etc/profile.d/has_home.csh", "w")
            FILE.writelines(cmd4rpm)
            FILE.close()
            os.system("chmod 755 /etc/profile.d/has_home.csh")
        else:
            logger.subsection(
                "It seems HAS_HOME(csh) has been set already, skipping")
        if not os.path.isfile("/etc/profile.d/has_home.sh"):
            #sh shells
            cwd = os.getcwd()
            cmd4rpm = "HAS_HOME=" + cwd + "\n" + "export HAS_HOME"
            FILE = open("/etc/profile.d/has_home.sh", "w")
            FILE.writelines(cmd4rpm)
            FILE.close()
            os.system("chmod 755 /etc/profile.d/has_home.sh")
        else:
            logger.subsection(
                "It seems HAS_HOME(sh) has been set already, skipping")
    else:
        logger.subsection("Local system is a debian based distro")
        logger.subsection("Setting HAS_HOME directory variable to current dir")
        cwd = os.getcwd()
        #BUGFIX: the first echo printed to stdout instead of appending
        #to /root/.bashrc, so HAS_HOME was never actually persisted.
        cmd4deb0 = "echo HAS_HOME=" + cwd + " >> /root/.bashrc"
        cmd4deb1 = "echo export HAS_HOME >> /root/.bashrc"
        if commands.getoutput("grep HAS_HOME /root/.bashrc"):
            logger.subsection(
                "It seems HAS_HOME has been set already, skipping")
        else:
            os.system(cmd4deb0)
            os.system(cmd4deb1)
def serviceConfig(self):
    """Record the default set of highly-available services."""
    logger.subsection("generating list of 'default' highly available services")
    #Only sshd is made highly available out of the box.
    self.conf_values['SERVICES'] = "sshd"
def remoteCommunication():
    """Initialize the remote communication library, logging progress."""
    logger.subsection("initializing remote communication library")
    remote.initialize()
    logger.subsection("remote comm lib initalization completed")
def configureEnvironment():
    """Make HAS_HOME (the current directory) available to future
    shells; uses /etc/profile.d scripts on RedHat-style distros and
    /root/.bashrc on Debian-style distros."""
    if os.path.isfile("/etc/redhat-release"):
        logger.subsection("Local system is a redhat based distro")
        logger.subsection("Setting HAS_HOME directory variable to current dir")
        #csh shells
        if not os.path.isfile("/etc/profile.d/has_home.csh"):
            cwd = os.getcwd()
            cmd4rpm = "setenv HAS_HOME " + cwd
            FILE = open("/etc/profile.d/has_home.csh", "w")
            FILE.writelines(cmd4rpm)
            FILE.close()
            os.system("chmod 755 /etc/profile.d/has_home.csh")
        else:
            logger.subsection("It seems HAS_HOME(csh) has been set already, skipping")
        #sh shells
        if not os.path.isfile("/etc/profile.d/has_home.sh"):
            cwd = os.getcwd()
            cmd4rpm = "HAS_HOME=" + cwd + "\n" + "export HAS_HOME"
            FILE = open("/etc/profile.d/has_home.sh", "w")
            FILE.writelines(cmd4rpm)
            FILE.close()
            os.system("chmod 755 /etc/profile.d/has_home.sh")
        else:
            logger.subsection("It seems HAS_HOME(sh) has been set already, skipping")
    else:
        logger.subsection("Local system is a debian based distro")
        logger.subsection("Setting HAS_HOME directory variable to current dir")
        cwd = os.getcwd()
        #BUGFIX: appended the redirect to /root/.bashrc — the original
        #first command only echoed HAS_HOME to stdout, so the variable
        #assignment was never written to the file.
        cmd4deb0 = "echo HAS_HOME=" + cwd + " >> /root/.bashrc"
        cmd4deb1 = "echo export HAS_HOME >> /root/.bashrc"
        if commands.getoutput("grep HAS_HOME /root/.bashrc"):
            logger.subsection("It seems HAS_HOME has been set already, skipping")
        else:
            os.system(cmd4deb0)
            os.system(cmd4deb1)
def netConfig(self):
    """Probe network interfaces via SIOCGIFCONF, ask the user for the
    local interface (optionally plus specific failover interfaces), and
    record NIC_INFO, IP_ADDR, MASK, SUBNET and FALLBACK_IPS in
    self.conf_values."""
    max_possible = 128 #Max no of interfaces
    bytes = max_possible * 32
    #Datagram socket used only as a file descriptor for the ioctls
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    names = array.array('B', '\0' * bytes)
    #SIOCGIFCONF fills `names` with 32-byte ifreq records and returns
    #the byte count actually written
    outbytes = struct.unpack('iL', fcntl.ioctl(
        s.fileno(),
        0x8912, #SIOCGIFCONF
        struct.pack('iL', bytes, names.buffer_info()[0])
    ))[0]
    namestr = names.tostring()
    #First NUL-terminated field of each record is the interface name
    self.interface_list = [namestr[i:i+32].split('\0', 1)[0]
                           for i in range(0, outbytes, 32)]
    #TODO: Fix parsing
    if (len(self.interface_list) == 1):
        #shouldn't ever happen, and would break as written.
        logger.subsection("detected only one interface: " + self.interface_list[0])
        logger.subsection("adding to config file")
        self.conf_values['NIC_INFO'] = self.interface_list[0]
    else:
        temp = ""
        for i in self.interface_list:
            temp = temp + i + ", "
        temp = temp.strip(', ') #cleans off tailing comma
        logger.subsection("Detected multiple active interfaces: " + temp)
        self.str_value = raw_input("Select the local network interface from the options above. If you would like to only migrate specific interfaces on failure, list them after the local interface seperated by commas: ")
        #[0] contains the local interface; [2] contains the rest of the
        #comma-separated failover list (if any)
        ha_ifaces = self.str_value.partition(',')
        self.str_value = ha_ifaces[0].strip()
        cmd_result = commands.getoutput("ifconfig " + self.str_value)
        #NOTE(review): `is ""` is an identity test relying on CPython
        #string interning — should be `== ""`.
        if ('error fetching' in cmd_result or self.str_value is ""):
            logger.subsection("invalid device specified, skipping for now")
            self.conf_values['NIC_INFO'] = ""
            self.conf_values['IP_ADDR'] = ""
        else:
            logger.subsection("adding interface to config file, proceeding...")
            self.conf_values['NIC_INFO'] = self.str_value
            logger.subsection("adding ip address of associated interface...")
            #SIOCGIFADDR (0x8915): bytes 20-24 of the returned buffer
            #hold the interface's IPv4 address
            self.ip_addr = socket.inet_ntoa(fcntl.ioctl(
                s.fileno(),
                0x8915,
                struct.pack('256s', self.str_value[:15])
            )[20:24])
            self.conf_values['IP_ADDR'] = self.ip_addr
            logger.subsection("setting up netmask and subnet...")
            #Scrape the netmask from the ifconfig output
            maskline = commands.getoutput("echo \"" + cmd_result + "\" | grep Mask:")
            self.conf_values['MASK'] = maskline.partition('Mask:')[2] #retrieves the proper netmask
            #Compute the subnet octet-by-octet as (mask AND ip), walking
            #both dotted quads with successive partition('.') calls
            masksub = self.conf_values['MASK'].partition('.')
            ipsub = self.conf_values['IP_ADDR'].partition('.')
            self.conf_values['SUBNET'] = ''
            for i in range(2):
                self.conf_values['SUBNET'] += str(int(masksub[0]) & int(ipsub[0])) + '.'
                #Gets next section
                masksub = masksub[2].partition('.')
                ipsub = ipsub[2].partition('.')
            self.conf_values['SUBNET'] += str(int(masksub[0]) & int(ipsub[0])) + '.' #2nd to last
            self.conf_values['SUBNET'] += str(int(masksub[2]) & int(ipsub[2])) #Gets the last section
            self.conf_values['FALLBACK_IPS'] = ''
            if ha_ifaces[1] == ',':
                #We have specified interfaces: resolve each listed name
                #to its IPv4 address via SIOCGIFADDR
                while ha_ifaces[1] == ',':
                    ha_ifaces = ha_ifaces[2].partition(',')
                    ha_ip = socket.inet_ntoa(fcntl.ioctl(
                        s.fileno(),
                        0x8915,
                        struct.pack('256s', ha_ifaces[0].strip()[:15])
                    )[20:24])
                    self.conf_values['FALLBACK_IPS'] += ha_ip + ' '
            else:
                #We should use all available interfaces except lo and
                #the already-chosen local interface.
                for interface in self.interface_list:
                    if interface != 'lo' and interface != self.conf_values['NIC_INFO']:
                        ha_ip = socket.inet_ntoa(fcntl.ioctl(
                            s.fileno(),
                            0x8915,
                            struct.pack('256s', interface[:15])
                        )[20:24])
                        self.conf_values['FALLBACK_IPS'] += ha_ip + ' '
            #drop the trailing separator added by the loops above
            self.conf_values['FALLBACK_IPS'] = self.conf_values['FALLBACK_IPS'].strip()
def primaryConfigurator():
    """Collect system configuration facts for the primary node.

    Delegates to sysConfig.SysConfigurator and returns whatever its
    priConfig() reports.
    """
    logger.subsection("gathering system configuration facts")
    return sysConfig.SysConfigurator().priConfig()
def configure():
    """Generate the heartbeat configuration files for HA-OSCAR.

    Writes (when absent or, for ha.cf, unconditionally re-writes):
      * /etc/ha.d/authkeys    - sha1 authentication key (prompted for)
      * /etc/ha.d/ha.cf       - heartbeat daemon configuration
      * /etc/ha.d/haresources - node-to-IP resource mapping

    Interface and IP information comes from the HA-OSCAR database via
    database_driver.DbDriver (this replaced the old parsing of
    /etc/haoscar/haoscar.conf).

    Returns 0 on success, 1 when NIC or IP data cannot be retrieved.
    """
    # --- /etc/ha.d/authkeys -------------------------------------------
    auth = "/etc/ha.d/authkeys"
    if (path.isfile(auth)):
        logger.subsection(
            "authentication configuration already exists, skipping")
    else:
        auth_value = []
        auth_value.append(init_comment)
        logger.subsection("creating authentication file")
        auth_passwd = getpass.getpass(
            "enter heartbeat authentication passwd: ")
        auth_value.append("\nauth 2\n2 sha1 ")
        auth_value.append(auth_passwd + "\n")
        # Context manager guarantees the file is flushed and closed
        # BEFORE the chmod runs (the original leaked the handle).
        with open(auth, "w+") as FILE:
            FILE.writelines(auth_value)
        system("chmod 600 /etc/ha.d/authkeys")

    # --- /etc/ha.d/ha.cf (always regenerated) -------------------------
    hacf = "/etc/ha.d/ha.cf"
    if (path.isfile(hacf)):
        logger.subsection("ha.cf file already exists, re-writing")
    hacf_value = []
    hacf_value.append(init_comment)
    logger.subsection("auto generating heartbeat configuration file")
    hacf_value.append(hacf_config)
    # NIC/IP facts are read from the HA-OSCAR database, not from
    # /etc/haoscar/haoscar.conf as in earlier revisions.
    ddriver = database_driver.DbDriver()
    primary_conf = ddriver.select_db('Primary_Configuration')
    secondary_conf = ddriver.select_db('Secondary_Configuration')
    nic_info = primary_conf[0]["NIC_INFO"]
    if nic_info:
        logger.subsection("using interface " + nic_info)
        hacf_value.append("\nbcast " + nic_info)
        hacf_value.append("\nauto_failback on\n")
        hacf_value.append("node " + commands.getoutput("uname -n") + "\n")
        # Secondary node entry is optional; only add it when a
        # hostname was recorded for it.
        if secondary_conf[0]['HOSTNAME']:
            hacf_value.append("node " + secondary_conf[0]['HOSTNAME'] + "\n")
        with open(hacf, "w") as FILE:
            FILE.writelines(hacf_value)
    else:
        logger.subsection(
            "a fatal error has occured: could not retreive interface info")
        return 1

    # --- /etc/ha.d/haresources ----------------------------------------
    haresources = "/etc/ha.d/haresources"
    if (path.isfile(haresources)):
        logger.subsection("haresource configuration exists, skipping")
    else:
        logger.subsection("writing haresource configuration")
        ip_addr = primary_conf[0]['IP_ADDR']
        if ip_addr:
            haresource = []
            haresource.append(
                commands.getoutput("uname -n") + " " + ip_addr)
            # Original leaked this handle; close it deterministically.
            with open("/etc/ha.d/haresources", "w") as FILE:
                FILE.writelines(haresource)
        else:
            logger.subsection(
                "a fatal error has occured, could not retrieve ip information")
            return 1

    # If we have not yet died at this point we can assume the
    # configuration was a success.
    return 0
all_modules = getAllModules() #Regenerate active table for index in xrange(len(all_modules)): if(all_modules[index]["NAME"] not in module_names): database_driver.insert_db(gather_active_table, all_modules[index]) return True def addActiveModule(): pass def __load_module(module): code_path = directory + module try: try: code_dir = os.path.dirname(code_path) code_file = os.path.basename(code_path) fin = open(code_path, 'rb') return imp.load_source(hashlib.md5(code_path).hexdigest(), code_path, fin) finally: try: fin.close() except: pass except ImportError, x: logger.subsection("failed to import "+module) except: logger.subsection("an unknown import module error has occured.")