def copy_boot(self): cluster = Cluster(mongo_db = self._mongo_db) image_path = str(self.get('path')) kernver = str(self.get('kernver')) tmp_path = '/tmp' # in chroot env initrdfile = str(self.name) + '-initramfs-' + kernver kernfile = str(self.name) + '-vmlinuz-' + kernver path = cluster.get('path') if not path: self._logger.error("Path needs to be configured.") return None path = str(path) user = cluster.get('user') if not user: self._logger.error("User needs to be configured.") return None path_to_store = path + "/boot" user_id = pwd.getpwnam(user).pw_uid grp_id = pwd.getpwnam(user).pw_gid if not os.path.exists(path_to_store): os.makedirs(path_to_store) os.chown(path_to_store, user_id, grp_id) shutil.copy(image_path + '/boot/initramfs-' + kernver + '.img', path_to_store + '/' + initrdfile) shutil.copy(image_path + '/boot/vmlinuz-' + kernver, path_to_store + '/' + kernfile) os.chown(path_to_store + '/' + initrdfile, user_id, grp_id) os.chmod(path_to_store + '/' + initrdfile, 0644) os.chown(path_to_store + '/' + kernfile, user_id, grp_id) os.chmod(path_to_store + '/' + kernfile, 0644) self.set('kernfile', kernfile) self.set('initrdfile', initrdfile) self._logger.warning("Boot files was copied, but luna module might not being added to initrd. Please check /etc/dracut.conf.d in image") return True
def __init__(self, name = None, mongo_db = None, create = False, id = None,
        path = '', kernver = '', kernopts = '',
        grab_list = 'grab_default_centos.lst'):
    """Create or load an osimage object.

    create    - should be True if we need to create the osimage
    path      - path to / of the image; can be relative
                (will be converted to absolute)
    kernver   - kernel version (will be checked on creation);
                'ANY' picks the first installed kernel, if detectable
    kernopts  - kernel options
    grab_list - rsync exclude list for grabbing a live node to an image
    """
    self._logger.debug("Arguments to function '{}".format(self._debug_function()))
    self._collection_name = 'osimage'
    # Loads the existing mongo document (None when creating a new object)
    mongo_doc = self._check_name(name, mongo_db, create, id)
    if bool(kernopts) and type(kernopts) is not str:
        self._logger.error("Kernel options should be 'str' type")
        raise RuntimeError
    # Schema of the osimage document
    self._keylist = {'path': type(''), 'kernver': type(''),
                     'kernopts': type(''), 'kernmodules': type(''),
                     'dracutmodules': type(''), 'tarball': type(''),
                     'torrent': type(''), 'kernfile': type(''),
                     'initrdfile': type(''), 'grab_exclude_list': type(''),
                     'grab_filesystems': type('')}
    if create:
        cluster = Cluster(mongo_db = self._mongo_db)
        path = os.path.abspath(path)
        # Refuse to create two osimages pointing at the same directory
        path_suspected_doc = self._mongo_collection.find_one({'path': path})
        if path_suspected_doc and path_suspected_doc['path'] == path:
            self._logger.error("Cannot create 'osimage' with the same 'path' as name='{}' has".format(path_suspected_doc['name']))
            raise RuntimeError
        if kernver == 'ANY':
            # best effort: fall through to _check_kernel if detection fails
            try:
                kernver = self.get_package_ver(path, 'kernel')[0]
            except:
                pass
        if not self._check_kernel(path, kernver):
            raise RuntimeError
        grab_list_path = cluster.get('path') + '/templates/' + grab_list
        if not os.path.isfile(grab_list_path):
            self._logger.error("'{}' is not a file.".format(grab_list_path))
            raise RuntimeError
        # The exclude list content is stored in the document itself
        with open(grab_list_path) as lst:
            grab_list_content = lst.read()
        mongo_doc = {'name': name, 'path': path,
                     'kernver': kernver, 'kernopts': kernopts,
                     'dracutmodules': 'luna,-i18n,-plymouth',
                     'kernmodules': 'ipmi_devintf,ipmi_si,ipmi_msghandler',
                     'grab_exclude_list': grab_list_content,
                     'grab_filesystems': '/,/boot'}
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        self.link(cluster)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
    self._logger = logging.getLogger(__name__ + '.' + self._name)
def run(self):
    """Daemon main loop: call update() every self.interval seconds while
    self.active is set.  On a passive cluster node, back off for a minute
    instead of updating."""
    ticks = self.interval  # start "due" so the first pass updates immediately
    cluster = Cluster(mongo_db=self._mongo_db)
    while self.active:
        if ticks >= self.interval:
            if not cluster.is_active():
                self.logger.info("This is passive node. Doing nothing.")
                time.sleep(60)
            else:
                self.update()
            ticks = 0
        ticks += 1
        time.sleep(1)
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             network=None, ip=None, comment=''):
    """Create or load an 'otherdev' (generic network device) object.

    network - the network the device is connected to
    ip - device's ip
    """
    self.log.debug("function args {}".format(self._debug_function()))

    # Define the schema used to represent otherdev objects
    self._collection_name = 'otherdev'
    self._keylist = {'comment': type('')}

    # Check if this device is already present in the datastore
    # Read it if that is the case
    dev = self._get_object(name, mongo_db, create, id)

    if create:
        cluster = Cluster(mongo_db=self._mongo_db)

        # 'connected' maps network ObjectId (as str) -> relative IP number
        if not network:
            connected = {}
        elif not ip:
            err_msg = "IP needs to be specified"
            self.log.error(err_msg)
            raise RuntimeError, err_msg
        else:
            net = Network(name=network, mongo_db=self._mongo_db)
            ipnum = net.reserve_ip(ip, ignore_errors=False)
            if not ipnum:
                err_msg = "Unable to allocate IP in network"
                self.log.error(err_msg)
                raise RuntimeError, err_msg
            connected = {str(net.DBRef.id): ipnum}

        # Store the new device in the datastore
        dev = {'name': name, 'connected': connected, 'comment': comment}
        self.log.debug("Saving dev '{}' to the datastore".format(dev))
        self.store(dev)

        # Link this device to its dependencies and the current cluster
        # ('net' is only bound on the branch where 'connected' is non-empty)
        self.link(cluster)
        if connected and net:
            self.link(net)

    self.log = logging.getLogger('otherdev.' + self._name)
def create_torrent(self):
    """Build a torrent file for the osimage tarball and record its uid in DB.

    Requires a tarball (self.get('tarball')) to exist on disk and the
    cluster frontend address/port to be configured.  Returns True on
    success, False otherwise.

    Fixes over the previous revision: the torrent file is now closed via
    a 'with' block even if bencode/generate raises, and the original
    working directory is always restored via try/finally.
    """
    # TODO check if root
    tarball_uid = self.get('tarball')
    if not tarball_uid:
        self.log.error("No tarball in DB.")
        return False

    cluster = Cluster(mongo_db=self._mongo_db)
    tarball = cluster.get('path') + "/torrents/" + tarball_uid + ".tgz"
    if not os.path.exists(tarball):
        self.log.error("Wrong path in DB.")
        return False

    tracker_address = cluster.get('frontend_address')
    if tracker_address == '':
        self.log.error("Tracker address needs to be configured.")
        return False

    tracker_port = cluster.get('frontend_port')
    if tracker_port == 0:
        self.log.error("Tracker port needs to be configured.")
        return False

    user = cluster.get('user')
    user_id = pwd.getpwnam(user).pw_uid
    grp_id = pwd.getpwnam(user).pw_gid

    old_cwd = os.getcwd()
    try:
        # libtorrent hashes files relative to cwd, so work next to the tarball
        os.chdir(os.path.dirname(tarball))
        uid = str(uuid.uuid4())
        torrentfile = cluster.get('path') + "/torrents/" + uid + ".torrent"

        fs = libtorrent.file_storage()
        libtorrent.add_files(fs, os.path.basename(tarball))
        t = libtorrent.create_torrent(fs)

        if cluster.get('frontend_https'):
            proto = 'https'
        else:
            proto = 'http'
        t.add_tracker((proto + "://" + str(tracker_address) +
                       ":" + str(tracker_port) + "/announce"))

        t.set_creator(torrent_key)
        t.set_comment(uid)
        libtorrent.set_piece_hashes(t, ".")

        # 'with' guarantees the file handle is closed even on error
        with open(torrentfile, 'w') as f:
            f.write(libtorrent.bencode(t.generate()))
        os.chown(torrentfile, user_id, grp_id)

        self.set('torrent', str(uid))
    finally:
        # always restore the working directory
        os.chdir(old_cwd)
    return True
def __init__(self, name = None, mongo_db = None, create = False, id = None,
        prescript = None, bmcsetup = None, bmcnetwork = None,
        partscript = None, osimage = None, interfaces = None,
        postscript = None, boot_if = None, torrent_if = None):
    """Create or load a group object.

    prescript  - preinstall script
    bmcsetup   - bmcsetup options
    bmcnetwork - used for bmc networking
    partscript - partition script
    osimage    - osimage
    interfaces - list of the network interfaces
    postscript - postinstall script
    """
    self._logger.debug("Arguments to function '{}".format(self._debug_function()))
    self._collection_name = 'group'
    mongo_doc = self._check_name(name, mongo_db, create, id)
    # Schema of the group document
    self._keylist = {'prescript': type(''), 'partscript': type(''),
                     'postscript': type(''), 'boot_if': type(''),
                     'torrent_if': type('')}
    if create:
        cluster = Cluster(mongo_db = self._mongo_db)
        (bmcobj, bmcnetobj) = (None, None)
        if bool(bmcsetup):
            bmcobj = BMCSetup(bmcsetup).DBRef
        if bool(bmcnetwork):
            bmcnetobj = Network(bmcnetwork, mongo_db = self._mongo_db).DBRef
        # NOTE(review): OsImage is constructed without mongo_db here,
        # unlike the other objects above — confirm this is intentional
        osimageobj = OsImage(osimage)
        if bool(interfaces) and type(interfaces) is not type([]):
            self._logger.error("'interfaces' should be list")
            raise RuntimeError
        if_dict = {}
        if not bool(interfaces):
            interfaces = []
        for interface in interfaces:
            if_dict[interface] = {'network': None, 'params': ''}
        # Default scripts boot the node into a tmpfs root
        if not bool(partscript):
            partscript = "mount -t tmpfs tmpfs /sysroot"
        if not bool(prescript):
            prescript = ""
        if not bool(postscript):
            postscript = """cat <<EOF>>/sysroot/etc/fstab
tmpfs / tmpfs defaults 0 0
EOF"""
        mongo_doc = {'name': name, 'prescript': prescript,
                     'bmcsetup': bmcobj, 'bmcnetwork': bmcnetobj,
                     'partscript': partscript, 'osimage': osimageobj.DBRef,
                     'interfaces': if_dict, 'postscript': postscript,
                     'boot_if': boot_if, 'torrent_if': torrent_if}
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        # Link dependencies so deletes/renames can be tracked
        self.link(cluster)
        if bmcobj:
            self.link(bmcobj)
        if bmcnetobj:
            self.link(bmcnetobj)
        self.link(osimageobj)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
    self._logger = logging.getLogger('group.' + self._name)
def copy_boot(self): image_path = self.get('path') kernver = self.get('kernver') initrdfile = self.name + '-initramfs-' + kernver kernfile = self.name + '-vmlinuz-' + kernver cluster = Cluster(mongo_db=self._mongo_db) user = cluster.get('user') user_id = pwd.getpwnam(user).pw_uid grp_id = pwd.getpwnam(user).pw_gid path = cluster.get('path') path_to_store = path + "/boot" initrd_path = image_path + '/boot/initramfs-' + kernver + '.img' kernel_path = image_path + '/boot/vmlinuz-' + kernver if not os.path.isfile(kernel_path): self.log.error("Unable to find kernel in {}".format(kernel_path)) return False if not os.path.isfile(initrd_path): self.log.error("Unable to find initrd in {}".format(initrd_path)) return False if not os.path.exists(path_to_store): os.makedirs(path_to_store) os.chown(path_to_store, user_id, grp_id) shutil.copy(initrd_path, path_to_store + '/' + initrdfile) shutil.copy(kernel_path, path_to_store + '/' + kernfile) os.chown(path_to_store + '/' + initrdfile, user_id, grp_id) os.chmod(path_to_store + '/' + initrdfile, 0644) os.chown(path_to_store + '/' + kernfile, user_id, grp_id) os.chmod(path_to_store + '/' + kernfile, 0644) self.set('kernfile', kernfile) self.set('initrdfile', initrdfile) self.log.warning(("Boot files were copied, but luna modules might not " "have been added to initrd. Please check " "/etc/dracut.conf.d in the image")) return True
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             NETWORK=None, PREFIX=None, ns_hostname=None, ns_ip=None):
    """Create or load a network object.

    create - should be True if we need to create the network
    NETWORK - network address
    PREFIX - network bits of the CIDR prefix
    """
    self._logger.debug("Arguments to function '{}".format(
        self._debug_function()))
    self._collection_name = 'network'
    # Schema; NETWORK is stored as a (python2) long integer
    self._keylist = {
        'NETWORK': long,
        'PREFIX': type(0),
        'ns_hostname': type(''),
        'ns_ip': type('')
    }
    mongo_doc = self._check_name(name, mongo_db, create, id)
    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        num_net = self.get_base_net(NETWORK, PREFIX)
        if not num_net:
            self._logger.error("Cannot compute NETWORK/PREFIX")
            raise RuntimeError
        if not ns_hostname:
            ns_hostname = self._guess_ns_hostname()
        # Free relative-IP ranges; 0 (network) and the broadcast
        # address are excluded
        freelist = [{'start': 1, 'end': (1 << (32 - PREFIX)) - 2}]
        mongo_doc = {
            'name': name,
            'NETWORK': num_net,
            'PREFIX': PREFIX,
            'freelist': freelist,
            'ns_hostname': ns_hostname,
            'ns_ip': None
        }
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        self.link(cluster)
        if not ns_ip:
            # Default the nameserver to the last usable address
            ns_ip = self.relnum_to_ip(freelist[0]['end'])
        if not ns_ip:
            self._logger.error("Cannot configure IP address for NS")
        else:
            self.set('ns_ip', ns_ip)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
    self._logger = logging.getLogger(__name__ + '.' + self._name)
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             group=None, localboot=False, setupbmc=True, service=False):
    """Create or load a node object.

    name - can be ommited; generated on creation when missing
    group - group belongs to; should be specified

    Flags
    localboot - boot from localdisk
    setupbmc - whether we need to setup ipmi on install
    service - do not perform install but boot to installer
              (dracut environment)
    """
    self._logger.debug("Arguments to function '{}".format(
        self._debug_function()))
    self._collection_name = 'node'
    # Autogenerate a sequential name (prefix + number) when creating
    # without an explicit one
    if not bool(name) and bool(create):
        name = self._generate_name(mongo_db=mongo_db)
    mongo_doc = self._check_name(name, mongo_db, create, id)
    self._keylist = {
        'port': type(''),
        'localboot': type(True),
        'setupbmc': type(True),
        'service': type(True)
    }
    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        group = Group(group, mongo_db=self._mongo_db)
        mongo_doc = {
            'name': name,
            'group': group.DBRef,
            'interfaces': None,
            'mac': None,
            'switch': None,
            'port': None,
            'localboot': localboot,
            'setupbmc': setupbmc,
            'service': service
        }
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        # Allocate an IP on every interface defined by the group,
        # plus the BMC interface
        for interface in group._get_json()['interfaces']:
            self.add_ip(interface)
        self.add_bmc_ip()
        self.link(group)
        self.link(cluster)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
    self._logger = logging.getLogger(__name__ + '.' + self._name)
def _generate_name(self, mongo_db):
    """Generate the next free node name, e.g. 'node007'.

    Scans all nodes linked back to the cluster, takes the highest
    numeric suffix among names of the form <nodeprefix><number>, and
    returns prefix + (max + 1) zero-padded to 'nodedigits' digits.
    """
    cluster = Cluster(mongo_db)
    prefix = cluster.get('nodeprefix')
    digits = cluster.get('nodedigits')
    back_links = cluster.get_back_links()

    max_num = 0
    for link in back_links:
        if not link['collection'] == self._collection_name:
            continue
        node = Node(id=link['DBRef'].id, mongo_db=mongo_db)
        name = node.name
        # BUG FIX: the previous code used name.lstrip(prefix), which
        # strips any *characters* of the prefix (not the prefix string),
        # mangling numbers whose digits/letters overlap prefix letters.
        # Slice the literal prefix off instead.
        if not name.startswith(prefix):
            continue
        try:
            nnode = int(name[len(prefix):])
        except ValueError:
            continue
        if nnode > max_num:
            max_num = nnode

    ret_name = prefix + str(max_num + 1).zfill(digits)
    return ret_name
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             userid=3, user='******', password='******',
             netchannel=1, mgmtchannel=1):
    """Create or load a bmcsetup object.

    userid - default user id
    user - username
    password - password
    netchannel - network channel
    mgmtchannel - management channel
    """
    self._logger.debug("Arguments to function '{}".format(
        self._debug_function()))
    self._collection_name = 'bmcsetup'
    mongo_doc = self._check_name(name, mongo_db, create, id)
    self._keylist = {
        'userid': type(0),
        'user': type(''),
        'password': type(''),
        'netchannel': type(0),
        'mgmtchannel': type(0)
    }
    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        # Validate argument types against the schema by inspecting the
        # local variables of this call frame
        passed_vars = inspect.currentframe().f_locals
        for key in self._keylist:
            if type(passed_vars[key]) is not self._keylist[key]:
                self._logger.error("Argument '{}' should be '{}'".format(
                    key, self._keylist[key]))
                raise RuntimeError
        mongo_doc = {
            'name': name,
            'userid': userid,
            'user': user,
            'password': password,
            'netchannel': netchannel,
            'mgmtchannel': mgmtchannel
        }
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        self.link(cluster)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
    self._logger = logging.getLogger(__name__ + '.' + self._name)
def render_script(self, name):
    """Render the 'boot' or 'install' template for this node.

    Returns the generated template output, or None for an unknown
    script name.
    """
    scripts = ['boot', 'install']
    if name not in scripts:
        self.log.error(
            "'{}' is not correct script. Valid options are: '{}'"
            .format(name, scripts)
        )
        return None

    cluster = Cluster(mongo_db=self._mongo_db)
    self._get_group()
    loader = template.Loader(cluster.get('path') + '/templates')

    protocol = 'https' if cluster.get('frontend_https') else 'http'

    # pick the parameter set and template for the requested script
    if name == 'boot':
        params = self.boot_params
        template_file = 'templ_nodeboot.cfg'
    else:
        params = self.install_params
        template_file = 'templ_install.cfg'

    params['protocol'] = protocol
    params['server_ip'] = cluster.get('frontend_address')
    params['server_port'] = cluster.get('frontend_port')
    return loader.load(template_file).generate(p=params)
def create_tarball(self): # TODO check if root cluster = Cluster(mongo_db = self._mongo_db) path = cluster.get('path') if not path: self._logger.error("Path needs to be configured.") return None tracker_address = cluster.get('fronend_address') if tracker_address == '': self._logger.error("Tracker address needs to be configured.") return None tracker_port = cluster.get('frontend_port') if tracker_port == 0: self._logger.error("Tracker port needs to be configured.") return None user = cluster.get('user') if not user: self._logger.error("User needs to be configured.") return None path_to_store = path + "/torrents" user_id = pwd.getpwnam(user).pw_uid grp_id = pwd.getpwnam(user).pw_gid if not os.path.exists(path_to_store): os.makedirs(path_to_store) os.chown(path_to_store, user_id, grp_id) os.chmod(path_to_store, 0644) uid = str(uuid.uuid4()) tarfile_path = path_to_store + "/" + uid + ".tgz" image_path = self.get('path') try: tar_out = subprocess.Popen(['/usr/bin/tar', '-C', image_path + '/.', '--one-file-system', '--xattrs', '--selinux', '--acls', '--checkpoint=100', '-c', '-z', '-f', tarfile_path, '.'], stderr=subprocess.PIPE) # dirty, but 4 times faster stat_symb = ['\\', '|', '/', '-'] i = 0 while True: line = tar_out.stderr.readline() if line == '': break i = i + 1 sys.stdout.write(stat_symb[i % len(stat_symb)]) sys.stdout.write('\r') except: os.remove(tarfile_path) sys.stdout.write('\r') return None os.chown(tarfile_path, user_id, grp_id) os.chmod(tarfile_path, 0644) self.set('tarball', str(uid)) return True
def create_torrent(self):
    """Create a torrent file for the image tarball (legacy variant).

    The torrent is written next to the tarball, its uid is recorded in
    the DB under 'torrent', and the file is then renamed to
    <uid>.torrent.  Returns True on success, None on any missing
    prerequisite.
    """
    # TODO check if root
    tarball_uid = self.get('tarball')
    cluster = Cluster(mongo_db=self._mongo_db)
    if not bool(tarball_uid):
        self._logger.error("No tarball in DB.")
        return None

    tarball = cluster.get('path') + "/torrents/" + tarball_uid + ".tgz"
    if not os.path.exists(tarball):
        self._logger.error("Wrong path in DB.")
        return None

    tracker_address = cluster.get('frontend_address')
    if tracker_address == '':
        self._logger.error("Tracker address needs to be configured.")
        return None

    tracker_port = cluster.get('frontend_port')
    if tracker_port == 0:
        self._logger.error("Tracker port needs to be configured.")
        return None

    user = cluster.get('user')
    if not user:
        self._logger.error("User needs to be configured.")
        return None

    owner_uid = pwd.getpwnam(user).pw_uid
    owner_gid = pwd.getpwnam(user).pw_gid

    # libtorrent hashes files relative to cwd, so work next to the tarball
    old_cwd = os.getcwd()
    os.chdir(os.path.dirname(tarball))

    uid = str(uuid.uuid4())
    torrentfile = str(cluster.get('path')) + "/torrents/" + uid

    fs = libtorrent.file_storage()
    libtorrent.add_files(fs, os.path.basename(tarball))
    torrent = libtorrent.create_torrent(fs)
    torrent.add_tracker("http://" + str(tracker_address) +
                        ":" + str(tracker_port) + "/announce")
    torrent.set_creator(torrent_key)
    torrent.set_comment(uid)
    libtorrent.set_piece_hashes(torrent, ".")

    handle = open(torrentfile, 'w')
    handle.write(libtorrent.bencode(torrent.generate()))
    handle.close()

    self.set('torrent', str(uid))
    os.chown(torrentfile, owner_uid, owner_gid)
    shutil.move(torrentfile, torrentfile + ".torrent")
    os.chdir(old_cwd)
    return True
def __init__(self, name = None, mongo_db = None, create = False, id = None,
        network = None, ip = None):
    """Create or load an 'otherdev' object (legacy variant).

    netwwork - network device connected
    ip - ip of the switch
    """
    self._logger.debug("Arguments to function '{}".format(self._debug_function()))
    self._collection_name = 'otherdev'
    mongo_doc = self._check_name(name, mongo_db, create, id)
    # No typed keys in this schema, so the validation loop below is
    # effectively a no-op
    self._keylist = {}
    if create:
        cluster = Cluster(mongo_db = self._mongo_db)
        passed_vars = inspect.currentframe().f_locals
        for key in self._keylist:
            if type(passed_vars[key]) is not self._keylist[key]:
                self._logger.error("Argument '{}' should be '{}'".format(key, self._keylist[key]))
                raise RuntimeError
        # 'connected' maps network ObjectId (as str) -> relative IP number
        if not bool(network):
            connected = {}
        else:
            if not bool(ip):
                self._logger.error("IP needs to be specified")
                raise RuntimeError
            net = Network(name = network, mongo_db = self._mongo_db)
            # reserve_ip returns the relative IP number, falsy on failure
            ip = net.reserve_ip(ip, ignore_errors = False)
            if not bool(ip):
                raise RuntimeError
            connected = {str(net.DBRef.id): ip}
        mongo_doc = { 'name': name, 'connected': connected}
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        self.link(cluster)
        if bool(connected):
            self.link(net)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
def __init__(self, name = None, mongo_db = None, create = False, id = None,
        network = None, ip = None, read = 'public', rw = 'private',
        oid = None):
    """Create or load a switch object.

    ip - ip of the switch
    read - read community
    rw - rw community
    oid - could be, for instance
          .1.3.6.1.2.1.17.7.1.2.2.1.2
          .1.3.6.1.2.1.17.4.3.1.2
          .1.3.6.1.2.1.17.7.1.2.2
          .1.3.6.1.2.1.17.4.3.1.2
    """
    self._logger.debug("Arguments to function '{}".format(self._debug_function()))
    self._collection_name = 'switch'
    mongo_doc = self._check_name(name, mongo_db, create, id)
    self._keylist = { 'ip': type(''), 'read': type(''), 'rw': type(''),
                      'oid': type(''), 'network': type('')}
    if create:
        cluster = Cluster(mongo_db = self._mongo_db)
        # Validate argument types against the schema via the call frame.
        # NOTE(review): this check requires ip/oid/network to be str on
        # creation — the None defaults would fail it; confirm callers
        # always pass strings.
        passed_vars = inspect.currentframe().f_locals
        for key in self._keylist:
            if type(passed_vars[key]) is not self._keylist[key]:
                self._logger.error("Argument '{}' should be '{}'".format(key, self._keylist[key]))
                raise RuntimeError
        net = Network(name = network, mongo_db = self._mongo_db)
        # reserve_ip returns the relative IP number, falsy on failure
        ip = net.reserve_ip(ip)
        if not bool(ip):
            self._logger.error("Could not acquire ip for switch.")
            raise RuntimeError
        mongo_doc = { 'name': name, 'network': net.DBRef, 'ip': ip,
                      'read': read, 'rw': rw, 'oid': oid}
        self._logger.debug("mongo_doc: '{}'".format(mongo_doc))
        self._name = name
        self._id = self._mongo_collection.insert(mongo_doc)
        self._DBRef = DBRef(self._collection_name, self._id)
        self.link(cluster)
        self.link(net)
    else:
        self._name = mongo_doc['name']
        self._id = mongo_doc['_id']
        self._DBRef = DBRef(self._collection_name, self._id)
def render_script(self, name):
    """Generate the rendered 'boot' or 'install' script for this node.

    Returns the template output, or None for an unknown script name.
    """
    known_scripts = ['boot', 'install']
    if name not in known_scripts:
        self.log.error(
            "'{}' is not correct script. Valid options are: '{}'".format(
                name, known_scripts))
        return None

    cluster = Cluster(mongo_db=self._mongo_db)
    self._get_group()

    base_path = cluster.get('path')
    loader = template.Loader(base_path + '/templates')

    if cluster.get('frontend_https'):
        scheme = 'https'
    else:
        scheme = 'http'

    # values shared by both templates
    frontend = {
        'protocol': scheme,
        'server_ip': cluster.get('frontend_address'),
        'server_port': cluster.get('frontend_port'),
    }

    if name == 'boot':
        p = self.boot_params
        p.update(frontend)
        return loader.load('templ_nodeboot.cfg').generate(p=p)

    p = self.install_params
    p.update(frontend)
    return loader.load('templ_install.cfg').generate(p=p)
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             group=None, localboot=False, setupbmc=True, service=False,
             comment=''):
    """Create or load a node object.

    name - optional; autogenerated on creation when omitted
    group - the group the node belongs to; required

    FLAGS:
    localboot - boot from localdisk
    setupbmc - whether we setup ipmi on install
    service - do not install, boot into installer (dracut environment)
    """
    self.log.debug("function {} args".format(self._debug_function()))

    # Define the schema used to represent node objects
    self._collection_name = 'node'
    self._keylist = {
        'port': type(''),
        'localboot': type(True),
        'setupbmc': type(True),
        'service': type(True),
        'mac': type(''),
        'comment': type(''),
    }

    # Check if this node is already present in the datastore
    # Read it if that is the case
    node = self._get_object(name, mongo_db, create, id)

    # Accept either a Group instance or a group name
    self.group = None
    if group:
        if type(group) == Group:
            self.group = group
        if type(group) == str:
            self.group = Group(group, mongo_db=self._mongo_db)

    if create:
        if not group:
            err_msg = "Group needs to be specified when creating node."
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        cluster = Cluster(mongo_db=self._mongo_db)

        # If a name is not provided, generate one
        if not bool(name):
            name = self._generate_name(cluster, mongo_db=mongo_db)

        # Store the new node in the datastore
        node = {
            'name': name,
            'group': self.group.DBRef,
            'interfaces': {},
            'mac': None,
            'switch': None,
            'port': None,
            'localboot': localboot,
            'setupbmc': setupbmc,
            'service': service,
            'comment': comment
        }

        self.log.debug("Saving node '{}' to the datastore".format(node))

        self.store(node)

        # check if we are able to allocate IPs for all interfaces
        error_on_alloc = False
        configured_interfaces = []
        for interface_name in self.group.list_ifs().keys():
            configured_interfaces.append(interface_name)
            if not self.add_ip(interface_name):
                error_on_alloc = True
                break

        if error_on_alloc:
            # roll back allocations and throw exception;
            # the last list entry is the interface that failed, so it is
            # skipped by the [:-1] slice and named in the error message
            for interface_name in configured_interfaces[:-1]:
                self.del_ip(interface_name)

            err_msg = ("Unable to allocate IP address " +
                       "for interface '{}'").format(
                configured_interfaces[-1])
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        # Link this node to its group and the current cluster
        self.link(self.group)
        self.link(cluster)

    if group:
        # check if group specified is the group node belongs to
        if self.group.DBRef != self._json['group']:
            err_msg = ("DBref of the group is not the same " +
                       "the node node belongs to")
            self.log.error(err_msg)
            raise RuntimeError, err_msg

    self.log = logging.getLogger(__name__ + '.' + self._json['name'])
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             NETWORK=None, PREFIX=None, ns_hostname=None, ns_ip=None):
    """Create or load a network object.

    create - should be True if we need create a network
    NETWORK - network
    PREFIX - the prefix in a networks CIDR format
    """
    self.log.debug("function {} args".format(self._debug_function()))

    # Define the schema used to represent network objects
    # NOTE(review): PREFIX is declared as str here but converted with
    # int(PREFIX) below — confirm which type callers actually pass
    self._collection_name = 'network'
    self._keylist = {'NETWORK': long,
                     'PREFIX': type(''),
                     'ns_hostname': type(''),
                     'ns_ip': type('')}

    # Check if this network is already present in the datastore
    # Read it if that is the case
    net = self._check_name(name, mongo_db, create, id)

    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        num_subnet = utils.ip.get_num_subnet(NETWORK, PREFIX)
        # Free relative-IP ranges; 0 (network) and the broadcast
        # address are excluded
        flist = [{'start': 1, 'end': (1 << (32 - int(PREFIX))) - 2}]

        # Try to guess the nameserver hostname if none provided
        if not ns_hostname:
            ns_hostname = utils.ip.guess_ns_hostname()

        # Define a new mongo document
        net = {'name': name,
               'NETWORK': num_subnet,
               'PREFIX': PREFIX,
               'freelist': flist,
               'ns_hostname': ns_hostname,
               'ns_ip': None}

        # Store the new network in the datastore
        self.log.debug("Saving net '{}' to the datastore".format(net))

        self._name = name
        self._id = self._mongo_collection.insert(net)
        self._DBRef = DBRef(self._collection_name, self._id)

        # Link this network to the current cluster
        self.link(cluster)

        # If no IP address is provided for the nameserver, default to
        # the last usable address of the subnet
        if ns_ip is None:
            ns_ip = utils.ip.reltoa(num_subnet, flist[0]['end'])

        self.set('ns_ip', ns_ip)
    else:
        self._name = net['name']
        self._id = net['_id']
        self._DBRef = DBRef(self._collection_name, self._id)

    self.log = logging.getLogger(__name__ + '.' + self._name)
def __init__(self, name=None, mongo_db=None, create=False, id=None, prescript='', bmcsetup=None, partscript='', osimage=None, interfaces=[], postscript='', torrent_if=None, domain=None, comment=''): """ prescript - preinstall script bmcsetup - bmcsetup options partscript - parition script osimage - osimage interfaces - list of the newtork interfaces postscript - postinstall script """ self.log.debug("function args {}".format(self._debug_function())) # Define the schema used to represent group objects self._collection_name = 'group' self._keylist = { 'prescript': type(''), 'partscript': type(''), 'postscript': type(''), 'torrent_if': type(''), 'comment': type(''), } # Check if this group is already present in the datastore # Read it if that is the case group = self._get_object(name, mongo_db, create, id) if create: cluster = Cluster(mongo_db=self._mongo_db) osimageobj = OsImage(osimage, mongo_db=self._mongo_db) (bmcobj, domainobj) = (None, None) if bmcsetup: bmcobj = BMCSetup(bmcsetup, mongo_db=self._mongo_db).DBRef if domain: domainobj = Network(domain, mongo_db=self._mongo_db).DBRef if interfaces and type(interfaces) is not list: err_msg = "'interfaces' should be list" self.log.error(err_msg) raise RuntimeError, err_msg if not interfaces: interfaces = [] if_dict = {} for interface in interfaces: if_dict[uuid.uuid4().hex] = { 'name': interface, 'network': { '4': None, '6': None }, 'params': '' } if not partscript: partscript = "mount -t tmpfs tmpfs /sysroot" if not postscript: postscript = ("cat << EOF >> /sysroot/etc/fstab\n" "tmpfs / tmpfs defaults 0 0\n" "EOF") # Store the new group in the datastore group = { 'name': name, 'prescript': prescript, 'bmcsetup': bmcobj, 'partscript': partscript, 'osimage': osimageobj.DBRef, 'interfaces': if_dict, 'postscript': postscript, 'domain': domainobj, 'torrent_if': torrent_if, 'comment': comment, } self.log.debug("Saving group '{}' to the datastore".format(group)) self.store(group) # Link this group to its dependencies and 
the current cluster self.link(cluster) if bmcobj: self.link(bmcobj) if domainobj: self.link(domainobj) self.link(osimageobj) self.log = logging.getLogger('group.' + self._name) self._networks = {}
def create_tarball(self):
    """Pack the image tree into <cluster path>/torrents/<uuid>.tgz.

    tar runs inside a chroot of the image so all paths are taken
    relative to the image root; the result is copied out afterwards.
    Records the uuid under 'tarball' on success.  Returns True/False.
    """
    # TODO check if root
    cluster = Cluster(mongo_db=self._mongo_db)
    path = cluster.get('path')
    user = cluster.get('user')
    user_id = pwd.getpwnam(user).pw_uid
    grp_id = pwd.getpwnam(user).pw_gid
    path_to_store = path + "/torrents"
    if not os.path.exists(path_to_store):
        os.makedirs(path_to_store)
        os.chown(path_to_store, user_id, grp_id)
        os.chmod(path_to_store, 0644)
    image_path = self.get('path')
    # Keep a handle on the real root so we can escape the chroot later
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(image_path)
    uid = str(uuid.uuid4())
    tarfile = uid + ".tgz"
    try:
        # dirty, but 4 times faster
        tar_out = subprocess.Popen([
            '/usr/bin/tar',
            '-C', '/',
            '--one-file-system',
            '--xattrs',
            '--selinux',
            '--acls',
            '--checkpoint=100',
            '--exclude=./tmp/' + tarfile,
            '--use-compress-program=/usr/bin/pigz',
            '-c', '-f', '/tmp/' + tarfile, '.'
        ], stderr=subprocess.PIPE)
        # spin a progress indicator off tar's --checkpoint output
        stat_symb = ['\\', '|', '/', '-']
        i = 0
        while True:
            line = tar_out.stderr.readline()
            if line == '':
                break
            i = i + 1
            sys.stdout.write(stat_symb[i % len(stat_symb)])
            sys.stdout.write('\r')
    except:
        # Broad on purpose: report the failure, remove the partial
        # tarball, and make sure we leave the chroot before returning
        exc_type, exc_value, exc_traceback = sys.exc_info()
        if exc_type == exceptions.KeyboardInterrupt:
            self.log.error('Keyboard interrupt.')
        else:
            self.log.error(exc_value)
            self.log.debug(traceback.format_exc())
        if os.path.isfile('/tmp/' + tarfile):
            os.remove('/tmp/' + tarfile)
        sys.stdout.write('\r')
        os.fchdir(real_root)
        os.chroot(".")
        os.close(real_root)
        return False
    # Escape the chroot: fchdir to the saved real-root fd, then re-chroot
    os.fchdir(real_root)
    os.chroot(".")
    os.close(real_root)
    # copy image, so permissions and selinux contexts
    # will be inherited from parent folder
    shutil.copy(image_path + '/tmp/' + tarfile, path_to_store)
    os.remove(image_path + '/tmp/' + tarfile)
    os.chown(path_to_store + '/' + tarfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + tarfile, 0644)
    self.set('tarball', str(uid))
    return True
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             path='', kernver='', kernopts='', comment='',
             grab_list='grab_default_centos.lst'):
    """
    Represent an OS image used to provision nodes.

    path      - path to / of the image (will be converted to absolute)
    kernver   - kernel version (will be checked on creation)
    kernopts  - kernel options
    grab_list - rsync exclude list for grabbing live node to image

    create - when True a new osimage document is created in the
    datastore; otherwise an existing one is looked up by name/id.
    """
    self.log.debug("function args {}".format(self._debug_function()))

    # Define the schema used to represent osimage objects
    self._collection_name = 'osimage'
    self._keylist = {'path': type(''),
                     'kernver': type(''),
                     'kernopts': type(''),
                     'kernmodules': type(''),
                     'dracutmodules': type(''),
                     'tarball': type(''),
                     'torrent': type(''),
                     'kernfile': type(''),
                     'initrdfile': type(''),
                     'grab_exclude_list': type(''),
                     'grab_filesystems': type(''),
                     'comment': type('')}

    # Check if this osimage is already present in the datastore
    # Read it if that is the case
    osimage = self._get_object(name, mongo_db, create, id)

    if bool(kernopts) and type(kernopts) is not str:
        err_msg = "Kernel options should be 'str' type"
        self.log.error(err_msg)
        # call syntax: identical on py2, also valid on py3
        raise RuntimeError(err_msg)

    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        path = os.path.abspath(path)

        # a path may belong to at most one osimage
        duplicate = self._mongo_collection.find_one({'path': path})
        if duplicate:
            err_msg = ("Path belongs to osimage '{}'"
                       .format(duplicate['name']))
            self.log.error(err_msg)
            raise RuntimeError(err_msg)

        if not os.path.isdir(path):
            err_msg = "'{}' is not a valid directory".format(path)
            self.log.error(err_msg)
            raise RuntimeError(err_msg)

        # validate the requested kernel against what is installed
        # in the image; default to the first one found
        kernels = self.get_package_ver(path, 'kernel')
        if not kernels:
            err_msg = "No kernels installed in '{}'".format(path)
            self.log.error(err_msg)
            raise RuntimeError(err_msg)
        elif not kernver:
            kernver = kernels[0]
        elif kernver not in kernels:
            err_msg = "Available kernels are '{}'".format(kernels)
            self.log.error(err_msg)
            raise RuntimeError(err_msg)

        grab_list_path = cluster.get('path') + '/templates/' + grab_list
        if not os.path.isfile(grab_list_path):
            err_msg = "'{}' is not a file.".format(grab_list_path)
            self.log.error(err_msg)
            raise RuntimeError(err_msg)

        with open(grab_list_path) as lst:
            grab_list_content = lst.read()

        # Store the new osimage in the datastore
        osimage = {'name': name,
                   'path': path,
                   'kernver': kernver,
                   'kernopts': kernopts,
                   'kernfile': '',
                   'initrdfile': '',
                   'dracutmodules': 'luna,-i18n,-plymouth',
                   'kernmodules': 'ipmi_devintf,ipmi_si,ipmi_msghandler',
                   'grab_exclude_list': grab_list_content,
                   'grab_filesystems': '/,/boot',
                   'comment': comment}

        self.log.debug("Saving osimage '{}' to the datastore"
                       .format(osimage))

        self.store(osimage)

        # Link this osimage to its dependencies and the current cluster
        self.link(cluster)

    self.log = logging.getLogger(__name__ + '.' + self._name)
def pack_boot(self):
    """
    Build an initrd (with the luna dracut module) inside a chroot of the
    image and copy it, together with the kernel, to <cluster path>/boot.

    On success sets 'kernfile'/'initrdfile' on the osimage and returns
    True; returns False on any failure.
    """
    def mount(source, target, fs):
        subprocess.Popen(['/usr/bin/mount', '-t', fs, source, target])

    def umount(source):
        subprocess.Popen(['/usr/bin/umount', source])

    def prepare_mounts(path):
        # dracut needs /dev, /proc and /sys inside the chroot
        mount('devtmpfs', path + '/dev', 'devtmpfs')
        mount('proc', path + '/proc', 'proc')
        mount('sysfs', path + '/sys', 'sysfs')

    def cleanup_mounts(path):
        umount(path + '/dev')
        umount(path + '/proc')
        umount(path + '/sys')

    tmp_path = '/tmp'  # in chroot env
    image_path = self.get('path')
    kernver = self.get('kernver')
    kernfile = self.name + '-vmlinuz-' + kernver
    initrdfile = self.name + '-initramfs-' + kernver

    cluster = Cluster(mongo_db=self._mongo_db)
    path = cluster.get('path')
    user = cluster.get('user')
    path_to_store = path + "/boot"
    user_id = pwd.getpwnam(user).pw_uid
    grp_id = pwd.getpwnam(user).pw_gid

    if not os.path.exists(path_to_store):
        os.makedirs(path_to_store)
        os.chown(path_to_store, user_id, grp_id)

    # translate the comma-separated module lists into dracut flags;
    # a leading '-' means "omit this module/driver"
    modules_add = []
    modules_remove = []
    drivers_add = []
    drivers_remove = []

    dracutmodules = self.get('dracutmodules')
    if dracutmodules:
        for i in dracutmodules.split(','):
            if i[0] != '-':
                modules_add.extend(['--add', i])
            else:
                modules_remove.extend(['--omit', i[1:]])

    kernmodules = self.get('kernmodules')
    if kernmodules:
        for i in kernmodules.split(','):
            if i[0] != '-':
                drivers_add.extend(['--add-drivers', i])
            else:
                drivers_remove.extend(['--omit-drivers', i[1:]])

    prepare_mounts(image_path)

    # keep a handle on the real root so we can escape the chroot later
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(image_path)
    chroot_path = os.open("/", os.O_DIRECTORY)
    os.fchdir(chroot_path)

    dracut_succeed = True
    create = None

    try:
        # dracut must know about the luna module, otherwise the
        # resulting initrd cannot network-boot the node
        dracut_modules = subprocess.Popen(
            ['/usr/sbin/dracut', '--kver', kernver, '--list-modules'],
            stdout=subprocess.PIPE)

        luna_exists = False
        while dracut_modules.poll() is None:
            line = dracut_modules.stdout.readline()
            if line.strip() == 'luna':
                luna_exists = True
                break

        if not luna_exists:
            err_msg = ("No luna dracut module in osimage '{}'".format(
                self.name))
            self.log.error(err_msg)
            # call syntax: identical on py2, also valid on py3
            raise RuntimeError(err_msg)

        dracut_cmd = (['/usr/sbin/dracut', '--force', '--kver', kernver]
                      + modules_add + modules_remove
                      + drivers_add + drivers_remove
                      + [tmp_path + '/' + initrdfile])

        create = subprocess.Popen(dracut_cmd, stdout=subprocess.PIPE)
        while create.poll() is None:
            line = create.stdout.readline()

    except:
        # bare except is deliberate: even on KeyboardInterrupt we must
        # fall through to restore the real root and unmount the chroot
        dracut_succeed = False

    if create and create.returncode:
        dracut_succeed = False

    if not create:
        dracut_succeed = False

    # escape the chroot and undo the bind mounts in all cases
    os.fchdir(real_root)
    os.chroot(".")
    os.close(real_root)
    os.close(chroot_path)

    cleanup_mounts(image_path)

    if not dracut_succeed:
        self.log.error("Error while building initrd.")
        return False

    initrd_path = image_path + tmp_path + '/' + initrdfile
    kernel_path = image_path + '/boot/vmlinuz-' + kernver

    if not os.path.isfile(kernel_path):
        self.log.error("Unable to find kernel in {}".format(kernel_path))
        return False

    if not os.path.isfile(initrd_path):
        self.log.error("Unable to find initrd in {}".format(initrd_path))
        return False

    # copy initrd file to inherit perms from parent folder
    shutil.copy(initrd_path, path_to_store + '/' + initrdfile)
    os.remove(initrd_path)
    shutil.copy(kernel_path, path_to_store + '/' + kernfile)

    os.chown(path_to_store + '/' + initrdfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + initrdfile, 0o644)
    os.chown(path_to_store + '/' + kernfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + kernfile, 0o644)

    self.set('kernfile', kernfile)
    self.set('initrdfile', initrdfile)

    return True
def pack_boot(self):
    """
    Build an initrd for this osimage inside a chroot of the image and
    copy it, together with the matching kernel, to <cluster path>/boot.

    Sets the 'kernfile'/'initrdfile' keys on success and returns True;
    returns False on any failure.

    NOTE(review): this appears to be a duplicate of another pack_boot
    implementation in this file — confirm which one is live.
    """
    def mount(source, target, fs):
        subprocess.Popen(['/usr/bin/mount', '-t', fs, source, target])

    def umount(source):
        subprocess.Popen(['/usr/bin/umount', source])

    def prepare_mounts(path):
        # dracut needs /dev, /proc and /sys inside the chroot
        mount('devtmpfs', path + '/dev', 'devtmpfs')
        mount('proc', path + '/proc', 'proc')
        mount('sysfs', path + '/sys', 'sysfs')

    def cleanup_mounts(path):
        umount(path + '/dev')
        umount(path + '/proc')
        umount(path + '/sys')

    tmp_path = '/tmp'  # in chroot env
    image_path = self.get('path')
    kernver = self.get('kernver')
    kernfile = self.name + '-vmlinuz-' + kernver
    initrdfile = self.name + '-initramfs-' + kernver

    cluster = Cluster(mongo_db=self._mongo_db)
    path = cluster.get('path')
    user = cluster.get('user')
    path_to_store = path + "/boot"
    user_id = pwd.getpwnam(user).pw_uid
    grp_id = pwd.getpwnam(user).pw_gid

    if not os.path.exists(path_to_store):
        os.makedirs(path_to_store)
        os.chown(path_to_store, user_id, grp_id)

    # Translate the comma-separated module lists into dracut flags;
    # a leading '-' means "omit this module/driver".
    modules_add = []
    modules_remove = []
    drivers_add = []
    drivers_remove = []

    dracutmodules = self.get('dracutmodules')
    if dracutmodules:
        for i in dracutmodules.split(','):
            if i[0] != '-':
                modules_add.extend(['--add', i])
            else:
                modules_remove.extend(['--omit', i[1:]])

    kernmodules = self.get('kernmodules')
    if kernmodules:
        for i in kernmodules.split(','):
            if i[0] != '-':
                drivers_add.extend(['--add-drivers', i])
            else:
                drivers_remove.extend(['--omit-drivers', i[1:]])

    prepare_mounts(image_path)

    # keep a handle on the real root so we can escape the chroot later
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(image_path)
    chroot_path = os.open("/", os.O_DIRECTORY)
    os.fchdir(chroot_path)

    dracut_succeed = True
    create = None

    try:
        # the luna dracut module must be available inside the image,
        # otherwise the resulting initrd cannot network-boot the node
        dracut_modules = subprocess.Popen(['/usr/sbin/dracut',
                                           '--kver', kernver,
                                           '--list-modules'],
                                          stdout=subprocess.PIPE)

        luna_exists = False
        while dracut_modules.poll() is None:
            line = dracut_modules.stdout.readline()
            if line.strip() == 'luna':
                luna_exists = True
                break

        if not luna_exists:
            err_msg = ("No luna dracut module in osimage '{}'"
                       .format(self.name))
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        dracut_cmd = (['/usr/sbin/dracut', '--force', '--kver', kernver]
                      + modules_add + modules_remove
                      + drivers_add + drivers_remove
                      + [tmp_path + '/' + initrdfile])

        create = subprocess.Popen(dracut_cmd, stdout=subprocess.PIPE)
        while create.poll() is None:
            line = create.stdout.readline()

    except:
        # bare except is deliberate: even on KeyboardInterrupt we must
        # fall through to restore the real root and unmount the chroot
        dracut_succeed = False

    if create and create.returncode:
        dracut_succeed = False

    if not create:
        dracut_succeed = False

    # escape the chroot and undo the bind mounts in all cases
    os.fchdir(real_root)
    os.chroot(".")
    os.close(real_root)
    os.close(chroot_path)

    cleanup_mounts(image_path)

    if not dracut_succeed:
        self.log.error("Error while building initrd.")
        return False

    initrd_path = image_path + tmp_path + '/' + initrdfile
    kernel_path = image_path + '/boot/vmlinuz-' + kernver

    if not os.path.isfile(kernel_path):
        self.log.error("Unable to find kernel in {}".format(kernel_path))
        return False

    if not os.path.isfile(initrd_path):
        self.log.error("Unable to find initrd in {}".format(initrd_path))
        return False

    # copy initrd file to inherit perms from parent folder
    shutil.copy(initrd_path, path_to_store + '/' + initrdfile)
    os.remove(initrd_path)
    shutil.copy(kernel_path, path_to_store + '/' + kernfile)

    os.chown(path_to_store + '/' + initrdfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + initrdfile, 0644)
    os.chown(path_to_store + '/' + kernfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + kernfile, 0644)

    self.set('kernfile', kernfile)
    self.set('initrdfile', initrdfile)

    return True
def __init__(self, name=None, mongo_db=None, create=False, id=None, userid=3, user='******', password='******', netchannel=1, mgmtchannel=1, comment=''): """ userid - default user id user - username password - pasword netchannel - network channel mgmtchannel - management channel """ self.log.debug("function args '{}".format(self._debug_function())) # Define the schema used to represent BMC configuration objects self._collection_name = 'bmcsetup' self._keylist = { 'userid': type(0), 'user': type(''), 'password': type(''), 'netchannel': type(0), 'mgmtchannel': type(0), 'comment': type(''), } # Check if this BMC config is already present in the datastore # Read it if that is the case bmc = self._get_object(name, mongo_db, create, id) if create: cluster = Cluster(mongo_db=self._mongo_db) # Verify that all the keywords arguments have the correct types # as specified in the self._keylist args = locals() for key in self._keylist: if type(args[key]) is not self._keylist[key]: err_msg = ("Argument '{}' should be '{}'".format( key, self._keylist[key])) self.log.error(err_msg) raise RuntimeError, err_msg # Store the new BMC config in the datastore bmc = { 'name': name, 'userid': userid, 'user': user, 'password': password, 'netchannel': netchannel, 'mgmtchannel': mgmtchannel, 'comment': comment } self.log.debug("Saving BMC conf '{}' to the datastore".format(bmc)) self.store(bmc) # Link this BMC config to the current cluster self.link(cluster) self.log = logging.getLogger(__name__ + '.' + self._name)
def create_tarball(self): # TODO check if root cluster = Cluster(mongo_db = self._mongo_db) path = cluster.get('path') if not path: self._logger.error("Path needs to be configured.") return None tracker_address = cluster.get('fronend_address') if tracker_address == '': self._logger.error("Tracker address needs to be configured.") return None tracker_port = cluster.get('frontend_port') if tracker_port == 0: self._logger.error("Tracker port needs to be configured.") return None user = cluster.get('user') if not user: self._logger.error("User needs to be configured.") return None #group = cluster.get('group') #if not group: # self._logger.error("Group needs to be configured.") # return None path_to_store = path + "/torrents" user_id = pwd.getpwnam(user).pw_uid grp_id = pwd.getpwnam(user).pw_gid if not os.path.exists(path_to_store): os.makedirs(path_to_store) os.chown(path_to_store, user_id, grp_id) os.chmod(path_to_store, 0644) uid = str(uuid.uuid4()) tarfile_path = path_to_store + "/" + uid + ".tgz" image_path = self.get('path') #tarball = tarfile.open(tarfile_path, "w:gz") #tarball.add(image_path, arcname=os.path.basename(image_path + "/.")) #tarball.close() try: tar_out = subprocess.Popen(['/usr/bin/tar', '-C', image_path + '/.', '--one-file-system', '--xattrs', '--selinux', '--acls', '--checkpoint=100', '-c', '-z', '-f', tarfile_path, '.'], stderr=subprocess.PIPE) # dirty, but 4 times faster stat_symb = ['\\', '|', '/', '-'] i = 0 while True: line = tar_out.stderr.readline() if line == '': break i = i + 1 sys.stdout.write(stat_symb[i % len(stat_symb)]) sys.stdout.write('\r') except: os.remove(tarfile_path) sys.stdout.write('\r') return None os.chown(tarfile_path, user_id, grp_id) os.chmod(tarfile_path, 0644) self.set('tarball', str(uid)) return True
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             path='', kernver='', kernopts='', comment='',
             grab_list='grab_default_centos.lst'):
    """
    Represent an OS image used to provision nodes.

    path      - path to / of the image (will be converted to absolute)
    kernver   - kernel version (will be checked on creation)
    kernopts  - kernel options
    grab_list - rsync exclude list for grabbing live node to image

    create - when True a new osimage document is created in the
    datastore; otherwise an existing one is looked up by name/id.

    NOTE(review): this appears to be a duplicate of another osimage
    __init__ in this file — confirm which one is live.
    """
    self.log.debug("function args {}".format(self._debug_function()))

    # Define the schema used to represent osimage objects
    self._collection_name = 'osimage'
    self._keylist = {
        'path': type(''),
        'kernver': type(''),
        'kernopts': type(''),
        'kernmodules': type(''),
        'dracutmodules': type(''),
        'tarball': type(''),
        'torrent': type(''),
        'kernfile': type(''),
        'initrdfile': type(''),
        'grab_exclude_list': type(''),
        'grab_filesystems': type(''),
        'comment': type('')
    }

    # Check if this osimage is already present in the datastore
    # Read it if that is the case
    osimage = self._get_object(name, mongo_db, create, id)

    if bool(kernopts) and type(kernopts) is not str:
        err_msg = "Kernel options should be 'str' type"
        self.log.error(err_msg)
        raise RuntimeError, err_msg

    if create:
        cluster = Cluster(mongo_db=self._mongo_db)
        path = os.path.abspath(path)

        # a path may belong to at most one osimage
        duplicate = self._mongo_collection.find_one({'path': path})
        if duplicate:
            err_msg = ("Path belongs to osimage '{}'".format(
                duplicate['name']))
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        if not os.path.isdir(path):
            err_msg = "'{}' is not a valid directory".format(path)
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        # validate the requested kernel against what is installed
        # in the image; default to the first one found
        kernels = self.get_package_ver(path, 'kernel')
        if not kernels:
            err_msg = "No kernels installed in '{}'".format(path)
            self.log.error(err_msg)
            raise RuntimeError, err_msg
        elif not kernver:
            kernver = kernels[0]
        elif kernver not in kernels:
            err_msg = "Available kernels are '{}'".format(kernels)
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        grab_list_path = cluster.get('path') + '/templates/' + grab_list
        if not os.path.isfile(grab_list_path):
            err_msg = "'{}' is not a file.".format(grab_list_path)
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        with open(grab_list_path) as lst:
            grab_list_content = lst.read()

        # Store the new osimage in the datastore
        osimage = {
            'name': name,
            'path': path,
            'kernver': kernver,
            'kernopts': kernopts,
            'kernfile': '',
            'initrdfile': '',
            'dracutmodules': 'luna,-i18n,-plymouth',
            'kernmodules': 'ipmi_devintf,ipmi_si,ipmi_msghandler',
            'grab_exclude_list': grab_list_content,
            'grab_filesystems': '/,/boot',
            'comment': comment
        }

        self.log.debug(
            "Saving osimage '{}' to the datastore".format(osimage))

        self.store(osimage)

        # Link this osimage to its dependencies and the current cluster
        self.link(cluster)

    self.log = logging.getLogger(__name__ + '.' + self._name)
def pack_boot(self):
    """
    Older pack_boot variant: build an initrd inside a chroot of the
    image and copy kernel + initrd to <cluster path>/boot.

    Returns None on failure.

    NOTE(review): unlike the newer variants, this one never returns
    True on success (falls off the end, returning None) — confirm how
    callers interpret the result.
    """
    def mount(source, target, fs):
        subprocess.Popen(['/usr/bin/mount', '-t', fs, source, target])
        #ret = ctypes.CDLL('libc.so.6', use_errno=True).mount(source, target, fs, 0, options)
        #if ret < 0:
        #    errno = ctypes.get_errno()
        #    raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
        #                       format(source, fs, target, options, os.strerror(errno)))

    def umount(source):
        subprocess.Popen(['/usr/bin/umount', source])
        #ret = ctypes.CDLL('libc.so.6', use_errno=True).umount(source)
        #if ret < 0:
        #    errno = ctypes.get_errno()
        #    raise RuntimeError("Error umounting {}: .".
        #                       format(source, os.strerror(errno)))

    def prepare_mounts(path):
        # dracut needs /dev, /proc and /sys inside the chroot
        mount('devtmpfs', path + '/dev', 'devtmpfs')
        mount('proc', path + '/proc', 'proc')
        mount('sysfs', path + '/sys', 'sysfs')

    def cleanup_mounts(path):
        umount(path + '/dev')
        umount(path + '/proc')
        umount(path + '/sys')

    cluster = Cluster(mongo_db = self._mongo_db)
    #boot_prefix = '/boot'
    image_path = str(self.get('path'))
    kernver = str(self.get('kernver'))
    tmp_path = '/tmp'  # in chroot env
    initrdfile = str(self.name) + '-initramfs-' + kernver
    kernfile = str(self.name) + '-vmlinuz-' + kernver
    #kernel_image = kernel_name + '-' + kernver
    #kernel_path = image_path + boot_prefix + '/' + kernel_image

    path = cluster.get('path')
    if not path:
        self._logger.error("Path needs to be configured.")
        return None
    path = str(path)

    user = cluster.get('user')
    if not user:
        self._logger.error("User needs to be configured.")
        return None

    #group = cluster.get('group')
    #if not group:
    #    self._logger.error("Group needs to be configured.")
    #    return None

    path_to_store = path + "/boot"
    user_id = pwd.getpwnam(user).pw_uid
    grp_id = pwd.getpwnam(user).pw_gid

    if not os.path.exists(path_to_store):
        os.makedirs(path_to_store)
        os.chown(path_to_store, user_id, grp_id)

    # Translate the comma-separated module lists into dracut flags;
    # a leading '-' means "omit this module/driver".
    modules_add = []
    modules_remove = []
    drivers_add = []
    drivers_remove = []

    dracutmodules = self.get('dracutmodules')
    if dracutmodules:
        dracutmodules = str(dracutmodules)
        modules_add = sum([['--add', i]
                           for i in dracutmodules.split(',')
                           if i[0] != '-'], [])
        modules_remove = sum([['--omit', i[1:]]
                              for i in dracutmodules.split(',')
                              if i[0] == '-'], [])

    kernmodules = self.get('kernmodules')
    if kernmodules:
        kernmodules = str(kernmodules)
        drivers_add = sum([['--add-drivers', i]
                           for i in kernmodules.split(',')
                           if i[0] != '-'], [])
        drivers_remove = sum([['--omit-drivers', i[1:]]
                              for i in kernmodules.split(',')
                              if i[0] == '-'], [])

    prepare_mounts(image_path)

    # keep a handle on the real root so we can escape the chroot later
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(image_path)

    try:
        # the luna dracut module must exist inside the image, otherwise
        # the resulting initrd cannot network-boot the node
        dracut_modules = subprocess.Popen(['/usr/sbin/dracut', '--kver',
                                           kernver, '--list-modules'],
                                          stdout=subprocess.PIPE)
        luna_exists = False

        while dracut_modules.poll() is None:
            line = dracut_modules.stdout.readline()
            if line.strip() == 'luna':
                luna_exists = True

        if not luna_exists:
            self._logger.error("No luna dracut module in osimage '{}'".format(self.name))
            raise RuntimeError

        dracut_cmd = ['/usr/sbin/dracut', '--force', '--kver', kernver] + modules_add + modules_remove + drivers_add + drivers_remove + [tmp_path + '/' + initrdfile]

        dracut_create = subprocess.Popen(dracut_cmd, stdout=subprocess.PIPE)
        while dracut_create.poll() is None:
            line = dracut_create.stdout.readline()

    except:
        # bare except is deliberate: even on KeyboardInterrupt we must
        # restore the real root and unmount the chroot before bailing out
        os.fchdir(real_root)
        os.chroot(".")
        os.close(real_root)
        cleanup_mounts(image_path)
        try:
            pass
            #os.remove(image_path + '/' + tmp_path + '/' + initrdfile)
        except:
            pass
        return None

    # escape the chroot and undo the bind mounts
    os.fchdir(real_root)
    os.chroot(".")
    os.close(real_root)
    cleanup_mounts(image_path)

    shutil.copy(image_path + tmp_path + '/' + initrdfile, path_to_store)
    shutil.copy(image_path + '/boot/vmlinuz-' + kernver,
                path_to_store + '/' + kernfile)

    os.chown(path_to_store + '/' + initrdfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + initrdfile, 0644)
    os.chown(path_to_store + '/' + kernfile, user_id, grp_id)
    os.chmod(path_to_store + '/' + kernfile, 0644)

    self.set('kernfile', kernfile)
    self.set('initrdfile', initrdfile)
def __init__(self, name=None, mongo_db=None, create=False, id=None,
             version=None, NETWORK=None, PREFIX=None, ns_hostname=None,
             ns_ip=None, comment=''):
    """
    Represent an IPv4/IPv6 network with a free-address list.

    create  - should be True if we need create a network
    version - IP protocol version (4 or 6); autodetected from NETWORK
              when not given
    NETWORK - network
    PREFIX  - the prefix in a networks CIDR format
    ns_hostname - nameserver hostname (guessed when not given)
    ns_ip   - nameserver IP; defaults to the last usable address of
              the network
    """
    self.log.debug("function {} args".format(self._debug_function()))

    # Define the schema used to represent network objects
    self._collection_name = 'network'
    self._keylist = {
        'PREFIX': type(0),
        'ns_hostname': type(''),
        'include': type(''),
        'rev_include': type(''),
        'comment': type(''),
    }

    # Check if this network is already present in the datastore
    # Read it if that is the case
    net = self._get_object(name, mongo_db, create, id)

    if create:
        if not version:
            version = utils.ip.get_ip_version(NETWORK)
            if version == 0:
                err_msg = ("Unable to determine protocol version " +
                           "for given network")
                self.log.error(err_msg)
                raise RuntimeError, err_msg

        if version not in [4, 6]:
            err_msg = "IP version should be 4 or 6"
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        # address space width depends on the protocol version
        maxbits = 32
        if version == 6:
            maxbits = 128

        self.version = version
        self.maxbits = maxbits

        cluster = Cluster(mongo_db=self._mongo_db)
        num_subnet = utils.ip.get_num_subnet(NETWORK, PREFIX, self.version)

        # initial free list covers every usable host address:
        # 1 .. 2^(maxbits-PREFIX) - 2 (network/broadcast excluded)
        flist = self._flist_to_str([{
            'start': 1,
            'end': (1 << (self.maxbits - int(PREFIX))) - 2
        }])

        # Try to guess the nameserver hostname if none provided
        if not ns_hostname:
            ns_hostname = utils.ip.guess_ns_hostname()

        # Store the new network in the datastore
        # IPv6 subnets are stored as strings (too big for a BSON int)
        if self.version == 6:
            num_subnet = str(num_subnet)

        # Try to find duplicate net
        if utils.helpers.find_duplicate_net(
                num_subnet, mongo_db=self._mongo_db):
            err_msg = ("Network {}/{} is defined already".format(
                NETWORK, PREFIX))
            self.log.error(err_msg)
            raise RuntimeError, err_msg

        net = {
            'name': name,
            'NETWORK': num_subnet,
            'PREFIX': PREFIX,
            'freelist': flist,
            'ns_hostname': ns_hostname,
            'ns_ip': None,
            'version': version,
            'include': None,
            'rev_include': None,
            'comment': comment
        }

        self.log.debug("Saving net '{}' to the datastore".format(net))

        self.store(net)
        self._convert_to_int()

        # Link this network to the current cluster
        self.link(cluster)

        # If no IP address is provided for the nameserver, default to
        # the cluster's frontend address
        if ns_ip is None:
            ns_ip = utils.ip.reltoa(
                num_subnet, int(flist[0]['end']), self.version)

        self.set('ns_ip', ns_ip)

    # refresh version/maxbits from the stored document for the
    # read-existing path as well
    self.version = self._json['version']
    self.maxbits = 32
    if self.version == 6:
        self.maxbits = 128

    self._convert_to_int()

    self.log = logging.getLogger(__name__ + '.' + self._name)
def __init__(self, name=None, mongo_db=None, create=False, id=None, network=None, ip=None, read='public', rw='private', oid=None, comment=''): """ ip - ip of the switch read - read community rw - rw community oid - could be, for instance .1.3.6.1.2.1.17.7.1.2.2.1.2 .1.3.6.1.2.1.17.4.3.1.2 .1.3.6.1.2.1.17.7.1.2.2 .1.3.6.1.2.1.17.4.3.1.2 """ self.log.debug("function args {}".format(self._debug_function())) # Define the schema used to represent switch objects self._collection_name = 'switch' self._keylist = { 'read': type(''), 'rw': type(''), 'oid': type(''), 'comment': type(''), } # Check if this switch is already present in the datastore # Read it if that is the case switch = self._get_object(name, mongo_db, create, id) if create: cluster = Cluster(mongo_db=self._mongo_db) if not network: err_msg = "Network must be provided" self.log.error(err_msg) raise RuntimeError, err_msg if not name: err_msg = "Name must be provided" self.log.error(err_msg) raise RuntimeError, err_msg net = Network(name=network, mongo_db=self._mongo_db) ip = net.reserve_ip(ip) if not ip: err_msg = "Could not acquire ip for switch" self.log.error(err_msg) raise RuntimeError, err_msg # Store the new switch in the datastore switch = { 'name': name, 'network': net.DBRef, 'ip': ip, 'read': read, 'rw': rw, 'oid': oid, 'comment': comment } self.log.debug( "Saving switch '{}' to the datastore".format(switch)) self.store(switch) # Link this switch to its dependencies and the current cluster self.link(cluster) self.link(net) self.log = logging.getLogger('switch.' + self._name)
def create_tarball(self): # TODO check if root cluster = Cluster(mongo_db=self._mongo_db) path = cluster.get('path') user = cluster.get('user') user_id = pwd.getpwnam(user).pw_uid grp_id = pwd.getpwnam(user).pw_gid path_to_store = path + "/torrents" if not os.path.exists(path_to_store): os.makedirs(path_to_store) os.chown(path_to_store, user_id, grp_id) os.chmod(path_to_store, 0644) image_path = self.get('path') real_root = os.open("/", os.O_RDONLY) os.chroot(image_path) uid = str(uuid.uuid4()) tarfile = uid + ".tgz" try: # dirty, but 4 times faster tar_out = subprocess.Popen( [ '/usr/bin/tar', '-C', '/', '--one-file-system', '--xattrs', '--selinux', '--acls', '--checkpoint=100', '--exclude=./tmp/' + tarfile, '--use-compress-program=/usr/bin/pigz', '-c', '-f', '/tmp/' + tarfile, '.' ], stderr=subprocess.PIPE ) stat_symb = ['\\', '|', '/', '-'] i = 0 while True: line = tar_out.stderr.readline() if line == '': break i = i + 1 sys.stdout.write(stat_symb[i % len(stat_symb)]) sys.stdout.write('\r') except: exc_type, exc_value, exc_traceback = sys.exc_info() if exc_type == exceptions.KeyboardInterrupt: self.log.error('Keyboard interrupt.') else: self.log.error(exc_value) self.log.debug(traceback.format_exc()) if os.path.isfile('/tmp/' + tarfile): os.remove('/tmp/' + tarfile) sys.stdout.write('\r') os.fchdir(real_root) os.chroot(".") os.close(real_root) return False os.fchdir(real_root) os.chroot(".") os.close(real_root) # copy image, so permissions and selinux contexts # will be inherited from parent folder shutil.copy(image_path + '/tmp/' + tarfile, path_to_store) os.remove(image_path + '/tmp/' + tarfile) os.chown(path_to_store + '/' + tarfile, user_id, grp_id) os.chmod(path_to_store + '/' + tarfile, 0644) self.set('tarball', str(uid)) return True