def __init__(self):
     super(IndirectionCloud, self).__init__()
     self.app_config = ApplicationConfiguration().configuration
     self.log = logging.getLogger('%s.%s' %
                                  (__name__, self.__class__.__name__))
     self.pim = PersistentImageManager.default_manager()
     self.res_mgr = ReservationManager()
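
The constructors in these examples all name their logger from the module and class, which yields one hierarchical logger per plugin. A minimal, self-contained sketch of the same convention using nothing but the stdlib:

import logging

class Demo(object):
    def __init__(self):
        # Same naming convention as the plugins above: "<module>.<class>" produces a
        # hierarchical logger that inherits any handlers configured for the module.
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))

logging.basicConfig(level=logging.DEBUG)
demo = Demo()
demo.log.debug("logger name is %s", demo.log.name)
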
Example #2
 def setUp(self):
     self.test_path = '/tmp/imagefactory.unittest.ReservationManager'
     self.test_file = '%s/reservation.test' % self.test_path
     os.mkdir(self.test_path)
     fstat = os.statvfs(self.test_path)
     self.max_free = fstat.f_bavail * fstat.f_frsize
     self.min_free = self.max_free / 2
     self.res_mgr = ReservationManager()
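
The setUp() above derives the filesystem's free space from os.statvfs(). A standalone sketch of that calculation (POSIX-only, pure stdlib):

import os

def free_bytes(path):
    """Free space, in bytes, available to unprivileged users at path (POSIX only)."""
    fstat = os.statvfs(path)
    # f_bavail = free blocks available to non-root users, f_frsize = fragment size
    return fstat.f_bavail * fstat.f_frsize

if __name__ == '__main__':
    print(free_bytes('/tmp'))
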
Example #3
 def __init__(self):
     super(TinMan, self).__init__()
     self.cloud_plugin_content = []
     config_obj = ApplicationConfiguration()
     self.app_config = config_obj.configuration
     self.res_mgr = ReservationManager()
     self.log = logging.getLogger('%s.%s' %
                                  (__name__, self.__class__.__name__))
     self.parameters = None
     self.install_script_object = None
     self.guest = None
 def run(self):
     resmgr = ReservationManager()
     str_args = (self.qname, self.position)
     self.output.append('enter-%s-%d' % str_args)
     resmgr.enter_queue(self.qname)
     self.output.append('start-%s-%d' % str_args)
     if(self.qname == 'local'):
         time.sleep(4)
     else:
         time.sleep(1)
     self.output.append('exit-%s-%d' % str_args)
     resmgr.exit_queue(self.qname)
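
The run() method above only releases its queue slot if nothing raises between enter_queue() and exit_queue(). A minimal sketch of a safer wrapper; the import path is an assumption about where ReservationManager lives in the tree, and enter_queue() is assumed to block until a slot is free:

# Sketch only - adjust the import path to your checkout.
from imgfac.ReservationManager import ReservationManager

def run_in_queue(qname, work):
    """Run work() while holding a slot in the named ReservationManager queue."""
    res_mgr = ReservationManager()
    res_mgr.enter_queue(qname)      # may block until a slot in 'qname' frees up
    try:
        return work()
    finally:
        res_mgr.exit_queue(qname)   # release the slot even if work() raised
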
Example #5
    def threadsafe_generate_install_media(self, guest):
        # Oz caching of install media and modified install media is not thread safe
        # Make it safe here using some locks
        # We can only have one active generate_install_media() call for each unique tuple:
        #  (OS, update, architecture, installtype)

        tdl = guest.tdl
        queue_name = "%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch, tdl.installtype)
        res_mgr = ReservationManager()
        res_mgr.get_named_lock(queue_name)
        try:
            guest.generate_install_media(force_download=False)
        finally:
            res_mgr.release_named_lock(queue_name)
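
get_named_lock()/release_named_lock() pairs like the one above are a natural fit for a context manager. A small sketch, again assuming the imgfac.ReservationManager import path:

from contextlib import contextmanager
# Assumed import path, shown for illustration only.
from imgfac.ReservationManager import ReservationManager

@contextmanager
def named_lock(name):
    """Hold the ReservationManager named lock 'name' for the duration of a with-block."""
    res_mgr = ReservationManager()
    res_mgr.get_named_lock(name)
    try:
        yield
    finally:
        res_mgr.release_named_lock(name)

# Usage, mirroring threadsafe_generate_install_media():
#   with named_lock("%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch, tdl.installtype)):
#       guest.generate_install_media(force_download=False)
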
Example #6
 def __init__(self):
     super(FedoraOS, self).__init__()
     self.cloud_plugin_content = [ ]
     config_obj = ApplicationConfiguration()
     self.app_config = config_obj.configuration
     self.res_mgr = ReservationManager()
     self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
 def setUp(self):
     self.test_path = '/tmp/imagefactory.unittest.ReservationManager'
     self.test_file = '%s/reservation.test' % self.test_path
     os.mkdir(self.test_path)
     fstat = os.statvfs(self.test_path)
     self.max_free = fstat.f_bavail * fstat.f_frsize
     self.min_free = self.max_free / 2
     self.res_mgr = ReservationManager()
Example #8
 def _singleton_init(self):
     self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
     self.pending_uploads = dict()
     self.pending_uploads_lock = BoundedSemaphore()
     self.pim = PersistentImageManager.default_manager()
     self.res = ReservationManager()
     self.secondaries = { } 
     if os.path.isfile(SECONDARIES):
         try:
             with open(SECONDARIES, "r") as secs:
                 self.secondaries = json.load(secs)
         except Exception:
             self.log.warning("Unable to load JSON for secondaries from %s", SECONDARIES)
     if 'targets' not in self.secondaries:
         self.secondaries['targets'] = { }
     if 'providers' not in self.secondaries:
         self.secondaries['providers'] = { }
 def setUp(self):
     self.test_path = "/tmp/imagefactory.unittest.ReservationManager"
     self.test_file = "%s/reservation.test" % self.test_path
     if not os.path.exists(self.test_path):
         os.mkdir(self.test_path)
     fstat = os.statvfs(self.test_path)
     self.max_free = fstat.f_bavail * fstat.f_frsize
     self.min_free = self.max_free / 2
     self.res_mgr = ReservationManager()
Example #10
 def __init__(self):
     super(TinMan, self).__init__()
     self.cloud_plugin_content = [ ]
     config_obj = ApplicationConfiguration()
     self.app_config = config_obj.configuration
     self.res_mgr = ReservationManager()
     self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
     self.parameters = None
     self.install_script_object = None
     self.guest = None
Example #11
 def testJobQueue(self):
     """
     TODO: Docstring for testJobQueue
     """
     job_number = 3
     job_threads = []
     job_output = []
     for i in range(job_number):
         for name in ReservationManager().queues:
             job_threads.append(
                 MockJob(kwargs=dict(
                     qname=name, position=i, output=job_output)))
     for job in job_threads:
         job.start()
     for job in job_threads:
         if job.is_alive():
             job.join()
     #self.log.info(job_output)
     self.assertEqual((3 * job_number * len(ReservationManager().queues)),
                      len(job_output))
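
testJobQueue() depends on a MockJob thread whose run() method appears in the other examples but whose constructor does not. The following is a hypothetical reconstruction that would satisfy that run() method - qname, position and a shared output list pulled from the kwargs dict:

import threading

class MockJob(threading.Thread):
    """Hypothetical MockJob constructor matching the run() shown in these examples."""
    def __init__(self, kwargs=None):
        super(MockJob, self).__init__()
        kwargs = kwargs or {}
        self.qname = kwargs['qname']        # ReservationManager queue to enter
        self.position = kwargs['position']  # index used only in the output markers
        self.output = kwargs['output']      # shared list collecting enter/start/exit markers
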
Example #12
 def run(self):
     resmgr = ReservationManager()
     str_args = (self.qname, self.position)
     self.output.append('enter-%s-%d' % str_args)
     resmgr.enter_queue(self.qname)
     self.output.append('start-%s-%d' % str_args)
     if (self.qname == 'local'):
         time.sleep(4)
     else:
         time.sleep(1)
     self.output.append('exit-%s-%d' % str_args)
     resmgr.exit_queue(self.qname)
 def _singleton_init(self):
     self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
     self.pending_uploads = dict()
     self.pending_uploads_lock = BoundedSemaphore()
     self.pim = PersistentImageManager.default_manager()
     self.res = ReservationManager()
     self.secondaries = { } 
     if os.path.isfile(SECONDARIES):
         try:
             with open(SECONDARIES, "r") as secs:
                 self.secondaries = json.load(secs)
         except Exception:
             self.log.warning("Unable to load JSON for secondaries from %s", SECONDARIES)
     if 'targets' not in self.secondaries:
         self.secondaries['targets'] = { }
     if 'providers' not in self.secondaries:
         self.secondaries['providers'] = { }
Example #14
    def threadsafe_generate_install_media(self, guest):
        # Oz caching of install media and modified install media is not thread safe
        # Make it safe here using some locks
        # We can only have one active generate_install_media() call for each unique tuple:
        #  (OS, update, architecture, installtype)

        tdl = guest.tdl
        queue_name = "%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch,
                                      tdl.installtype)
        res_mgr = ReservationManager()
        res_mgr.get_named_lock(queue_name)
        try:
            guest.generate_install_media(force_download=False)
        finally:
            res_mgr.release_named_lock(queue_name)
class testReservationManager(unittest.TestCase):
    """ TODO: Docstring for testReservationManager  """

    def __init__(self, methodName='runTest'):
        super(testReservationManager, self).__init__(methodName)
        logging.basicConfig(level=logging.NOTSET,
                            format='%(asctime)s \
                                    %(levelname)s \
                                    %(name)s \
                                    pid(%(process)d) \
                                    Message: %(message)s',
                            filename='/tmp/imagefactory-unittests.log')

    def setUp(self):
        self.test_path = '/tmp/imagefactory.unittest.ReservationManager'
        self.test_file = '%s/reservation.test' % self.test_path
        os.mkdir(self.test_path)
        fstat = os.statvfs(self.test_path)
        self.max_free = fstat.f_bavail * fstat.f_frsize
        self.min_free = self.max_free / 2
        self.res_mgr = ReservationManager()

    def tearDown(self):
        self.res_mgr.remove_path(self.test_path)
        os.rmdir(self.test_path)
        del self.res_mgr

    def testSingleton(self):
        """
        Prove this class produces a singleton object.
        """
        self.assertEqual(id(self.res_mgr), id(ReservationManager()))

    def testDefaultMinimumProperty(self):
        """
        TODO: Docstring for testDefaultMinimumProperty
        """
        self.res_mgr.default_minimum = self.min_free
        self.assertEqual(self.min_free, self.res_mgr.default_minimum)

    def testAddRemovePath(self):
        """
        TODO: Docstring for testRemovePath
        """
        path = '/'
        # start off with nothing tracked
        self.assertFalse(path in self.res_mgr.available_space)
        # add / and check that it's listed in the dictionary returned by
        # available_space
        self.res_mgr.add_path('/')
        self.assertTrue(path in self.res_mgr.available_space)
        # remove / and check that it's no longer listed in the dictionary
        # returned by available_space
        self.res_mgr.remove_path('/')
        self.assertFalse(path in self.res_mgr.available_space)

    def testReserveSpaceForFile(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        self.res_mgr.default_minimum = self.min_free
        size = self.min_free / 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertTrue(result)
        self.assertTrue(self.test_file in self.res_mgr.reservations)

    def testReserveSpaceForFileThatIsTooBig(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        size = self.max_free * 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertFalse(result)
        self.assertFalse(self.test_file in self.res_mgr.reservations)

    def testCancelReservationForFile(self):
        """
        TODO: Docstring for testCancelReservationForFile
        """
        size = self.min_free / 10
        self.res_mgr.default_minimum = self.min_free
        if(self.res_mgr.reserve_space_for_file(size, self.test_file)):
            self.assertTrue(self.test_file in self.res_mgr.reservations)
            self.res_mgr.cancel_reservation_for_file(self.test_file)
            self.assertFalse(self.test_file in self.res_mgr.reservations)
        else:
            self.fail('Failed to reserve space...')

    def testCancelNonExistentReservation(self):
        """
        TODO: Docstring for testCancelNonExistentReservation
        """
        self.assertRaises((TypeError, KeyError), self.res_mgr.cancel_reservation_for_file, *('/tmp/not.there', False))

    def testAvailableSpaceForPath(self):
        """
        TODO: Docstring for testAvailableSpace
        """
        size = self.min_free / 10
        self.res_mgr.add_path(self.test_path, self.min_free)
        available = self.res_mgr.available_space_for_path(self.test_path)
        if(self.res_mgr.reserve_space_for_file(size, self.test_file)):
            now_available = self.res_mgr.available_space_for_path(self.test_path)
            self.assertEqual(now_available, (available - size))
        else:
            self.fail('Failed to reserve space...')

    def testJobQueue(self):
        """
        TODO: Docstring for testJobQueue
        """
        job_number = 3
        job_threads = []
        job_output = []
        for i in range(job_number):
            for name in ReservationManager().queues:
                job_threads.append(MockJob(kwargs=dict(qname=name, position=i, output=job_output)))
        for job in job_threads:
            job.start()
        for job in job_threads:
            if job.is_alive():
                job.join()
        #self.log.info(job_output)
        self.assertEqual((3 * job_number * len(ReservationManager().queues)), len(job_output))
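
Taken together, the reservation tests above imply the intended usage pattern: reserve space before producing a large file, then drop the reservation once the file exists (or the attempt has failed). A hedged sketch, with the import path again assumed:

# Assumed import path; reserve_space_for_file() and cancel_reservation_for_file()
# are the calls exercised by the tests above.
from imgfac.ReservationManager import ReservationManager

def write_reserved_file(path, expected_size, producer):
    res_mgr = ReservationManager()
    if not res_mgr.reserve_space_for_file(expected_size, path):
        raise IOError("Not enough free space for %s (%d bytes)" % (path, expected_size))
    try:
        with open(path, 'wb') as dest:
            producer(dest)      # caller-supplied callable that writes the payload
    finally:
        # The file is now either on disk or the attempt failed; either way the
        # reservation has served its purpose.
        res_mgr.cancel_reservation_for_file(path)
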
Example #16
class testReservationManager(unittest.TestCase):
    """ TODO: Docstring for testReservationManager  """
    def __init__(self, methodName='runTest'):
        super(testReservationManager, self).__init__(methodName)
        logging.basicConfig(level=logging.NOTSET,
                            format='%(asctime)s \
                                    %(levelname)s \
                                    %(name)s \
                                    pid(%(process)d) \
                                    Message: %(message)s',
                            filename='/tmp/imagefactory-unittests.log')

    def setUp(self):
        self.test_path = '/tmp/imagefactory.unittest.ReservationManager'
        self.test_file = '%s/reservation.test' % self.test_path
        os.mkdir(self.test_path)
        fstat = os.statvfs(self.test_path)
        self.max_free = fstat.f_bavail * fstat.f_frsize
        self.min_free = self.max_free / 2
        self.res_mgr = ReservationManager()

    def tearDown(self):
        self.res_mgr.remove_path(self.test_path)
        os.rmdir(self.test_path)
        del self.res_mgr

    def testSingleton(self):
        """
        Prove this class produces a singleton object.
        """
        self.assertEqual(id(self.res_mgr), id(ReservationManager()))

    def testDefaultMinimumProperty(self):
        """
        TODO: Docstring for testDefaultMinimumProperty
        """
        self.res_mgr.default_minimum = self.min_free
        self.assertEqual(self.min_free, self.res_mgr.default_minimum)

    def testAddRemovePath(self):
        """
        TODO: Docstring for testRemovePath
        """
        path = '/'
        # start off with nothing tracked
        self.assertFalse(path in self.res_mgr.available_space)
        # add / and check that it's listed in the dictionary returned by
        # available_space
        self.res_mgr.add_path('/')
        self.assertTrue(path in self.res_mgr.available_space)
        # remove / and check that it's no longer listed in the dictionary
        # returned by available_space
        self.res_mgr.remove_path('/')
        self.assertFalse(path in self.res_mgr.available_space)

    def testReserveSpaceForFile(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        self.res_mgr.default_minimum = self.min_free
        size = self.min_free / 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertTrue(result)
        self.assertTrue(self.test_file in self.res_mgr.reservations)

    def testReserveSpaceForFileThatIsTooBig(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        size = self.max_free * 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertFalse(result)
        self.assertFalse(self.test_file in self.res_mgr.reservations)

    def testCancelReservationForFile(self):
        """
        TODO: Docstring for testCancelReservationForFile
        """
        size = self.min_free / 10
        self.res_mgr.default_minimum = self.min_free
        if (self.res_mgr.reserve_space_for_file(size, self.test_file)):
            self.assertTrue(self.test_file in self.res_mgr.reservations)
            self.res_mgr.cancel_reservation_for_file(self.test_file)
            self.assertFalse(self.test_file in self.res_mgr.reservations)
        else:
            self.fail('Failed to reserve space...')

    def testCancelNonExistentReservation(self):
        """
        TODO: Docstring for testCancelNonExistentReservation
        """
        self.assertRaises((TypeError, KeyError),
                          self.res_mgr.cancel_reservation_for_file,
                          *('/tmp/not.there', False))

    def testAvailableSpaceForPath(self):
        """
        TODO: Docstring for testAvailableSpace
        """
        size = self.min_free / 10
        self.res_mgr.add_path(self.test_path, self.min_free)
        available = self.res_mgr.available_space_for_path(self.test_path)
        if (self.res_mgr.reserve_space_for_file(size, self.test_file)):
            now_available = self.res_mgr.available_space_for_path(
                self.test_path)
            self.assertEqual(now_available, (available - size))
        else:
            self.fail('Failed to reserve space...')

    def testJobQueue(self):
        """
        TODO: Docstring for testJobQueue
        """
        job_number = 3
        job_threads = []
        job_output = []
        for i in range(job_number):
            for name in ReservationManager().queues:
                job_threads.append(
                    MockJob(kwargs=dict(
                        qname=name, position=i, output=job_output)))
        for job in job_threads:
            job.start()
        for job in job_threads:
            if job.is_alive():
                job.join()
        #self.log.info(job_output)
        self.assertEqual((3 * job_number * len(ReservationManager().queues)),
                         len(job_output))
 def __init__(self):
     super(IndirectionCloud, self).__init__()
     self.app_config = ApplicationConfiguration().configuration
     self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
     self.pim = PersistentImageManager.default_manager()
     self.res_mgr = ReservationManager()
class IndirectionCloud(object):
    zope.interface.implements(CloudDelegate)

    def __init__(self):
        super(IndirectionCloud, self).__init__()
        self.app_config = ApplicationConfiguration().configuration
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.pim = PersistentImageManager.default_manager()
        self.res_mgr = ReservationManager()


    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        # This plugin wants to be the only thing operating on the input image
        # We do all our work here and then return False which stops any additional activity

        self.working_space_image = None
        self.utility_image_tmp = None
        try:
            self._builder_should_create_target_image(builder, target, image_id, template, parameters)
        finally:
            self.log.debug("Cleaning up temporary utility image and working space image")
            for fname in [ self.working_space_image, self.utility_image_tmp ]:
                if fname and os.path.isfile(fname):
                    os.unlink(fname)
        return False


    def _builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        # User may specify a utility image - if they do not we assume we can use the input image
        utility_image_id = parameters.get('utility_image', image_id)

        # The utility image is what we actually re-animate with Oz
        # We borrow these variable names from code that is very similar to the Oz/TinMan OS plugin
        self.active_image = self.pim.image_with_id(utility_image_id)
        if not self.active_image:
            raise Exception("Could not find utility image with ID (%s)" % (utility_image_id) )
        self.tdlobj = oz.TDL.TDL(xmlstring=self.active_image.template)

        # Later on, we will either copy in the base_image content as a file, or expose it as a device
        # to the utility VM.  We cannot do both.  Detect invalid input here before doing any long running
        # work
        input_image_device = parameters.get('input_image_device', None)
        input_image_file = parameters.get('input_image_filename', None)

        if input_image_device and input_image_file:
            raise Exception("You can specify either an input_image_device or an input_image_file but not both")

        if (not input_image_device) and (not input_image_file):
            input_image_file="/input_image.raw"

        # We remove any packages, commands and files from the original TDL - these have already been
        # installed/executed.  We leave the repos in place, as it is possible that commands executed
        # later may depend on them
        self.tdlobj.packages = [ ]
        self.tdlobj.commands = { }
        self.tdlobj.files = { } 

        # This creates a new Oz object - replaces the auto-generated disk file location with
        # a copy of the utility image we will make later, and prepares an initial libvirt_xml string
        self._init_oz()
        self.utility_image_tmp = self.app_config['imgdir'] + "/tmp-utility-image-" + str(builder.target_image.identifier)
        self.guest.diskimage = self.utility_image_tmp
        # Below we will create this file as a qcow2 image using the original utility image as
        # a backing store - For the follow-on XML generation to work correctly, we need to force
        # Oz to use qcow2 as the image type
        self.guest.image_type = 'qcow2'

        if 'utility_cpus' in parameters:
            self.guest.install_cpus = int(parameters['utility_cpus'])

        libvirt_xml = self.guest._generate_xml("hd", None)
        libvirt_doc = libxml2.parseDoc(libvirt_xml)

        # Now we create a second disk image as working/scratch space
        # Hardcode at 30G
        # TODO: Make configurable
        # Make it, format it, copy in the base_image 
        self.working_space_image = self.app_config['imgdir'] + "/working-space-image-" + str(builder.target_image.identifier)
        self.create_ext2_image(self.working_space_image)

        # Modify the libvirt_xml used with Oz to contain a reference to a second "working space" disk image
        working_space_device = parameters.get('working_space_device', 'vdb')
        self.add_disk(libvirt_doc, self.working_space_image, working_space_device)

        self.log.debug("Updated domain XML with working space image:\n%s" % (libvirt_xml))

        # We expect to find a partial TDL document in this parameter - this is what drives the
        # tasks performed by the utility image
        if 'utility_customizations' in parameters:
            self.oz_refresh_customizations(parameters['utility_customizations'])
        else:
            self.log.info('No additional repos, packages, files or commands specified for utility tasks')

        # Create a qcow2 image using the original utility image file (which may be read-only) as a
        # backing store.
        self.log.debug("Creating temporary writeable qcow2 working copy of utlity image (%s) as (%s)" % (self.active_image.data, self.utility_image_tmp))
        self.guest._internal_generate_diskimage(image_filename=self.utility_image_tmp, backing_filename=self.active_image.data)

        if input_image_file: 
            # Here we finally involve the actual Base Image content - it is made available for the utility image to modify
            self.copy_content_to_image(builder.base_image.data, self.working_space_image, input_image_file)
        else:
            # Note that we know that one or the other of these are set because of code earlier
            self.add_disk(libvirt_doc, builder.base_image.data, input_image_device)

        # Run all commands, repo injection, etc specified
        try:
            self.log.debug("Launching utility image and running any customizations specified")
            libvirt_xml = libvirt_doc.serialize(None, 1)
            self.guest.customize(libvirt_xml)
            self.log.debug("Utility image tasks complete")
        finally:
            self.log.debug("Cleaning up install artifacts")
            self.guest.cleanup_install()

        # After shutdown, extract the results
        results_location = parameters.get('results_location', "/results/images/boot.iso")
        self.copy_content_from_image(results_location, self.working_space_image, builder.target_image.data)


    def add_disk(self, libvirt_doc, disk_image_file, device_name):
        devices = libvirt_doc.xpathEval("/domain/devices")[0]
        new_dev = devices.newChild(None, "disk", None)
        new_dev.setProp("type", "file")
        new_dev.setProp("device", "disk")
        source = new_dev.newChild(None, "source", None)
        source.setProp("file", disk_image_file)
        target = new_dev.newChild(None, "target", None)
        target.setProp("dev", device_name)
        target.setProp("bus", self.guest.disk_bus)


    def oz_refresh_customizations(self, partial_tdl):
        # This takes our already created and well formed TDL object with already blank customizations
        # and attempts to add in any additional customizations found in partial_tdl
        # partial_tdl need not contain the <os>, <name> or <description> sections
        # if it does they will be ignored
        # TODO: Submit an Oz patch to make this shorter or a utility function within the TDL class

        doc = lxml.etree.fromstring(partial_tdl)
        self.tdlobj.doc = doc 

        packageslist = doc.xpath('/template/packages/package')
        self.tdlobj._add_packages(packageslist)

        for afile in doc.xpath('/template/files/file'):
            name = afile.get('name')
            if name is None:
                raise Exception("File without a name was given")
            contenttype = afile.get('type')
            if contenttype is None:
                contenttype = 'raw'

            content = afile.text
            if content:
                content = content.strip()
            else:
                content = ''
            self.tdlobj.files[name] = data_from_type(name, contenttype, content)

        repositorieslist = doc.xpath('/template/repositories/repository')
        self.tdlobj._add_repositories(repositorieslist)

        self.tdlobj.commands = self.tdlobj._parse_commands()


    def _init_oz(self):
        # populate a config object to pass to OZ; this allows us to specify our
        # own output dir but inherit other Oz behavior
        self.oz_config = ConfigParser.SafeConfigParser()
        if self.oz_config.read("/etc/oz/oz.cfg") != []:
            self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
            if "oz_data_dir" in self.app_config:
                self.oz_config.set('paths', 'data_dir', self.app_config["oz_data_dir"])
            if "oz_screenshot_dir" in self.app_config:
                self.oz_config.set('paths', 'screenshot_dir', self.app_config["oz_screenshot_dir"])
        else:
            raise ImageFactoryException("No Oz config file found. Can't continue.")

        # Use the factory function from Oz directly
        try:
            # Force uniqueness by overriding the name in the TDL
            self.tdlobj.name = "factory-build-" + self.active_image.identifier
            self.guest = oz.GuestFactory.guest_factory(self.tdlobj, self.oz_config, None)
            # Oz just selects a random port here - This could potentially collide if we are unlucky
            self.guest.listen_port = self.res_mgr.get_next_listen_port()
        except libvirtError, e:
            raise ImageFactoryException("Cannot connect to libvirt.  Make sure libvirt is running. [Original message: %s]" %  e.message)
        except OzException, e:
            if "Unsupported" in e.message:
                raise ImageFactoryException("TinMan plugin does not support distro (%s) update (%s) in TDL" % (self.tdlobj.distro, self.tdlobj.update) )
            else:
                raise e
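
The plugin above never writes to the stored utility image; it builds a temporary qcow2 file that uses it as a backing store. A rough standalone equivalent using the qemu-img CLI (assumes qemu-img is on PATH and the backing image is raw):

import subprocess

def make_qcow2_overlay(backing_file, overlay_path, backing_fmt='raw'):
    """Create a copy-on-write qcow2 overlay so the backing image is never modified."""
    subprocess.check_call([
        'qemu-img', 'create',
        '-f', 'qcow2',
        '-o', 'backing_file=%s,backing_fmt=%s' % (backing_file, backing_fmt),
        overlay_path,
    ])

# make_qcow2_overlay('/path/to/utility-image.raw', '/tmp/tmp-utility-image.qcow2')
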
Example #19
class SecondaryDispatcher(Singleton):

    def _singleton_init(self):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.pending_uploads = dict()
        self.pending_uploads_lock = BoundedSemaphore()
        self.pim = PersistentImageManager.default_manager()
        self.res = ReservationManager()
        self.secondaries = { } 
        if os.path.isfile(SECONDARIES):
            try:
                with open(SECONDARIES, "r") as secs:
                    self.secondaries = json.load(secs)
            except Exception:
                self.log.warning("Unable to load JSON for secondaries from %s", SECONDARIES)
        if 'targets' not in self.secondaries:
            self.secondaries['targets'] = { }
        if 'providers' not in self.secondaries:
            self.secondaries['providers'] = { }

    def queue_pending_upload(self, target_image_uuid):
        # Create a UUID - map it to the target_image_uuid and return it
        # TODO: Expire these somehow
        upload_uuid = str(uuid.uuid4())
        self.pending_uploads_lock.acquire()
        try:
            self.pending_uploads[upload_uuid] = target_image_uuid
        finally:
            self.pending_uploads_lock.release()
        return upload_uuid

    def target_image_for_upload_uuid(self, upload_uuid):
        # Return the target_image UUID for a given upload UUID if it exists
        # and remove it from the dict.  Return None if the UUID is not in the dict.
        self.pending_uploads_lock.acquire()
        target_image_uuid = None
        try:
            if upload_uuid in self.pending_uploads:
                target_image_uuid = self.pending_uploads[upload_uuid]
                del self.pending_uploads[upload_uuid]
        finally:
            self.pending_uploads_lock.release()

        return target_image_uuid
                
    def update_target_image_body(self, target_image, new_body):
        # Called during the clone process - we background the actual copy
        # and update the target_image in question to COMPLETED when the copy is finished
        # This allows the HTTP transaction to complete - in testing some upload clients
        # timed out waiting for the local copy to complete
        # (bottle.py internally processes all uploaded files to completion, storing them as unnamed temporary
        # files)
        target_update_thread = Thread(target=self._update_target_image_body, args=(target_image, new_body))
        target_update_thread.start()

    def _update_target_image_body(self, target_image, new_body):
        try:
            self.log.debug("Copying incoming file to %s" % (target_image.data))
            with open(target_image.data, "wb") as dest:
                shutil.copyfileobj(new_body, dest, 16384)
            self.log.debug("Finished copying incoming file to %s" % (target_image.data))
            target_image.status="COMPLETE"
        except Exception as e:
            self.log.debug("Exception encountered when attempting to update target_image body")
            self.log.exception(e)
            target_image.status_detail = {'activity': 'Failed to update image.', 'error': e.message}
            target_image.status="FAILED"
        finally:
            self.pim.save_image(target_image)

    def prep_target_image_clone(self, request_data, target_image_id):
        # Request data should contain all target_image metadata to be cloned
        # If a target_image with this ID does not exist, create it and establish an
        # upload UUID and return both.
        # If the target_image already exists, return just the existing target_image

        upload_id = None
        self.res.get_named_lock(target_image_id)
        # At this point no other thread, either remote or local, can operate on this ID
        # The image either already exists or it doesn't.  If it doesn't we can safely create
        # it without worrying about concurrency problems.
        try:
            target_image = self.pim.image_with_id(target_image_id)
            if not target_image:
                upload_id = self.queue_pending_upload(target_image_id)
                target_image = TargetImage(image_id=target_image_id)
                metadata_keys = target_image.metadata()
                for data_element in request_data.keys():
                    if not (data_element in metadata_keys):
                        self.log.warning("Metadata field (%s) in incoming target_image clone request is non-standard - skipping" % (data_element))
                    else:
                         setattr(target_image, data_element, request_data[data_element])
                # The posted image will, of course, be complete, so fix status here
                target_image.status = "PENDING"
                target_image.percent_complete = 0
                target_image.status_detail = { 'activity': 'Image being cloned from primary factory - initial metadata set', 'error':None }
                self.pim.add_image(target_image)
                self.pim.save_image(target_image)
                self.log.debug("Completed save of target_image (%s)" % target_image.identifier)
        finally:
            self.res.release_named_lock(target_image_id)

        return (target_image, upload_id)

    def get_secondary(self, target, provider):
        if provider in self.secondaries['providers']:
            return self.secondaries['providers'][provider]
        elif target in self.secondaries['targets']:
            return self.secondaries['targets'][target]
        else:
            return None

    def get_helper(self, secondary):
        try:
            helper = SecondaryHelper(**secondary)
            return helper
        except Exception as e:
            self.log.error("Exception encountered when trying to create secondary helper object")
            self.log.error("Secondary details: %s" % (secondary))
            self.log.exception(e) 
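
The pending-upload handshake above is a two-step exchange: queue_pending_upload() hands out a one-shot UUID, and target_image_for_upload_uuid() consumes it when the image body arrives. A small sketch of the consuming side; dispatcher is assumed to be the SecondaryDispatcher singleton:

def resolve_upload(dispatcher, upload_uuid):
    """Look up, and consume, the mapping created earlier by queue_pending_upload()."""
    target_image_id = dispatcher.target_image_for_upload_uuid(upload_uuid)
    if target_image_id is None:
        # Unknown or already-used upload UUID - the mapping is deleted on first lookup.
        raise KeyError("No pending upload registered for %s" % upload_uuid)
    return target_image_id
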
class testReservationManager(unittest.TestCase):
    """ TODO: Docstring for testReservationManager  """

    def __init__(self, methodName='runTest'):
        super(testReservationManager, self).__init__(methodName)
        logging.basicConfig(level=logging.NOTSET,
                            format='%(asctime)s \
                                    %(levelname)s \
                                    %(name)s \
                                    pid(%(process)d) \
                                    Message: %(message)s',
                            filename='/tmp/imagefactory-unittests.log')

    def setUp(self):
        self.test_path = '/tmp/imagefactory.unittest.ReservationManager'
        self.test_file = '%s/reservation.test' % self.test_path
        os.mkdir(self.test_path)
        fstat = os.statvfs(self.test_path)
        self.max_free = fstat.f_bavail * fstat.f_frsize
        self.min_free = self.max_free / 2
        self.res_mgr = ReservationManager(self.min_free)

    def tearDown(self):
        self.res_mgr.remove_path(self.test_path)
        os.rmdir(self.test_path)
        del self.res_mgr

    def testSingleton(self):
        """
        Prove this class produces a singleton object.
        """
        self.assertIs(self.res_mgr, ReservationManager())

    def testDefaultMinimumProperty(self):
        """
        TODO: Docstring for testDefaultMinimumProperty
        """
        self.assertEqual(self.min_free, self.res_mgr.default_minimum)

    def testAddRemovePath(self):
        """
        TODO: Docstring for testRemovePath
        """
        path = '/'
        # start off with nothing tracked
        self.assertFalse(path in self.res_mgr.available_space)
        # add / and check that it's listed in the dictionary returned by
        # available_space
        self.res_mgr.add_path('/')
        self.assertTrue(path in self.res_mgr.available_space)
        # remove / and check that it's no longer listed in the dictionary
        # returned by available_space
        self.res_mgr.remove_path('/')
        self.assertFalse(path in self.res_mgr.available_space)

    def testReserveSpaceForFile(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        size = self.min_free / 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertTrue(result)
        self.assertTrue(self.test_file in self.res_mgr.reservations)

    def testReserveSpaceForFileThatIsTooBig(self):
        """
        TODO: Docstring for testReserveSpaceForFile
        """
        size = self.max_free * 10
        result = self.res_mgr.reserve_space_for_file(size, self.test_file)
        self.assertFalse(result)
        self.assertFalse(self.test_file in self.res_mgr.reservations)

    def testCancelReservationForFile(self):
        """
        TODO: Docstring for testCancelReservationForFile
        """
        size = self.min_free / 10
        if(self.res_mgr.reserve_space_for_file(size, self.test_file)):
            self.assertTrue(self.test_file in self.res_mgr.reservations)
            self.res_mgr.cancel_reservation_for_file(self.test_file)
            self.assertFalse(self.test_file in self.res_mgr.reservations)
        else:
            self.fail('Failed to reserve space...')

    def testCancelNonExistentReservation(self):
        """
        TODO: Docstring for testCancelNonExistentReservation
        """
        with self.assertRaises((TypeError, KeyError)):
            self.res_mgr.cancel_reservation_for_file('/tmp/not.there', False)

    def testAvailableSpaceForPath(self):
        """
        TODO: Docstring for testAvailableSpace
        """
        size = self.min_free / 10
        self.res_mgr.add_path(self.test_path, self.min_free)
        available = self.res_mgr.available_space_for_path(self.test_path)
        if(self.res_mgr.reserve_space_for_file(size, self.test_file)):
            now_available = self.res_mgr.available_space_for_path(self.test_path)
            self.assertEqual(now_available, (available - size))
        else:
            self.fail('Failed to reserve space...')
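
Several examples rely on ReservationManager() and SecondaryDispatcher behaving as singletons that run _singleton_init() exactly once (see testSingleton above). The project's own Singleton base class is not shown in these snippets; a generic, non-thread-safe sketch of the idea looks like this:

class Singleton(object):
    """Generic sketch only - not the project's actual Singleton implementation."""
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super(Singleton, cls).__new__(cls)
            cls._instance._singleton_init(*args, **kwargs)
        return cls._instance

    def _singleton_init(self, *args, **kwargs):
        # Subclasses override this for their one-time setup.
        pass
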
Example #21
class TinMan(object):
    def activity(self, activity):
        # Simple helper function
        # Activity should be a one line human-readable string indicating the task in progress
        # We log it at DEBUG and also set it as the status_detail on our active image
        self.log.debug(activity)
        self.active_image.status_detail['activity'] = activity

    ## INTERFACE METHOD
    def create_target_image(self, builder, target, base_image, parameters):
        self.log.info(
            'create_target_image() called for TinMan plugin - creating a TargetImage'
        )
        self.active_image = builder.target_image
        self.target = target
        self.base_image = builder.base_image

        # populate our target_image bodyfile with the original base image
        # which we do not want to modify in place
        self.activity("Copying BaseImage to modifiable TargetImage")
        self.log.debug(
            "Copying base_image file (%s) to new target_image file (%s)" %
            (builder.base_image.data, builder.target_image.data))
        oz.ozutil.copyfile_sparse(builder.base_image.data,
                                  builder.target_image.data)
        self.image = builder.target_image.data

        # Merge together any TDL-style customizations requested via our plugin-to-plugin interface
        # with any target specific packages, repos and commands and then run a second Oz customization
        # step.
        self.tdlobj = oz.TDL.TDL(
            xmlstring=builder.base_image.template,
            rootpw_required=self.app_config["tdl_require_root_pw"])

        # We remove any packages, commands and files from the original TDL - these have already been
        # installed/executed.  We leave the repos in place, as it is possible that the target
        # specific packages or commands may require them.
        self.tdlobj.packages = []
        self.tdlobj.commands = {}
        self.tdlobj.files = {}
        # This is user-defined target-specific packages and repos in a local config file
        self.add_target_content()
        # This is content deposited by cloud plugins - typically commands to run to prep the image further
        self.merge_cloud_plugin_content()

        # If there are no new commands, packages or files, we can stop here - there is no need to run Oz again
        if (len(self.tdlobj.packages) + len(self.tdlobj.commands) +
                len(self.tdlobj.files)) == 0:
            self.log.debug(
                "No further modification of the TargetImage to perform in the OS Plugin - returning"
            )
            return

        # We have some additional work to do - create a new Oz guest object that we can use to run the guest
        # customization a second time
        self._init_oz()

        self.guest.diskimage = builder.target_image.data

        libvirt_xml = self.guest._generate_xml("hd", None)

        # One last step is required here - The persistent net rules in some Fedora and RHEL versions
        # Will cause our new incarnation of the image to fail to get network - fix that here
        # We unfortunately end up having to duplicate this a second time in the cloud plugins
        # when we are done with our second  stage customizations
        # TODO: Consider moving all of that back here

        guestfs_handle = launch_inspect_and_mount(builder.target_image.data)
        remove_net_persist(guestfs_handle)
        shutdown_and_close(guestfs_handle)

        try:
            self.log.debug(
                "Doing second-stage target_image customization and ICICLE generation"
            )
            #self.percent_complete = 30
            builder.target_image.icicle = self.guest.customize_and_generate_icicle(
                libvirt_xml)
            self.log.debug("Customization and ICICLE generation complete")
            #self.percent_complete = 50
        finally:
            self.activity("Cleaning up install artifacts")
            self.guest.cleanup_install()

    def add_cloud_plugin_content(self, content):
        # This is a method that cloud plugins can call to deposit content/commands to be run
        # during the OS-specific first stage of the Target Image creation.
        # The expected input is a dict containing commands and files
        # No support for repos at the moment as these introduce external deps that we may not be able to count on
        # Add this to an array which will later be merged into the TDL object used to drive Oz
        self.cloud_plugin_content.append(content)

    def merge_cloud_plugin_content(self):
        for content in self.cloud_plugin_content:
            if 'files' in content:
                for fileentry in content['files']:
                    if not 'name' in fileentry:
                        raise ImageFactoryException(
                            "File given without a name")
                    if not 'type' in fileentry:
                        raise ImageFactoryException(
                            "File given without a type")
                    if not 'file' in fileentry:
                        raise ImageFactoryException(
                            "File given without any content")
                    if fileentry['type'] == 'raw':
                        self.tdlobj.files[
                            fileentry['name']] = fileentry['file']
                    elif fileentry['type'] == 'base64':
                        if len(fileentry['file']) == 0:
                            self.tdlobj.files[fileentry['name']] = ""
                        else:
                            self.tdlobj.files[
                                fileentry['name']] = base64.b64decode(
                                    fileentry['file'])
                    else:
                        raise ImageFactoryException(
                            "File given with invalid type (%s)" %
                            (fileentry['type']))

            if 'commands' in content:
                for command in content['commands']:
                    if not 'name' in command:
                        raise ImageFactoryException(
                            "Command given without a name")
                    if not 'type' in command:
                        raise ImageFactoryException(
                            "Command given without a type")
                    if not 'command' in command:
                        raise ImageFactoryException(
                            "Command given without any content")
                    if command['type'] == 'raw':
                        self.tdlobj.commands[
                            command['name']] = command['command']
                    elif command['type'] == 'base64':
                        if len(command['command']) == 0:
                            self.log.warning("Command with zero length given")
                            self.tdlobj.commands[command['name']] = ""
                        else:
                            self.tdlobj.commands[
                                command['name']] = base64.b64decode(
                                    command['command'])
                    else:
                        raise ImageFactoryException(
                            "Command given with invalid type (%s)" %
                            (command['type']))

    def add_target_content(self):
        """Merge in target specific package and repo content.
        TDL object must already exist as self.tdlobj"""
        doc = None
        if isfile("/etc/imagefactory/target_content.xml"):
            doc = libxml2.parseFile("/etc/imagefactory/target_content.xml")
        else:
            self.log.debug(
                "Found neither a call-time config nor a config file - doing nothing"
            )
            return

        # Purely to make the xpath statements below a tiny bit shorter
        target = self.target
        os = self.tdlobj.distro
        version = self.tdlobj.update
        arch = self.tdlobj.arch

        # We go from most to least specific in this order:
        #   arch -> version -> os-> target
        # Note that at the moment we even allow an include statement that covers absolutely everything.
        # That is, one that doesn't even specify a target - this is to support a very simple call-time syntax
        include = doc.xpathEval(
            "/template_includes/include[@target='%s' and @os='%s' and @version='%s' and @arch='%s']"
            % (target, os, version, arch))
        if len(include) == 0:
            include = doc.xpathEval(
                "/template_includes/include[@target='%s' and @os='%s' and @version='%s' and not(@arch)]"
                % (target, os, version))
        if len(include) == 0:
            include = doc.xpathEval(
                "/template_includes/include[@target='%s' and @os='%s' and not(@version) and not(@arch)]"
                % (target, os))
        if len(include) == 0:
            include = doc.xpathEval(
                "/template_includes/include[@target='%s' and not(@os) and not(@version) and not(@arch)]"
                % (target))
        if len(include) == 0:
            include = doc.xpathEval(
                "/template_includes/include[not(@target) and not(@os) and not(@version) and not(@arch)]"
            )
        if len(include) == 0:
            self.log.debug(
                "cannot find a config section that matches our build details - doing nothing"
            )
            return

        # OK - We have at least one config block that matches our build - take the first one, merge it and be done
        # TODO: Merge all of them?  Err out if there is more than one?  Warn?
        include = include[0]

        packages = include.xpathEval("packages")
        if len(packages) > 0:
            self.tdlobj.merge_packages(str(packages[0]))

        repositories = include.xpathEval("repositories")
        if len(repositories) > 0:
            self.tdlobj.merge_repositories(str(repositories[0]))

    def __init__(self):
        super(TinMan, self).__init__()
        self.cloud_plugin_content = []
        config_obj = ApplicationConfiguration()
        self.app_config = config_obj.configuration
        self.res_mgr = ReservationManager()
        self.log = logging.getLogger('%s.%s' %
                                     (__name__, self.__class__.__name__))
        self.parameters = None
        self.install_script_object = None
        self.guest = None

    def abort(self):
        self.log.debug("ABORT called in TinMan plugin")
        # If we have an active Oz VM destroy it - if not do nothing but log why we did nothing
        if not self.guest:
            self.log.debug("No Oz guest object present - nothing to do")
            return

        try:
            # Oz doesn't keep the active domain object as an instance variable so we have to look it up
            guest_dom = self.guest.libvirt_conn.lookupByName(self.tdlobj.name)
        except Exception as e:
            self.log.exception(e)
            self.log.debug("No Oz VM found with name (%s) - nothing to do" %
                           (self.tdlobj.name))
            self.log.debug(
                "This likely means the local VM has already been destroyed or never started"
            )
            return

        try:
            self.log.debug("Attempting to destroy local guest/domain (%s)" %
                           (self.tdlobj.name))
            guest_dom.destroy()
        except Exception as e:
            self.log.exception(e)
            self.log.warning(
                "Exception encountered while destroying domain - it may still exist"
            )

    def _init_oz(self):
        # TODO: This is a convenience variable for refactoring - rename
        self.new_image_id = self.active_image.identifier

        # Create a name combining the TDL name and the UUID for use when tagging EC2 AMIs
        self.longname = self.tdlobj.name + "-" + self.new_image_id
        # Oz assumes unique names - TDL built for multiple backends guarantees they are not unique
        # We don't really care about the name so just force uniqueness
        # 18-Jul-2011 - Moved to constructor and modified to change TDL object name itself
        #   Oz now uses the tdlobject name property directly in several places so we must change it
        self.tdlobj.name = "factory-build-" + self.new_image_id

        # populate a config object to pass to OZ; this allows us to specify our
        # own output dir but inherit other Oz behavior
        self.oz_config = configparser.SafeConfigParser()
        if self.oz_config.read("/etc/oz/oz.cfg") != []:
            if self.parameters.get("oz_overrides", None) != None:
                oz_overrides = json.loads(
                    self.parameters.get("oz_overrides",
                                        None).replace("'", "\""))
                for i in oz_overrides:
                    for key, val in oz_overrides[i].items():
                        self.oz_config.set(i, key, str(val))

            self.oz_config.set('paths', 'output_dir',
                               self.app_config["imgdir"])
            if "oz_data_dir" in self.app_config:
                self.oz_config.set('paths', 'data_dir',
                                   self.app_config["oz_data_dir"])
            if "oz_screenshot_dir" in self.app_config:
                self.oz_config.set('paths', 'screenshot_dir',
                                   self.app_config["oz_screenshot_dir"])
            print("=============== Final Oz Config ================")
            for section in self.oz_config.sections():
                print("[ {0} ]".format(section))
                for option in self.oz_config.options(section):
                    print("  {0} = {1}".format(
                        option, self.oz_config.get(section, option)))
        else:
            raise ImageFactoryException(
                "No Oz config file found. Can't continue.")

        # make this a property to enable quick cleanup on abort
        self.instance = None

        # Here we are always dealing with a local install
        self.init_guest()

    ## INTERFACE METHOD
    def create_base_image(self, builder, template, parameters):
        self.log.info(
            'create_base_image() called for TinMan plugin - creating a BaseImage'
        )

        self.tdlobj = oz.TDL.TDL(
            xmlstring=template.xml,
            rootpw_required=self.app_config["tdl_require_root_pw"])
        if parameters:
            self.parameters = parameters
        else:
            self.parameters = {}

        # TODO: Standardize reference scheme for the persistent image objects in our builder
        #   Having local short-name copies like this may well be a good idea though they
        #   obscure the fact that these objects are in a container "upstream" of our plugin object
        self.base_image = builder.base_image

        # Set to the image object that is actively being created or modified
        # Used in the logging helper function above
        self.active_image = self.base_image

        try:
            self._init_oz()
            self.guest.diskimage = self.base_image.data
            self.activity("Cleaning up any old Oz guest")
            self.guest.cleanup_old_guest()
            self.activity("Generating JEOS install media")
            self.threadsafe_generate_install_media(self.guest)
            self.percent_complete = 10

            # We want to save this later for use by RHEV-M and Condor clouds
            libvirt_xml = ""
            gfs = None

            try:
                self.activity("Generating JEOS disk image")
                # Newer Oz versions introduce a configurable disk size in TDL
                # We must still detect that it is present and pass it in this call
                try:
                    disksize = getattr(self.guest, "disksize")
                except AttributeError:
                    disksize = 10
                self.guest.generate_diskimage(size=disksize)
                # TODO: If we already have a base install reuse it
                #  subject to some rules about updates to underlying repo
                self.activity("Execute JEOS install")
                libvirt_xml = self.guest.install(self.app_config["timeout"])
                self.base_image.parameters['libvirt_xml'] = libvirt_xml
                self.image = self.guest.diskimage
                self.log.debug(
                    "Base install complete - Doing customization and ICICLE generation"
                )
                self.percent_complete = 30
                # Power users may wish to avoid ever booting the guest after the installer is finished
                # They can do so by passing in a { "generate_icicle": False } KV pair in the parameters dict
                if parameter_cast_to_bool(
                        self.parameters.get("generate_icicle", True)):
                    if parameter_cast_to_bool(
                            self.parameters.get("offline_icicle", False)):
                        self.guest.customize(libvirt_xml)
                        gfs = launch_inspect_and_mount(self.image,
                                                       readonly=True)

                        # Monkey-patching is bad
                        # TODO: Work with Chris to incorporate a more elegant version of this into Oz itself
                        def libguestfs_execute_command(gfs, cmd, timeout):
                            stdout = gfs.sh(cmd)
                            return (stdout, None, 0)

                        self.guest.guest_execute_command = libguestfs_execute_command
                        builder.base_image.icicle = self.guest.do_icicle(gfs)
                    else:
                        builder.base_image.icicle = self.guest.customize_and_generate_icicle(
                            libvirt_xml)
                else:
                    # koji errs out if this value is None - set to an empty ICICLE instead
                    builder.base_image.icicle = "<icicle></icicle>"
                    self.guest.customize(libvirt_xml)
                self.log.debug("Customization and ICICLE generation complete")
                self.percent_complete = 50
            finally:
                self.activity("Cleaning up install artifacts")
                if self.guest:
                    self.guest.cleanup_install()
                if self.install_script_object:
                    # NamedTemporaryFile - removed on close
                    self.install_script_object.close()
                if gfs:
                    shutdown_and_close(gfs)

            self.log.debug("Generated disk image (%s)" %
                           (self.guest.diskimage))
            # OK great, we now have a customized KVM image

        finally:
            pass
            # TODO: Create the base_image object representing this
            # TODO: Create the base_image object at the beginning and then set the diskimage accordingly

    def init_guest(self):
        # Use the factory function from Oz directly
        # This raises an exception if the TDL contains an unsupported distro or version
        # Cloud plugins that use KVM directly, such as RHEV-M and openstack-kvm can accept
        # any arbitrary guest that Oz is capable of producing

        install_script_name = None
        install_script = self.parameters.get("install_script", None)
        if install_script:
            self.install_script_object = NamedTemporaryFile(mode='w')
            self.install_script_object.write(install_script)
            self.install_script_object.flush()
            install_script_name = self.install_script_object.name

        try:
            self.guest = oz.GuestFactory.guest_factory(self.tdlobj,
                                                       self.oz_config,
                                                       install_script_name)
            # Oz just selects a random port here - This could potentially collide if we are unlucky
            self.guest.listen_port = self.res_mgr.get_next_listen_port()
        except libvirtError as e:
            raise ImageFactoryException(
                "Cannot connect to libvirt.  Make sure libvirt is running. [Original message: %s]"
                % e.message)
        except OzException as e:
            if "Unsupported" in e.message:
                raise ImageFactoryException(
                    "TinMan plugin does not support distro (%s) update (%s) in TDL"
                    % (self.tdlobj.distro, self.tdlobj.update))
            else:
                raise e

    def log_exc(self):
        self.log.debug("Exception caught in ImageFactory")
        self.log.debug(traceback.format_exc())
        self.active_image.status_detail['error'] = traceback.format_exc()

    def threadsafe_generate_install_media(self, guest):
        # Oz caching of install media and modified install media is not thread safe
        # Make it safe here using some locks
        # We can only have one active generate_install_media() call for each unique tuple:
        #  (OS, update, architecture, installtype)

        tdl = guest.tdl
        queue_name = "%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch,
                                      tdl.installtype)
        self.res_mgr.get_named_lock(queue_name)
        try:
            guest.generate_install_media(force_download=False)
        finally:
            self.res_mgr.release_named_lock(queue_name)
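
# The offline ICICLE path above mounts the finished disk image with libguestfs and
# runs commands in it without booting the guest.  Below is a minimal standalone
# sketch of that shortcut, reusing the factory helpers seen above; the import path
# and the example image path are assumptions, not taken from this file.
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close

def offline_sh(disk_image, cmd):
    # Mount the image read-only, run a shell command inside it and return stdout only
    # (mirroring the monkey-patched guest_execute_command in the plugin above)
    gfs = launch_inspect_and_mount(disk_image, readonly=True)
    try:
        return gfs.sh(cmd)
    finally:
        shutdown_and_close(gfs)

# Usage sketch:
# rpms = offline_sh("/var/lib/imagefactory/images/example.img", "rpm -qa")
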
Exemple #22
class FedoraOS(object):
    zope.interface.implements(OSDelegate)

    def activity(self, activity):
        # Simple helper function
        # Activity should be a one line human-readable string indicating the task in progress
        # We log it at DEBUG and also set it as the status_detail on our active image
        self.log.debug(activity)
        self.active_image.status_detail['activity'] = activity

    ## INTERFACE METHOD
    def create_target_image(self, builder, target, base_image, parameters):
        self.log.info('create_target_image() called for FedoraOS plugin - creating a TargetImage')
        self.active_image = builder.target_image
        self.target = target
        self.base_image = builder.base_image

        # populate our target_image bodyfile with the original base image
        # which we do not want to modify in place
        self.activity("Copying BaseImage to modifiable TargetImage")
        self.log.debug("Copying base_image file (%s) to new target_image file (%s)" % (builder.base_image.data, builder.target_image.data))
        oz.ozutil.copyfile_sparse(builder.base_image.data, builder.target_image.data)
        self.image = builder.target_image.data

        # Merge together any TDL-style customizations requested via our plugin-to-plugin interface
        # with any target specific packages, repos and commands and then run a second Oz customization
        # step.
        self.tdlobj = oz.TDL.TDL(xmlstring=builder.base_image.template, rootpw_required=self.app_config["tdl_require_root_pw"])
        
        # We remove any packages, commands and files from the original TDL - these have already been
        # installed/executed.  We leave the repos in place, as it is possible that the target
        # specific packages or commands may require them.
        self.tdlobj.packages = [ ]
        self.tdlobj.commands = { }
        self.tdlobj.files = { } 
        # This is user-defined target-specific packages and repos in a local config file
        self.add_target_content()
        # This is content deposited by cloud plugins - typically commands to run to prep the image further
        self.merge_cloud_plugin_content()

        # If there are no new commands, packages or files, we can stop here - there is no need to run Oz again
        if (len(self.tdlobj.packages) + len(self.tdlobj.commands) + len(self.tdlobj.files)) == 0:
            self.log.debug("No further modification of the TargetImage to perform in the OS Plugin - returning")
            return 

        # We have some additional work to do - create a new Oz guest object that we can use to run the guest
        # customization a second time
        self._init_oz()

        self.guest.diskimage = builder.target_image.data

        libvirt_xml = self.guest._generate_xml("hd", None)

        # One last step is required here - The persistent net rules in some Fedora and RHEL versions
        # Will cause our new incarnation of the image to fail to get network - fix that here
        # We unfortunately end up having to duplicate this a second time in the cloud plugins
        # when we are done with our second  stage customizations
        # TODO: Consider moving all of that back here

        guestfs_handle = launch_inspect_and_mount(builder.target_image.data)
        remove_net_persist(guestfs_handle)
        shutdown_and_close(guestfs_handle)

        try:
            self.log.debug("Doing second-stage target_image customization and ICICLE generation")
            #self.percent_complete = 30
            self.output_descriptor = self.guest.customize_and_generate_icicle(libvirt_xml)
            self.log.debug("Customization and ICICLE generation complete")
            #self.percent_complete = 50
        finally:
            self.activity("Cleaning up install artifacts")
            self.guest.cleanup_install()

    def add_cloud_plugin_content(self, content):
        # This is a method that cloud plugins can call to deposit content/commands to be run
        # during the OS-specific first stage of the Target Image creation.
        # The expected input is a dict containing commands and files
        # No support for repos at the moment as these introduce external deps that we may not be able to count on
        # Add this to an array which will later be merged into the TDL object used to drive Oz
        self.cloud_plugin_content.append(content)
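
    # Illustrative only (an assumption, not used anywhere by the plugin): the shape of
    # the dict a cloud plugin is expected to pass to add_cloud_plugin_content() above.
    # Each file entry needs 'name', 'type' ('raw' or 'base64') and 'file'; each command
    # entry needs 'name', 'type' and 'command' - see merge_cloud_plugin_content() below.
    _example_cloud_plugin_content = {
        'files': [
            {'name': '/etc/motd', 'type': 'raw', 'file': 'built by imagefactory'},
        ],
        'commands': [
            {'name': 'enable-sshd', 'type': 'raw', 'command': 'chkconfig sshd on'},
        ],
    }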

    def merge_cloud_plugin_content(self):
        for content in self.cloud_plugin_content:
            if 'files' in content:
                for fileentry in content['files']:
                    if not 'name' in fileentry:
                        raise ImageFactoryException("File given without a name")
                    if not 'type' in fileentry:
                        raise ImageFactoryException("File given without a type")
                    if not 'file' in fileentry:
                        raise ImageFactoryException("File given without any content")
                    if fileentry['type'] == 'raw':
                        self.tdlobj.files[fileentry['name']] = fileentry['file']
                    elif fileentry['type'] == 'base64':
                        if len(fileentry['file']) == 0:
                            self.tdlobj.files[fileentry['name']] = ""
                        else:
                            self.tdlobj.files[fileentry['name']] = base64.b64decode(fileentry['file'])
                    else:
                        raise ImageFactoryException("File given with invalid type (%s)" % (file['type']))

            if 'commands' in content:
                for command in content['commands']:
                    if not 'name' in command:
                        raise ImageFactoryException("Command given without a name")
                    if not 'type' in command:
                        raise ImageFactoryException("Command given without a type")
                    if not 'command' in command:
                        raise ImageFactoryException("Command given without any content")
                    if command['type'] == 'raw':
                        self.tdlobj.commands[command['name']] = command['command']
                    elif command['type'] == 'base64':
                        if len(command['command']) == 0:
                            self.log.warning("Command with zero length given")
                            self.tdlobj.commands[command['name']] = ""
                        else:
                            self.tdlobj.commands[command['name']] = base64.b64decode(command['command'])
                    else:
                        raise ImageFactoryException("Command given with invalid type (%s)" % (command['type']))


    def add_target_content(self):
        """Merge in target specific package and repo content.
        TDL object must already exist as self.tdlobj"""
        doc = None
        if isfile("/etc/imagefactory/target_content.xml"):
            doc = libxml2.parseFile("/etc/imagefactory/target_content.xml")
        else:
            self.log.debug("Found neither a call-time config nor a config file - doing nothing")
            return

        # Purely to make the xpath statements below a tiny bit shorter
        target = self.target
        os = self.tdlobj.distro
        version = self.tdlobj.update
        arch = self.tdlobj.arch

        # We go from most to least specific in this order:
        #   arch -> version -> os -> target
        # Note that at the moment we even allow an include statement that covers absolutely everything.
        # That is, one that doesn't even specify a target - this is to support a very simple call-time syntax
        # (an illustrative target_content.xml sketch follows this example)
        include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and @arch='%s']" %
                  (target, os, version, arch))
        if len(include) == 0:
            include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and not(@arch)]" %
                      (target, os, version))
        if len(include) == 0:
            include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and not(@version) and not(@arch)]" %
                      (target, os))
        if len(include) == 0:
            include = doc.xpathEval("/template_includes/include[@target='%s' and not(@os) and not(@version) and not(@arch)]" %
                      (target))
        if len(include) == 0:
            include = doc.xpathEval("/template_includes/include[not(@target) and not(@os) and not(@version) and not(@arch)]")
        if len(include) == 0:
            self.log.debug("cannot find a config section that matches our build details - doing nothing")
            return

        # OK - We have at least one config block that matches our build - take the first one, merge it and be done
        # TODO: Merge all of them?  Err out if there is more than one?  Warn?
        include = include[0]

        packages = include.xpathEval("packages")
        if len(packages) > 0:
            self.tdlobj.merge_packages(str(packages[0]))

        repositories = include.xpathEval("repositories")
        if len(repositories) > 0:
            self.tdlobj.merge_repositories(str(repositories[0]))


    def __init__(self):
        super(FedoraOS, self).__init__()
        self.cloud_plugin_content = [ ]
        config_obj = ApplicationConfiguration()
        self.app_config = config_obj.configuration
        self.res_mgr = ReservationManager()
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))


    def _init_oz(self):
        # TODO: This is a convenience variable for refactoring - rename
        self.new_image_id = self.active_image.identifier

        # Create a name combining the TDL name and the UUID for use when tagging EC2 AMIs
        self.longname = self.tdlobj.name + "-" + self.new_image_id
        # Oz assumes unique names - TDL built for multiple backends guarantees they are not unique
        # We don't really care about the name so just force uniqueness
        # 18-Jul-2011 - Moved to constructor and modified to change TDL object name itself
        #   Oz now uses the tdlobject name property directly in several places so we must change it
        self.tdlobj.name = "factory-build-" + self.new_image_id

        # populate a config object to pass to OZ; this allows us to specify our
        # own output dir but inherit other Oz behavior
        self.oz_config = ConfigParser.SafeConfigParser()
        if self.oz_config.read("/etc/oz/oz.cfg") != []:
            self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
        else:
            raise ImageFactoryException("No Oz config file found. Can't continue.")

        # make this a property to enable quick cleanup on abort
        self.instance = None

        # Here we are always dealing with a local install
        self.init_guest()


    ## INTERFACE METHOD
    def create_base_image(self, builder, template, parameters):
        self.log.info('create_base_image() called for FedoraOS plugin - creating a BaseImage')

        self.tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])

        # TODO: Standardize reference scheme for the persistent image objects in our builder
        #   Having local short-name copies like this may well be a good idea though they
        #   obscure the fact that these objects are in a container "upstream" of our plugin object
        self.base_image = builder.base_image

        # Set to the image object that is actively being created or modified
        # Used in the logging helper function above
        self.active_image = self.base_image

        self._init_oz()

        self.guest.diskimage = self.base_image.data
        # The remainder comes from the original build_upload(self, build_id)

        self.status="BUILDING"
        try:
            self.activity("Cleaning up any old Oz guest")
            self.guest.cleanup_old_guest()
            self.activity("Generating JEOS install media")
            self.threadsafe_generate_install_media(self.guest)
            self.percent_complete=10

            # We want to save this later for use by RHEV-M and Condor clouds
            libvirt_xml=""

            try:
                self.activity("Generating JEOS disk image")
                self.guest.generate_diskimage()
                # TODO: If we already have a base install reuse it
                #  subject to some rules about updates to underlying repo
                self.activity("Execute JEOS install")
                libvirt_xml = self.guest.install(self.app_config["timeout"])
                self.base_image.parameters['libvirt_xml'] = libvirt_xml
                self.image = self.guest.diskimage
                self.log.debug("Base install complete - Doing customization and ICICLE generation")
                self.percent_complete = 30
                self.output_descriptor = self.guest.customize_and_generate_icicle(libvirt_xml)
                self.log.debug("Customization and ICICLE generation complete")
                self.percent_complete = 50
            finally:
                self.activity("Cleaning up install artifacts")
                self.guest.cleanup_install()

            self.log.debug("Generated disk image (%s)" % (self.guest.diskimage))
            # OK great, we now have a customized KVM image

        finally:
            pass
            # TODO: Create the base_image object representing this
            # TODO: Create the base_image object at the beginning and then set the diskimage accordingly

    def init_guest(self):
        # Use the factory function from Oz directly
        # This raises an exception if the TDL contains an unsupported distro or version
        # Cloud plugins that use KVM directly, such as RHEV-M and openstack-kvm can accept
        # any arbitrary guest that Oz is capable of producing
        try:
            self.guest = oz.GuestFactory.guest_factory(self.tdlobj, self.oz_config, None)
            # Oz just selects a random port here - This could potentially collide if we are unlucky
            self.guest.listen_port = self.res_mgr.get_next_listen_port()
        except Exception as e:
            raise ImageFactoryException("OS plugin does not support distro (%s) update (%s) in TDL [Original message: %s]" % (self.tdlobj.distro, self.tdlobj.update, e))

    def log_exc(self):
        self.log.debug("Exception caught in ImageFactory")
        self.log.debug(traceback.format_exc())
        self.active_image.status_detail['error'] = traceback.format_exc()

    def threadsafe_generate_install_media(self, guest):
        # Oz caching of install media and modified install media is not thread safe
        # Make it safe here using some locks
        # We can only have one active generate_install_media() call for each unique tuple:
        #  (OS, update, architecture, installtype)

        tdl = guest.tdl
        queue_name = "%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch, tdl.installtype)
        self.res_mgr.get_named_lock(queue_name)
        try:
            guest.generate_install_media(force_download=False)
        finally:
            self.res_mgr.release_named_lock(queue_name)
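
# A sketch of what /etc/imagefactory/target_content.xml could look like, inferred
# purely from the xpath fall-through in add_target_content() above; the attribute
# values and the package payload are made-up illustrations, not the project's
# documented format.
import libxml2

EXAMPLE_TARGET_CONTENT = """
<template_includes>
  <include target='ec2' os='Fedora'>
    <packages>
      <package name='cloud-init'/>
    </packages>
  </include>
  <include>
    <packages>
      <package name='acpid'/>
    </packages>
  </include>
</template_includes>
"""

doc = libxml2.parseDoc(EXAMPLE_TARGET_CONTENT)
# Same query style the plugin uses: a target+os match wins over the catch-all include
matches = doc.xpathEval("/template_includes/include[@target='ec2' and @os='Fedora' and not(@version) and not(@arch)]")
print(len(matches))  # 1
doc.freeDoc()
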
Exemple #23
 def testSingleton(self):
     """
     Prove this class produces a singleton object.
     """
     self.assertEqual(id(self.res_mgr), id(ReservationManager()))
class IndirectionCloud(object):
    zope.interface.implements(CloudDelegate)

    def __init__(self):
        super(IndirectionCloud, self).__init__()
        self.app_config = ApplicationConfiguration().configuration
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.pim = PersistentImageManager.default_manager()
        self.res_mgr = ReservationManager()

    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        # This plugin wants to be the only thing operating on the input image
        # We do all our work here and then return False which stops any additional activity

        # User may specify a utility image - if they do not we assume we can use the input image
        utility_image_id = parameters.get('utility_image', image_id)

        # The utility image is what we actually re-animate with Oz
        # We borrow these variable names from code that is very similar to the Oz/TinMan OS plugin
        self.active_image = self.pim.image_with_id(utility_image_id)
        if not self.active_image:
            raise Exception("Could not find utility image with ID (%s)" % (utility_image_id) )
        self.tdlobj = oz.TDL.TDL(xmlstring=self.active_image.template)

        # Later on, we will either copy in the base_image content as a file, or expose it as a device
        # to the utility VM.  We cannot do both.  Detect invalid input here before doing any long running
        # work
        input_image_device = parameters.get('input_image_device', None)
        input_image_file = parameters.get('input_image_filename', None)

        if input_image_device and input_image_file:
            raise Exception("You can specify either an input_image_device or an input_image_file but not both")

        if (not input_image_device) and (not input_image_file):
            input_image_file="/input_image.raw"


        # We remove any packages, commands and files from the original TDL - these have already been
        # installed/executed.  We leave the repos in place, as it is possible that commands executed
        # later may depend on them
        self.tdlobj.packages = [ ]
        self.tdlobj.commands = { }
        self.tdlobj.files = { } 

        # This creates a new Oz object - replaces the auto-generated disk file location with
        # the copy of the utility image made above, and prepares an initial libvirt_xml string
        self._init_oz()
        utility_image_tmp = self.app_config['imgdir'] + "/tmp-utility-image-" + str(builder.target_image.identifier)
        self.guest.diskimage = utility_image_tmp
        # Below we will create this file as a qcow2 image using the original utility image as
        # a backing store - For the follow-on XML generation to work correctly, we need to force
        # Oz to use qcow2 as the image type
        self.guest.image_type = 'qcow2'

        if 'utility_cpus' in parameters:
            self.guest.install_cpus = int(parameters['utility_cpus'])

        libvirt_xml = self.guest._generate_xml("hd", None)
        libvirt_doc = libxml2.parseDoc(libvirt_xml)

        # Now we create a second disk image as working/scratch space
        # Hardcode at 30G
        # TODO: Make configurable
        # Make it, format it, copy in the base_image 
        working_space_image = self.app_config['imgdir'] + "/working-space-image-" + str(builder.target_image.identifier)
        self.create_ext2_image(working_space_image)

        # Modify the libvirt_xml used with Oz to contain a reference to a second "working space" disk image
        working_space_device = parameters.get('working_space_device', 'vdb')
        self.add_disk(libvirt_doc, working_space_image, working_space_device)

        self.log.debug("Updated domain XML with working space image:\n%s" % (libvirt_xml))

        # We expect to find a partial TDL document in this parameter - this is what drives the
        # tasks performed by the utility image
        if 'utility_customizations' in parameters:
            self.oz_refresh_customizations(parameters['utility_customizations'])
        else:
            self.log.info('No additional repos, packages, files or commands specified for utility tasks')

        # Create a qcow2 image using the original utility image file (which may be read-only) as a
        # backing store (a standalone qemu-img sketch of this step follows the class).
        self.log.debug("Creating temporary writable qcow2 working copy of utility image (%s) as (%s)" % (self.active_image.data, utility_image_tmp))
        self.guest._internal_generate_diskimage(image_filename=utility_image_tmp, backing_filename=self.active_image.data)

        if input_image_file: 
            # Here we finally involve the actual Base Image content - it is made available for the utility image to modify
            self.copy_content_to_image(builder.base_image.data, working_space_image, input_image_file)
        else:
            # Note that we know that one or the other of these are set because of code earlier
            self.add_disk(libvirt_doc, builder.base_image.data, input_image_device)

        # Run all commands, repo injection, etc specified
        try:
            self.log.debug("Launching utility image and running any customizations specified")
            libvirt_xml = libvirt_doc.serialize(None, 1)
            self.guest.customize(libvirt_xml)
            self.log.debug("Utility image tasks complete")
        finally:
            self.log.debug("Cleaning up install artifacts")
            self.guest.cleanup_install()

        # After shutdown, extract the results
        results_location = parameters.get('results_location', "/results/images/boot.iso")
        self.copy_content_from_image(results_location, working_space_image, builder.target_image.data)

        # TODO: Remove working_space image and utility_image_tmp
        return False


    def add_disk(self, libvirt_doc, disk_image_file, device_name):
        devices = libvirt_doc.xpathEval("/domain/devices")[0]
        new_dev = devices.newChild(None, "disk", None)
        new_dev.setProp("type", "file")
        new_dev.setProp("device", "disk")
        source = new_dev.newChild(None, "source", None)
        source.setProp("file", disk_image_file)
        target = new_dev.newChild(None, "target", None)
        target.setProp("dev", device_name)
        target.setProp("bus", self.guest.disk_bus)


    def oz_refresh_customizations(self, partial_tdl):
        # This takes our already created and well formed TDL object with already blank customizations
        # and attempts to add in any additional customizations found in partial_tdl
        # partial_tdl need not contain the <os>, <name> or <description> sections
        # if it does they will be ignored
        # TODO: Submit an Oz patch to make this shorter or a utility function within the TDL class

        doc = lxml.etree.fromstring(partial_tdl)
        self.tdlobj.doc = doc 

        packageslist = doc.xpath('/template/packages/package')
        self.tdlobj._add_packages(packageslist)

        for afile in doc.xpath('/template/files/file'):
            name = afile.get('name')
            if name is None:
                raise Exception("File without a name was given")
            contenttype = afile.get('type')
            if contenttype is None:
                contenttype = 'raw'

            content = afile.text
            if content:
                content = content.strip()
            else:
                content = ''
            self.tdlobj.files[name] = data_from_type(name, contenttype, content)

        repositorieslist = doc.xpath('/template/repositories/repository')
        self.tdlobj._add_repositories(repositorieslist)

        self.tdlobj.commands = self.tdlobj._parse_commands()


    def _init_oz(self):
        # populate a config object to pass to OZ; this allows us to specify our
        # own output dir but inherit other Oz behavior
        self.oz_config = ConfigParser.SafeConfigParser()
        if self.oz_config.read("/etc/oz/oz.cfg") != []:
            self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
            if "oz_data_dir" in self.app_config:
                self.oz_config.set('paths', 'data_dir', self.app_config["oz_data_dir"])
            if "oz_screenshot_dir" in self.app_config:
                self.oz_config.set('paths', 'screenshot_dir', self.app_config["oz_screenshot_dir"])
        else:
            raise ImageFactoryException("No Oz config file found. Can't continue.")

        # Use the factory function from Oz directly
        try:
            # Force uniqueness by overriding the name in the TDL
            self.tdlobj.name = "factory-build-" + self.active_image.identifier
            self.guest = oz.GuestFactory.guest_factory(self.tdlobj, self.oz_config, None)
            # Oz just selects a random port here - This could potentially collide if we are unlucky
            self.guest.listen_port = self.res_mgr.get_next_listen_port()
        except libvirtError as e:
            raise ImageFactoryException("Cannot connect to libvirt.  Make sure libvirt is running. [Original message: %s]" % e.message)
        except OzException as e:
            if "Unsupported" in e.message:
                raise ImageFactoryException("TinMan plugin does not support distro (%s) update (%s) in TDL" % (self.tdlobj.distro, self.tdlobj.update) )
            else:
                raise e
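
# Rough standalone equivalent (sketch only) of the step above where Oz is asked to
# create a temporary qcow2 overlay backed by the read-only utility image, so that
# all writes made during the utility run land in the throwaway overlay.  Paths are
# illustrative; some newer qemu-img builds also require an explicit backing format
# via -F.
import subprocess

def make_qcow2_overlay(backing_file, overlay_file):
    subprocess.check_call(["qemu-img", "create", "-f", "qcow2",
                           "-b", backing_file, overlay_file])

# Usage sketch:
# make_qcow2_overlay("/var/lib/imagefactory/images/utility.img",
#                    "/var/lib/imagefactory/images/tmp-utility-image.qcow2")
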
class SecondaryDispatcher(Singleton):

    def _singleton_init(self):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.pending_uploads = dict()
        self.pending_uploads_lock = BoundedSemaphore()
        self.pim = PersistentImageManager.default_manager()
        self.res = ReservationManager()
        self.secondaries = { } 
        if os.path.isfile(SECONDARIES):
            try:
                secs = open(SECONDARIES, "r")
                self.secondaries = json.load(secs)
                secs.close()
            except:
                self.log.warning("Unable to load JSON for secondaries from %s", SECONDARIES)
        if not 'targets' in self.secondaries:
            self.secondaries['targets'] = { }
        if not 'providers' in self.secondaries:
            self.secondaries['providers'] = { }

    def queue_pending_upload(self, target_image_uuid):
        # Create a UUID - map it to the target_image_uuid and return it
        # TODO: Expire these somehow
        upload_uuid = str(uuid.uuid4())
        self.pending_uploads_lock.acquire()
        try:
            self.pending_uploads[upload_uuid] = target_image_uuid
        finally:
            self.pending_uploads_lock.release()
        return upload_uuid

    def target_image_for_upload_uuid(self, upload_uuid):
        # Return the target_image UUID for a given upload UUID if it exists
        # and remove it from the dict.  Return None if the UUID is not in the dict.
        self.pending_uploads_lock.acquire()
        target_image_uuid = None
        try:
            if upload_uuid in self.pending_uploads:
                target_image_uuid = self.pending_uploads[upload_uuid]
                del self.pending_uploads[upload_uuid]
        finally:
            self.pending_uploads_lock.release()

        return target_image_uuid
                
    def update_target_image_body(self, target_image, new_body):
        # Called during the clone process - we background the actual copy
        # and update the target_image in question to COMPLETED when the copy is finished
        # This allows the HTTP transaction to complete - in testing some upload clients
        # timed out waiting for the local copy to complete
        # (bottle.py internally processes all uploaded files to completion, storing them as unnamed temporary
        # files)
        target_update_thread = Thread(target=self._update_target_image_body, args=(target_image, new_body))
        target_update_thread.start()

    def _update_target_image_body(self, target_image, new_body):
        try:
            self.log.debug("Copying incoming file to %s" % (target_image.data))
            dest = open(target_image.data,"w")
            shutil.copyfileobj(new_body, dest, 16384)
            self.log.debug("Finished copying incoming file to %s" % (target_image.data))
            target_image.status="COMPLETE"
        except Exception as e:
            self.log.debug("Exception encountered when attempting to update target_image body")
            self.log.exception(e)
            target_image.status="FAILED"
        finally:
            self.pim.save_image(target_image)

    def prep_target_image_clone(self, request_data, target_image_id):
        # Request data should contain all target_image metadata to be cloned
        # If a target_image with this ID does not exist, create it and establish an
        # upload UUID and return both.
        # If the target_image already exists, return just the existing target_image

        upload_id = None
        self.res.get_named_lock(target_image_id)
        # At this point no other thread, either remote or local, can operate on this ID
        # The image either already exists or it doesn't.  If it doesn't we can safely create
        # it without worrying about concurrency problems.
        try:
            target_image = self.pim.image_with_id(target_image_id)
            if not target_image:
                upload_id = self.queue_pending_upload(target_image_id)
                target_image = TargetImage(image_id=target_image_id)
                metadata_keys = target_image.metadata()
                for data_element in request_data.keys():
                    if not (data_element in metadata_keys):
                        self.log.warning("Metadata field (%s) in incoming target_image clone request is non-standard - skipping" % (data_element))
                    else:
                        setattr(target_image, data_element, request_data[data_element])
                # The posted image will, of course, be complete, so fix status here
                target_image.status = "PENDING"
                target_image.percent_complete = 0
                target_image.status_detail = { 'activity': 'Image being cloned from primary factory - initial metadata set', 'error':None }
                self.pim.add_image(target_image)
                self.pim.save_image(target_image)
                self.log.debug("Completed save of target_image (%s)" % target_image.identifier)
        finally:
            self.res.release_named_lock(target_image_id)

        return (target_image, upload_id)

    def get_secondary(self, target, provider):
        if provider in self.secondaries['providers']:
            return self.secondaries['providers'][provider]
        elif target in self.secondaries['targets']:
            return self.secondaries['targets'][target]
        else:
            return None

    def get_helper(self, secondary):
        try:
            helper = SecondaryHelper(**secondary)
            return helper
        except Exception as e:
            self.log.error("Exception encountered when trying to create secondary helper object")
            self.log.error("Secondary details: %s" % (secondary))
            self.log.exception(e)
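
# Minimal standalone sketch (not imagefactory code) of the one-shot upload-token
# pattern used by queue_pending_upload() / target_image_for_upload_uuid() above:
# a freshly generated UUID maps to a target_image UUID and can be redeemed at most
# once, with a lock guarding the shared dict.
import uuid
from threading import BoundedSemaphore

class UploadTokens(object):
    def __init__(self):
        self._tokens = dict()
        self._lock = BoundedSemaphore()

    def issue(self, target_image_uuid):
        token = str(uuid.uuid4())
        with self._lock:
            self._tokens[token] = target_image_uuid
        return token

    def redeem(self, token):
        # Returns the target_image UUID the first time, None on any later attempt
        with self._lock:
            return self._tokens.pop(token, None)

# tokens = UploadTokens()
# t = tokens.issue("0f6be442-example-target-image")
# tokens.redeem(t)   # -> "0f6be442-example-target-image"
# tokens.redeem(t)   # -> None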