Example #1
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(
                        _("Unable to acquire lock on"
                          " `%(filename)s` due to"
                          " %(exception)s") % {
                              'filename': self.fname,
                              'exception': e,
                          })
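All of the snippets on this page call oslo's fileutils.ensure_tree before writing into a directory. As a rough sketch only (not the actual oslo implementation), the helper behaves approximately like os.makedirs that tolerates an already-existing directory:

import errno
import os


def ensure_tree(path):
    """Create a directory (and any missing parents) if it does not exist.

    Illustrative approximation of oslo's fileutils.ensure_tree.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists" when the existing path is a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise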
Example #2
def _cache_ramdisk_kernel(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(pxe_utils.get_root_dir(), node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s",
              node.uuid)
    deploy_utils.fetch_images(ctx, TFTPImageCache(), pxe_info.values())
Example #3
def _cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the relevant AMI and associated kernel and ramdisk,
    and the deploy kernel and ramdisk from Glance, and writes them
    to the appropriate places on local disk.

    Both sets of kernel and ramdisk are needed for PXE booting, so these
    are stored under CONF.pxe.tftp_root.

    At present, the AMI is cached and certain files are injected.
    Debian/ubuntu-specific assumptions are made regarding the injected
    files. In a future revision, this functionality will be replaced by a
    more scalable and os-agnostic approach: the deployment ramdisk will
    fetch from Glance directly, and write its own last-mile configuration.

    """
    i_info = _parse_instance_info(node)
    fileutils.ensure_tree(_get_image_dir_path(node.uuid))
    image_path = _get_image_file_path(node.uuid)
    uuid = i_info['image_source']

    LOG.debug("Fetching image %(ami)s for node %(uuid)s" % {
        'ami': uuid,
        'uuid': node.uuid
    })

    _fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)])

    return (uuid, image_path)
Example #4
    def test__cache_tftp_images_master_path(self):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_info = {
            'deploy_kernel': [
                'deploy_kernel',
                os.path.join(temp_dir, self.node.uuid, 'deploy_kernel')
            ]
        }
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)
        fd, tmp_master_image = tempfile.mkstemp(dir=CONF.pxe.tftp_master_path)

        with mock.patch.object(images, 'fetch_to_raw') as fetch_to_raw_mock:
            with mock.patch.object(tempfile, 'mkstemp') as mkstemp_mock:
                fetch_to_raw_mock.return_value = None
                mkstemp_mock.return_value = (fd, tmp_master_image)

                pxe._cache_tftp_images(None, self.node, image_info)

                fetch_to_raw_mock.assert_called_once_with(
                    None, 'deploy_kernel', tmp_master_image, None)
                mkstemp_mock.assert_called_once_with(
                    dir=CONF.pxe.tftp_master_path)
Example #5
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s",
              node.uuid)
    deploy_utils.fetch_images(ctx, AgentTFTPImageCache(), pxe_info.values())
Example #6
def cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the AMI and writes it to the appropriate place
    on local disk.

    :param ctx: context
    :param node: an ironic node object
    :returns: a tuple containing the uuid of the image and the path in
        the filesystem where image is cached.
    """
    i_info = parse_instance_info(node)
    fileutils.ensure_tree(_get_image_dir_path(node.uuid))
    image_path = _get_image_file_path(node.uuid)
    uuid = i_info['image_source']

    LOG.debug("Fetching image %(ami)s for node %(uuid)s", {
        'ami': uuid,
        'uuid': node.uuid
    })

    deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)],
                              CONF.force_raw_images)

    return (uuid, image_path)
Example #7
    def test__cache_instance_images_master_path(self):
        temp_dir = tempfile.mkdtemp()
        CONF.set_default('images_path', temp_dir, group='pxe')
        CONF.set_default('instance_master_path',
                         os.path.join(temp_dir, 'instance_master_path'),
                         group='pxe')
        fileutils.ensure_tree(CONF.pxe.instance_master_path)
        fd, tmp_master_image = tempfile.mkstemp(
            dir=CONF.pxe.instance_master_path)

        with mock.patch.object(images, 'fetch_to_raw') as fetch_to_raw_mock:
            with mock.patch.object(tempfile, 'mkstemp') as mkstemp_mock:
                with mock.patch.object(service_utils, 'parse_image_ref') \
                        as parse_image_ref_mock:
                    mkstemp_mock.return_value = (fd, tmp_master_image)
                    fetch_to_raw_mock.return_value = None
                    parse_image_ref_mock.return_value = ('image_uuid',
                                                         None,
                                                         None,
                                                         None)

                    (uuid, image_path) = pxe._cache_instance_image(None,
                                                                   self.node)

                    mkstemp_mock.assert_called_once_with(
                         dir=CONF.pxe.instance_master_path)
                    fetch_to_raw_mock.assert_called_once_with(None,
                                                       'glance://image_uuid',
                                                       tmp_master_image,
                                                       None)
                    parse_image_ref_mock.assert_called_once_with(
                                                       'glance://image_uuid')
                    self.assertEqual(uuid, 'glance://image_uuid')
                    self.assertEqual(
                            image_path, temp_dir + '/fake_instance_name/disk')
Example #8
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s",
              node.uuid)
    _fetch_images(ctx, TFTPImageCache(), pxe_info.values())
Example #9
    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance

        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.

        Both sets of kernel and ramdisk are needed for Tilera booting, so these
        are stored under CONF.tftp_root.

        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)

        LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
                        {'ami': image_meta['id'], 'name': instance['name']})
        utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id']
                        )

        return [image_meta['id'], image_path]
Example #10
 def test__cache_instance_images_master_path(self):
     temp_dir = tempfile.mkdtemp()
     CONF.set_default('images_path', temp_dir, group='pxe')
     CONF.set_default('instance_master_path',
                      os.path.join(temp_dir, 'instance_master_path'),
                      group='pxe')
     fileutils.ensure_tree(CONF.pxe.instance_master_path)
     fd, tmp_master_image = tempfile.mkstemp(
         dir=CONF.pxe.instance_master_path)
     self.mox.StubOutWithMock(images, 'fetch_to_raw')
     self.mox.StubOutWithMock(tempfile, 'mkstemp')
     self.mox.StubOutWithMock(service_utils, 'parse_image_ref')
     tempfile.mkstemp(dir=CONF.pxe.instance_master_path).\
         AndReturn((fd, tmp_master_image))
     images.fetch_to_raw(None, 'glance://image_uuid',
                         tmp_master_image,
                         None).\
         AndReturn(None)
     service_utils.parse_image_ref('glance://image_uuid').\
         AndReturn(('image_uuid', None, None, None))
     self.mox.ReplayAll()
     (uuid, image_path) = pxe._cache_instance_image(None, self.node)
     self.mox.VerifyAll()
     self.assertEqual(uuid, 'glance://image_uuid')
     self.assertEqual(image_path, temp_dir + '/fake_instance_name/disk')
Example #11
def _cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the relevant AMI and associated kernel and ramdisk,
    and the deploy kernel and ramdisk from Glance, and writes them
    to the appropriate places on local disk.

    Both sets of kernel and ramdisk are needed for PXE booting, so these
    are stored under CONF.pxe.tftp_root.

    At present, the AMI is cached and certain files are injected.
    Debian/ubuntu-specific assumptions are made regarding the injected
    files. In a future revision, this functionality will be replaced by a
    more scalable and os-agnostic approach: the deployment ramdisk will
    fetch from Glance directly, and write its own last-mile configuration.

    """
    d_info = _parse_driver_info(node)
    fileutils.ensure_tree(_get_image_dir_path(node.uuid))
    image_path = _get_image_file_path(node.uuid)
    uuid = d_info['image_source']

    LOG.debug(_("Fetching image %(ami)s for node %(uuid)s") %
              {'ami': uuid, 'uuid': node.uuid})

    if not os.path.exists(image_path):
        _get_image(ctx, image_path, uuid, CONF.pxe.instance_master_path)

    return (uuid, image_path)
Example #12
    def test__cache_instance_images_master_path(self):
        temp_dir = tempfile.mkdtemp()
        self.config(images_path=temp_dir, group='pxe')
        self.config(instance_master_path=os.path.join(temp_dir,
                                                      'instance_master_path'),
                    group='pxe')
        fileutils.ensure_tree(CONF.pxe.instance_master_path)
        fd, tmp_master_image = tempfile.mkstemp(
            dir=CONF.pxe.instance_master_path)

        with mock.patch.object(images, 'fetch_to_raw') as fetch_to_raw_mock:
            with mock.patch.object(tempfile, 'mkstemp') as mkstemp_mock:
                with mock.patch.object(service_utils, 'parse_image_ref') \
                        as parse_image_ref_mock:
                    mkstemp_mock.return_value = (fd, tmp_master_image)
                    fetch_to_raw_mock.return_value = None
                    parse_image_ref_mock.return_value = ('image_uuid', None,
                                                         None, None)

                    (uuid,
                     image_path) = pxe._cache_instance_image(None, self.node)

                    mkstemp_mock.assert_called_once_with(
                        dir=CONF.pxe.instance_master_path)
                    fetch_to_raw_mock.assert_called_once_with(
                        None, 'glance://image_uuid', tmp_master_image, None)
                    parse_image_ref_mock.assert_called_once_with(
                        'glance://image_uuid')
                    self.assertEqual('glance://image_uuid', uuid)
                    self.assertEqual(
                        os.path.join(temp_dir, self.node.uuid, 'disk'),
                        image_path)
Example #13
    def test__cache_tftp_images_master_path(self):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_info = {'deploy_kernel': ['deploy_kernel',
                                        os.path.join(temp_dir,
                                                     self.node.uuid,
                                                     'deploy_kernel')]}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)
        fd, tmp_master_image = tempfile.mkstemp(dir=CONF.pxe.tftp_master_path)

        with mock.patch.object(images, 'fetch_to_raw') as fetch_to_raw_mock:
            with mock.patch.object(tempfile, 'mkstemp') as mkstemp_mock:
                fetch_to_raw_mock.return_value = None
                mkstemp_mock.return_value = (fd, tmp_master_image)

                pxe._cache_tftp_images(None, self.node, image_info)

                fetch_to_raw_mock.assert_called_once_with(None,
                                                          'deploy_kernel',
                                                          tmp_master_image,
                                                          None)
                mkstemp_mock.assert_called_once_with(
                                                dir=CONF.pxe.tftp_master_path)
Example #14
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {
                                                    'filename': self.fname,
                                                    'exception': e,
                                                })
Example #15
def _cache_ramdisk_kernel(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(pxe_utils.get_root_dir(), node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s",
              node.uuid)
    deploy_utils.fetch_images(ctx, TFTPImageCache(), pxe_info.values())
Example #16
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug(_("Fetching kernel and ramdisk for node %s") % node.uuid)
    for label in pxe_info:
        (uuid, path) = pxe_info[label]
        if not os.path.exists(path):
            _get_image(ctx, path, uuid, CONF.pxe.tftp_master_path, None)
Example #17
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.

    """
    tftp_root = CONF.pxe.tftp_root
    fileutils.ensure_tree(os.path.join(tftp_root, node_uuid))
    fileutils.ensure_tree(os.path.join(tftp_root, PXE_CFG_DIR_NAME))
Example #18
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.

    """
    tftp_root = CONF.pxe.tftp_root
    fileutils.ensure_tree(os.path.join(tftp_root, node_uuid))
    fileutils.ensure_tree(os.path.join(tftp_root, PXE_CFG_DIR_NAME))
Example #19
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.

    """
    root_dir = get_root_dir()
    fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
    fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
Example #20
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.

    """
    root_dir = get_root_dir()
    fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
    fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
Example #21
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug(_("Fetching kernel and ramdisk for node %s") %
              node.uuid)
    image_cache = PXEImageCache(CONF.pxe.tftp_master_path)
    for label in pxe_info:
        (uuid, path) = pxe_info[label]
        image_cache.fetch_image(uuid, path, ctx=ctx)
Example #22
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug(_("Fetching kernel and ramdisk for node %s") %
              node.uuid)
    for label in pxe_info:
        (uuid, path) = pxe_info[label]
        if not os.path.exists(path):
            _get_image(ctx, path, uuid, CONF.pxe.tftp_master_path, None)
Example #23
    def test__cache_instance_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(images_path=temp_dir, group="pxe")
        self.config(instance_master_path=os.path.join(temp_dir, "instance_master_path"), group="pxe")
        fileutils.ensure_tree(CONF.pxe.instance_master_path)

        (uuid, image_path) = pxe._cache_instance_image(None, self.node)
        mock_fetch_image.assert_called_once_with(None, mock.ANY, [(uuid, image_path)])
        self.assertEqual("glance://image_uuid", uuid)
        self.assertEqual(os.path.join(temp_dir, self.node.uuid, "disk"), image_path)
Example #24
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group="pxe")
        self.config(tftp_master_path=os.path.join(temp_dir, "tftp_master_path"), group="pxe")
        image_path = os.path.join(temp_dir, self.node.uuid, "deploy_kernel")
        image_info = {"deploy_kernel": ("deploy_kernel", image_path)}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)

        pxe._cache_tftp_images(None, self.node, image_info)

        mock_fetch_image.assert_called_once_with(None, mock.ANY, [("deploy_kernel", image_path)])
Example #25
    def __init__(self, master_dir, cache_size, cache_ttl):
        """Constructor.

        :param master_dir: cache directory to work on
        :param cache_size: desired maximum cache size in bytes
        :param cache_ttl: cache entity TTL in seconds
        """
        self.master_dir = master_dir
        self._cache_size = cache_size
        self._cache_ttl = cache_ttl
        if master_dir is not None:
            fileutils.ensure_tree(master_dir)
Example #26
    def __init__(self, master_dir, cache_size, cache_ttl, image_service=None):
        """Constructor.

        :param master_dir: cache directory to work on
        :param cache_size: desired maximum cache size in bytes
        :param cache_ttl: cache entity TTL in seconds
        :param image_service: Glance image service to use, None for default
        """
        self.master_dir = master_dir
        self._cache_size = cache_size
        self._cache_ttl = cache_ttl
        self._image_service = image_service
        if master_dir is not None:
            fileutils.ensure_tree(master_dir)
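The caches referenced throughout these examples (TFTPImageCache, AgentTFTPImageCache, InstanceImageCache) are typically thin subclasses of a base cache like the one above, feeding configuration values into this constructor so that ensure_tree creates the master directory up front. A hedged sketch, with the base class name ImageCache and the CONF option names assumed rather than taken from a specific release:

class TFTPImageCache(ImageCache):
    """Hypothetical subclass wiring config values into the base constructor."""

    def __init__(self, image_service=None):
        super(TFTPImageCache, self).__init__(
            CONF.pxe.tftp_master_path,  # master_dir, created via ensure_tree
            cache_size=CONF.pxe.image_cache_size * 1024 * 1024,  # MiB -> bytes
            cache_ttl=CONF.pxe.image_cache_ttl * 60,  # minutes -> seconds
            image_service=image_service)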
Example #27
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_path = os.path.join(temp_dir, self.node.uuid, 'deploy_kernel')
        image_info = {'deploy_kernel': ('deploy_kernel', image_path)}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)

        pxe._cache_ramdisk_kernel(None, self.node, image_info)

        mock_fetch_image.assert_called_once_with(
            None, mock.ANY, [('deploy_kernel', image_path)], True)
Example #28
    def test_cache_instance_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(images_path=temp_dir, group='pxe')
        self.config(instance_master_path=os.path.join(temp_dir,
                                                      'instance_master_path'),
                    group='pxe')
        fileutils.ensure_tree(CONF.pxe.instance_master_path)

        (uuid, image_path) = iscsi_deploy.cache_instance_image(None, self.node)
        mock_fetch_image.assert_called_once_with(None, mock.ANY,
                                                 [(uuid, image_path)], True)
        self.assertEqual('glance://image_uuid', uuid)
        self.assertEqual(os.path.join(temp_dir, self.node.uuid, 'disk'),
                         image_path)
Example #29
def _create_pxe_config(task, pxe_info):
    """Generate pxe configuration file and link mac ports to it for
    tftp booting.
    """
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root, task.node.uuid))
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))

    pxe_config_file_path = _get_pxe_config_file_path(task.node.uuid)
    pxe_config = _build_pxe_config(task.node, pxe_info, task.context)
    utils.write_to_file(pxe_config_file_path, pxe_config)
    for port in driver_utils.get_node_mac_addresses(task):
        mac_path = _get_pxe_mac_path(port)
        utils.unlink_without_raise(mac_path)
        utils.create_link_without_raise(pxe_config_file_path, mac_path)
Example #30
    def __init__(self, master_dir, cache_size, cache_ttl, image_service=None):
        """Constructor.

        :param master_dir: cache directory to work on
        :param cache_size: desired maximum cache size in bytes
        :param cache_ttl: cache entity TTL in seconds
        :param image_service: Glance image service to use, None for default
        """
        self.master_dir = master_dir
        self._cache_size = cache_size
        self._cache_ttl = cache_ttl
        self._image_service = image_service
        if master_dir is not None:
            fileutils.ensure_tree(master_dir)
Example #31
    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
                os.path.join(CONF.tftp_root, instance['uuid']))

        LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                        instance['name'])
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            utils.cache_image(
                    context=context,
                    target=path,
                    image_id=uuid,
                    user_id=instance['user_id'],
                    project_id=instance['project_id'],
                )
Example #32
def _create_pxe_config(task, node, pxe_info):
    """Generate pxe configuration file and link mac ports to it for
    tftp booting.
    """
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
                                       node.uuid))
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
                                       'pxelinux.cfg'))

    pxe_config_file_path = _get_pxe_config_file_path(node.uuid)
    pxe_config = _build_pxe_config(node, pxe_info, task.context)
    utils.write_to_file(pxe_config_file_path, pxe_config)
    for port in driver_utils.get_node_mac_addresses(task, node):
        mac_path = _get_pxe_mac_path(port)
        utils.unlink_without_raise(mac_path)
        utils.create_link_without_raise(pxe_config_file_path, mac_path)
Example #33
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_path = os.path.join(temp_dir, self.node.uuid,
                                  'deploy_kernel')
        image_info = {'deploy_kernel': ['deploy_kernel', image_path]}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)

        pxe._cache_tftp_images(None, self.node, image_info)

        mock_fetch_image.assert_called_once_with('deploy_kernel',
                                                 image_path,
                                                 ctx=None)
Example #34
    def test__cache_instance_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(images_path=temp_dir, group='pxe')
        self.config(instance_master_path=os.path.join(temp_dir,
                                                      'instance_master_path'),
                    group='pxe')
        fileutils.ensure_tree(CONF.pxe.instance_master_path)

        (uuid, image_path) = pxe._cache_instance_image(None,
                                                       self.node)
        mock_fetch_image.assert_called_once_with(uuid, image_path, ctx=None)
        self.assertEqual('glance://image_uuid', uuid)
        self.assertEqual(os.path.join(temp_dir,
                                      self.node.uuid,
                                      'disk'),
                         image_path)
Example #35
 def test__get_image_download_in_progress(self):
     def _create_instance_path(*args):
         open(master_path, 'w').close()
         return True
     temp_dir = tempfile.mkdtemp()
     instance_path = os.path.join(temp_dir, 'instance_path')
     fileutils.ensure_tree(temp_dir)
     master_uuid = 'instance_uuid'
     master_path = os.path.join(temp_dir, master_uuid)
     lock_file = os.path.join(temp_dir, 'instance_uuid.lock')
     self.mox.StubOutWithMock(pxe, '_download_in_progress')
     pxe._download_in_progress(lock_file).\
         WithSideEffects(_create_instance_path).\
         AndReturn(True)
     self.mox.ReplayAll()
     pxe._get_image(None, instance_path, master_uuid, temp_dir)
     self.mox.VerifyAll()
     self.assertTrue(os.path.exists(instance_path))
Example #36
    def test__get_image_download_in_progress(self):
        def _create_instance_path(*args):
            open(master_path, 'w').close()
            return True
        temp_dir = tempfile.mkdtemp()
        instance_path = os.path.join(temp_dir, 'instance_path')
        fileutils.ensure_tree(temp_dir)
        master_path = os.path.join(temp_dir, self.node.uuid)
        lock_file = os.path.join(temp_dir, self.node.uuid + '.lock')

        with mock.patch.object(pxe, '_download_in_progress') \
                as download_in_progress_mock:
            download_in_progress_mock.side_effect = _create_instance_path

            pxe._get_image(None, instance_path, self.node.uuid, temp_dir)

            download_in_progress_mock.assert_called_once_with(lock_file)
            self.assertTrue(os.path.exists(instance_path))
Example #37
    def test__get_image_download_in_progress(self):
        def _create_instance_path(*args):
            open(master_path, 'w').close()
            return True

        temp_dir = tempfile.mkdtemp()
        instance_path = os.path.join(temp_dir, 'instance_path')
        fileutils.ensure_tree(temp_dir)
        master_path = os.path.join(temp_dir, self.node.uuid)
        lock_file = os.path.join(temp_dir, self.node.uuid + '.lock')

        with mock.patch.object(pxe, '_download_in_progress') \
                as download_in_progress_mock:
            download_in_progress_mock.side_effect = _create_instance_path

            pxe._get_image(None, instance_path, self.node.uuid, temp_dir)

            download_in_progress_mock.assert_called_once_with(lock_file)
            self.assertTrue(os.path.exists(instance_path))
Example #38
 def test__cache_tftp_images_master_path(self):
     temp_dir = tempfile.mkdtemp()
     CONF.set_default('tftp_root', temp_dir, group='pxe')
     CONF.set_default('tftp_master_path', os.path.join(temp_dir,
                                                       'tftp_master_path'),
                      group='pxe')
     image_info = {'deploy_kernel': ['deploy_kernel', temp_dir +
                                     '/instance_uuid_123/deploy_kernel']}
     fileutils.ensure_tree(CONF.pxe.tftp_master_path)
     fd, tmp_master_image = tempfile.mkstemp(dir=CONF.pxe.tftp_master_path)
     self.mox.StubOutWithMock(images, 'fetch_to_raw')
     self.mox.StubOutWithMock(tempfile, 'mkstemp')
     tempfile.mkstemp(dir=CONF.pxe.tftp_master_path).\
         AndReturn((fd, tmp_master_image))
     images.fetch_to_raw(None, 'deploy_kernel', tmp_master_image, None).\
         AndReturn(None)
     self.mox.ReplayAll()
     pxe._cache_tftp_images(None, self.node, image_info)
     self.mox.VerifyAll()
Example #39
def _get_image(ctx, path, uuid, master_path=None, image_service=None):
    # TODO(ghe): Revise this logic and document the process. Bug #1199665
    # When master_path is defined, we save the images in this dir using the
    # image uuid as the file name. Deployments that use these images create a
    # hard link to keep track of them. When the link count of a master image
    # is equal to 1, it can be deleted.
    # TODO(ghe): make hard links and link counting behave the same in all
    # filesystems

    #TODO(ghe): timeout and retry for downloads
    def _wait_for_download():
        if not os.path.exists(lock_file):
            raise loopingcall.LoopingCallDone()

    # If the download of the needed image is in progress (lock file present),
    # we wait until the lock disappears and then create the link.

    if master_path is None:
        #NOTE(ghe): We don't share images between instances/hosts
        images.fetch_to_raw(ctx, uuid, path, image_service)

    else:
        master_uuid = os.path.join(master_path,
                                   service_utils.parse_image_ref(uuid)[0])
        lock_file = os.path.join(master_path, master_uuid + '.lock')
        _link_master_image(master_uuid, path)
        if not os.path.exists(path):
            fileutils.ensure_tree(master_path)
            if not _download_in_progress(lock_file):
                with fileutils.remove_path_on_error(lock_file):
                    #TODO(ghe): logging when image cannot be created
                    fd, tmp_path = tempfile.mkstemp(dir=master_path)
                    os.close(fd)
                    images.fetch_to_raw(ctx, uuid, tmp_path, image_service)
                    _create_master_image(tmp_path, master_uuid, path)
                _remove_download_in_progress_lock(lock_file)
            else:
                #TODO(ghe): expiration time
                timer = loopingcall.FixedIntervalLoopingCall(
                    _wait_for_download)
                timer.start(interval=1).wait()
                _link_master_image(master_uuid, path)
Example #40
def cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the AMI and writes it to the appropriate place
    on local disk.

    :param ctx: context
    :param node: an ironic node object
    :returns: a tuple containing the uuid of the image and the path in
        the filesystem where image is cached.
    """
    i_info = parse_instance_info(node)
    fileutils.ensure_tree(_get_image_dir_path(node.uuid))
    image_path = _get_image_file_path(node.uuid)
    uuid = i_info["image_source"]

    LOG.debug("Fetching image %(ami)s for node %(uuid)s", {"ami": uuid, "uuid": node.uuid})

    deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)], CONF.force_raw_images)

    return (uuid, image_path)
Example #41
def _get_image(ctx, path, uuid, master_path=None, image_service=None):
    # TODO(ghe): Revise this logic and document the process. Bug #1199665
    # When master_path is defined, we save the images in this dir using the
    # image uuid as the file name. Deployments that use these images create a
    # hard link to keep track of them. When the link count of a master image
    # is equal to 1, it can be deleted.
    # TODO(ghe): make hard links and link counting behave the same in all
    # filesystems

    #TODO(ghe): timeout and retry for downloads
    def _wait_for_download():
        if not os.path.exists(lock_file):
            raise loopingcall.LoopingCallDone()
    # If the download of the needed image is in progress (lock file present),
    # we wait until the lock disappears and then create the link.

    if master_path is None:
        #NOTE(ghe): We don't share images between instances/hosts
        images.fetch_to_raw(ctx, uuid, path, image_service)

    else:
        master_uuid = os.path.join(master_path,
                                   service_utils.parse_image_ref(uuid)[0])
        lock_file = os.path.join(master_path, master_uuid + '.lock')
        _link_master_image(master_uuid, path)
        if not os.path.exists(path):
            fileutils.ensure_tree(master_path)
            if not _download_in_progress(lock_file):
                with fileutils.remove_path_on_error(lock_file):
                    #TODO(ghe): logging when image cannot be created
                    fd, tmp_path = tempfile.mkstemp(dir=master_path)
                    os.close(fd)
                    images.fetch_to_raw(ctx, uuid, tmp_path, image_service)
                    _create_master_image(tmp_path, master_uuid, path)
                _remove_download_in_progress_lock(lock_file)
            else:
                #TODO(ghe): expiration time
                timer = loopingcall.FixedIntervalLoopingCall(
                    _wait_for_download)
                timer.start(interval=1).wait()
                _link_master_image(master_uuid, path)
Example #42
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})

                # NOTE(mikal): I know this looks odd
                if not hasattr(local.strong_store, 'locks_held'):
                    local.strong_store.locks_held = []
                local.strong_store.locks_held.append(name)

                try:
                    if external and not CONF.disable_process_locking:
                        LOG.debug(_('Attempting to grab file lock "%(lock)s" '
                                    'for method "%(method)s"...'),
                                  {'lock': name, 'method': f.__name__})
                        cleanup_dir = False

                        # We need a copy of lock_path because it is non-local
                        local_lock_path = lock_path
                        if not local_lock_path:
                            local_lock_path = CONF.lock_path

                        if not local_lock_path:
                            cleanup_dir = True
                            local_lock_path = tempfile.mkdtemp()

                        if not os.path.exists(local_lock_path):
                            fileutils.ensure_tree(local_lock_path)

                        # NOTE(mikal): the lock name cannot contain directory
                        # separators
                        safe_name = name.replace(os.sep, '_')
                        lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                        lock_file_path = os.path.join(local_lock_path,
                                                      lock_file_name)

                        try:
                            lock = InterProcessLock(lock_file_path)
                            with lock:
                                LOG.debug(_('Got file lock "%(lock)s" at '
                                            '%(path)s for method '
                                            '"%(method)s"...'),
                                          {'lock': name,
                                           'path': lock_file_path,
                                           'method': f.__name__})
                                retval = f(*args, **kwargs)
                        finally:
                            LOG.debug(_('Released file lock "%(lock)s" at '
                                        '%(path)s for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            # NOTE(vish): This removes the tempdir if we needed
                            #             to create one. This is used to
                            #             cleanup the locks left behind by unit
                            #             tests.
                            if cleanup_dir:
                                shutil.rmtree(local_lock_path)
                    else:
                        retval = f(*args, **kwargs)

                finally:
                    local.strong_store.locks_held.remove(name)

            return retval
Example #43
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), {
                            'lock': name,
                            'path': lock_file_path
                        })
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), {
                        'lock': name,
                        'path': lock_file_path
                    })
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)
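A hedged usage sketch for the context-manager form above; the lock name, lock path, and the protected function are placeholders. The external lock only works because ensure_tree guarantees the lock directory exists before the lock file is opened:

# Serialize iptables updates across worker processes on the same host.
with lock('iptables-update', lock_file_prefix='myservice', external=True,
          lock_path='/var/run/myservice/locks'):
    apply_iptables_rules()  # placeholder for the critical-section work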
Example #44
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(
                    _('Got semaphore "%(lock)s" for method '
                      '"%(method)s"...'), {
                          'lock': name,
                          'method': f.__name__
                      })

                # NOTE(mikal): I know this looks odd
                if not hasattr(local.strong_store, 'locks_held'):
                    local.strong_store.locks_held = []
                local.strong_store.locks_held.append(name)

                try:
                    if external and not CONF.disable_process_locking:
                        LOG.debug(
                            _('Attempting to grab file lock "%(lock)s" '
                              'for method "%(method)s"...'), {
                                  'lock': name,
                                  'method': f.__name__
                              })
                        cleanup_dir = False

                        # We need a copy of lock_path because it is non-local
                        local_lock_path = lock_path
                        if not local_lock_path:
                            local_lock_path = CONF.lock_path

                        if not local_lock_path:
                            cleanup_dir = True
                            local_lock_path = tempfile.mkdtemp()

                        if not os.path.exists(local_lock_path):
                            fileutils.ensure_tree(local_lock_path)

                        # NOTE(mikal): the lock name cannot contain directory
                        # separators
                        safe_name = name.replace(os.sep, '_')
                        lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                        lock_file_path = os.path.join(local_lock_path,
                                                      lock_file_name)

                        try:
                            lock = InterProcessLock(lock_file_path)
                            with lock:
                                LOG.debug(
                                    _('Got file lock "%(lock)s" at '
                                      '%(path)s for method '
                                      '"%(method)s"...'), {
                                          'lock': name,
                                          'path': lock_file_path,
                                          'method': f.__name__
                                      })
                                retval = f(*args, **kwargs)
                        finally:
                            LOG.debug(
                                _('Released file lock "%(lock)s" at '
                                  '%(path)s for method "%(method)s"...'), {
                                      'lock': name,
                                      'path': lock_file_path,
                                      'method': f.__name__
                                  })
                            # NOTE(vish): This removes the tempdir if we needed
                            #             to create one. This is used to
                            #             cleanup the locks left behind by unit
                            #             tests.
                            if cleanup_dir:
                                shutil.rmtree(local_lock_path)
                    else:
                        retval = f(*args, **kwargs)

                finally:
                    local.strong_store.locks_held.remove(name)

            return retval
Example #45
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)