Example n. 1
0
    def test_tar_stream(self):
        """Test the stream mode of fs.tar.
        """
        # Create directories and files to test tarring
        # /self.root/apps/testapp/tardir
        # /self.root/apps/testapp/tardir/file
        # /self.root/apps/testapp/tardir/subdir
        # /self.root/apps/testapp/tardir2

        testapp_dir = os.path.join(self.root, 'testapp')
        tardir = os.path.join(testapp_dir, 'tardir')
        tardir2 = os.path.join(testapp_dir, 'tardir2')
        os.makedirs(testapp_dir)
        os.mkdir(tardir)
        os.mkdir(os.path.join(tardir, 'subdir'))
        os.mkdir(tardir2)
        with open(os.path.join(tardir, 'file'), 'w+'):
            pass
        with open(os.path.join(tardir2, 'file2'), 'w+'):
            pass

        # Use context managers so the temporary file and the tarfile
        # handle are always closed, even when an assertion fails
        # (the originals were never closed).
        with tempfile.TemporaryFile() as fileobj:
            fs.tar(target=fileobj, sources=[tardir, tardir2],
                   compression='gzip')

            # seek fileobj and check content
            fileobj.seek(0)
            with tarfile.open(mode='r:gz', fileobj=fileobj) as tarfileobj:
                names = tarfileobj.getnames()
                # assertIn reports the missing member on failure, unlike
                # a single compound assertTrue.
                self.assertIn('subdir', names)
                self.assertIn('file', names)
                self.assertIn('file2', names)
Example n. 2
0
    def test_tar_basic(self):
        """Tests the fs.tar function.
        """
        # Create directories and files to test tarring
        # /self.root/apps/testapp/tardir
        # /self.root/apps/testapp/tardir/file
        # /self.root/apps/testapp/tardir/subdir
        # /self.root/apps/testapp/tardir2
        # Archive:
        # /self.root/apps/testapp/tar.tar

        testapp_dir = os.path.join(self.root, 'testapp')
        tardir = os.path.join(testapp_dir, 'tardir')
        tardir2 = os.path.join(testapp_dir, 'tardir2')
        archive = os.path.join(testapp_dir, 'foo.tar')
        os.makedirs(testapp_dir)
        os.mkdir(tardir)
        os.mkdir(os.path.join(tardir, 'subdir'))
        os.mkdir(tardir2)
        with open(os.path.join(tardir, 'file'), 'w+'):
            pass

        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(
            fs.tar(archive, tardir).name, archive, 'fs.tar runs successfully')
        self.assertTrue(os.path.isfile(archive), 'fs.tar creates a tarfile')

        self.assertEqual(
            fs.tar(archive, tardir2).name, archive,
            'fs.tar will succeed if tarfile already exists')

        # With gzip compression, fs.tar appends a '.gz' suffix to the
        # requested archive name.
        self.assertEqual(
            fs.tar(archive, tardir, compression='gzip').name,
            "%s.gz" % archive, 'fs.tar with gzip runs successfully')
        self.assertTrue(os.path.isfile('%s.gz' % archive),
                        'fs.tar creates a tar gzip file')
Example n. 3
0
    def test_tar_files(self):
        """Tests the glob/transform of files in fs.tar.
        """
        # Create directories and files to test tarring
        # /self.root/apps/testapp/tardir
        # /self.root/apps/testapp/tardir/file
        # /self.root/apps/testapp/tardir/subdir
        # /self.root/apps/testapp/tardir2
        # Archive:
        # /self.root/apps/testapp/tar.tar

        testapp_dir = os.path.join(self.root, 'testapp')
        tardir = os.path.join(testapp_dir, 'tardir')
        tardir2 = os.path.join(testapp_dir, 'tardir2')
        archive = os.path.join(testapp_dir, 'foo.tar')
        os.makedirs(testapp_dir)
        os.mkdir(tardir)
        os.mkdir(os.path.join(tardir, 'subdir'))
        os.mkdir(tardir2)
        with open(os.path.join(tardir, 'file'), 'w+'):
            pass

        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(
            fs.tar(archive, [tardir, tardir2], compression='gzip').name,
            '%s.gz' % archive, 'fs.tar runs successfully')
        # TarFile.add is mocked by the test harness; verify both source
        # directories were added at the archive root.
        tarfile.TarFile.add.assert_any_call(tardir, '/')
        tarfile.TarFile.add.assert_any_call(tardir2, '/')
Example n. 4
0
def collect(approot, archive_filename):
    """Collect node information in case of blackout.

    :param approot:
        treadmill root, usually /var/tmp/treadmill
    :type approot:
        ``str``
    :param archive_filename:
        archive path file
    :type archive_filename:
        ``str``
    """
    destroot = tempfile.mkdtemp()

    _LOGGER.info('save node info in %s', destroot)

    # Gather the various pieces of node state into the staging dir.
    collect_init_services(approot, destroot)
    collect_running_app(approot, destroot)

    # Several collectors rely on posix-only facilities.
    if os.name == 'posix':
        collect_sysctl(destroot)
        collect_cgroup(approot, destroot)
        collect_localdisk(approot, destroot)
        collect_network(approot, destroot)
        collect_message(destroot)

    try:
        result = fs.tar(
            sources=destroot,
            target=archive_filename,
            compression='gzip',
        ).name
        _LOGGER.info('node info archive file: %s', result)
        shutil.rmtree(destroot)
    except Exception:  # pylint: disable=W0703
        # If the tarball is not generated successfully, keep destroot;
        # its path was logged above so the files can be inspected.
        _LOGGER.exception('Failed to generate node info archive')
        return None

    return result
Example n. 5
0
def _cleanup(tm_env, zkclient, container_dir, app):
    """Cleanup a container that actually ran.

    Kills any processes left in the container root, archives the
    container filesystem and logs, releases localdisk/network/cgroup
    resources, then tars up ``container_dir`` and sends the archive.

    :param tm_env:
        Treadmill environment; provides the cgroup/localdisk/network
        resource services and ``metrics_dir``.
    :param zkclient:
        Zookeeper client used to send the container archive.
    :param container_dir:
        Path of the container's directory.
    :param app:
        App manifest object (name, archive list, network/task info).
    """
    # Too many branches.
    #
    # pylint: disable=R0912

    rootdir = os.path.join(container_dir, 'root')
    # Generate a unique name for the app
    unique_name = appmgr.app_unique_name(app)
    # Create service clients
    cgroup_client = tm_env.svc_cgroup.make_client(
        os.path.join(container_dir, 'cgroups'))
    localdisk_client = tm_env.svc_localdisk.make_client(
        os.path.join(container_dir, 'localdisk'))
    network_client = tm_env.svc_network.make_client(
        os.path.join(container_dir, 'network'))

    # Make sure all processes are killed
    # FIXME(boysson): Should we use `kill_apps_in_cgroup` instead?
    _kill_apps_by_root(rootdir)

    # Setup the archive filename that will hold this container's data
    filetime = utils.datetime_utcnow().strftime('%Y%m%d_%H%M%S%f')
    archive_filename = os.path.join(
        container_dir, '{instance_name}_{hostname}_{timestamp}.tar'.format(
            instance_name=appmgr.appname_task_id(app.name),
            hostname=sysinfo.hostname(),
            timestamp=filetime))

    # Tar up container root filesystem if archive list is in manifest
    try:
        localdisk = localdisk_client.get(unique_name)
        fs.archive_filesystem(localdisk['block_dev'], rootdir,
                              archive_filename, app.archive)
    except services.ResourceServiceError:
        _LOGGER.warning('localdisk never allocated')
    except subprocess.CalledProcessError:
        _LOGGER.exception('Unable to archive root device of %r', unique_name)
    # `except Exception` rather than a bare `except:` so that
    # SystemExit/KeyboardInterrupt are not swallowed here.
    except Exception:  # pylint: disable=W0703
        _LOGGER.exception('Unknown exception while archiving %r', unique_name)

    # Destroy the volume; a missing volume (ENOENT) is not an error.
    try:
        localdisk = localdisk_client.delete(unique_name)
    except (IOError, OSError) as err:
        if err.errno == errno.ENOENT:
            pass
        else:
            raise

    if not app.shared_network:
        _cleanup_network(tm_env, app, network_client)

    # Add metrics to archive
    rrd_file = os.path.join(
        tm_env.metrics_dir, 'apps',
        '{name}-{instanceid}-{uniqueid}.rrd'.format(
            name=app.app,
            instanceid=app.task,
            uniqueid=app.uniqueid,
        ))
    rrdutils.flush_noexc(rrd_file)
    _copy_metrics(rrd_file, container_dir)

    # Cleanup our cgroup resources; again, already-gone is fine.
    try:
        cgroup_client.delete(unique_name)
    except (IOError, OSError) as err:
        if err.errno == errno.ENOENT:
            pass
        else:
            raise

    try:
        _archive_logs(tm_env, container_dir)
    except Exception:  # pylint: disable=W0703
        _LOGGER.exception('Unexpected exception storing local logs.')

    # Append or create the tarball with folders outside of container
    # Compress and send the tarball to HCP
    try:
        archive_filename = fs.tar(sources=container_dir,
                                  target=archive_filename,
                                  compression='gzip').name
        _send_container_archive(zkclient, app, archive_filename)
    # Narrowed from a bare `except:` for consistency with the other
    # best-effort handlers in this module.
    except Exception:  # pylint: disable=W0703
        _LOGGER.exception("Failed to update archive")