Example #1
    def test_contention(self):
        from tests import tpool_test
        prox = tpool.Proxy(tpool_test)

        pile = eventlet.GreenPile(4)
        pile.spawn(lambda: self.assertEqual(prox.one, 1))
        pile.spawn(lambda: self.assertEqual(prox.two, 2))
        pile.spawn(lambda: self.assertEqual(prox.three, 3))
        results = list(pile)
        self.assertEqual(len(results), 3)
Example #2
def tpooled_application(e, s):
    result = tpool.execute(app, e, s)
    # return builtins directly (the original Python 2 code checked
    # basestring; str/bytes is the Python 3 equivalent)
    if isinstance(result, (str, bytes, list, tuple)):
        return result
    else:
        # iterators might execute code when iterating over them,
        # so we wrap them in a Proxy object so every call to
        # next() goes through tpool
        return tpool.Proxy(result)
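
The comment above is the key idea: iterating over a tpool.Proxy dispatches every next() call to a native worker thread, so a result iterator that blocks inside C code cannot stall eventlet's hub. A minimal sketch (not from the project quoted here) demonstrating that behavior:

from eventlet import tpool

def slow_chunks():
    # stands in for a generator that blocks in C code at each step
    for chunk in (b'a', b'b', b'c'):
        yield chunk

proxied = tpool.Proxy(slow_chunks())
assert list(proxied) == [b'a', b'b', b'c']  # each next() ran on a native thread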
Example #3
    def sender_loop(loopnum):
        obj = tpool.Proxy(Dummy())
        count = 100
        for n in six.moves.range(count):
            eventlet.sleep(random.random() / 200.0)
            now = time.time()
            token = loopnum * count + n
            rv = obj.foo(now, token=token)
            self.assertEqual(token, rv)
            eventlet.sleep(random.random() / 200.0)
Example #4
    def _api(self):
        if not self._db_api:
            with self._lock:
                if not self._db_api:
                    db_api = self._load_api(*self._args, **self._kwargs)
                    if self._use_tpool:
                        from eventlet import tpool
                        self._db_api = tpool.Proxy(db_api)
                    else:
                        self._db_api = db_api
        return self._db_api
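
This method is the classic double-checked locking pattern: a cheap unlocked check on the fast path, then a re-check under the lock so that only one thread ever constructs the API object. A standalone sketch of just that pattern (class and attribute names are illustrative, not from the project above):

import threading

class LazyFactory:
    """Build an object once, on first use, safely across threads."""

    def __init__(self, factory):
        self._factory = factory
        self._lock = threading.Lock()
        self._obj = None

    @property
    def obj(self):
        if self._obj is None:          # fast path: no lock taken
            with self._lock:
                if self._obj is None:  # re-check: another thread may have won
                    self._obj = self._factory()
        return self._obj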
Example #5
    def _wrap_libvirt_proxy(self, obj):
        """Return an object wrapped in a tpool.Proxy using autowrap appropriate
        for the libvirt module.
        """

        # libvirt is not pure python, so eventlet monkey patching doesn't work
        # on it. Consequently long-running libvirt calls will not yield to
        # eventlet's event loop, starving all other greenthreads until
        # completion. eventlet's tpool.Proxy handles this situation for us by
        # executing proxied calls in a native thread.
        return tpool.Proxy(obj, autowrap=self._libvirt_proxy_classes)
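
The autowrap argument is what keeps this protection transitive: return values whose type matches get wrapped in a new Proxy, so follow-on calls also run in native threads. A minimal sketch of the mechanism using the standard re module (not libvirt; re.Pattern requires Python 3.8+):

from eventlet import tpool
import re

prox = tpool.Proxy(re, autowrap=(re.Pattern,))
pattern = prox.compile(r'[a-z]+')        # the returned Pattern is wrapped again
assert isinstance(pattern, tpool.Proxy)  # so pattern.match() also uses tpool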
Example #6
    def inspect_capabilities(self):
        """Determines whether guestfs is well configured."""
        try:
            g = tpool.Proxy(guestfs.GuestFS())
            g.add_drive("/dev/null")  # sic
            g.launch()
        except Exception as e:
            raise exception.NovaException(
                _("libguestfs installed but not usable (%s)") % e)

        return self
Example #7
    def test_contention(self):
        from greentest import tpool_test
        prox = tpool.Proxy(tpool_test)

        pool = coros.CoroutinePool(max_size=4)
        waiters = []
        waiters.append(pool.execute(lambda: self.assertEqual(prox.one, 1)))
        waiters.append(pool.execute(lambda: self.assertEqual(prox.two, 2)))
        waiters.append(pool.execute(lambda: self.assertEqual(prox.three, 3)))
        for waiter in waiters:
            waiter.wait()
Example #8
    def test_wrap_uniterable(self):
        prox = tpool.Proxy([])

        def index():
            prox[0]

        def key():
            prox['a']

        self.assertRaises(IndexError, index)
        self.assertRaises(TypeError, key)
Example #9
def tpooled_application(e, s):
    result = tpool.execute(app, e, s)
    # return builtins or Django responses directly (basestring in the
    # original Python 2 code; str/bytes on Python 3)
    if isinstance(result, (HttpResponse, HttpResponseRedirect,
                           HttpResponseServerError, HttpResponseNotFound)):
        return result
    if isinstance(result, (str, bytes, list, tuple)):
        return result
    else:
        # iterators might execute code when iterating over them,
        # so we wrap them in a Proxy object so every call to
        # next() goes through tpool
        return tpool.Proxy(result)
Example #10
    def test_wrap_uniterable(self):
        # here we're treating the exception as just a normal class
        prox = tpool.Proxy(FloatingPointError())

        def index():
            prox[0]

        def key():
            prox['a']

        self.assertRaises(IndexError, index)
        self.assertRaises(TypeError, key)
Example #11
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # grpc/eventlet hack taken from https://github.com/embercsi/ember-csi/blob/5bd4dffe9107bc906d14a45cd819d9a659c19047/ember_csi/ember_csi.py#L1106-L1111
    state = server._state
    state.server = ServerProxy(state.server)
    state.completion_queue = tpool.Proxy(state.completion_queue)
    storage_pb2_grpc.add_StorageServicer_to_server(StorageServicer(log),
                                                   server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()
Example #12
    def setup(self):
        LOG.debug(
            _("Setting up appliance for %(imgfile)s %(imgfmt)s") % {
                'imgfile': self.imgfile,
                'imgfmt': self.imgfmt
            })
        try:
            self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False))
        except TypeError as e:
            if 'close_on_exit' in str(e):
                # NOTE(russellb) Fall back for versions of libguestfs too old
                # to support the close_on_exit parameter, which was added in
                # libguestfs 1.20.
                self.handle = tpool.Proxy(guestfs.GuestFS())
            else:
                raise

        try:
            self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
            self.handle.launch()

            self.setup_os()

            self.handle.aug_init("/", 0)
        except RuntimeError as e:
            # explicitly teardown instead of implicit close()
            # to prevent orphaned VMs in cases when an implicit
            # close() is not enough
            self.teardown()
            raise exception.NovaException(
                _("Error mounting %(imgfile)s with libguestfs (%(e)s)") % {
                    'imgfile': self.imgfile,
                    'e': e
                })
        except Exception:
            # explicitly teardown instead of implicit close()
            # to prevent orphaned VMs in cases when an implicit
            # close() is not enough
            self.teardown()
            raise
Example #13
    def inspect_capabilities(self):
        """Determines whether guestfs is well configured."""
        try:
            g = tpool.Proxy(guestfs.GuestFS())
            g.add_drive("/dev/null")  # sic
            g.launch()
        except Exception as e:
            if not os.access("/boot/vmlinuz-%s" % os.uname()[2], os.R_OK):
                raise exception.LibguestfsCannotReadKernel()
            raise exception.NovaException(
                _("libguestfs installed but not usable (%s)") % e)

        return self
Example #14
def main():
    # CSI_ENDPOINT should accept multiple formats 0.0.0.0:5000, unix:foo.sock
    endpoint = os.environ.get('CSI_ENDPOINT', DEFAULT_ENDPOINT)
    mode = os.environ.get('CSI_MODE') or 'all'
    if mode not in ('controller', 'node', 'all'):
        print('Invalid mode value (%s)' % mode)
        exit(1)
    server_class = globals()[mode.title()]

    storage_nw_ip = os.environ.get('X_CSI_STORAGE_NW_IP')
    persistence_config = _load_json_config('X_CSI_PERSISTENCE_CONFIG',
                                           DEFAULT_PERSISTENCE_CFG)
    cinderlib_config = _load_json_config('X_CSI_CINDERLIB_CONFIG',
                                         DEFAULT_CINDERLIB_CFG)
    backend_config = _load_json_config('X_CSI_BACKEND_CONFIG')
    node_id = _load_json_config('X_CSI_NODE_ID')
    if not backend_config:
        print('Missing required backend configuration')
        exit(2)

    mode_msg = 'in ' + mode + ' only mode ' if mode != 'all' else ''
    print('Starting cinderlib CSI v%s %s(cinderlib: %s, cinder: %s)' %
          (VENDOR_VERSION, mode_msg, cinderlib.__version__, CINDER_VERSION))

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # NOTE(geguileo): GRPC library is not compatible with eventlet, so we have
    #                 to hack our way around it proxying objects to run methods
    #                 on native threads.
    state = server._state
    state.server = ServerProxy(state.server)
    state.completion_queue = tpool.Proxy(state.completion_queue)

    csi_plugin = server_class(server,
                              persistence_config,
                              backend_config,
                              cinderlib_config,
                              storage_nw_ip=storage_nw_ip,
                              node_id=node_id)
    print('Running backend %s v%s' % (type(
        csi_plugin.backend.driver).__name__, csi_plugin.backend.get_version()))

    server.add_insecure_port(endpoint)
    server.start()
    print('Now serving on %s...' % endpoint)

    try:
        while True:
            time.sleep(ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
Example #15
def sender_loop(pfx):
    n = 0
    obj = tpool.Proxy(yadda())
    while n < 10:
        if not (n % 5):
            stdout.write('.')
            stdout.flush()
        api.sleep(0)
        now = time.time()
        prnt("%s: send (%s,%s)" % (pfx, now, n))
        rv = obj.foo(now, n=n)
        prnt("%s: recv %s" % (pfx, rv))
        assert n == rv
        api.sleep(0)
        n += 1
Example #16
def fetch(context: context.RequestContext,
          image_service: glance.GlanceImageService, image_id: str, path: str,
          _user_id, _project_id) -> None:
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            try:
                image_service.download(context, image_id,
                                       tpool.Proxy(image_file))
            except IOError as e:
                if e.errno == errno.ENOSPC:
                    params = {'path': os.path.dirname(path), 'image': image_id}
                    reason = _("No space left in image_conversion_dir "
                               "path (%(path)s) while fetching "
                               "image %(image)s.") % params
                    LOG.exception(reason)
                    raise exception.ImageTooBig(image_id=image_id,
                                                reason=reason)

                reason = ("IOError: %(errno)s %(strerror)s" % {
                    'errno': e.errno,
                    'strerror': e.strerror
                })
                LOG.error(reason)
                raise exception.ImageDownloadFailed(image_href=image_id,
                                                    reason=reason)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a minimum of 1, mostly for unit tests, but also so we
    # don't barf in the unlikely event the duration is 0 (cirros image?)
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {
        "dest": image_file.name,
        "sz": fsz_mb,
        "duration": duration
    })
    msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
Example #17
    def _api(self):
        if not self._db_api:
            with self._lock:
                if not self._db_api:
                    db_api = api.DBAPI.from_config(
                        conf=self._conf, backend_mapping=self._backend_mapping)
                    if self._conf.database.use_tpool:
                        try:
                            from eventlet import tpool
                        except ImportError:
                            LOG.exception("'eventlet' is required for "
                                          "TpoolDbapiWrapper.")
                            raise
                        self._db_api = tpool.Proxy(db_api)
                    else:
                        self._db_api = db_api
        return self._db_api
Example #18
    def __init__(self, driver, name, pool=None, snapshot=None,
                 read_only=False):
        client, ioctx = driver._connect_to_rados(pool)
        try:
            self.volume = tpool.Proxy(rbd.Image(ioctx, name,
                                                snapshot=snapshot,
                                                read_only=read_only))
        except rbd.ImageNotFound:
            with excutils.save_and_reraise_exception():
                LOG.debug("rbd image %s does not exist", name)
                driver._disconnect_from_rados(client, ioctx)
        except rbd.Error:
            with excutils.save_and_reraise_exception():
                LOG.exception("error opening rbd image %s", name)
                driver._disconnect_from_rados(client, ioctx)

        self.driver = driver
        self.client = client
        self.ioctx = ioctx
Example #19
    def setup(self):
        LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
                  {'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
        self.handle = tpool.Proxy(guestfs.GuestFS())

        try:
            self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
            if self.handle.get_attach_method() == 'libvirt':
                libvirt_url = 'libvirt:' + libvirt_driver.LibvirtDriver.uri()
                self.handle.set_attach_method(libvirt_url)
            self.handle.launch()

            self.setup_os()

            self.handle.aug_init("/", 0)
        except RuntimeError as e:
            # dereference object and implicitly close()
            self.handle = None
            raise exception.NovaException(
                _("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
                {'imgfile': self.imgfile, 'e': e})
Example #20
    def inspect_capabilities(self):
        """Determines whether guestfs is well configured."""
        try:
            # If guestfs debug is enabled, we can't launch in a thread because
            # the debug logging callback can make eventlet try to switch
            # threads and then the launch hangs, causing eternal sadness.
            if CONF.guestfs.debug:
                LOG.debug('Inspecting guestfs capabilities non-threaded.')
                g = guestfs.GuestFS()
            else:
                g = tpool.Proxy(guestfs.GuestFS())
            g.add_drive("/dev/null")  # sic
            g.launch()
        except Exception as e:
            kernel_file = "/boot/vmlinuz-%s" % os.uname().release
            if not os.access(kernel_file, os.R_OK):
                raise exception.LibguestfsCannotReadKernel(
                    _("Please change permissions on %s to 0x644")
                    % kernel_file)
            raise exception.NovaException(
                _("libguestfs installed but not usable (%s)") % e)

        return self
Example #21
    def setup(self, mount=True):
        LOG.debug("Setting up appliance for %(image)s",
                  {'image': self.image})
        try:
            self.handle = tpool.Proxy(
                guestfs.GuestFS(python_return_dict=False,
                                close_on_exit=False))
        except TypeError as e:
            if 'close_on_exit' in str(e) or 'python_return_dict' in str(e):
                # NOTE(russellb) In case we're not using a version of
                # libguestfs new enough to support parameters close_on_exit
                # and python_return_dict which were added in libguestfs 1.20.
                self.handle = tpool.Proxy(guestfs.GuestFS())
            else:
                raise

        if CONF.guestfs.debug:
            self.configure_debug()

        try:
            if forceTCG:
                # TODO(mriedem): Should we be using set_backend_setting
                # instead to just set the single force_tcg setting? Because
                # according to the guestfs docs, set_backend_settings will
                # overwrite all backend settings. The question is, what would
                # the value be? True? "set_backend_setting" is available
                # starting in 1.27.2 which should be new enough at this point
                # on modern distributions.
                ret = self.handle.set_backend_settings(["force_tcg"])
                if ret != 0:
                    LOG.warning('Failed to force guestfs TCG mode. '
                                'guestfs_set_backend_settings returned: %s',
                                ret)
        except AttributeError as ex:
            # set_backend_settings method doesn't exist in older
            # libguestfs versions, so nothing we can do but ignore
            LOG.warning("Unable to force TCG mode, "
                        "libguestfs too old? %s", ex)

        try:
            if isinstance(self.image, imgmodel.LocalImage):
                self.handle.add_drive_opts(self.image.path,
                                           format=self.image.format)
            elif isinstance(self.image, imgmodel.RBDImage):
                self.handle.add_drive_opts("%s/%s" % (self.image.pool,
                                                      self.image.name),
                                           protocol="rbd",
                                           format=imgmodel.FORMAT_RAW,
                                           server=self.image.servers,
                                           username=self.image.user,
                                           secret=self.image.password)
            else:
                raise exception.UnsupportedImageModel(
                    self.image.__class__.__name__)

            self.handle.launch()

            if mount:
                self.setup_os()
                self.handle.aug_init("/", 0)
                self.mount = True
        except RuntimeError as e:
            # explicitly teardown instead of implicit close()
            # to prevent orphaned VMs in cases when an implicit
            # close() is not enough
            self.teardown()
            raise exception.NovaException(
                _("Error mounting %(image)s with libguestfs (%(e)s)") %
                {'image': self.image, 'e': e})
        except Exception:
            # explicitly teardown instead of implicit close()
            # to prevent orphaned VMs in cases when an implicit
            # close() is not enough
            self.teardown()
            raise
Example #22
    def RBDProxy(self):
        return tpool.Proxy(self.rbd.RBD())
Example #23
    def __init__(self):
        self._rbd = tpool.Proxy(rbd.RBD())
Example #24
    def _run_restore(self, context, backup, volume):
        orig_key_id = volume.encryption_key_id
        backup_service = self.get_backup_driver(context)

        properties = utils.brick_get_connector_properties()
        secure_enabled = (
            self.volume_rpcapi.secure_file_operations_enabled(context,
                                                              volume))
        attach_info = self._attach_device(context, volume, properties)

        # NOTE(geguileo): Not all I/O disk operations properly do greenthread
        # context switching and may end up blocking the greenthread, so we go
        # with native threads proxy-wrapping the device file object.
        try:
            device_path = attach_info['device']['path']
            open_mode = 'rb+' if os.name == 'nt' else 'wb'
            if (isinstance(device_path, six.string_types) and
                    not os.path.isdir(device_path)):
                if secure_enabled:
                    with open(device_path, open_mode) as device_file:
                        backup_service.restore(backup, volume.id,
                                               tpool.Proxy(device_file))
                else:
                    with utils.temporary_chown(device_path):
                        with open(device_path, open_mode) as device_file:
                            backup_service.restore(backup, volume.id,
                                                   tpool.Proxy(device_file))
            # device_path is already file-like so no need to open it
            else:
                backup_service.restore(backup, volume.id,
                                       tpool.Proxy(device_path))
        finally:
            self._detach_device(context, attach_info, volume, properties,
                                force=True)

        # Regardless of whether the restore was successful, do some
        # housekeeping to ensure the restored volume's encryption key ID is
        # unique, and any previous key ID is deleted. Start by fetching fresh
        # info on the restored volume.
        restored_volume = objects.Volume.get_by_id(context, volume.id)
        restored_key_id = restored_volume.encryption_key_id
        if restored_key_id != orig_key_id:
            LOG.info('Updating encryption key ID for volume %(volume_id)s '
                     'from backup %(backup_id)s.',
                     {'volume_id': volume.id, 'backup_id': backup.id})

            key_mgr = key_manager.API(CONF)
            if orig_key_id is not None:
                LOG.debug('Deleting original volume encryption key ID.')
                volume_utils.delete_encryption_key(context,
                                                   key_mgr,
                                                   orig_key_id)

            if backup.encryption_key_id is None:
                # This backup predates the current code that stores the cloned
                # key ID in the backup database. Fortunately, the key ID
                # restored from the backup data _is_ a clone of the original
                # volume's key ID, so grab it.
                LOG.debug('Gleaning backup encryption key ID from metadata.')
                backup.encryption_key_id = restored_key_id
                backup.save()

            # Clone the key ID again to ensure every restored volume has
            # a unique key ID. The volume's key ID should not be the same
            # as the backup.encryption_key_id (the copy made when the backup
            # was first created).
            new_key_id = volume_utils.clone_encryption_key(
                context,
                key_mgr,
                backup.encryption_key_id)
            restored_volume.encryption_key_id = new_key_id
            restored_volume.save()
        else:
            LOG.debug('Encryption key ID for volume %(volume_id)s already '
                      'matches encryption key ID in backup %(backup_id)s.',
                      {'volume_id': volume.id, 'backup_id': backup.id})
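
Several of these examples (here and in fetch() in Example #16) pass tpool.Proxy(file_object) into drivers so that bulk read()/write() calls run on native threads instead of blocking the hub, as the NOTE(geguileo) comment explains. A minimal sketch of that idea, with an illustrative path:

from eventlet import tpool

with open('/tmp/scratch.img', 'wb') as f:  # path is illustrative
    proxied = tpool.Proxy(f)
    proxied.write(b'\0' * 4096)            # write() runs in a worker thread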
Example #25
def main():
    global DEFAULT_MOUNT_FS
    # CSI_ENDPOINT should accept multiple formats 0.0.0.0:5000, unix:foo.sock
    endpoint = os.environ.get('CSI_ENDPOINT', DEFAULT_ENDPOINT)
    mode = os.environ.get('CSI_MODE') or 'all'
    DEFAULT_MOUNT_FS = os.environ.get('X_CSI_DEFAULT_MOUNT_FS',
                                      DEFAULT_MOUNT_FS)
    if mode not in ('controller', 'node', 'all'):
        sys.stderr.write('Invalid mode value (%s)\n' % mode)
        exit(1)
    server_class = globals()[mode.title()]

    storage_nw_ip = os.environ.get('X_CSI_STORAGE_NW_IP')
    persistence_config = _load_json_config('X_CSI_PERSISTENCE_CONFIG',
                                           DEFAULT_PERSISTENCE_CFG)
    cinderlib_config = _load_json_config('X_CSI_EMBER_CONFIG',
                                         DEFAULT_EMBER_CFG)
    backend_config = _load_json_config('X_CSI_BACKEND_CONFIG')
    node_id = os.environ.get('X_CSI_NODE_ID')
    if mode != 'node' and not backend_config:
        print('Missing required backend configuration')
        exit(2)
    copy_system_files()

    mode_msg = 'in ' + mode + ' only mode ' if mode != 'all' else ''
    print('Starting Ember CSI v%s %s(cinderlib: v%s, cinder: v%s, '
          'CSI spec: v%s)' % (VENDOR_VERSION, mode_msg, cinderlib.__version__,
                              CINDER_VERSION, CSI_SPEC))

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # NOTE(geguileo): GRPC library is not compatible with eventlet, so we have
    #                 to hack our way around it proxying objects to run methods
    #                 on native threads.
    state = server._state
    state.server = ServerProxy(state.server)
    state.completion_queue = tpool.Proxy(state.completion_queue)

    csi_plugin = server_class(server=server,
                              persistence_config=persistence_config,
                              backend_config=backend_config,
                              cinderlib_config=cinderlib_config,
                              storage_nw_ip=storage_nw_ip,
                              node_id=node_id)
    msg = 'Running as %s' % mode
    if mode != 'node':
        driver_name = type(csi_plugin.backend.driver).__name__
        msg += ' with backend %s v%s' % (driver_name,
                                         csi_plugin.backend.get_version())
    print(msg)

    print('Debugging feature is %s.' %
          ('ENABLED with %s and OFF. Toggle it with SIGUSR1' %
           DEBUG_LIBRARY.__name__ if DEBUG_LIBRARY else 'DISABLED'))

    if not server.add_insecure_port(endpoint):
        sys.stderr.write('\nERROR: Could not bind to %s\n' % endpoint)
        exit(1)

    server.start()
    print('Now serving on %s...' % endpoint)

    try:
        while True:
            time.sleep(ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
Example #26
    def test_wrap_module_class(self):
        prox = tpool.Proxy(re)
        self.assertEqual(tpool.Proxy, type(prox))
        exp = prox.compile('(.)(.)(.)')
        self.assertEqual(exp.groups, 3)
        assert repr(prox.compile)
Example #27
    def test_wrap_string(self):
        my_object = "whatever"
        prox = tpool.Proxy(my_object)
        self.assertEqual(str(my_object), str(prox))
        self.assertEqual(len(my_object), len(prox))
        self.assertEqual(my_object.join(['a', 'b']), prox.join(['a', 'b']))
Example #28
    def test_wrap_tuple(self):
        my_tuple = (1, 2)
        prox = tpool.Proxy(my_tuple)
        self.assertEqual(prox[0], 1)
        self.assertEqual(prox[1], 2)
        self.assertEqual(len(my_tuple), 2)
Example #29
    def test_autowrap_both(self):
        from tests import tpool_test
        x = tpool.Proxy(tpool_test, autowrap=(int,), autowrap_names=('one',))
        assert isinstance(x.one, tpool.Proxy)
        # violating the abstraction to check that we didn't double-wrap
        assert not isinstance(x._obj, tpool.Proxy)
Example #30
    def test_variable_and_keyword_arguments_with_function_calls(self):
        import optparse
        parser = tpool.Proxy(optparse.OptionParser())
        parser.add_option('-n', action='store', type='string', dest='n')
        opts, args = parser.parse_args(["-nfoo"])
        self.assertEqual(opts.n, 'foo')
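
Taken together, the examples show the two tpool entry points: tpool.execute() for a single blocking call, and tpool.Proxy() when every method of an object should be dispatched to a native thread. A closing sketch contrasting them (blocking_add is illustrative):

import time
from eventlet import tpool

def blocking_add(a, b):
    time.sleep(0.1)  # stands in for a C call that never yields to the hub
    return a + b

assert tpool.execute(blocking_add, 1, 2) == 3  # one-off call in a worker thread

proxied_time = tpool.Proxy(time)  # every method call is dispatched to tpool
proxied_time.sleep(0.1)           # sleeps on a native thread; hub stays live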