Example 1
 def __init__(self, config_map=None, environ=None, default_config=None):
     if config_map is None:
         config_map = DEFAULT_CONFIG_MAP
     if environ is None:
         environ = os.environ
     if default_config is None:
         default_config = DEFAULT_CONFIG_MAP
     self.environ = environ
     config = ConfigParser()
     if isinstance(config_map, str):
         self.path = path = config_map
         with Pfx(path):
             read_ok = False
             if pathexists(path):
                 try:
                     config.read(path)
                 except OSError as e:
                     error("read error: %s", e)
                 else:
                     read_ok = True
             else:
                 warning("missing config file")
         if not read_ok:
             warning("falling back to default configuration")
             config.read_dict(default_config)
     else:
         self.path = None
         config.read_dict(config_map)
     self.map = config
     self._clause_stores = {}  # clause_name => Result->Store
     self._lock = Lock()
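This constructor accepts either a mapping or a filesystem path as `config_map`, falling back to the default configuration when the file is missing or unreadable. A minimal standalone sketch of the same fallback pattern, using only the standard library (`DEFAULT_CONFIG_MAP` here is a hypothetical stand-in for the real defaults):

    import os.path
    from configparser import ConfigParser

    DEFAULT_CONFIG_MAP = {'GLOBAL': {'cache': '~/.cache'}}  # hypothetical defaults

    def load_config(config_map=DEFAULT_CONFIG_MAP):
        config = ConfigParser()
        if isinstance(config_map, str):
            # a path: try the file, fall back to the defaults on failure
            if not (os.path.exists(config_map) and config.read(config_map)):
                config.read_dict(DEFAULT_CONFIG_MAP)
        else:
            # already a mapping
            config.read_dict(config_map)
        return config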
Example 2
 def fspath(self, new_fspath):
   self.pathview.fspath = new_fspath
   # TODO: make the tree display the associated element
   try:
     pathinfo = self.tree[new_fspath]
   except KeyError:
     warning("path not in tree")
Example 3
    def __init__(self, input=None, output=None, env=None):
        CGI.__init__(self, input=input, output=output, env=env)
        self.result = {}
        self.content_type('application/x-javascript')

        path_info = self.env['PATH_INFO']
        seqLen = 0
        while seqLen < len(path_info) and path_info[seqLen] == '/':
            seqLen += 1

        path_info = path_info[seqLen:]
        # restart the scan at the start of the stripped string
        seqLen = 0
        while seqLen < len(path_info) and path_info[seqLen] != '/':
            seqLen += 1
        if seqLen == 0:
            raise ValueError("no sequence token at the start of PATH_INFO: " +
                             repr(path_info))

        self.__seq = path_info[:seqLen]
        while seqLen < len(path_info) and path_info[seqLen] == '/':
            seqLen += 1
        path_info = path_info[seqLen:]

        if len(path_info) == 0:
            self.arg = None
        else:
            (self.arg, etc) = cs.hier.tok(path_info)
            etc = etc.lstrip()
            if len(etc): warning("unparsed arg: " + etc)
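To make the `PATH_INFO` scanning concrete, here is a small standalone helper (hypothetical, not part of the class above) performing the same split into a leading sequence token and the remaining argument text:

    def split_path_info(path_info):
        ''' Split '/seq/arg...' into ('seq', 'arg...'). '''
        path_info = path_info.lstrip('/')
        seq, _, rest = path_info.partition('/')
        if not seq:
            raise ValueError("no sequence token at the start of PATH_INFO")
        return seq, rest.lstrip('/')

    assert split_path_info('/123/foo/bar') == ('123', 'foo/bar')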
Example 4
 def __str__(self):
     warning("ChunkyString.__str__: %d bytes in %d strings", self.__len,
             len(self.__strs))
     t0 = time()
     s = ''.join(self.__strs)
     assert len(s) == self.__len
     return s
Example 5
 def _autofile(path, *, tagger, no_link, do_remove):
     ''' Wrapper for `Tagger.file_by_tags` which reports actions.
     '''
     if not no_link and not existspath(path):
         warning("no such path, skipped")
         linked_to = []
     else:
         fstags = tagger.fstags
         # apply inferred tags if not already present
         tagged = fstags[path]
         all_tags = tagged.merged_tags()
         for tag_name, tag_value in tagger.infer(path).items():
             if tag_name not in all_tags:
                 tagged[tag_name] = tag_value
         linked_to = tagger.file_by_tags(path,
                                         no_link=no_link,
                                         do_remove=do_remove)
         if linked_to:
             for linked in linked_to:
                 printpath = linked
                 if basename(path) == basename(printpath):
                     printpath = dirname(printpath) + '/'
                 pfxprint('=>', shortpath(printpath))
         else:
             pfxprint('not filed')
     return linked_to
Example 6
   def attrfunc(*a, **kw):
       ''' Stub function to report on attributes which get called.
           Intended to report on unimplemented methods.
       '''
       warning("CALL UNKNOWN ATTR: %s(a=%r,kw=%r)", attr, a, kw)
       raise RuntimeError("CALL UNKNOWN ATTR %s(*%r,**%r)" %
                          (attr, a, kw))
Example 7
    def fsyncdir(self, fh, datasync):
        ''' Flush the buffers for open directory `fh`.

        http://www.rath.org/llfuse-docs/operations.html#llfuse.Operations.fsyncdir
        '''
        # TODO: commit dir? implies flushing the whole tree
        warning("fsyncdir does nothing at present")
Example 8
 def close(self):
   ''' Close the `Channel`, preventing further `put()`s.
   '''
   if self.closed:
     warning("%s: .close() of closed Channel" % (self,))
   else:
     self.closed = True
Example 9
    def inference_rules(self, prefix, rule_spec):
        ''' Generator yielding inference functions from `rule_spec`.

        Each yielded function accepts a path
        and returns an iterable of `Tag`s or other values.
        Because some functions are implemented as lambdas
        it is reasonable to return an iterable containing `None` values
        to be discarded by the consumer of the rule.
        '''
        with Pfx(r(rule_spec)):
            if isinstance(rule_spec, str):
                if rule_spec.startswith('/'):
                    rule = RegexpTagRule(rule_spec[1:])
                    yield lambda path, rule=rule: rule.infer_tags(
                        basename(path)).as_tags()
                else:
                    tag_name, _ = get_dotted_identifier(rule_spec)
                    if tag_name and tag_name == rule_spec:
                        # return the value of tag_name or None, as a 1-tuple
                        yield lambda path: (self.fstags[path].get(tag_name), )
                    else:
                        warning("skipping unrecognised pattern")
            elif isinstance(rule_spec, (list, tuple)):
                for subspec in rule_spec:
                    yield from self.inference_rules(prefix, subspec)
            else:
                warning("skipping unhandled type")
Example 10
 def rename(self, new_name):
     if self.name == new_name:
         warning("rename tag %r: no change", self.name)
         return
     X("Tag[%d:%r].rename(new_name=%r)", self.id, self.name, new_name)
     T = self._table
     try:
         otag = T[new_name]
     except KeyError:
         # name not in use, rename current tag
         T.update('name', new_name, 'id = %d' % (self.id, ))
     else:
         X("other tag for new_name = %d:%r", otag.id, otag.name)
         if otag.id == self.id:
             # case insensitive or the like: update the name in place
             T.update('name', new_name, 'id = %d' % (self.id, ))
         else:
             # update related objects (books?)
             # to point at the other tag
             for B in self.books:
                 X("  update Book[%d:%r]{%s} ...", B.id, B.name,
                   ','.join(T.name for T in B.tags))
                 B.add_tag(new_name)
                 B.remove_tag(self.name)
             # delete our tag, become the other tag
             T.delete('id = ?', self.id)
             self.ns.id = otag.id
     self.name = new_name
Example 11
  def update_progress(self, ydl_progress):
    ''' Update progress hook called by youtube_dl.

        Updates the relevant status lines.
    '''
    filename = self.filename = ydl_progress['filename']
    progress = self.progresses.get(filename)
    if progress is None:
      total = ydl_progress.get('total_bytes'
                               ) or ydl_progress.get('total_bytes_estimate')
      if total is None:
        message = 'no total_bytes or total_bytes_estimate in ydl_progress'
        if message not in self._warned:
          warning("%s: %r", message, ydl_progress)
          self._warned.add(message)
        return
      progress = self.progresses[filename] = Progress(
          name=self.url + ':' + filename, total=total
      )
      if self.over_progress is not None:
        self.over_progress.add(progress)
    try:
      progress.position = ydl_progress['downloaded_bytes']
    except KeyError:
      pass
    _, fext = splitext(filename)
    status = progress.status(fext, self.proxy.width)
    self.proxy(status)
    self.tick()
Example 12
def choose(basepath, preferred_indexclass=None):
  ''' Choose an indexclass from a `basepath` with optional preferred indexclass.
      This prefers an existing index if present.
  '''
  global _CLASSES  # pylint: disable=global-statement
  global _BY_NAME  # pylint: disable=global-statement
  if preferred_indexclass is not None:
    if isinstance(preferred_indexclass, str):
      indexname = preferred_indexclass
      try:
        preferred_indexclass = _BY_NAME[indexname]
      except KeyError:
        warning("ignoring unknown indexclass name %r", indexname)
        preferred_indexclass = None
  indexclasses = list(_CLASSES)
  if preferred_indexclass:
    indexclasses.insert(0, (preferred_indexclass.NAME, preferred_indexclass))
  # look for a preexisting index
  for indexname, indexclass in indexclasses:
    if not indexclass.is_supported():
      continue
    indexpath = indexclass.pathof(basepath)
    if pathexists(indexpath):
      return indexclass
  # otherwise choose the first supported index
  for indexname, indexclass in indexclasses:
    if not indexclass.is_supported():
      continue
    return indexclass
  raise ValueError(
      "no supported index classes available: tried %r" % (indexclasses,)
  )
Example 13
def update_domain(MDB, old_domain, new_domain, argv):
  ''' Update the `@domain` of addresses in `MDB`.
  '''
  if not argv:
    addrs = [A.name for A in MDB.ADDRESSes if A.name.endswith(old_domain)]
  else:
    addrs = []
    for pattern in argv:
      if pattern.startswith('/'):
        if pattern.endswith('/'):
          rexp = pattern[1:-1]
        else:
          rexp = pattern[1:]
        addrs.extend([A.name for A in MDB.matchAddresses(rexp)])
      else:
        addrs.append(pattern)
  if not addrs:
    warning("no matching addresses")
  else:
    for addr in addrs:
      with Pfx(addr):
        if not addr.endswith(old_domain):
          warning("does not end in old domain (%s)", old_domain)
        else:
          MDB.update_domain(addr, old_domain, new_domain)
Example 14
  def getopt_error_handler(cmd, options, e, usage, subcmd=None):  # pylint: disable=unused-argument
    ''' The `getopt_error_handler` method
        is used to control the handling of `GetoptError`s raised
        during the command line parse
        or during the `main` or `cmd_`*subcmd* calls.

        This default handler issues a warning containing the exception text,
        prints the usage message to standard error,
        and returns `True` to indicate that the error has been handled.

        The handler is called with these parameters:
        * `cmd`: the command name
        * `options`: the `options` object
        * `e`: the `GetoptError` exception
        * `usage`: the command usage or `None` if this was not provided
        * `subcmd`: optional subcommand name;
          if not `None`, is the name of the subcommand which caused the error

        It returns a true value if the exception is considered handled,
        in which case the main `run` method returns 2.
        It returns a false value if the exception is considered unhandled,
        in which case the main `run` method reraises the `GetoptError`.

        To let the exceptions out unhandled
        this can be overridden with a method which just returns `False`.

        Otherwise,
        the handler may perform any suitable action
        and return `True` to contain the exception
        or `False` to cause the exception to be reraised.
    '''
    warning("%s", e)
    if usage:
      print(usage.rstrip(), file=sys.stderr)
    return True
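As the docstring says, a subclass which wants `GetoptError`s to propagate can override this handler to return `False`. A minimal sketch, assuming a `cs.cmdutils`-style `BaseCommand` subclass:

    from cs.cmdutils import BaseCommand

    class StrictCommand(BaseCommand):

        @staticmethod
        def getopt_error_handler(cmd, options, e, usage, subcmd=None):
            # decline to handle: the main run() will reraise the GetoptError
            return False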
Example 15
def report_offsets(bfr, run_parser):
  ''' Dispatch a parser in a separate Thread, return an IterableQueue yielding offsets.

      Parameters:
      * `bfr`: a `CornuCopyBuffer` providing data to parse
      * `run_parser`: a callable which runs the parser; it should accept a
        `CornuCopyBuffer` as its sole argument.

      This function allocates an `IterableQueue` to receive the parser's
      offset reports and sets the buffer's `copy_offsets` hook to copy
      each reported offset to the queue.
      It is the task of the parser to call `bfr.report_offset` as
      necessary to indicate suitable offsets.
  '''
  with Pfx("report_offsets(bfr,run_parser=%s)", run_parser):
    offsetQ = IterableQueue()
    if bfr.copy_offsets is not None:
      warning("bfr %s already has copy_offsets, replacing", bfr)
    bfr.copy_offsets = offsetQ.put

    def thread_body():
      with Pfx("parser-thread"):
        try:
          run_parser(bfr)
        except Exception as e:
          exception("exception: %s", e)
          raise
        finally:
          offsetQ.close()

    T = PfxThread(target=thread_body)
    T.start()
    return offsetQ
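A usage sketch: the parser side calls `bfr.report_offset` as it recognises boundaries, and the caller drains the returned `IterableQueue`; `my_parser` below is a hypothetical parser which reports the buffer offset after each chunk:

    def my_parser(bfr):
        # hypothetical parser: report the offset after each chunk consumed
        for chunk in bfr:
            bfr.report_offset(bfr.offset)

    offsetQ = report_offsets(bfr, my_parser)
    for offset in offsetQ:
        print("offset", offset)  # the queue ends when the parser thread finishes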
Example 16
 def parse(cls, bfr):
     ''' Parse a 128 byte ID3v1 or ID3v1.1 record.
     '''
     self = cls()
     # pylint: disable=attribute-defined-outside-init
     offset0 = bfr.offset
     hdr = bfr.take(3)
     if hdr != b'TAG':
         raise ValueError("expected leading b'TAG'")
     self.title = parse_padded_text(bfr, 30)
     self.artist = parse_padded_text(bfr, 30)
     self.album = parse_padded_text(bfr, 30)
     year_s = parse_padded_text(bfr, 4)
     if year_s == '':
         self.year = None
     else:
         try:
             self.year = int(year_s)
         except ValueError as e:
             warning("invalid year %r: %s", year_s, e)
             self.year = None
     comment_bs = bfr.take(30)
     if comment_bs[-2] == 0:
         self.track = comment_bs[-1]
         comment_bs = comment_bs[:-2]
     else:
         self.track = 0
     self.comment = comment_bs.decode('ascii').rstrip('\0').rstrip()
     self.genre_id = bfr.byte0()
     assert bfr.offset - offset0 == 128
     return self
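The closing assert depends on the fixed ID3v1 layout; the field sizes consumed above account for the whole 128 byte record:

    # field sizes as consumed by parse(), totalling 128 bytes
    ID3V1_LAYOUT = (
        ('hdr', 3),       # the literal b'TAG'
        ('title', 30),
        ('artist', 30),
        ('album', 30),
        ('year', 4),
        ('comment', 30),  # ID3v1.1: last 2 bytes may be (0, track)
        ('genre_id', 1),
    )
    assert sum(size for _, size in ID3V1_LAYOUT) == 128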
Example 17
def append_data(wfd, bs):
  ''' Append the bytes `bs` to the writable file descriptor `wfd`.

      An OS-level fcntl.flock() call is made to exclude other cooperating writers.
  '''
  try:
    flock(wfd, LOCK_EX)
  except OSError:
    is_locked = False
  else:
    is_locked = True
  offset = os.lseek(wfd, 0, SEEK_END)
  written = os.write(wfd, bs)
  # notice short writes, which should never happen with a regular file...
  while written < len(bs):
    warning(
        "fd %d: tried to write %d bytes but only wrote %d, retrying", wfd,
        len(bs), written
    )
    if written == 0:
      raise ValueError("zero length write, aborting write attempt")
    bs = bs[written:]
    written = os.write(wfd, bs)
  if is_locked:
    flock(wfd, LOCK_UN)
  return offset
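A usage sketch with a hypothetical shared log file, opened at the OS level so that a file descriptor can be handed to `append_data`:

    import os

    wfd = os.open('shared.log', os.O_WRONLY | os.O_CREAT, 0o644)
    try:
        offset = append_data(wfd, b'one record\n')  # returns the record's offset
    finally:
        os.close(wfd)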
Example 18
    def _list(argv, options, default_argv, default_format):
        ''' Inner workings of "ls" and "queue".

        Usage: {ls|queue} [-l] [-o format] [recordings...]
          List available downloads.
          -l        Long listing: list tags below each entry.
          -o format Format string for each entry.
        '''
        sqltags = options.sqltags
        long_mode = False
        listing_format = default_format
        opts, argv = getopt(argv, 'lo:', '')
        for opt, val in opts:
            if opt == '-l':
                long_mode = True
            elif opt == '-o':
                listing_format = val
            else:
                raise RuntimeError("unhandled option: %r" % (opt, ))
        if not argv:
            argv = list(default_argv)
        xit = 0
        for arg in argv:
            with Pfx(arg):
                recording_ids = sqltags.recording_ids_from_str(arg)
                if not recording_ids:
                    warning("no recording ids")
                    xit = 1
                    continue
                for dl_id in recording_ids:
                    recording = sqltags[dl_id]
                    with Pfx(recording.name):
                        recording.ls(ls_format=listing_format,
                                     long_mode=long_mode)
        return xit
Example 19
 def _vt_i2E(self, inode):
     try:
         E = self._vtfs.i2E(inode)
     except ValueError as e:
         warning("access(inode=%d): %s", inode, e)
         raise FuseOSError(errno.EINVAL)
     return E
Example 20
def fromtoken(token, nodedb, doCreate=False):
    ''' Parse a complete token string into a value.
        This is the fallback method used by NodeDB.fromtoken() if none of the
        Node or NodeDB specific formats match, or in non-Node contexts.
        Return the parsed value or raise ValueError.
    '''
    # "foo"
    m = re_JSON_STRING.match(token)
    if m and m.group() == token:
        return json.loads(m.group())

    # int
    m = re_INT.match(token)
    if m and m.group() == token:
        return int(m.group())

    # http://foo/bah etc
    m = re_BAREURL.match(token)
    if m and m.group() == token:
        return token

    try:
        t, name = nodekey(token)
    except ValueError:
        warning("can't infer Node from \"%s\", returning string" % (token, ))
        return token

    N = nodedb.get((t, name), doCreate=doCreate)
    if N is None:
        raise ValueError("no Node with key (%s, %s)" % (t, name))

    return N
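For the scalar token forms the `nodedb` is never consulted; hypothetical illustrations of the conversions (the final form assumes a 'TYPE:name' node key as parsed by `nodekey`):

    fromtoken('"foo"', nodedb)           # -> 'foo' (JSON string)
    fromtoken('42', nodedb)              # -> 42 (int)
    fromtoken('http://foo/bah', nodedb)  # -> the URL string unchanged
    fromtoken('HOST:web1', nodedb)       # -> the Node with key ('HOST', 'web1')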
Example 21
 def _response(self, method):
   rq = self._request(method)
   opener = self.opener
   retries = self.retry_timeout
   with Pfx("open(%s)", rq):
     while retries > 0:
       now = time.time()
        try:
          opened_url = opener.open(rq)
       except OSError as e:
          if e.errno == errno.ETIMEDOUT:
            elapsed = time.time() - now
            warning("open %s: %s; elapsed=%gs", self, e, elapsed)
            retries -= 1
            if retries > 0:
              continue
          # out of retries, or not a timeout
          raise
       except HTTPError as e:
         warning("open %s: %s", self, e)
         raise
       else:
         # success, exit retry loop
         break
   self._info = opened_url.info()
   return opened_url
Example 22
def scan_loglines(lines, *, start=1, drop_blanks=False):
  ''' Generator to scan lines and collate into `LogEntry` instances.
  '''
  entry_unixtime = None
  entry_lines = []
  for lineno, line in enumerate(lines, start):
    with Pfx(lineno):
      line = line.rstrip()
      if drop_blanks and not line.lstrip():
        continue
      words = line.split(None, 2)
      curr = None
      if len(words) > 2:
        when_s = words[0] + ' ' + words[1]
        try:
          when = arrow.get(when_s)
        except arrow.ParserError as e:
          warning("%r: %s", when_s, e)
        else:
          curr = when.timestamp
    # leave the Pfx, it does not play well in a generator
    if curr is None:
      # continuation line of the current entry
      entry_lines.append(line)
    else:
      # new timestamped entry: flush the accumulated entry first
      if entry_lines:
        yield LogEntry(entry_lines, unixtime=entry_unixtime)
      entry_lines = [line]
      entry_unixtime = curr
  if entry_lines:
    yield LogEntry(entry_lines, unixtime=entry_unixtime)
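A usage sketch over a hypothetical log file whose entries begin with a two-word timestamp parseable by `arrow.get` (the `unixtime` and `lines` attribute names are assumptions based on the constructor calls above):

    with open('app.log') as logf:
        for entry in scan_loglines(logf, drop_blanks=True):
            print(entry.unixtime, len(entry.lines))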
Example 23
    def pushto_queue(self, Q, runstate=None, progress=None):
        ''' Push this Block and any implied subblocks to a queue.

        Parameters:
        * `Q`: optional preexisting Queue, which itself should have
          come from a .pushto targeting the Store `S2`.
        * `runstate`: optional RunState used to cancel the operation
        * `progress`: optional Progress to update its total

        TODO: optional `no_wait` parameter to control waiting,
        default `False`, which would support closing the Queue but
        not waiting for the worker completion. This is on the premise
        that the final Store shutdown of `S2` will wait for outstanding
        operations anyway.
        '''
        with defaults.S:
            if progress:
                progress.total += len(self)
            Q.put(self)
            if self.indirect:
                # recurse, reusing the Queue
                for subB in self.subblocks:
                    if runstate and runstate.cancelled:
                        warning("%s: push cancelled", self)
                        break
                    subB.pushto_queue(Q, runstate=runstate, progress=progress)
Example 24
 def walk(self):
   ''' Walk this tree.
   '''
   pending = [(self, [])]
   while pending:
     node, rparts = pending.pop()
     rpath = PATHSEP.join(rparts)
     dirnames = []
     filenames = []
     for name in sorted(node.keys()):
       subnode = node.get(name)
       if subnode is None:
         continue
       if subnode.isdir:
         dirnames.append(name)
       else:
         filenames.append(name)
     odirnames = set(dirnames)
     yield rpath, dirnames, filenames
     for name in dirnames:
       if name not in odirnames:
         warning(
             "walk(%s): %r: dirname %r not in original set", self, rpath, name
         )
         continue
       subnode = node.get(name)
       if subnode is None:
          warning("walk(%s): %r: dirname %r not in node", self, rpath, name)
         continue
       pending.append((subnode, rparts + [name]))
Example 25
 def cmd_stats(argv):
     ''' Usage: {cmd} host:port print [columns...]
       Fetch the statistics from the haproxy at host:port.
     '''
     badopts = False
     if not argv:
         warning("missing host:port")
         badopts = True
     else:
         host_port = argv.pop(0)
         with Pfx("host:port %r", host_port):
             try:
                 host, port = host_port.rsplit(':', 1)
                 port = int(port)
             except ValueError as e:
                 warning("invalid: %s", e)
                 badopts = True
     cols = argv
     if badopts:
         raise GetoptError("invalid arguments")
     S = Stats(host, port)
     for row in S.csvdata():
         if cols:
             print(*[getattr(row, col) for col in cols])
         else:
             print(*row)
Example 26
def indirect_blocks(blocks):
    ''' A generator that yields full `IndirectBlock`s from an iterable
      source of `Block`s, except for the last `Block` which need not
      necessarily be bundled into an `IndirectBlock`.
    '''
    subblocks = []
    subsize = 0
    for block in blocks:
        enc = block.encode()
        if subsize + len(enc) > MAX_BLOCKSIZE:
            # overflow
            if not subblocks:
                # do not yield empty indirect block, flag logic error instead
                warning(
                    "no pending subblocks at flush, presumably len(block.encode()) %d > MAX_BLOCKSIZE %d",
                    len(enc), MAX_BLOCKSIZE)
            else:
                yield IndirectBlock.from_subblocks(subblocks)
                subblocks = []
                subsize = 0
        subblocks.append(block)
        subsize += len(enc)

    # handle the termination case
    if subblocks:
        if len(subblocks) == 1:
            # one block unyielded - don't bother wrapping into an iblock
            block = subblocks[0]
        else:
            block = IndirectBlock.from_subblocks(subblocks)
        yield block
Example 27
 def run(self, runstate=None):
   print("run...")
   if runstate is None:
     runstate = RunState(str(self))
   # Display and interact with the Window using an Event Loop
   window = self.window
   print("window =", window)
   with runstate:
     while not runstate.cancelled:
       print("event?")
       event, values = window.read()
       print("event =", repr(event), repr(values))
       # See if user wants to quit or window was closed
       if event == sg.WINDOW_CLOSED or event == 'Quit':
         runstate.cancel()
       elif event == self.tree.key:
         record_key, = values[event]
         print("record_key =", record_key)
         try:
           record = self.tree[record_key]
         except KeyError as e:
           warning("no self.tree[%r]: %s", record_key, e)
         else:
           print("record =", record)
           self.fspath = record.fullpath
       else:
         warning("unexpected event %r: %r", event, values)
Example 28
 def testhcu00first(self):
   ''' Trivial test adding 2 blocks.
   '''
   M1 = self.S
   KS1 = set()
   # test emptiness
   self.assertLen(M1, 0)
   # add one block
   data = make_randblock(rand0(8193))
   h = M1.add(data)
   self.assertIn(h, M1)
   self.assertEqual(M1[h], data)
   KS1.add(h)
   self.assertIn(h, M1)
   mks = set(M1.keys())
   self.assertIn(h, mks)
   mks = set(M1.hashcodes())
   ##self.assertEqual(set(M1.hashcodes()), KS1)
   if mks != KS1:
     warning(
         "M1.hashcodes != KS1: M1 missing %r, KS1 missing %r", KS1 - mks,
         mks - KS1
     )
   # add another block
   data2 = make_randblock(rand0(8193))
   h2 = M1.add(data2)
   KS1.add(h2)
   mks2 = set(M1.hashcodes())
   ##self.assertEqual(mks2, KS1)
   if mks2 != KS1:
     warning(
         "M1.hashcodes != KS1: M1 missing %r, KS1 missing %r", KS1 - mks2,
         mks2 - KS1
     )
Example 29
def Archive(path, missing_ok=False, weird_ok=False, config=None):
  ''' Return an Archive from the specification `path`.

      If the `path` begins with `'['`
      then it is presumed to be a Store Archive
      obtained via the Store's `.get_Archive(name)` method
      and the `path` should have the form:

          [clausename]name

      where *clausename* is a configuration clause name
      and *name* is an identifier used to specify an Archive
      associated with the Store.
  '''
  if path.startswith('['):
    # expect "[clausename]name"
    clause_name, archive_name, offset = get_ini_clause_entryname(path)
    if offset < len(path):
      raise ValueError(
          "unparsed text after archive name: %r" % (path[offset:],)
      )
    S = config[clause_name]
    return S.get_Archive(archive_name, missing_ok=missing_ok)
  # otherwise a file pathname
  if not path.endswith('.vt'):
    if weird_ok:
      warning("unusual Archive path: %r", path)
    else:
      raise ValueError(
          "invalid Archive path (should end in '.vt'): %r" % (path,)
      )
  if not missing_ok and not isfile(path):
    raise ValueError("not a file: %r" % (path,))
  return FilePathArchive(path)
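A usage sketch showing both specification forms from the docstring (the clause name, archive name and `config` object are hypothetical):

    # file pathname form: a '.vt' file
    arch = Archive('backups/home.vt', missing_ok=True)
    # Store Archive form: '[clausename]name', resolved via the config
    arch2 = Archive('[server]home', config=config)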
Example 30
    def shutdown(self, block=False):
        ''' Shut down the PacketConnection, optionally blocking for outstanding requests.

        Parameters:
        `block`: block for outstanding requests, default False.
        '''
        with self._lock:
            if self.closed:
                # shutdown already called from another thread
                return
            # prevent further request submission either local or remote
            self.closed = True
        ps = self._pending_states()
        if ps:
            warning("PENDING STATES AT SHUTDOWN: %r", ps)
        # wait for completion of requests we're performing
        for LF in list(self._running):
            LF.join()
        # shut down sender, should trigger shutdown of remote receiver
        self._sendQ.close(enforce_final_close=True)
        self._send_thread.join()
        # we do not wait for the receiver - anyone hanging on outstanding
        # requests will get them as they come in, and in theory a network
        # disconnect might leave the receiver hanging anyway
        self._later.close()
        if block:
            self._later.wait()