Example #1
File: tcp.py Project: mnot/thor
 def connect(self, host: bytes, port: int, connect_timeout: float = None) -> None:
     """
     Connect to host:port (with an optional connect timeout)
     and emit 'connect' when connected, or 'connect_error' in
     the case of an error.
     """
     self.host = host
     self.port = port
     self.on("fd_writable", self.handle_connect)
     # TODO: use socket.getaddrinfo(); needs to be non-blocking.
     try:
         err = self.sock.connect_ex((host, port))
     except socket.gaierror as why:
         self.handle_conn_error(socket.gaierror, why)
         return
     except socket.error as why:
         self.handle_conn_error(socket.error, why)
         return
     if err != errno.EINPROGRESS:
         self.handle_conn_error(socket.error, socket.error(err, os.strerror(err)))
         return
     if connect_timeout:
         self._timeout_ev = self._loop.schedule(
             connect_timeout,
             self.handle_conn_error,
             socket.error,
             socket.error(errno.ETIMEDOUT, os.strerror(errno.ETIMEDOUT)),
             True,
         )
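For context, the same non-blocking connect_ex()/EINPROGRESS pattern can be sketched with only the standard library; the names below are illustrative and not part of thor:

import errno
import os
import socket

def start_connect(host, port):
    # Begin a non-blocking connect; return the socket or raise OSError.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)
    err = sock.connect_ex((host, port))
    # 0 means connected immediately; EINPROGRESS/EWOULDBLOCK means still in flight.
    if err not in (0, errno.EINPROGRESS, errno.EWOULDBLOCK):
        sock.close()
        raise OSError(err, os.strerror(err))
    return sock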
Example #2
 def write_file(self, data, vm, nw):
     '''
     Write config file for container. On restart, VRouter will rebuild its
     container-db from these config-files
     '''
     # Ensure directory first
     try:
         os.makedirs(self.directory)
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise Error(VROUTER_INVALID_DIR,
                         'Error creating config directory :' +
                         self.directory + '. Error : ' +
                         os.strerror(e.errno))
         if not os.path.isdir(self.directory):
             raise Error(VROUTER_INVALID_DIR,
                         'Invalid config directory :' + self.directory +
                         '. Error : ' + os.strerror(e.errno))
     # Write config file
     fname = self.make_filename(vm, nw)
     try:
         f = open(fname, 'w')
         f.write(data)
         f.close()
     except IOError as e:
         raise Error(VROUTER_FILE_WRITE_ERROR,
                     'Error writing file : ' + fname + '. Error : ' +
                     os.strerror(e.errno))
     return
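On Python 3 the EEXIST handling above can be collapsed into os.makedirs(..., exist_ok=True); a minimal sketch (the generic RuntimeError stands in for the project's Error class):

import os

def ensure_config_dir(directory):
    # Create the config directory if needed, mirroring the EEXIST handling above.
    try:
        os.makedirs(directory, exist_ok=True)
    except OSError as e:
        # exist_ok=True still raises if the path exists but is not a directory,
        # or on genuine failures such as EACCES.
        raise RuntimeError("Error creating config directory %s: %s"
                           % (directory, os.strerror(e.errno)))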
Example #3
 def _mock_rm(path):
     print("_mock_rm-rmdir_enoent(%s)" % path)
     if path == self.rootdir and not seen[0]:
         seen[0] = 1
         raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))
     else:
         raise OSError(errno.EACCES, os.strerror(errno.EACCES))
Example #4
 def bind(self, address):
     _checkaddrpair(address, False)
     if self.__isbound():
         raise _socket.error('Socket is already bound')
     elif self.__isconnected():
         raise _socket.error("Socket is already connected, cannot be bound")
         
     if self.__conn.proto == _lightbluecommon.L2CAP:
         raise NotImplementedError("L2CAP server sockets not currently supported")
         
     if address[1] != 0:
         raise _socket.error("must bind to port 0, other ports not supported on Mac OS X")
     address = (address[0], _getavailableport(self.__conn.proto))
         
     # address must be either empty string or local device address
     if address[0] != "":
         try:
             import lightblue            
             localaddr = lightblue.gethostaddr()
         except:
             localaddr = None
         if localaddr is None or address[0] != localaddr:
             raise _socket.error(
                 errno.EADDRNOTAVAIL, os.strerror(errno.EADDRNOTAVAIL))            
             
     # is this port already in use?
     if address[1] in self._boundports[self.__conn.proto]:
         raise _socket.error(errno.EADDRINUSE, os.strerror(errno.EADDRINUSE))
 
     self._boundports[self.__conn.proto].add(address[1])
     self.__port = address[1]
Example #5
  def getpeername(self):
    """getpeername() -> address info

    Return the address of the remote endpoint.  For IP sockets, the address
    info is a pair (hostaddr, port).
    """
    if not self._created:
      self._CreateSocket()
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))
    if not (self._connected or self._connect_in_progress):
      raise error(errno.ENOTCONN, os.strerror(errno.ENOTCONN))

    request = remote_socket_service_pb.GetPeerNameRequest()
    request.set_socket_descriptor(self._socket_descriptor)

    reply = remote_socket_service_pb.GetPeerNameReply()

    try:
      apiproxy_stub_map.MakeSyncCall(
          'remote_socket', 'GetPeerName', request, reply)
    except apiproxy_errors.ApplicationError as e:
      raise _SystemExceptionFromAppError(e)

    if self._connect_in_progress:
      self._connect_in_progress = False
      self._connected = True

    return (
        inet_ntop(self.family, reply.peer_ip().packed_address()),
        reply.peer_ip().port())
Example #6
    def recv(self, bufsize, flags=0):
        if self.__commstate in (SHUT_RD, SHUT_RDWR):
            return ""
        self.__checkconnected()

        if not isinstance(bufsize, int):
            raise TypeError("buffer size must be int, was %s" % type(bufsize))
        if bufsize < 0: 
            raise ValueError("negative buffersize in recv") # as for tcp 
        if bufsize == 0: 
            return ""
            
        # need this to ensure the _isclosed() check is up-to-date
        _macutil.looponce()
        
        if self._isclosed():
            if len(self.__incomingdata) == 0:
                raise _socket.error(errno.ECONNRESET,         
                                    os.strerror(errno.ECONNRESET))
            return self.__incomingdata.read(bufsize)
    
        # if incoming data buffer is empty, wait until data is available or
        # channel is closed
        def gotdata():
            return not self.__incomingdata.empty() or self._isclosed()
        if not gotdata():
            self.__waituntil(gotdata, "recv timed out")

        # other side closed connection while waiting?
        if self._isclosed() and len(self.__incomingdata) == 0:    
            raise _socket.error(errno.ECONNRESET, os.strerror(errno.ECONNRESET))
            
        return self.__incomingdata.read(bufsize)
Example #7
  def recvfrom(self, buffersize, flags=0):
    """recvfrom(buffersize[, flags]) -> (data, address info)

    Like recv(buffersize, flags) but also return the sender's address info.
    """
    if not self._created:
      self._CreateSocket()
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))

    request = remote_socket_service_pb.ReceiveRequest()
    request.set_socket_descriptor(self._socket_descriptor)
    request.set_data_size(buffersize)
    request.set_flags(flags)
    if self.type == SOCK_STREAM:
      if not (self._connected or self._connect_in_progress):
        raise error(errno.ENOTCONN, os.strerror(errno.ENOTCONN))
    if self._shutdown_read:
      request.set_timeout_seconds(0.0)
    elif self.gettimeout() is not None:
      request.set_timeout_seconds(self.gettimeout())

    reply = remote_socket_service_pb.ReceiveReply()

    try:
      apiproxy_stub_map.MakeSyncCall('remote_socket', 'Receive', request, reply)
    except apiproxy_errors.ApplicationError as e:
      e = _SystemExceptionFromAppError(e)
      if not self._shutdown_read or e.errno != errno.EAGAIN:
        raise e
Example #8
 def handle(self, *args, **kwargs):
     from django.db import transaction
     try:
         f = open(args[0], 'r')
     except OSError as e:
         os.strerror(e.errno)
         return
Example #9
  def listen(self, backlog):
    """listen(backlog)

    Enable a server to accept connections.  The backlog argument must be at
    least 1; it specifies the number of unaccepted connections that the system
    will allow before refusing new connections.
    """
    if not self._created:
      self._CreateSocket(bind_address=('', 0))
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))
    if self._connected:
      raise error(errno.EINVAL, os.strerror(errno.EINVAL))
    if self.type != SOCK_STREAM:
      raise error(errno.EOPNOTSUPP, os.strerror(errno.EOPNOTSUPP))
    self._bound = True
    self._listen = True

    request = remote_socket_service_pb.ListenRequest()
    request.set_socket_descriptor(self._socket_descriptor)
    request.set_backlog(backlog)

    reply = remote_socket_service_pb.ListenReply()

    try:
      apiproxy_stub_map.MakeSyncCall('remote_socket', 'Listen', request, reply)
    except apiproxy_errors.ApplicationError as e:
      raise _SystemExceptionFromAppError(e)
Example #10
  def accept(self):
    """accept() -> (socket object, address info)

    Wait for an incoming connection.  Return a new socket representing the
    connection, and the address of the client.  For IP sockets, the address
    info is a pair (hostaddr, port).
    """
    if not self._created:
      self._CreateSocket()
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))
    if not self._listen:
      raise error(errno.EINVAL, os.strerror(errno.EINVAL))

    request = remote_socket_service_pb.AcceptRequest()
    request.set_socket_descriptor(self._socket_descriptor)
    if self.gettimeout() is not None:
      request.set_timeout_seconds(self.gettimeout())

    reply = remote_socket_service_pb.AcceptReply()

    try:
      apiproxy_stub_map.MakeSyncCall('remote_socket', 'Accept', request, reply)
    except apiproxy_errors.ApplicationError as e:
      raise _SystemExceptionFromAppError(e)
Example #11
def _SystemExceptionFromAppError(e):
  app_error = e.application_error
  if app_error in (RemoteSocketServiceError.SYSTEM_ERROR,
                   RemoteSocketServiceError.GAI_ERROR):
    error_detail = RemoteSocketServiceError()
    try:
      error_detail.ParseASCII(e.error_detail)
    except NotImplementedError:


      m = re.match(
          r'system_error:\s*(-?\d+)\s*,?\s*error_detail:\s*"([^"]*)"\s*',
          e.error_detail)
      if m:
        error_detail.set_system_error(int(m.group(1)))
        error_detail.set_error_detail(m.group(2))
      else:
        error_detail.set_system_error(-1)
        error_detail.set_error_detail(e.error_detail)
    if app_error == RemoteSocketServiceError.SYSTEM_ERROR:
      return error(error_detail.system_error(),
                   (error_detail.error_detail() or
                    os.strerror(error_detail.system_error())))
    elif app_error == RemoteSocketServiceError.GAI_ERROR:
      return gaierror(error_detail.system_error(),
                      error_detail.error_detail())
  elif app_error in _ERROR_MAP:
    return error(_ERROR_MAP[app_error], os.strerror(_ERROR_MAP[app_error]))
  else:
    return e
Example #12
  def bind(self, address):
    """bind(address)

    Bind the socket to a local address.  For IP sockets, the address is a
    pair (host, port); the host must refer to the local host. For raw packet
    sockets the address is a tuple (ifname, proto [,pkttype [,hatype]])
    """
    if not self._created:
      self._CreateSocket(bind_address=address)
      return
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))
    if self._bound:
      raise error(errno.EINVAL, os.strerror(errno.EINVAL))

    request = remote_socket_service_pb.BindRequest()
    request.set_socket_descriptor(self._socket_descriptor)
    self._SetProtoFromAddr(request.mutable_proxy_external_ip(), address)

    reply = remote_socket_service_pb.BindReply()

    try:
      apiproxy_stub_map.MakeSyncCall('remote_socket', 'Bind', request, reply)
    except apiproxy_errors.ApplicationError as e:
      raise _SystemExceptionFromAppError(e)
Example #13
    def __init__(self, transport):
        self.transport = transport
        
        # read ACR122U firmware version string
        reader_version = self.ccid_xfr_block(bytearray.fromhex("FF00480000"))
        if not reader_version.startswith(b"ACR122U"):
            log.error("failed to retrieve ACR122U version string")
            raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
        
        if int(chr(reader_version[7])) < 2:
            log.error("{0} not supported, need 2.xx".format(reader_version))
            raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))

        log.debug("initialize " + str(reader_version))
        
        # set icc power on
        log.debug("CCID ICC-POWER-ON")
        frame = bytearray.fromhex("62000000000000000000")
        transport.write(frame); transport.read(100)
        
        # disable autodetection
        log.debug("Set PICC Operating Parameters")
        self.ccid_xfr_block(bytearray.fromhex("FF00517F00"))
        
        # switch red/green led off/on
        log.debug("Configure Buzzer and LED")
        self.ccid_xfr_block(bytearray.fromhex("FF00400E0400000000"))
        
        super(Chipset, self).__init__(transport, logger=log)
Example #14
  def shutdown(self, flag):
    """shutdown(flag)

    Shut down the reading side of the socket (flag == SHUT_RD), the writing side
    of the socket (flag == SHUT_WR), or both ends (flag == SHUT_RDWR).
    """
    if not flag in (SHUT_RD, SHUT_WR, SHUT_RDWR):
      raise error(errno.EINVAL, os.strerror(errno.EINVAL))
    if not self._created:
      self._CreateSocket()
    if not self._socket_descriptor:
      raise error(errno.EBADF, os.strerror(errno.EBADF))
    if (not self._connected or
        (self._shutdown_read and flag in (SHUT_RD, SHUT_RDWR)) or
        (self._shutdown_write and flag in (SHUT_WR, SHUT_RDWR))):
      raise error(errno.ENOTCONN, os.strerror(errno.ENOTCONN))

    request = remote_socket_service_pb.ShutDownRequest()
    request.set_socket_descriptor(self._socket_descriptor)
    request.set_how(flag)
    request.set_send_offset(self._stream_offset)

    reply = remote_socket_service_pb.ShutDownReply()

    try:
      apiproxy_stub_map.MakeSyncCall(
          'remote_socket', 'ShutDown', request, reply)
    except apiproxy_errors.ApplicationError as e:
      raise _SystemExceptionFromAppError(e)
Example #15
 def mount(self, passwd=None):
     if self.mountpoint and os.path.ismount(self.mountpoint):
         raise Exception("Virtual disk already mounted")
     signals.emit("filesystems", "pre_mount", self)
     if not os.path.isdir(os.path.join("/media", self.id)):
         os.makedirs(os.path.join("/media", self.id))
     mount_point = self.mountpoint if self.mountpoint else os.path.join("/media", self.id)
     # Find a free loopback device and mount
     loop = losetup.find_unused_loop_device()
     loop.mount(str(self.path), offset=1048576)
     if self.crypt and passwd:
         # If it's an encrypted virtual disk, decrypt first then mount
         s = crypto.luks_open(loop.device, self.id, passwd)
         if s != 0:
             loop.unmount()
             raise Exception("Failed to decrypt %s with errno %s" % (self.id, str(s)))
         s = libc.mount(ctypes.c_char_p(os.path.join("/dev/mapper", self.id)),
             ctypes.c_char_p(mount_point),
             ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
         if s == -1:
             crypto.luks_close(self.id)
             loop.unmount()
             raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
     elif self.crypt and not passwd:
         raise Exception("Must provide password to decrypt encrypted container")
     else:
         s = libc.mount(ctypes.c_char_p(loop.device), ctypes.c_char_p(mount_point),
             ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
         if s == -1:
             loop.unmount()
             raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
     signals.emit("filesystems", "post_mount", self)
     self.mountpoint = mount_point
Example #16
 def mount(self, passwd=None):
     if self.mountpoint and os.path.ismount(self.mountpoint):
         raise Exception("Disk partition already mounted")
     elif self.fstype == "Unknown":
         raise Exception("Cannot mount a partition of unknown type")
     signals.emit("filesystems", "pre_mount", self)
     mount_point = self.mountpoint if self.mountpoint else os.path.join("/media", self.id)
     if self.crypt and passwd:
         # Decrypt the disk first if it's an encrypted disk
         s = crypto.luks_open(self.path, self.id, passwd)
         if s != 0:
             raise Exception("Failed to decrypt %s with errno %s" % (self.id, str(s)))
         s = libc.mount(ctypes.c_char_p(os.path.join("/dev/mapper", self.id)),
             ctypes.c_char_p(mount_point),
             ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
         if s == -1:
             crypto.luks_close(self.id)
             raise Exception("Failed to mount %s: %s" % (self.id, os.strerror(ctypes.get_errno())))
     elif self.crypt and not passwd:
         raise Exception("Must provide password to decrypt encrypted disk")
     else:
         s = libc.mount(ctypes.c_char_p(self.path),
             ctypes.c_char_p(mount_point),
             ctypes.c_char_p(self.fstype), 0, ctypes.c_char_p(""))
         if s == -1:
             raise Exception("Failed to mount %s: %s"%(self.id, os.strerror(ctypes.get_errno())))
     signals.emit("filesystems", "post_mount", self)
     self.mountpoint = mount_point
Example #17
    def getxattr(self, path, key, size=0):
        """
        Retrieve the value of the extended attribute identified by key
        for path specified.

        :param path: Path to file or directory
        :param key: Key of extended attribute
        :param size: If size is specified as zero, we first determine the
                     size of xattr and then allocate a buffer accordingly.
                     If size is non-zero, it is assumed the caller knows
                     the size of xattr.
        :returns: Value of extended attribute corresponding to key specified.
        """
        if size == 0:
            size = api.glfs_getxattr(self.fs, path, key, None, 0)
            if size < 0:
                err = ctypes.get_errno()
                raise OSError(err, os.strerror(err))

        buf = ctypes.create_string_buffer(size)
        rc = api.glfs_getxattr(self.fs, path, key, buf, size)
        if rc < 0:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err))
        return buf.value[:rc]
Example #18
    def __init__(self, path, no_atime = False, **kw):
        arg_is_unicode = isinstance(path, unicode)
        if _is_linux and no_atime:
            try:
                fd = os.open(path, os.O_RDONLY | os.O_NOCTTY | os.O_NOATIME)
            except OSError as e:
                if e.errno == errno.EPERM:
                    fd = os.open(path, os.O_RDONLY | os.O_NOCTTY)
                else:
                    raise

            self.dirp = _libc.fdopendir(fd)
            if not self.dirp:
                try:
                    os.close(fd)
                except OSError:
                    unhandled_exc_handler()

                e = ctypes.get_errno()
                raise OSError(e, os.strerror(e), path)
        else:
            self.dirp = _libc.opendir(path.encode(sys.getfilesystemencoding()) if arg_is_unicode else path)
            if not self.dirp:
                e = ctypes.get_errno()
                raise OSError(e, os.strerror(e), path)
        self.path = path
        self.my_dirent = Dirent()
        self.my_dirent_p = Dirent_p()
        self.arg_is_unicode = arg_is_unicode
        self.reset = functools.partial(_libc.rewinddir, self.dirp)
Example #19
def do_request(name, method, params_string):
    params = ovs.json.from_string(params_string)
    msg = ovs.jsonrpc.Message.create_request(method, params)
    s = msg.is_valid()
    if s:
        sys.stderr.write("not a valid JSON-RPC request: %s\n" % s)
        sys.exit(1)

    error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
    if error:
        sys.stderr.write("could not open \"%s\": %s\n"
                         % (name, os.strerror(error)))
        sys.exit(1)

    rpc = ovs.jsonrpc.Connection(stream)

    error = rpc.send(msg)
    if error:
        sys.stderr.write("could not send request: %s\n" % os.strerror(error))
        sys.exit(1)

    error, msg = rpc.recv_block()
    if error:
        sys.stderr.write("error waiting for reply: %s\n" % os.strerror(error))
        sys.exit(1)
    
    print(ovs.json.to_string(msg.to_json()))

    rpc.close()
Example #20
    def kibana_dashboard_install(self, env):
      from params import params
      env.set_params(params)

      Logger.info("Connecting to Elasticsearch on: %s" % (params.es_http_url))

      kibanaTemplate = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'kibana.template')
      if not os.path.isfile(kibanaTemplate):
        raise IOError(
            errno.ENOENT, os.strerror(errno.ENOENT), kibanaTemplate)

      Logger.info("Loading .kibana index template from %s" % kibanaTemplate)
      template_cmd = ambari_format(
          'curl -s -XPOST http://{es_http_url}/_template/.kibana -d @%s' % kibanaTemplate)
      Execute(template_cmd, logoutput=True)

      kibanaDashboardLoad = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'dashboard-bulkload.json')
      if not os.path.isfile(kibanaDashboardLoad):
        raise IOError(
            errno.ENOENT, os.strerror(errno.ENOENT), kibanaDashboardLoad)

      Logger.info("Loading .kibana dashboard from %s" % kibanaDashboardLoad)

      kibana_cmd = ambari_format(
          'curl -s -H "Content-Type: application/x-ndjson" -XPOST http://{es_http_url}/.kibana/_bulk --data-binary @%s' % kibanaDashboardLoad)
      Execute(kibana_cmd, logoutput=True)
Example #21
import errno  # os.errno was removed in Python 3.7; use the errno module directly
import os

def main(args):
  try:
    value = int(args.error, 0)

    if value < 0:
      value = -value

    if 0x100000000 - value < 0x200:
      value = 0x100000000 - value

    if value not in errno.errorcode:
      print("No errno for %s" % value)
      return

    name = errno.errorcode[value]

  except ValueError:
    name = args.error.upper()

    if not hasattr(errno, name):
      print("No errno for %s" % name)
      return

    value = getattr(errno, name)

  print('#define', name, value)
  print(os.strerror(value))
Example #22
 def run(self):
     self.open()
     up = 1
     while up == 1:
         open = True
         json = 0
         while not isinstance(json, str):
             json = cfunctions.mqposixReceive(self.qin)
         print('Request received:\n' + json)
         request = functions.fromJson(json)
         id = int(request['id'])
         if id == 1:
             response = classes.package('0001', '0000000', getAllFlights())
             response = functions.toPrettyJson(response)
             l = string.split(response)
             for s in l:
                 errno = cfunctions.mqposixSend(s, self.qout)
                 if errno != 0:
                     os.strerror(errno)       
         if id == 2:
             checkIn(request['data'],request['passenger'],request['seat'])
         if id == 3:
             addFlight(request['data'])
         if id == 4: 
             removeFlight(request['data'])
Example #23
    def test_missing_req_args(self):
        """
        Verify required arguments are caught when missing.
        """
        failing_cases = ([''],
                         ['', '--etcd-uri', 'http://127.0.0.1:2379'],
                         ['', '--kube-uri', 'http://127.0.0.1:8080'])
        for argv in failing_cases:
            sys.argv = argv
            parser = argparse.ArgumentParser()
            with mock.patch('__builtin__.open') as _open, \
                 mock.patch('argparse.ArgumentParser._print_message') as _print:
                # Make sure no config file is opened.
                _open.side_effect = IOError(
                    errno.ENOENT, os.strerror(errno.ENOENT))
                self.assertRaises(SystemExit, script.parse_args, parser)

        # All required arguments; no exception raised.
        sys.argv = ['', '--etcd-uri', 'http://127.0.0.1:2379',
                        '--kube-uri', 'http://127.0.0.1:8080']
        parser = argparse.ArgumentParser()
        with mock.patch('__builtin__.open') as _open:
            # Make sure no config file is opened.
            _open.side_effect = IOError(
                errno.ENOENT, os.strerror(errno.ENOENT))
            args = script.parse_args(parser)
Example #24
 def connect(self, address):
     if self.timeout == 0.0:
         return self._sock.connect(address)
     sock = self._sock
     if self.timeout is None:
         while True:
             err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
             if err:
                 raise error(err, strerror(err))
             result = sock.connect_ex(address)
             if not result or result == EISCONN:
                 break
             elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                 wait_readwrite(sock.fileno())
             else:
                 raise error(result, strerror(result))
     else:
         end = time.time() + self.timeout
         while True:
             err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
             if err:
                 raise error(err, strerror(err))
             result = sock.connect_ex(address)
             if not result or result == EISCONN:
                 break
             elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                 timeleft = end - time.time()
                 if timeleft <= 0:
                     raise timeout('timed out')
                 wait_readwrite(sock.fileno(), timeout=timeleft)
             else:
                 raise error(result, strerror(result))
Example #25
def main(args):
    if len(args) != 1:
        print(__doc__)
        return

    thing = args[0]

    if thing == '-list':
        for key, value in errorcode.items():
            print('%3d: %-20s %s' % (key, value, os.strerror(key)))
        return

    try:
        errnum = int(thing)
        print('Error %d (0x%x) is %s: %s' % (errnum, errnum,
                                             errorcode[errnum], os.strerror(errnum)))
        check_kbus(errorcode[errnum])
        return
    except ValueError:
        pass
    except KeyError:
        print('Unrecognised error code number %d' % errnum)
        return

    reverse = {}
    for key, value in errorcode.items():
        reverse[value] = key

    if thing in reverse:
        errnum = reverse[thing]
        print('%s is error %d (0x%x): %s' % (thing, errnum, errnum, os.strerror(errnum)))
    else:
        print('Unrecognised error code mnemonic %s' % thing)

    check_kbus(thing)
Example #26
 def connect(self, host, port, connect_timeout=None):
     """
     Connect to host:port (with an optional connect timeout)
     and emit 'connect' when connected, or 'connect_error' in
     the case of an error.
     """
     self.host = host
     self.port = port
     self.once('writable', self.handle_connect)
     # TODO: use socket.getaddrinfo(); needs to be non-blocking.
     try:
         err = self.sock.connect_ex((host, port))
     except socket.gaierror as why:
         self.handle_conn_error(type(why), [why.errno, why.strerror])
         return
     except socket.error as why:
         self.handle_conn_error(type(why), [why.errno, why.strerror])
         return
     if err != errno.EINPROGRESS:
         self.handle_conn_error(socket.error, [err, os.strerror(err)])
         return
     if connect_timeout:
         self._timeout_ev = self._loop.schedule(
             connect_timeout,
             self.handle_conn_error,
             TimeoutError,
             [errno.ETIMEDOUT, os.strerror(errno.ETIMEDOUT)],
             True)
Example #27
def main():
    HOST = ''
    PORT = 1053
    buf_size = 1024
    global bytes_recieved
    
    s.connect((HOST, PORT))
    signal.signal(signal.SIGURG, handler)
    fcntl.fcntl(s.fileno(), fcntl.F_SETOWN, os.getpid())
    
    message = s.recv(1024) # [size_of_file] + ' ' + [file_name]
    message_list = message.split(' ')
    file_name = 'incoming_' + message_list[1]
    file_size = int(message_list[0])
    
    if os.path.isfile(file_name):
        i = 0
        while os.path.isfile(str(i) + file_name):
            i += 1
        file_name = str(i) + file_name
            
    recv_file = open(file_name, "a")
    s.send("ok")
    while True:
        try:
            buf = s.recv(buf_size)
        except socket.error as why:
            print(os.strerror(why.errno))
        else:
            if buf == '':
                break
            else:
                s.send(repr(len(buf)))
                recv_file.write(buf)
                bytes_recieved += len(buf)
Example #28
File: ublkdev.py Project: dacut/ubd
def unregister(args=None):
    if args is None:
        args = sys.argv[1:]

    if len(args) == 0:
        print("Usage: %s <device>" % sys.argv[0], file=sys.stderr)
        return 1

    try:
        ubd = UserBlockDevice()
    except OSError as e:
        print("Unable to open endpoint /dev/ubd: %s" % os.strerror(e.errno),
              file=sys.stderr)
        return 1

    errors = False
    for endpoint in args:
        try:
            major = os.stat(endpoint).st_rdev >> 8
            ubd.unregister(major)
        except (OSError, IOError) as e:
            print("%s: %s" % (endpoint, os.strerror(e.errno)), file=sys.stderr)
            errors = True

    return 0 if not errors else 1
Example #29
def _make_pidfile():
    """If a pidfile has been configured, creates it and stores the running
    process's pid in it.  Ensures that the pidfile will be deleted when the
    process exits."""
    if _pidfile is not None:
        # Create pidfile via temporary file, so that observers never see an
        # empty pidfile or an unlocked pidfile.
        pid = os.getpid()
        tmpfile = "%s.tmp%d" % (_pidfile, pid)
        ovs.fatal_signal.add_file_to_unlink(tmpfile)

        try:
            # This is global to keep Python from garbage-collecting and
            # therefore closing our file after this function exits.  That would
            # unlock the lock for us, and we don't want that.
            global file

            file = open(tmpfile, "w")
        except IOError as e:
            logging.error("%s: create failed: %s"
                          % (tmpfile, os.strerror(e.errno)))
            return
            
        try:
            fcntl.lockf(file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            logging.error("%s: fcntl failed: %s"
                          % (tmpfile, os.strerror(e.errno)))
            file.close()
            return
Example #30
def torsocket(family=socket.AF_INET, type=socket.SOCK_STREAM,
              proto=0, _sock=None):
    """
    Factory function usable as a monkey-patch for socket.socket.
    """

    # Pass through local sockets.
    if family in _LOCAL_SOCKETS:
        return orig_socket(family, type, proto, _sock)

    # Tor only supports AF_INET sockets.
    if family != socket.AF_INET:
        raise socket.error(errno.EAFNOSUPPORT, os.strerror(errno.EAFNOSUPPORT))

    # Tor only supports SOCK_STREAM sockets.
    if type != socket.SOCK_STREAM:
        raise socket.error(errno.ESOCKTNOSUPPORT,
                           os.strerror(errno.ESOCKTNOSUPPORT))

    # Acceptable values for PROTO are 0 and IPPROTO_TCP.
    if proto not in (0, socket.IPPROTO_TCP):
        raise socket.error(errno.EPROTONOSUPPORT,
                           os.strerror(errno.EPROTONOSUPPORT))

    return _Torsocket(family, type, proto, _sock)
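The factory above is meant to be monkey-patched over socket.socket; a hedged usage sketch (assuming, as the orig_socket name suggests, that the module keeps a reference to the original class):

import socket

orig_socket = socket.socket   # keep the real implementation for local sockets
socket.socket = torsocket     # new AF_INET/SOCK_STREAM sockets now go through Tor

# Unsupported combinations now fail fast with an errno-based socket.error, e.g.
# socket.socket(socket.AF_INET, socket.SOCK_DGRAM) raises ESOCKTNOSUPPORT.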
Example #31
 def _raise_not_found(self, path):
     raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path)
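Passing the arguments in (errno, strerror, filename) order populates the standard OSError attributes; a small illustration (the path is made up):

import errno
import os

try:
    raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), "/tmp/missing.txt")
except IOError as e:
    # e.errno == errno.ENOENT, e.strerror == "No such file or directory" (platform wording),
    # e.filename == "/tmp/missing.txt"; str(e) includes both the message and the path.
    print(e.errno, e.strerror, e.filename)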
Example #32
            f2.write(buf)
    except OSError as e:
        print("[Directories] Error %d: Copying file '%s' to '%s'! (%s)" % (
            e.errno, src, dst, os.strerror(e.errno)))
        status = -1
    if f1 is not None:
        f1.close()
    if f2 is not None:
        f2.close()
    try:
        st = os.stat(src)
        try:
            os.chmod(dst, S_IMODE(st.st_mode))
        except OSError as e:
            print("[Directories] Error %d: Setting modes from '%s' to '%s'! (%s)" % (
                e.errno, src, dst, os.strerror(e.errno)))
        try:
            os.utime(dst, (st.st_atime, st.st_mtime))
        except OSError as e:
            print("[Directories] Error %d: Setting times from '%s' to '%s'! (%s)" % (
                e.errno, src, dst, os.strerror(e.errno)))
    except OSError as e:
        print("[Directories] Error %d: Obtaining stats from '%s' to '%s'! (%s)" % (
            e.errno, src, dst, os.strerror(e.errno)))
    return status


def copytree(src, dst, symlinks=False):
    names = os.listdir(src)
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
Example #33
async def mega_downloader(megadl):
    await megadl.edit("**Collecting information...**")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    msg_link = await megadl.get_reply_message()
    link = megadl.pattern_match.group(1)
    if link:
        pass
    elif msg_link:
        link = msg_link.text
    else:
        return await megadl.edit("**Usage:** `.mega` <MEGA.nz link>")
    try:
        link = re.findall(r"\bhttps?://.*mega.*\.nz\S+", link)[0]
        if "file" in link:
            link = link.replace("#", "!").replace("file/", "#!")
        elif "folder" in link or "#F" in link or "#N" in link:
            await megadl.edit("**Folders aren't supported.**")
            return
    except IndexError:
        await megadl.edit("**Error: Broken link.**")
        return None
    cmd = f"userbot/bin/megadown -q -m {link}"
    result = await subprocess_run(megadl, cmd)
    try:
        data = json.loads(result[0])
    except json.JSONDecodeError:
        await megadl.edit("**JSONDecodeError**: `Failed to extract link.`")
        return None
    except (IndexError, TypeError):
        return
    file_name = data["file_name"]
    file_url = data["url"]
    hex_key = data["hex_key"]
    hex_raw_key = data["hex_raw_key"]
    temp_file_name = file_name + ".temp"
    temp_file_path = os.path.join(TEMP_DOWNLOAD_DIRECTORY, temp_file_name)
    file_path = os.path.join(TEMP_DOWNLOAD_DIRECTORY, file_name)
    if os.path.isfile(file_path):
        try:
            raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),
                                  file_path)
        except FileExistsError as e:
            await megadl.edit(f"`{e}`")
            return None
    downloader = SmartDL(file_url, temp_file_path, progress_bar=False)
    display_message = None
    try:
        downloader.start(blocking=False)
    except HTTPError as e:
        await megadl.edit(f"**HTTPError**: `{e}`")
        return None
    start = time.time()
    while not downloader.isFinished():
        status = downloader.get_status().capitalize()
        total_length = downloader.filesize or None
        downloaded = downloader.get_dl_size()
        percentage = int(downloader.get_progress() * 100)
        speed = downloader.get_speed(human=True)
        estimated_total_time = round(downloader.get_eta())
        progress_str = "**{}:** `[{}{}]` **{}%**".format(
            status,
            "".join("●" for _ in range(math.floor(percentage / 10))),
            "".join("○" for _ in range(10 - math.floor(percentage / 10))),
            round(percentage, 2),
        )

        diff = time.time() - start
        try:
            current_message = (
                f"**Name:** `{file_name}`\n"
                f"{progress_str}\n"
                f"{humanbytes(downloaded)} of {humanbytes(total_length)}"
                f" @ {speed}\n"
                f"**Duration:** {time_formatter(round(diff))}\n"
                f"**ETA:** {time_formatter(estimated_total_time)}")
            if round(diff % 15.00) == 0 and (display_message != current_message
                                             or total_length == downloaded):
                await megadl.edit(current_message)
                await asyncio.sleep(0.2)
                display_message = current_message
        except Exception:
            pass
        finally:
            if status == "Combining":
                wait = round(downloader.get_eta())
                await asyncio.sleep(wait)
    if downloader.isSuccessful():
        download_time = round(downloader.get_dl_time() + wait)
        try:
            P = multiprocessing.Process(
                target=await decrypt_file(megadl, file_path, temp_file_path,
                                          hex_key, hex_raw_key),
                name="Decrypt_File",
            )
            P.start()
            P.join()
        except FileNotFoundError as e:
            await megadl.edit(f"`{str(e)}`")
            return None
        else:
            await megadl.edit("**Downloaded successfully!**\n\n"
                              f"**Name:** `{file_name}`\n"
                              f"**Path:** `{file_path}`\n"
                              f"**Duration:** {time_formatter(download_time)}")
            return None
    else:
        await megadl.edit("**Error: Couldn't download given file.**\n"
                          "Check Heroku logs for more details (`.logs`)")
        for e in downloader.get_errors():
            LOGS.info(str(e))
    return
Example #34
def describe_sys_errno(e):
    try:
        strerror = os.strerror(e)
    except ValueError:
        strerror = 'no description available'
    return '{} ({})'.format(strerror, errno.errorcode.get(e, 'UNKNOWN ERROR'))
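A quick usage sketch of the helper above (exact wording is platform-dependent):

import errno

print(describe_sys_errno(errno.ECONNREFUSED))  # e.g. "Connection refused (ECONNREFUSED)"
print(describe_sys_errno(99999))               # unknown codes fall back to the 'UNKNOWN ERROR' label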
Example #35
 def chdir(self, path):
     p = self._normpath(path)
     if self._isdir(p):
         self._cwd = p
     else:
         raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
Example #36
def prepro(hp):
    """Load raw data -> Preprocessing -> Segmenting with sentencepice
    hp: hyperparams. argparse.
    """
    logging.info("# Check if raw files exist")
    train1 = "iwslt2016/de-en/train.tags.de-en.de"
    train2 = "iwslt2016/de-en/train.tags.de-en.en"
    eval1 = "iwslt2016/de-en/IWSLT16.TED.tst2013.de-en.de.xml"
    eval2 = "iwslt2016/de-en/IWSLT16.TED.tst2013.de-en.en.xml"
    test1 = "iwslt2016/de-en/IWSLT16.TED.tst2014.de-en.de.xml"
    test2 = "iwslt2016/de-en/IWSLT16.TED.tst2014.de-en.en.xml"
    for f in (train1, train2, eval1, eval2, test1, test2):
        if not os.path.isfile(f):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    f)  # TODO: note how the errno/strerror pair is used in the error here

    logging.info("# Preprocessing")
    # train
    _prepro = lambda x:  [line.strip() for line in open(x, 'r', encoding='utf-8').read().split("\n") \
                      if not line.startswith("<")]  # TODO: note this string method
    prepro_train1, prepro_train2 = _prepro(train1), _prepro(train2)
    assert len(prepro_train1) == len(
        prepro_train2), "Check if train source and target files match."

    # eval
    _prepro = lambda x: [re.sub("<[^>]+>", "", line).strip() \
                     for line in open(x, 'r', encoding='utf-8').read().split("\n") \
                     if line.startswith("<seg id")]  # TODO: note the use of the re module for cleanup here
    prepro_eval1, prepro_eval2 = _prepro(eval1), _prepro(eval2)
    assert len(prepro_eval1) == len(
        prepro_eval2), "Check if eval source and target files match."

    # test
    prepro_test1, prepro_test2 = _prepro(test1), _prepro(test2)
    assert len(prepro_test1) == len(
        prepro_test2), "Check if test source and target files match."

    logging.info("Let's see how preprocessed data look like")
    logging.info("prepro_train1: %s", prepro_train1[0])
    logging.info("prepro_train2: %s", prepro_train2[0])
    logging.info("prepro_eval1: %s", prepro_eval1[0])
    logging.info("prepro_eval2: %s", prepro_eval2[0])
    logging.info("prepro_test1: %s", prepro_test1[0])
    logging.info("prepro_test2: %s", prepro_test2[0])

    logging.info("# write preprocessed files to disk")
    os.makedirs("iwslt2016/prepro", exist_ok=True)  # TODO: note the exist_ok argument

    def _write(sents, fname):
        with open(fname, 'w', encoding='utf-8') as fout:
            fout.write("\n".join(sents))

    _write(prepro_train1, "iwslt2016/prepro/train.de")
    _write(prepro_train2, "iwslt2016/prepro/train.en")
    _write(prepro_train1 + prepro_train2, "iwslt2016/prepro/train")
    _write(prepro_eval1, "iwslt2016/prepro/eval.de")
    _write(prepro_eval2, "iwslt2016/prepro/eval.en")
    _write(prepro_test1, "iwslt2016/prepro/test.de")
    _write(prepro_test2, "iwslt2016/prepro/test.en")

    logging.info("# Train a joint BPE model with sentencepiece")
    os.makedirs("iwslt2016/segmented", exist_ok=True)
    train = '--input=iwslt2016/prepro/train --pad_id=0 --unk_id=1 \
             --bos_id=2 --eos_id=3\
             --model_prefix=iwslt2016/segmented/bpe --vocab_size={} \
             --model_type=bpe'.format(hp.vocab_size)
    spm.SentencePieceTrainer.Train(train)  # TODO: note the use of sentencepiece here

    logging.info("# Load trained bpe model")
    sp = spm.SentencePieceProcessor()
    sp.Load("iwslt2016/segmented/bpe.model")

    logging.info("# Segment")

    def _segment_and_write(sents, fname):
        with open(fname, "w", encoding='utf-8') as fout:
            for sent in sents:
                pieces = sp.EncodeAsPieces(sent)
                fout.write(" ".join(pieces) + "\n")

    _segment_and_write(prepro_train1, "iwslt2016/segmented/train.de.bpe")
    _segment_and_write(prepro_train2, "iwslt2016/segmented/train.en.bpe")
    _segment_and_write(prepro_eval1, "iwslt2016/segmented/eval.de.bpe")
    _segment_and_write(prepro_eval2, "iwslt2016/segmented/eval.en.bpe")
    _segment_and_write(prepro_test1, "iwslt2016/segmented/test.de.bpe")

    logging.info("Let's see how segmented data look like")
    print(
        "train1:",
        open("iwslt2016/segmented/train.de.bpe", 'r',
             encoding='utf-8').readline())
    print(
        "train2:",
        open("iwslt2016/segmented/train.en.bpe", 'r',
             encoding='utf-8').readline())
    print(
        "eval1:",
        open("iwslt2016/segmented/eval.de.bpe", 'r',
             encoding='utf-8').readline())
    print(
        "eval2:",
        open("iwslt2016/segmented/eval.en.bpe", 'r',
             encoding='utf-8').readline())
    print(
        "test1:",
        open("iwslt2016/segmented/test.de.bpe", 'r',
             encoding='utf-8').readline())
Example #37
def error_text(errnumber):
    return '%s: %s' % (errno.errorcode[errnumber], os.strerror(errnumber))
Example #38
def _EnsureDir(mode, dest):
    if not os.path.isdir(dest):
        if os.path.exists(dest):
            raise OSError(errno.EEXIST, os.strerror(errno.EEXIST))
        os.makedirs(dest, mode)
Example #39
	def handle_timeout(signum, frame):
		import errno
		raise TimeOut(os.strerror(errno.ETIME))
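A handler like this is normally paired with SIGALRM; a hedged sketch of how it might be wired up (do_slow_operation is a placeholder, and TimeOut is assumed to be an exception class defined nearby):

import signal

signal.signal(signal.SIGALRM, handle_timeout)  # install the handler
signal.alarm(30)                               # deliver SIGALRM if the guarded call takes > 30s
try:
    do_slow_operation()                        # placeholder for the call being guarded
finally:
    signal.alarm(0)                            # always cancel the pending alarm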
Example #40
def do_idl(remote, *commands):
    idl = ovs.db.idl.Idl(remote, "idltest")

    if commands:
        error, stream = ovs.stream.Stream.open_block(
            ovs.stream.Stream.open(remote))
        if error:
            sys.stderr.write("failed to connect to \"%s\"" % remote)
            sys.exit(1)
        rpc = ovs.jsonrpc.Connection(stream)
    else:
        rpc = None

    symtab = {}
    seqno = 0
    step = 0
    for command in commands:
        if command.startswith("+"):
            # The previous transaction didn't change anything.
            command = command[1:]
        else:
            # Wait for update.
            while idl.get_seqno() == seqno and not idl.run():
                rpc.run()

                poller = ovs.poller.Poller()
                idl.wait(poller)
                rpc.wait(poller)
                poller.block()

            print_idl(idl, step)
            step += 1

        seqno = idl.get_seqno()

        if command == "reconnect":
            print("%03d: reconnect" % step)
            step += 1
            idl.force_reconnect()
        elif not command.startswith("["):
            idl_set(idl, command, step)
            step += 1
        else:
            json = ovs.json.from_string(command)
            if type(json) in [str, unicode]:
                sys.stderr.write("\"%s\": %s\n" % (command, json))
                sys.exit(1)
            json = substitute_uuids(json, symtab)
            request = ovs.jsonrpc.Message.create_request("transact", json)
            error, reply = rpc.transact_block(request)
            if error:
                sys.stderr.write("jsonrpc transaction failed: %s" %
                                 os.strerror(error))
                sys.exit(1)
            sys.stdout.write("%03d: " % step)
            sys.stdout.flush()
            step += 1
            if reply.result is not None:
                parse_uuids(reply.result, symtab)
            reply.id = None
            sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))

    if rpc:
        rpc.close()
    while idl.get_seqno() == seqno and not idl.run():
        poller = ovs.poller.Poller()
        idl.wait(poller)
        poller.block()
    print_idl(idl, step)
    step += 1
    idl.close()
    print("%03d: done" % step)
Example #41
def do_idl(schema_file, remote, *commands):
    schema_helper = ovs.db.idl.SchemaHelper(schema_file)
    track_notify = False

    if commands and commands[0] == "track-notify":
        commands = commands[1:]
        track_notify = True

    if commands and commands[0].startswith("?"):
        readonly = {}
        for x in commands[0][1:].split("?"):
            readonly = []
            table, columns = x.split(":")
            columns = columns.split(",")
            for index, column in enumerate(columns):
                if column[-1] == '!':
                    columns[index] = columns[index][:-1]
                    readonly.append(columns[index])
            schema_helper.register_columns(table, columns, readonly)
        commands = commands[1:]
    else:
        schema_helper.register_all()
    idl = ovs.db.idl.Idl(remote, schema_helper)

    if commands:
        error, stream = ovs.stream.Stream.open_block(
            ovs.stream.Stream.open(remote))
        if error:
            sys.stderr.write("failed to connect to \"%s\"" % remote)
            sys.exit(1)
        rpc = ovs.jsonrpc.Connection(stream)
    else:
        rpc = None

    symtab = {}
    seqno = 0
    step = 0

    def mock_notify(event, row, updates=None):
        output = "%03d: " % step
        output += "event:" + str(event) + ", row={"
        output += get_simple_table_printable_row(row) + "}, updates="
        if updates is None:
            output += "None"
        else:
            output += "{" + get_simple_table_printable_row(updates) + "}"

        output += '\n'
        sys.stdout.write(output)
        sys.stdout.flush()

    if track_notify and "simple" in idl.tables:
        idl.notify = mock_notify

    commands = list(commands)
    if len(commands) >= 1 and "condition" in commands[0]:
        update_condition(idl, commands.pop(0))
        sys.stdout.write("%03d: change conditions\n" % step)
        sys.stdout.flush()
        step += 1

    for command in commands:
        if command.startswith("+"):
            # The previous transaction didn't change anything.
            command = command[1:]
        else:
            # Wait for update.
            while idl.change_seqno == seqno and not idl.run():
                rpc.run()

                poller = ovs.poller.Poller()
                idl.wait(poller)
                rpc.wait(poller)
                poller.block()

            print_idl(idl, step)
            step += 1

        seqno = idl.change_seqno

        if command == "reconnect":
            print("%03d: reconnect" % step)
            sys.stdout.flush()
            step += 1
            idl.force_reconnect()
        elif "condition" in command:
            update_condition(idl, command)
            sys.stdout.write("%03d: change conditions\n" % step)
            sys.stdout.flush()
            step += 1
        elif not command.startswith("["):
            idl_set(idl, command, step)
            step += 1
        else:
            json = ovs.json.from_string(command)
            if isinstance(json, six.string_types):
                sys.stderr.write("\"%s\": %s\n" % (command, json))
                sys.exit(1)
            json = substitute_uuids(json, symtab)
            request = ovs.jsonrpc.Message.create_request("transact", json)
            error, reply = rpc.transact_block(request)
            if error:
                sys.stderr.write("jsonrpc transaction failed: %s" %
                                 os.strerror(error))
                sys.exit(1)
            elif reply.error is not None:
                sys.stderr.write("jsonrpc transaction failed: %s" %
                                 reply.error)
                sys.exit(1)

            sys.stdout.write("%03d: " % step)
            sys.stdout.flush()
            step += 1
            if reply.result is not None:
                parse_uuids(reply.result, symtab)
            reply.id = None
            sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))
            sys.stdout.flush()

    if rpc:
        rpc.close()
    while idl.change_seqno == seqno and not idl.run():
        poller = ovs.poller.Poller()
        idl.wait(poller)
        poller.block()
    print_idl(idl, step)
    step += 1
    idl.close()
    print("%03d: done" % step)
Example #42
import errno
import os
if sys.version_info[0] == 2:
    from urllib import unquote
else:
    from urllib.parse import unquote

pattern = re.compile(r"\[[^\]]+?]\(([^)]+?)\)")

doc_relevant = set()

for arg in sys.argv[1].split('\n'):

    if not os.path.exists(arg):
        if sys.version_info[0] == 2:
            raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), arg)
        else:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), arg)
    else:
        (dirname, basename) = os.path.split(arg)

    doc_relevant.add(arg)

    for i, line in enumerate(open(arg)):
        for match in re.finditer(pattern, line):
            for group in match.groups():
                ref_file = '{}/{}'.format(dirname, group.rsplit('#page=', 1)[0])
                if os.path.exists(ref_file):
                    doc_relevant.add(ref_file)
                elif os.path.exists(unquote(ref_file)):
                    doc_relevant.add(unquote(ref_file))
Example #43
 def raise_oserr(cls):
     errn = cls.geterrno()
     raise OSError(errn, os.strerror(errn))
Example #44
def nl_error_handler_verbose(_, err, arg):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L78."""
    ofd = arg or _LOGGER.debug
    ofd('-- Error received: ' + strerror(-err.error))
    ofd('-- Original message: ' + print_header_content(err.msg))
    return -nl_syserr2nlerr(err.error)
Example #45
0
	def __str__(self):
		return "\n\t%s failed: %d (%s)" % (strmap.nl80211_commands2str[self._cmd], self._errno, os.strerror(-self._errno))
Example #46
async def mega_downloader(megadl):
    await megadl.edit("`Collecting information...`")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    msg_link = await megadl.get_reply_message()
    link = megadl.pattern_match.group(1)
    if link:
        pass
    elif msg_link:
        link = msg_link.text
    else:
        return await megadl.edit("Usage: `.mega` **<MEGA.nz link>**")
    try:
        link = re.findall(r"\bhttps?://.*mega.*\.nz\S+", link)[0]
        # - Mega changed their URL again -
        if "file" in link:
            link = link.replace("#", "!").replace("file/", "#!")
        elif "folder" in link or "#F" in link or "#N" in link:
            await megadl.edit("`folder download support are removed...`")
            return
    except IndexError:
        await megadl.edit("`MEGA.nz link not found...`")
        return None
    cmd = f"bin/megadown -q -m {link}"
    result = await subprocess_run(megadl, cmd)
    try:
        data = json.loads(result[0])
    except json.JSONDecodeError:
        await megadl.edit("**JSONDecodeError**: `failed to extract link...`")
        return None
    except (IndexError, TypeError):
        return
    file_name = data["file_name"]
    file_url = data["url"]
    hex_key = data["hex_key"]
    hex_raw_key = data["hex_raw_key"]
    temp_file_name = file_name + ".temp"
    temp_file_path = os.path.join(TEMP_DOWNLOAD_DIRECTORY, temp_file_name)
    file_path = os.path.join(TEMP_DOWNLOAD_DIRECTORY, file_name)
    if os.path.isfile(file_path):
        try:
            raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),
                                  file_path)
        except FileExistsError as e:
            await megadl.edit(f"`{str(e)}`")
            return None
    downloader = SmartDL(file_url, temp_file_path, progress_bar=False)
    display_message = None
    try:
        downloader.start(blocking=False)
    except HTTPError as e:
        await megadl.edit(f"**HTTPError**: `{str(e)}`")
        return None
    start = time.time()
    while not downloader.isFinished():
        status = downloader.get_status().capitalize()
        total_length = downloader.filesize if downloader.filesize else None
        downloaded = downloader.get_dl_size()
        percentage = int(downloader.get_progress() * 100)
        speed = downloader.get_speed(human=True)
        estimated_total_time = round(downloader.get_eta())
        progress_str = "`{0}` | [{1}{2}] `{3}%`".format(
            status,
            "".join(["⬤" for i in range(math.floor(percentage / 10))]),
            "".join(["◯" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        diff = time.time() - start
        try:
            current_message = (
                f"**➥file name : **`{file_name}`\n\n"
                "**➥Status**\n"
                f"{progress_str}\n"
                f"`{humanbytes(downloaded)}` of `{humanbytes(total_length)}`"
                f" @ `{speed}`\n"
                f"**➥ETA -> **`{time_formatter(estimated_total_time)}`\n"
                f"**➥ Duration -> **`{time_formatter(round(diff))}`")
            if round(diff % 15.00) == 0 and (display_message != current_message
                                             or total_length == downloaded):
                await megadl.edit(current_message)
                await asyncio.sleep(0.2)
                display_message = current_message
        except Exception:
            pass
        finally:
            if status == "Combining":
                wait = round(downloader.get_eta())
                await asyncio.sleep(wait)
    if downloader.isSuccessful():
        download_time = round(downloader.get_dl_time() + wait)
        try:
            P = multiprocessing.Process(
                target=await decrypt_file(megadl, file_path, temp_file_path,
                                          hex_key, hex_raw_key),
                name="Decrypt_File",
            )
            P.start()
            P.join()
        except FileNotFoundError as e:
            await megadl.edit(f"`{str(e)}`")
            return None
        else:
            await megadl.edit(
                f"**➥ file name : **`{file_name}`\n\n"
                f"**➥ Successfully downloaded in : ** `{file_path}`.\n"
                f"**➥ Download took :** {time_formatter(download_time)}.")
            return None
    else:
        await megadl.edit("`Failed to download, "
                          "check heroku Logs for more details.`")
        for e in downloader.get_errors():
            LOGS.info(str(e))
    return
Example #47
 def __init__(self, errno):
     super(FuseOSError, self).__init__(errno, strerror(errno))
Example #48
def get_sock_error(sock):
    error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    return socket.error(error_number, os.strerror(error_number))
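get_sock_error() is typically used after a non-blocking connect, once select/poll reports the socket writable; a minimal sketch (host and port are illustrative):

import select
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.connect_ex(("example.com", 80))   # usually returns EINPROGRESS immediately
select.select([], [sock], [], 5.0)     # wait until writable (or time out)
err = get_sock_error(sock)             # socket.error built from SO_ERROR
if err.errno != 0:
    raise err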
Example #49
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir",
                        type=str,
                        required=True,
                        help="either a directory containing subdirectories "
                        "train, val, test, etc, or a directory containing "
                        "the tfrecords")
    parser.add_argument(
        "--val_input_dirs",
        type=str,
        nargs='+',
        help="directories containing the tfrecords. default: [input_dir]")
    parser.add_argument("--logs_dir",
                        default='logs',
                        help="ignored if output_dir is specified")
    parser.add_argument(
        "--output_dir",
        help=
        "output directory where json files, summary, model, gifs, etc are saved. "
        "default is logs_dir/model_fname, where model_fname consists of "
        "information from model and model_hparams")
    parser.add_argument(
        "--checkpoint",
        help=
        "directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)"
    )
    parser.add_argument("--resume",
                        action='store_true',
                        help='resume from latest checkpoint in output_dir.')

    parser.add_argument("--dataset", type=str, help="dataset class name")
    parser.add_argument(
        "--dataset_hparams",
        type=str,
        help="a string of comma separated list of dataset hyperparameters")
    parser.add_argument("--dataset_hparams_dict",
                        type=str,
                        help="a json file of dataset hyperparameters")
    parser.add_argument("--model", type=str, help="model class name")
    parser.add_argument(
        "--model_hparams",
        type=str,
        help="a string of comma separated list of model hyperparameters")
    parser.add_argument("--model_hparams_dict",
                        type=str,
                        help="a json file of model hyperparameters")

    parser.add_argument(
        "--summary_freq",
        type=int,
        default=1000,
        help=
        "save summaries (except for image and eval summaries) every summary_freq steps"
    )
    parser.add_argument(
        "--image_summary_freq",
        type=int,
        default=5000,
        help="save image summaries every image_summary_freq steps")
    parser.add_argument(
        "--eval_summary_freq",
        type=int,
        default=0,
        help="save eval summaries every eval_summary_freq steps")
    parser.add_argument("--progress_freq",
                        type=int,
                        default=100,
                        help="display progress every progress_freq steps")
    parser.add_argument("--metrics_freq",
                        type=int,
                        default=0,
                        help="run and display metrics every metrics_freq step")
    parser.add_argument(
        "--gif_freq",
        type=int,
        default=0,
        help="save gifs of predicted frames every gif_freq steps")
    parser.add_argument("--save_freq",
                        type=int,
                        default=5000,
                        help="save model every save_freq steps, 0 to disable")

    parser.add_argument("--gpu_mem_frac",
                        type=float,
                        default=0,
                        help="fraction of gpu memory to use")
    parser.add_argument("--seed", type=int)

    args = parser.parse_args()

    if args.seed is not None:
        tf.set_random_seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    if args.output_dir is None:
        list_depth = 0
        model_fname = ''
        for t in ('model=%s,%s' % (args.model, args.model_hparams)):
            if t == '[':
                list_depth += 1
            if t == ']':
                list_depth -= 1
            if list_depth and t == ',':
                t = '..'
            if t in '=,':
                t = '.'
            if t in '[]':
                t = ''
            model_fname += t
        args.output_dir = os.path.join(args.logs_dir, model_fname)

    if args.resume:
        if args.checkpoint:
            raise ValueError('resume and checkpoint cannot both be specified')
        args.checkpoint = args.output_dir

    dataset_hparams_dict = {}
    model_hparams_dict = {}
    if args.dataset_hparams_dict:
        with open(args.dataset_hparams_dict) as f:
            dataset_hparams_dict.update(json.loads(f.read()))
    if args.model_hparams_dict:
        with open(args.model_hparams_dict) as f:
            model_hparams_dict.update(json.loads(f.read()))
    if args.checkpoint:
        checkpoint_dir = os.path.normpath(args.checkpoint)
        if not os.path.exists(checkpoint_dir):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    checkpoint_dir)
        if not os.path.isdir(args.checkpoint):
            checkpoint_dir, _ = os.path.split(checkpoint_dir)
        with open(os.path.join(checkpoint_dir, "options.json")) as f:
            print("loading options from checkpoint %s" % args.checkpoint)
            options = json.loads(f.read())
            args.dataset = args.dataset or options['dataset']
            args.model = args.model or options['model']
        try:
            with open(os.path.join(checkpoint_dir,
                                   "dataset_hparams.json")) as f:
                dataset_hparams_dict.update(json.loads(f.read()))
        except FileNotFoundError:
            print(
                "dataset_hparams.json was not loaded because it does not exist"
            )
        try:
            with open(os.path.join(checkpoint_dir, "model_hparams.json")) as f:
                model_hparams_dict.update(json.loads(f.read()))
                model_hparams_dict.pop('num_gpus',
                                       None)  # backwards-compatibility
        except FileNotFoundError:
            print(
                "model_hparams.json was not loaded because it does not exist")

    print(
        '----------------------------------- Options ------------------------------------'
    )
    for k, v in args._get_kwargs():
        print(k, "=", v)
    print(
        '------------------------------------- End --------------------------------------'
    )

    VideoDataset = datasets.get_dataset_class(args.dataset)
    train_dataset = VideoDataset(args.input_dir,
                                 mode='train',
                                 hparams_dict=dataset_hparams_dict,
                                 hparams=args.dataset_hparams)
    val_input_dirs = args.val_input_dirs or [args.input_dir]
    val_datasets = [
        VideoDataset(val_input_dir,
                     mode='val',
                     hparams_dict=dataset_hparams_dict,
                     hparams=args.dataset_hparams)
        for val_input_dir in val_input_dirs
    ]
    if len(val_input_dirs) > 1:
        if isinstance(val_datasets[-1], datasets.KTHVideoDataset):
            val_datasets[-1].set_sequence_length(40)
        else:
            val_datasets[-1].set_sequence_length(30)

    def override_hparams_dict(dataset):
        hparams_dict = dict(model_hparams_dict)
        hparams_dict['context_frames'] = dataset.hparams.context_frames
        hparams_dict['sequence_length'] = dataset.hparams.sequence_length
        hparams_dict['repeat'] = dataset.hparams.time_shift
        return hparams_dict

    VideoPredictionModel = models.get_model_class(args.model)
    train_model = VideoPredictionModel(
        mode='train',
        hparams_dict=override_hparams_dict(train_dataset),
        hparams=args.model_hparams)
    val_models = [
        VideoPredictionModel(mode='val',
                             hparams_dict=override_hparams_dict(val_dataset),
                             hparams=args.model_hparams)
        for val_dataset in val_datasets
    ]

    batch_size = train_model.hparams.batch_size
    with tf.variable_scope('') as training_scope:
        train_model.build_graph(*train_dataset.make_batch(batch_size))
    for val_model, val_dataset in zip(val_models, val_datasets):
        with tf.variable_scope(training_scope, reuse=True):
            val_model.build_graph(*val_dataset.make_batch(batch_size))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    with open(os.path.join(args.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(args), sort_keys=True, indent=4))
    with open(os.path.join(args.output_dir, "dataset_hparams.json"), "w") as f:
        f.write(
            json.dumps(train_dataset.hparams.values(),
                       sort_keys=True,
                       indent=4))
    with open(os.path.join(args.output_dir, "model_hparams.json"), "w") as f:
        f.write(
            json.dumps(train_model.hparams.values(), sort_keys=True, indent=4))

    if args.gif_freq:
        val_model = val_models[0]
        val_tensors = OrderedDict()
        context_images = val_model.inputs['images'][:, :val_model.hparams.
                                                    context_frames]
        val_tensors['gen_images_vis'] = tf.concat(
            [context_images, val_model.gen_images], axis=1)
        if val_model.gen_images_enc is not None:
            val_tensors['gen_images_enc_vis'] = tf.concat(
                [context_images, val_model.gen_images_enc], axis=1)
        val_tensors.update({
            name: tensor
            for name, tensor in val_model.inputs.items()
            if tensor.shape.ndims >= 4
        })
        val_tensors['targets'] = val_model.targets
        val_tensors.update({
            name: tensor
            for name, tensor in val_model.outputs.items()
            if tensor.shape.ndims >= 4
        })
        val_tensor_clips = OrderedDict([
            (name, tf_utils.tensor_to_clip(output))
            for name, output in val_tensors.items()
        ])

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=50)
    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
    image_summaries = set(tf.get_collection(tf_utils.IMAGE_SUMMARIES))
    eval_summaries = set(tf.get_collection(tf_utils.EVAL_SUMMARIES))
    eval_image_summaries = image_summaries & eval_summaries
    image_summaries -= eval_image_summaries
    eval_summaries -= eval_image_summaries
    if args.summary_freq:
        summary_op = tf.summary.merge(summaries)
    if args.image_summary_freq:
        image_summary_op = tf.summary.merge(list(image_summaries))
    if args.eval_summary_freq:
        eval_summary_op = tf.summary.merge(list(eval_summaries))
        eval_image_summary_op = tf.summary.merge(list(eval_image_summaries))

    if args.summary_freq or args.image_summary_freq or args.eval_summary_freq:
        summary_writer = tf.summary.FileWriter(args.output_dir)

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=args.gpu_mem_frac)
    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    global_step = tf.train.get_or_create_global_step()
    max_steps = train_model.hparams.max_steps
    with tf.Session(config=config) as sess:
        print("parameter_count =", sess.run(parameter_count))

        sess.run(tf.global_variables_initializer())
        train_model.restore(sess, args.checkpoint)

        start_step = sess.run(global_step)
        # start at one step earlier to log everything without doing any training
        # step is relative to the start_step
        for step in range(-1, max_steps - start_step):
            if step == 0:
                start = time.time()

            def should(freq):
                return freq and ((step + 1) % freq == 0 or
                                 (step + 1) in (0, max_steps - start_step))

            fetches = {"global_step": global_step}
            if step >= 0:
                fetches["train_op"] = train_model.train_op

            if should(args.progress_freq):
                fetches['d_losses'] = train_model.d_losses
                fetches['g_losses'] = train_model.g_losses
                if isinstance(train_model.learning_rate, tf.Tensor):
                    fetches["learning_rate"] = train_model.learning_rate
            if should(args.metrics_freq):
                fetches['metrics'] = train_model.metrics
            if should(args.summary_freq):
                fetches["summary"] = summary_op
            if should(args.image_summary_freq):
                fetches["image_summary"] = image_summary_op
            if should(args.eval_summary_freq):
                fetches["eval_summary"] = eval_summary_op
                fetches["eval_image_summary"] = eval_image_summary_op

            run_start_time = time.time()
            results = sess.run(fetches)
            run_elapsed_time = time.time() - run_start_time
            if run_elapsed_time > 1.5:
                print('session.run took %0.1fs' % run_elapsed_time)

            if should(args.summary_freq):
                print("recording summary")
                summary_writer.add_summary(results["summary"],
                                           results["global_step"])
                print("done")
            if should(args.image_summary_freq):
                print("recording image summary")
                summary_writer.add_summary(
                    tf_utils.convert_tensor_to_gif_summary(
                        results["image_summary"]), results["global_step"])
                print("done")
            if should(args.eval_summary_freq):
                print("recording eval summary")
                summary_writer.add_summary(results["eval_summary"],
                                           results["global_step"])
                summary_writer.add_summary(
                    tf_utils.convert_tensor_to_gif_summary(
                        results["eval_image_summary"]), results["global_step"])
                print("done")
            if should(args.summary_freq) or should(
                    args.image_summary_freq) or should(args.eval_summary_freq):
                summary_writer.flush()
            if should(args.progress_freq):
                # global_step will have the correct step count if we resume from a checkpoint
                steps_per_epoch = math.ceil(
                    train_dataset.num_examples_per_epoch() / batch_size)
                train_epoch = math.ceil(results["global_step"] /
                                        steps_per_epoch)
                train_step = (results["global_step"] - 1) % steps_per_epoch + 1
                print("progress  global step %d  epoch %d  step %d" %
                      (results["global_step"], train_epoch, train_step))
                if step >= 0:
                    elapsed_time = time.time() - start
                    average_time = elapsed_time / (step + 1)
                    images_per_sec = batch_size / average_time
                    remaining_time = (max_steps -
                                      (start_step + step)) * average_time
                    print(
                        "          image/sec %0.1f  remaining %dm (%0.1fh) (%0.1fd)"
                        %
                        (images_per_sec, remaining_time / 60, remaining_time /
                         60 / 60, remaining_time / 60 / 60 / 24))

                for name, loss in itertools.chain(results['d_losses'].items(),
                                                  results['g_losses'].items()):
                    print(name, loss)
                if isinstance(train_model.learning_rate, tf.Tensor):
                    print("learning_rate", results["learning_rate"])
            if should(args.metrics_freq):
                for name, metric in results['metrics'].items():
                    print(name, metric)

            if should(args.save_freq):
                print("saving model to", args.output_dir)
                saver.save(sess,
                           os.path.join(args.output_dir, "model"),
                           global_step=global_step)
                print("done")

            if should(args.gif_freq):
                image_dir = os.path.join(args.output_dir, 'images')
                if not os.path.exists(image_dir):
                    os.makedirs(image_dir)

                gif_clips = sess.run(val_tensor_clips)
                gif_step = results["global_step"]
                for name, clip in gif_clips.items():
                    filename = "%08d-%s.gif" % (gif_step, name)
                    print("saving gif to", os.path.join(image_dir, filename))
                    ffmpeg_gif.save_gif(os.path.join(image_dir, filename),
                                        clip,
                                        fps=4)
                    print("done")
Example #50
0
                self.detach(sr_uuid)
                raise xs_errors.XenError('SRExists')
        else:
            try:
                util.ioretry(lambda: util.makedirs(self.linkpath))
                os.symlink(self.linkpath, self.path)
            except util.CommandException, inst:
                if inst.code != errno.EEXIST:
                    try:
                        self.unmount(self.mountpoint, True)
                    except GlusterFSException:
                        util.logException('GlusterFSSR.unmount()')
                    raise SR.SROSError(
                        116,
                        "Failed to create GlusterFS SR. remote directory creation error: {}"
                        .format(os.strerror(inst.code)))
        self.detach(sr_uuid)

    def delete(self, sr_uuid):
        # try to remove/delete non VDI contents first
        super(GlusterFSSR, self).delete(sr_uuid)
        try:
            if self.checkmount():
                self.detach(sr_uuid)
            self.mount()
            if util.ioretry(lambda: util.pathexists(self.linkpath)):
                util.ioretry(lambda: os.rmdir(self.linkpath))
            self.unmount(self.mountpoint, True)
        except util.CommandException, inst:
            self.detach(sr_uuid)
            if inst.code != errno.ENOENT:
Example #51
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir",
                        type=str,
                        required=True,
                        help="either a directory containing subdirectories "
                        "train, val, test, etc, or a directory containing "
                        "the tfrecords")
    parser.add_argument(
        "--val_input_dir",
        type=str,
        help="directories containing the tfrecords. default: input_dir")
    parser.add_argument("--logs_dir",
                        default='logs',
                        help="ignored if output_dir is specified")
    parser.add_argument(
        "--output_dir",
        help=
        "output directory where json files, summary, model, gifs, etc are saved. "
        "default is logs_dir/model_fname, where model_fname consists of "
        "information from model and model_hparams")
    parser.add_argument("--output_dir_postfix", default="")
    parser.add_argument(
        "--checkpoint",
        help=
        "directory with checkpoint or checkpoint name (e.g. checkpoint_dir/model-200000)"
    )
    parser.add_argument("--resume",
                        action='store_true',
                        help='resume from latest checkpoint in output_dir.')

    parser.add_argument("--dataset", type=str, help="dataset class name")
    parser.add_argument(
        "--dataset_hparams",
        type=str,
        help="a string of comma separated list of dataset hyperparameters")
    parser.add_argument("--dataset_hparams_dict",
                        type=str,
                        help="a json file of dataset hyperparameters")
    parser.add_argument("--model", type=str, help="model class name")
    parser.add_argument(
        "--model_hparams",
        type=str,
        help="a string of comma separated list of model hyperparameters")
    parser.add_argument("--model_hparams_dict",
                        type=str,
                        help="a json file of model hyperparameters")

    parser.add_argument(
        "--summary_freq",
        type=int,
        default=1000,
        help=
        "save frequency of summaries (except for image and eval summaries) for train/validation set"
    )
    parser.add_argument(
        "--image_summary_freq",
        type=int,
        default=5000,
        help="save frequency of image summaries for train/validation set")
    parser.add_argument(
        "--eval_summary_freq",
        type=int,
        default=25000,
        help="save frequency of eval summaries for train/validation set")
    parser.add_argument(
        "--accum_eval_summary_freq",
        type=int,
        default=100000,
        help=
        "save frequency of accumulated eval summaries for validation set only")
    parser.add_argument("--progress_freq",
                        type=int,
                        default=100,
                        help="display progress every progress_freq steps")
    parser.add_argument("--save_freq",
                        type=int,
                        default=5000,
                        help="save frequence of model, 0 to disable")

    parser.add_argument(
        "--aggregate_nccl",
        type=int,
        default=0,
        help=
        "whether to use nccl or cpu for gradient aggregation in multi-gpu training"
    )
    parser.add_argument("--gpu_mem_frac",
                        type=float,
                        default=0,
                        help="fraction of gpu memory to use")
    parser.add_argument("--seed", type=int)

    args = parser.parse_args()

    if args.seed is not None:
        tf.set_random_seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    if args.output_dir is None:
        list_depth = 0
        model_fname = ''
        for t in ('model=%s,%s' % (args.model, args.model_hparams)):
            if t == '[':
                list_depth += 1
            if t == ']':
                list_depth -= 1
            if list_depth and t == ',':
                t = '..'
            if t in '=,':
                t = '.'
            if t in '[]':
                t = ''
            model_fname += t
        args.output_dir = os.path.join(args.logs_dir,
                                       model_fname) + args.output_dir_postfix

    if args.resume:
        if args.checkpoint:
            raise ValueError('resume and checkpoint cannot both be specified')
        args.checkpoint = args.output_dir

    dataset_hparams_dict = {}
    model_hparams_dict = {}
    if args.dataset_hparams_dict:
        with open(args.dataset_hparams_dict) as f:
            dataset_hparams_dict.update(json.loads(f.read()))
    if args.model_hparams_dict:
        with open(args.model_hparams_dict) as f:
            model_hparams_dict.update(json.loads(f.read()))
    if args.checkpoint:
        checkpoint_dir = os.path.normpath(args.checkpoint)
        if not os.path.isdir(args.checkpoint):
            checkpoint_dir, _ = os.path.split(checkpoint_dir)
        if not os.path.exists(checkpoint_dir):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    checkpoint_dir)
        with open(os.path.join(checkpoint_dir, "options.json")) as f:
            print("loading options from checkpoint %s" % args.checkpoint)
            options = json.loads(f.read())
            args.dataset = args.dataset or options['dataset']
            args.model = args.model or options['model']
        try:
            with open(os.path.join(checkpoint_dir,
                                   "dataset_hparams.json")) as f:
                dataset_hparams_dict.update(json.loads(f.read()))
        except FileNotFoundError:
            print(
                "dataset_hparams.json was not loaded because it does not exist"
            )
        try:
            with open(os.path.join(checkpoint_dir, "model_hparams.json")) as f:
                model_hparams_dict.update(json.loads(f.read()))
        except FileNotFoundError:
            print(
                "model_hparams.json was not loaded because it does not exist")

    print(
        '----------------------------------- Options ------------------------------------'
    )
    for k, v in args._get_kwargs():
        print(k, "=", v)
    print(
        '------------------------------------- End --------------------------------------'
    )

    VideoDataset = datasets.get_dataset_class(args.dataset)
    train_dataset = VideoDataset(args.input_dir,
                                 mode='train',
                                 hparams_dict=dataset_hparams_dict,
                                 hparams=args.dataset_hparams)
    val_dataset = VideoDataset(args.val_input_dir or args.input_dir,
                               mode='val',
                               hparams_dict=dataset_hparams_dict,
                               hparams=args.dataset_hparams)
    if val_dataset.hparams.long_sequence_length != val_dataset.hparams.sequence_length:
        # the longer dataset is only used for the accum_eval_metrics
        long_val_dataset = VideoDataset(args.val_input_dir or args.input_dir,
                                        mode='val',
                                        hparams_dict=dataset_hparams_dict,
                                        hparams=args.dataset_hparams)
        long_val_dataset.set_sequence_length(
            val_dataset.hparams.long_sequence_length)
    else:
        long_val_dataset = None

    variable_scope = tf.get_variable_scope()
    variable_scope.set_use_resource(True)

    VideoPredictionModel = models.get_model_class(args.model)
    hparams_dict = dict(model_hparams_dict)
    hparams_dict.update({
        'context_frames': train_dataset.hparams.context_frames,
        'sequence_length': train_dataset.hparams.sequence_length,
        'repeat': train_dataset.hparams.time_shift,
    })
    model = VideoPredictionModel(hparams_dict=hparams_dict,
                                 hparams=args.model_hparams,
                                 aggregate_nccl=args.aggregate_nccl)

    batch_size = model.hparams.batch_size
    train_tf_dataset = train_dataset.make_dataset(batch_size)
    train_iterator = train_tf_dataset.make_one_shot_iterator()
    train_handle = train_iterator.string_handle()
    val_tf_dataset = val_dataset.make_dataset(batch_size)
    val_iterator = val_tf_dataset.make_one_shot_iterator()
    val_handle = val_iterator.string_handle()
    iterator = tf.data.Iterator.from_string_handle(
        train_handle, train_tf_dataset.output_types,
        train_tf_dataset.output_shapes)
    inputs = iterator.get_next()

    # inputs comes from the training dataset by default, unless train_handle is remapped to the val_handles
    model.build_graph(inputs)

    if long_val_dataset is not None:
        # separately build a model for the longer sequence.
        # this is needed because the model doesn't support dynamic shapes.
        long_hparams_dict = dict(hparams_dict)
        long_hparams_dict[
            'sequence_length'] = long_val_dataset.hparams.sequence_length
        # use smaller batch size for longer model to prevent running out of memory
        long_hparams_dict['batch_size'] = model.hparams.batch_size // 2
        long_model = VideoPredictionModel(
            mode="test",  # to not build the losses and discriminators
            hparams_dict=long_hparams_dict,
            hparams=args.model_hparams,
            aggregate_nccl=args.aggregate_nccl)
        tf.get_variable_scope().reuse_variables()
        long_model.build_graph(long_val_dataset.make_batch(batch_size))
    else:
        long_model = None

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    with open(os.path.join(args.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(args), sort_keys=True, indent=4))
    with open(os.path.join(args.output_dir, "dataset_hparams.json"), "w") as f:
        f.write(
            json.dumps(train_dataset.hparams.values(),
                       sort_keys=True,
                       indent=4))
    with open(os.path.join(args.output_dir, "model_hparams.json"), "w") as f:
        f.write(json.dumps(model.hparams.values(), sort_keys=True, indent=4))

    with tf.name_scope("parameter_count"):
        # exclude trainable variables that are replicas (used in multi-gpu setting)
        trainable_variables = set(tf.trainable_variables()) & set(
            model.saveable_variables)
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in trainable_variables])

    saver = tf.train.Saver(var_list=model.saveable_variables, max_to_keep=2)

    # None has the special meaning of evaluating at the end, so explicitly check for non-equality to zero
    if (args.summary_freq != 0 or args.image_summary_freq != 0
            or args.eval_summary_freq != 0
            or args.accum_eval_summary_freq != 0):
        summary_writer = tf.summary.FileWriter(args.output_dir)

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=args.gpu_mem_frac)
    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    global_step = tf.train.get_or_create_global_step()
    max_steps = model.hparams.max_steps
    with tf.Session(config=config) as sess:
        print("parameter_count =", sess.run(parameter_count))

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.restore(sess, args.checkpoint)
        sess.run(model.post_init_ops)
        val_handle_eval = sess.run(val_handle)
        sess.graph.finalize()

        start_step = sess.run(global_step)

        def should(step, freq):
            if freq is None:
                return (step + 1) == (max_steps - start_step)
            else:
                return freq and ((step + 1) % freq == 0 or
                                 (step + 1) in (0, max_steps - start_step))

        def should_eval(step, freq):
            # never run eval summaries at the beginning since it's expensive, unless it's the last iteration
            return should(step,
                          freq) and (step >= 0 or
                                     (step + 1) == (max_steps - start_step))

        # start at one step earlier to log everything without doing any training
        # step is relative to the start_step
        for step in range(-1, max_steps - start_step):
            if step == 1:
                # skip step -1 and 0 for timing purposes (for warmstarting)
                start_time = time.time()

            fetches = {"global_step": global_step}
            if step >= 0:
                fetches["train_op"] = model.train_op
            if should(step, args.progress_freq):
                fetches['d_loss'] = model.d_loss
                fetches['g_loss'] = model.g_loss
                fetches['d_losses'] = model.d_losses
                fetches['g_losses'] = model.g_losses
                if isinstance(model.learning_rate, tf.Tensor):
                    fetches["learning_rate"] = model.learning_rate
            if should(step, args.summary_freq):
                fetches["summary"] = model.summary_op
            if should(step, args.image_summary_freq):
                fetches["image_summary"] = model.image_summary_op
            if should_eval(step, args.eval_summary_freq):
                fetches["eval_summary"] = model.eval_summary_op

            run_start_time = time.time()
            results = sess.run(fetches)
            run_elapsed_time = time.time() - run_start_time
            if run_elapsed_time > 1.5 and step > 0 and set(
                    fetches.keys()) == {"global_step", "train_op"}:
                print('running train_op took too long (%0.1fs)' %
                      run_elapsed_time)

            if (should(step, args.summary_freq)
                    or should(step, args.image_summary_freq)
                    or should_eval(step, args.eval_summary_freq)):
                val_fetches = {"global_step": global_step}
                if should(step, args.summary_freq):
                    val_fetches["summary"] = model.summary_op
                if should(step, args.image_summary_freq):
                    val_fetches["image_summary"] = model.image_summary_op
                if should_eval(step, args.eval_summary_freq):
                    val_fetches["eval_summary"] = model.eval_summary_op
                val_results = sess.run(
                    val_fetches, feed_dict={train_handle: val_handle_eval})
                for name, summary in val_results.items():
                    if name == 'global_step':
                        continue
                    val_results[name] = add_tag_suffix(summary, '_1')

            if should(step, args.summary_freq):
                print("recording summary")
                summary_writer.add_summary(results["summary"],
                                           results["global_step"])
                summary_writer.add_summary(val_results["summary"],
                                           val_results["global_step"])
                print("done")
            if should(step, args.image_summary_freq):
                print("recording image summary")
                summary_writer.add_summary(results["image_summary"],
                                           results["global_step"])
                summary_writer.add_summary(val_results["image_summary"],
                                           val_results["global_step"])
                print("done")
            if should_eval(step, args.eval_summary_freq):
                print("recording eval summary")
                summary_writer.add_summary(results["eval_summary"],
                                           results["global_step"])
                summary_writer.add_summary(val_results["eval_summary"],
                                           val_results["global_step"])
                print("done")
            if should_eval(step, args.accum_eval_summary_freq):
                val_datasets = [val_dataset]
                val_models = [model]
                if long_model is not None:
                    val_datasets.append(long_val_dataset)
                    val_models.append(long_model)
                for i, (val_dataset_,
                        val_model) in enumerate(zip(val_datasets, val_models)):
                    sess.run(val_model.accum_eval_metrics_reset_op)
                    # traverse the whole validation dataset (roughly, up to rounding from the batch size)
                    accum_eval_summary_num_updates = val_dataset_.num_examples_per_epoch(
                    ) // val_model.hparams.batch_size
                    val_fetches = {
                        "global_step": global_step,
                        "accum_eval_summary": val_model.accum_eval_summary_op
                    }
                    for update_step in range(accum_eval_summary_num_updates):
                        print(
                            'evaluating %d / %d' %
                            (update_step + 1, accum_eval_summary_num_updates))
                        val_results = sess.run(
                            val_fetches,
                            feed_dict={train_handle: val_handle_eval})
                    accum_eval_summary = add_tag_suffix(
                        val_results["accum_eval_summary"], '_%d' % (i + 1))
                    print("recording accum eval summary")
                    summary_writer.add_summary(accum_eval_summary,
                                               val_results["global_step"])
                    print("done")
            if (should(step, args.summary_freq)
                    or should(step, args.image_summary_freq)
                    or should_eval(step, args.eval_summary_freq)
                    or should_eval(step, args.accum_eval_summary_freq)):
                summary_writer.flush()
            if should(step, args.progress_freq):
                # global_step will have the correct step count if we resume from a checkpoint
                # global step is read before it's incremented
                steps_per_epoch = train_dataset.num_examples_per_epoch(
                ) / batch_size
                train_epoch = results["global_step"] / steps_per_epoch
                print("progress  global step %d  epoch %0.1f" %
                      (results["global_step"] + 1, train_epoch))
                if step > 0:
                    elapsed_time = time.time() - start_time
                    average_time = elapsed_time / step
                    images_per_sec = batch_size / average_time
                    remaining_time = (max_steps -
                                      (start_step + step + 1)) * average_time
                    print(
                        "          image/sec %0.1f  remaining %dm (%0.1fh) (%0.1fd)"
                        %
                        (images_per_sec, remaining_time / 60, remaining_time /
                         60 / 60, remaining_time / 60 / 60 / 24))

                if results['d_losses']:
                    print("d_loss", results["d_loss"])
                for name, loss in results['d_losses'].items():
                    print("  ", name, loss)
                if results['g_losses']:
                    print("g_loss", results["g_loss"])
                for name, loss in results['g_losses'].items():
                    print("  ", name, loss)
                if isinstance(model.learning_rate, tf.Tensor):
                    print("learning_rate", results["learning_rate"])

            if should(step, args.save_freq):
                print("saving model to", args.output_dir)
                saver.save(sess,
                           os.path.join(args.output_dir, "model"),
                           global_step=global_step)
                print("done")
Example #52
0
    def runbgcommand(
        cmd,
        env,
        shell=False,
        stdout=None,
        stderr=None,
        ensurestart=True,
        record_wait=None,
        stdin_bytes=None,
    ):
        """Spawn a command without waiting for it to finish.


        When `record_wait` is not None, the spawned process will not be fully
        detached and the `record_wait` argument will be called with the
        `Subprocess.wait` function for the spawned process.  This is mostly
        useful for developers that need to make sure the spawned process
        finished before a certain point (e.g. when writing tests)."""
        if pycompat.isdarwin:
            # avoid crash in CoreFoundation in case another thread
            # calls gui() while we're calling fork().
            gui()

        # double-fork to completely detach from the parent process
        # based on http://code.activestate.com/recipes/278731
        if record_wait is None:
            pid = os.fork()
            if pid:
                if not ensurestart:
                    # Even though we're not waiting on the child process,
                    # we still must call waitpid() on it at some point so
                    # it's not a zombie/defunct. This is especially relevant for
                    # chg since the parent process won't die anytime soon.
                    # We use a thread to make the overhead tiny.
                    def _do_wait():
                        os.waitpid(pid, 0)

                    t = threading.Thread(target=_do_wait)
                    t.daemon = True
                    t.start()
                    return
                # Parent process
                (_pid, status) = os.waitpid(pid, 0)
                if os.WIFEXITED(status):
                    returncode = os.WEXITSTATUS(status)
                else:
                    returncode = -(os.WTERMSIG(status))
                if returncode != 0:
                    # The child process's return code is 0 on success, an errno
                    # value on failure, or 255 if we don't have a valid errno
                    # value.
                    #
                    # (It would be slightly nicer to return the full exception info
                    # over a pipe as the subprocess module does.  For now it
                    # doesn't seem worth adding that complexity here, though.)
                    if returncode == 255:
                        returncode = errno.EINVAL
                    raise OSError(
                        returncode,
                        b'error running %r: %s' %
                        (cmd, os.strerror(returncode)),
                    )
                return

        returncode = 255
        try:
            if record_wait is None:
                # Start a new session
                os.setsid()
            # connect stdin to devnull to make sure the subprocess can't
            # muck up that stream for mercurial.
            if stdin_bytes is None:
                stdin = open(os.devnull, b'r')
            else:
                stdin = pycompat.unnamedtempfile()
                stdin.write(stdin_bytes)
                stdin.flush()
                stdin.seek(0)

            if stdout is None:
                stdout = open(os.devnull, b'w')
            if stderr is None:
                stderr = open(os.devnull, b'w')

            p = subprocess.Popen(
                cmd,
                shell=shell,
                env=env,
                close_fds=True,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr,
            )
            if record_wait is not None:
                record_wait(p.wait)
            returncode = 0
        except EnvironmentError as ex:
            returncode = ex.errno & 0xFF
            if returncode == 0:
                # This shouldn't happen, but just in case make sure the
                # return code is never 0 here.
                returncode = 255
        except Exception:
            returncode = 255
        finally:
            # mission accomplished, this child needs to exit and not
            # continue the hg process here.
            stdin.close()
            if record_wait is None:
                os._exit(returncode)
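A reduced sketch of the convention used above, where the child's exit status encodes an errno that is surfaced through os.strerror (simplified from the original, using str instead of bytes):

import errno
import os

def raise_from_child_status(cmd, status):
    # interpret a waitpid() status whose exit code encodes an errno value
    if os.WIFEXITED(status):
        returncode = os.WEXITSTATUS(status)
    else:
        returncode = -os.WTERMSIG(status)
    if returncode != 0:
        if returncode == 255:   # 255 means no usable errno was available
            returncode = errno.EINVAL
        raise OSError(returncode,
                      'error running %r: %s' % (cmd, os.strerror(returncode)))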
Example #53
0
    def disconnected(self, now, error):
        """Tell this FSM that the connection dropped or that a connection
        attempt failed.  'error' specifies the reason: a positive value
        represents an errno value, EOF indicates that the connection was closed
        by the peer (e.g. read() returned 0), and 0 indicates no specific
        error.

        The FSM will back off, then reconnect."""
        if self.state not in (Reconnect.Backoff, Reconnect.Void):
            # Report what happened
            if self.state in (Reconnect.Active, Reconnect.Idle):
                if error > 0:
                    logging.warning("%s: connection dropped (%s)" %
                                    (self.name, os.strerror(error)))
                elif error == EOF:
                    self.info_level("%s: connection closed by peer" %
                                    self.name)
                else:
                    self.info_level("%s: connection dropped" % self.name)
            elif self.state == Reconnect.Listening:
                if error > 0:
                    logging.warning(
                        "%s: error listening for connections (%s)" %
                        (self.name, os.strerror(error)))
                else:
                    self.info_level("%s: error listening for connections" %
                                    self.name)
            else:
                if self.passive:
                    type_ = "listen"
                else:
                    type_ = "connection"
                if error > 0:
                    logging.warning("%s: %s attempt failed (%s)" %
                                    (self.name, type_, os.strerror(error)))
                else:
                    self.info_level("%s: %s attempt timed out" %
                                    (self.name, type_))

            if (self.state in (Reconnect.Active, Reconnect.Idle)):
                self.last_disconnected = now

            # Back off
            if (self.state in (Reconnect.Active, Reconnect.Idle) and
                (self.last_received - self.last_connected >= self.backoff
                 or self.passive)):
                if self.passive:
                    self.backoff = 0
                else:
                    self.backoff = self.min_backoff
            else:
                if self.backoff < self.min_backoff:
                    self.backoff = self.min_backoff
                elif self.backoff >= self.max_backoff / 2:
                    self.backoff = self.max_backoff
                else:
                    self.backoff *= 2

                if self.passive:
                    self.info_level("%s: waiting %.3g seconds before trying "
                                    "to listen again" %
                                    (self.name, self.backoff / 1000.0))
                else:
                    self.info_level(
                        "%s: waiting %.3g seconds before reconnect" %
                        (self.name, self.backoff / 1000.0))

            if self.__may_retry():
                self._transition(now, Reconnect.Backoff)
            else:
                self._transition(now, Reconnect.Void)
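A reduced sketch of the error-reporting branch above (EOF here is an assumed sentinel, distinct from any positive errno, marking a clean close by the peer):

import errno
import os

EOF = -1   # assumed sentinel: peer closed the connection (read() returned 0)

def describe_disconnect(error):
    if error > 0:
        return "connection dropped (%s)" % os.strerror(error)
    elif error == EOF:
        return "connection closed by peer"
    else:
        return "connection dropped"

print(describe_disconnect(errno.ECONNRESET))   # connection dropped (Connection reset by peer)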
Example #54
0
    def test_CCompilationHandler_results_zip_appender(self):
        test_compression = ZIP_DEFLATED
        test_compresslevel = 6

        with open(file=self.test_working_directory + "/" + self.test_html_name,
                  mode="w"):
            pass

        if not os.path.isfile(self.test_working_directory + "/" +
                              self.test_html_name):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT),
                self.test_working_directory + "/" + self.test_html_name)

        with open(file=self.test_working_directory + "/" + self.test_js_name,
                  mode="w"):
            pass

        if not os.path.isfile(self.test_working_directory + "/" +
                              self.test_js_name):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT),
                self.test_working_directory + "/" + self.test_js_name)

        with open(file=self.test_working_directory + "/" + self.test_wasm_name,
                  mode="w"):
            pass

        if not os.path.isfile(self.test_working_directory + "/" +
                              self.test_wasm_name):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT),
                self.test_working_directory + "/" + self.test_wasm_name)

        with ZipFile(file=self.test_working_directory + "/" +
                     self.test_zip_name,
                     mode="w",
                     compression=test_compression,
                     compresslevel=test_compresslevel) as test_zip:
            self.assertEqual(test_zip.namelist(), [])

        if not os.path.isfile(self.test_working_directory + "/" +
                              self.test_zip_name):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT),
                self.test_working_directory + "/" + self.test_zip_name)

        result = self.handler_c.results_zip_appender(
            working_directory=self.test_working_directory,
            results_zip_name=self.test_zip_name,
            output_filename="test_output",
            compression=test_compression,
            compresslevel=test_compresslevel)

        self.assertIsNone(result)

        with ZipFile(file=self.test_working_directory + "/" +
                     self.test_zip_name,
                     mode="r") as test_zip:
            self.assertEqual(
                test_zip.namelist(),
                [self.test_html_name, self.test_js_name, self.test_wasm_name])
Example #55
0
 def cb_env_err():
     err = errno.EIO
     raise EnvironmentError(err, os.strerror(err), "Test Error")
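A sketch of what a caller of a callback like the one above would observe: EnvironmentError (an alias of OSError on Python 3) exposes the errno, the strerror text, and the third argument as the filename:

import errno
import os

def cb_env_err():
    err = errno.EIO
    raise EnvironmentError(err, os.strerror(err), "Test Error")

try:
    cb_env_err()
except EnvironmentError as e:
    print(e.errno, e.strerror, e.filename)   # 5 Input/output error Test Error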
Example #56
0
def check_loaded_dataset(work_dir='./', print_msg=True, relpath=False):
    """Check the result of loading data for the following two rules:
        1. file existence
        2. file attribute readability

    Parameters: work_dir  : string, MintPy working directory
                print_msg : bool, print out message
    Returns:    True, if all required files and datasets exist; otherwise, an error is raised.
                    If True, the PROCESS and SLC folders could be removed.
                stack_file  :
                geom_file   :
                lookup_file :
    Example:    work_dir = os.path.expandvars('./FernandinaSenDT128/mintpy')
                ut.check_loaded_dataset(work_dir)
    """
    load_complete = True

    if not work_dir:
        work_dir = os.getcwd()
    work_dir = os.path.abspath(work_dir)

    # 1. interferograms stack file: unwrapPhase, coherence
    flist = [os.path.join(work_dir, 'inputs/ifgramStack.h5')]
    stack_file = is_file_exist(flist, abspath=True)
    if stack_file is not None:
        obj = ifgramStack(stack_file)
        obj.open(print_msg=False)
        for dname in ['unwrapPhase', 'coherence']:
            if dname not in obj.datasetNames and 'azimuthOffset' not in obj.datasetNames:
                raise ValueError(
                    'required dataset "{}" is missing in file {}'.format(
                        dname, stack_file))
    else:
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                './inputs/ifgramStack.h5')

    atr = readfile.read_attribute(stack_file)

    # 2. geom_file: height
    if 'X_FIRST' in atr.keys():
        flist = [os.path.join(work_dir, 'inputs/geometryGeo.h5')]
    else:
        flist = [os.path.join(work_dir, 'inputs/geometryRadar.h5')]
    geom_file = is_file_exist(flist, abspath=True)
    if geom_file is not None:
        obj = geometry(geom_file)
        obj.open(print_msg=False)
        dname = geometryDatasetNames[0]
        if dname not in obj.datasetNames:
            raise ValueError(
                'required dataset "{}" is missing in file {}'.format(
                    dname, geom_file))
    else:
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                './inputs/geometry*.h5')

    # 3. lookup_file: latitude,longitude or rangeCoord,azimuthCoord
    # could be different than geometry file in case of roipac and gamma
    flist = [os.path.join(work_dir, 'inputs/geometry*.h5')]
    lookup_file = get_lookup_file(flist, abspath=True, print_msg=print_msg)
    if 'X_FIRST' not in atr.keys():
        if lookup_file is not None:
            obj = geometry(lookup_file)
            obj.open(print_msg=False)

            if atr['PROCESSOR'] in ['isce', 'doris']:
                dnames = geometryDatasetNames[1:3]
            elif atr['PROCESSOR'] in ['gamma', 'roipac']:
                dnames = geometryDatasetNames[3:5]
            else:
                raise AttributeError('InSAR processor: {}'.format(
                    atr['PROCESSOR']))

            for dname in dnames:
                if dname not in obj.datasetNames:
                    load_complete = False
                    raise Exception(
                        'required dataset "{}" is missing in file {}'.format(
                            dname, lookup_file))
        else:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    './inputs/geometry*.h5')
    else:
        print("Input data seems to be geocoded. Lookup file not needed.")

    if relpath:
        stack_file = os.path.relpath(stack_file) if stack_file else stack_file
        geom_file = os.path.relpath(geom_file) if geom_file else geom_file
        lookup_file = os.path.relpath(
            lookup_file) if lookup_file else lookup_file

    # print message
    if print_msg:
        print(('Loaded dataset was processed by '
               'InSAR software: {}'.format(atr['PROCESSOR'])))
        if 'X_FIRST' in atr.keys():
            print('Loaded dataset is in GEO coordinates')
        else:
            print('Loaded dataset is in RADAR coordinates')
        print('Interferograms Stack: {}'.format(stack_file))
        print('Geometry File       : {}'.format(geom_file))
        print('Lookup Table File   : {}'.format(lookup_file))
        if load_complete:
            print('-' * 50)
            print(
                'All data needed found/loaded/copied. Processed 2-pass InSAR data can be removed.'
            )
        print('-' * 50)

    return load_complete, stack_file, geom_file, lookup_file
Example #57
0
from os import strerror
from sys import stdin

try:
    s = open("text.txt", "rt", encoding="utf-8")
    ch = s.read(1)
    while ch != "":
        print(ch, end="")
        ch = s.read(1)
    s.close()
except IOError as e:
    print("Oooooh noooo: ", strerror(e.errno))

try:
    s = open("text.txt", "rt", encoding="utf-8")
    content = s.readlines(200)
    print(content)
    s.close()
except IOError as e:
    print("Oooooh noooo: ", strerror(e.errno))

try:
    for line in open("text.txt", "rt", encoding="utf-8"):
        print(line, end="")
except IOError as e:
    print("Oooooh noooo: ", strerror(e.errno))

try:
    fo = open("newfile.txt", mode="wt", encoding="utf-8")
    for i in range(10):
        n = i + 1
Example #58
0
 def accept(oself):
     acceptCalls[0] += 1
     if acceptCalls[0] > maximumNumberOfAccepts:
         self.fail("Maximum number of accept calls exceeded.")
     raise socket.error(EPERM, os.strerror(EPERM))
Example #59
0
 def accept(self):
     raise socket.error(socketErrorNumber,
                        os.strerror(socketErrorNumber))
Example #60
0
 def chdir(self, path):
     path = self.normpath(path)
     if not self.isdir(path):
         raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
     self.cwd = path
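For reference, a short sketch of how the three-argument OSError maps onto attributes when the conventional (errno, strerror, filename) order is used; note the example above passes the path as the second argument, so it lands in .strerror instead:

import errno
import os

try:
    raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), "/no/such/dir")
except OSError as e:
    print(e.errno)      # 2
    print(e.strerror)   # No such file or directory
    print(e.filename)   # /no/such/dir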