def chdir(self, path):
    if path.startswith(self.root):
        _, bucket, obj = self.parse_fspath(path)
        if not bucket:
            if self.isdir(path) and self.exists(path):
                self._cwd = self.fs2ftp(path)
            else:
                raise OSError(
                    550, 'Failed to change directory: Path is not a dir: ' + path)
            return
        if not obj:
            try:
                operations.connection.get_bucket(bucket)
                self._cwd = self.fs2ftp(path)
                return
            except S3ResponseError:
                raise OSError(550, 'Failed to change directory.')
        raise OSError(550, 'Path is not a dir: ' + obj)
    ftpserver.logerror('Cannot chdir outside of root (%s) to %s' % (self.root, path))
    raise OSError(1, 'Operation not permitted')
def listdir(self, path):
    """List the content of a directory, as a list of strings."""
    try:
        _, bucket_name, key_name = self.parse_fspath(path)
    except ValueError:
        raise OSError(2, 'No such file or directory')
    if not bucket_name and not key_name:
        buckets = self.get_all_buckets()
        return map(lambda bucket: asciify(bucket.name), buckets)
    if bucket_name and not key_name:
        try:
            bucket = operations.connection.get_bucket(bucket_name)
            # BEWARE! Since S3 does not have native directories,
            # this bucket can have arbitrarily many elements.
            # Do NOT convert it to a list!
            # FIXME: implement conventional "virtual directory" support:
            # list only the set of unique prefixes ("virtual directories"),
            # as sketched after this function.
            # http://boto.s3.amazonaws.com/ref/s3.html
            count = 0
            objects_limited = []
            for obj in bucket.list(delimiter=cloud_sep):
                count += 1
                if count > self.MAX_OBJECTS:
                    ftpserver.logerror(
                        "Too many items to list! Stopping at #%d" % self.MAX_OBJECTS)
                    break
                objects_limited.append(obj)
            return map(lambda obj: asciify(obj.name), objects_limited)
        except:
            raise OSError(2, 'No such file or directory')
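# A minimal sketch (not part of the original code) of the "virtual directory"
# listing the FIXME above refers to. When bucket.list() is given a delimiter,
# boto yields boto.s3.prefix.Prefix entries for each unique key prefix, so a
# listing rooted at a prefix returns one entry per immediate child instead of
# every key in the bucket. The helper name and key_prefix argument are
# assumptions for illustration only.
def list_virtual_directory(bucket, key_prefix=''):
    """Return the immediate children of key_prefix: plain keys plus one
    entry per unique sub-prefix ("virtual directory")."""
    entries = []
    for item in bucket.list(prefix=key_prefix, delimiter=cloud_sep):
        # Key instances are objects; Prefix instances stand in for directories.
        entries.append(asciify(item.name))
    return entries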
def stat(self, path):
    st_mode = FULL_CONTROL_MODE_FLAG
    _, bucket_name, key_name = self.parse_fspath(path)
    # Hack together a stat result.
    st_size = 0
    st_mtime = datetime.datetime(
        *time.strptime("1970-01-01", "%Y-%m-%d")[0:6])
    try:
        if not key_name:
            # Bucket: return a part-bogus stat with the data we do have.
            st_mode = st_mode | DIR_MODE_FLAG
        else:
            # Key.
            bucket = operations.connection.get_bucket(bucket_name)
            if key_name[-1] == cloud_sep:
                # Virtual directory for hierarchical key.
                st_mode = st_mode | DIR_MODE_FLAG
            else:
                obj = bucket.get_key(key_name)
                # Work around os.sep mismatches in the key name.
                if obj is None:
                    obj = bucket.get_key(key_name.replace(cloud_sep, os.sep))
                if obj is None:
                    ftpserver.logerror(
                        "Cannot find object for path %s, key %s in bucket %s" %
                        (path, key_name, bucket_name))
                    raise OSError(2, 'No such file or directory')
                st_size = obj.size
        # FIXME: fill in more stats (mtime); see the sketch after this function.
        return os.stat_result([st_mode, 0, 0, 0, 0, 0, st_size, 0, 0, 0])
    except Exception, e:
        ftpserver.logerror("Failed stat(%s) %s %s: %s" %
                           (path, bucket_name, key_name, e))
        raise OSError(2, 'No such file or directory')
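# A hedged sketch (an assumption, not the original code) of how the mtime
# FIXME above could be addressed: keys fetched with bucket.get_key() expose
# last_modified as an RFC 2822 timestamp, which can be converted to the epoch
# value expected at index 8 (st_mtime) of the os.stat_result tuple.
import calendar
import email.utils

def key_mtime(obj):
    """Best-effort epoch mtime for a boto Key; 0 if the timestamp is unusable."""
    parsed = email.utils.parsedate(obj.last_modified or '')
    if parsed is None:
        return 0
    return calendar.timegm(parsed)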
def handle_error(self):
    try:
        raise
    except (KeyboardInterrupt, SystemExit, asyncore.ExitNow):
        raise
    except:
        logerror(traceback.format_exc())
    # When facing an unhandled exception in here it's better
    # to rely on the base class (FTPHandler or DTPHandler)
    # close() method, as it does not imply SSL shutdown logic.
    super(SSLConnection, self).close()
def close(self):
    if 'r' in self.mode:
        return
    self.temp_file.close()
    try:
        self.obj.set_contents_from_filename(self.temp_file_path)
    except S3ResponseError, e:
        # Avoid crashing when the "directory" vanished while we were processing it.
        # This is actually due to a server error. It seems to happen after
        # a "rm file" command incorrectly deletes an entire directory. (!!!)
        ftpserver.logerror(
            "Directory vanished! Could not set contents from file %s" %
            self.temp_file_path)
        return
def handle_accept(self):
    """Called when a remote client initiates a connection.

    Mainly copy-pasted from FTPServer code; added the stream_rate
    parameter to the passive_dtp instantiator.
    """
    try:
        sock, addr = self.accept()
    except TypeError:
        # Sometimes accept() might return None (see issue 91).
        return
    except socket.error, err:
        # ECONNABORTED might be thrown on *BSD (see issue 105).
        if err.args[0] != errno.ECONNABORTED:
            ftpserver.logerror(traceback.format_exc())
        return
def validate_authentication(self, username, password):
    '''username: your amazon AWS_ACCESS_KEY_ID or a mapped username
    password: your amazon AWS_SECRET_ACCESS_KEY or a mapped password
    '''
    try:
        # Check for None, not False, here. If the server
        # really wants to allow an empty list of allowed users,
        # then allow no users.
        if (self.allowed_users is None) or (username in self.allowed_users):
            s3_username = self.transform_username(username)
            s3_password = self.transform_password(password)
            operations.authenticate(s3_username, s3_password)
            return True
        else:
            return False
    except Exception, e:
        ftpserver.logerror(e)
        return False
def rmdir(self, path):
    _, bucket, name = self.parse_fspath(path)
    # If the user requests 'rmdir' of a file, refuse that.
    # This is important to avoid falling through and deleting an entire bucket!
    if name:
        ftpserver.logerror("RMD requested on a (non-directory) file.")
        raise OSError(13, 'Operation not permitted')
    else:
        try:
            bucket = operations.connection.get_bucket(bucket)
        except:
            raise OSError(2, 'No such file or directory')
        try:
            operations.connection.delete_bucket(bucket)
        except:
            raise OSError(39, "Directory not empty: '%s'" % bucket)
def get_perms(self, username):
    ftpserver.logerror("unsupported operation: Authorizer.get_perms")
    return ""
try:
    try:
        handler.handle()
    except:
        handler.handle_error()
except (KeyboardInterrupt, SystemExit, asyncore.ExitNow):
    raise
except:
    # This is supposed to be an application bug that should
    # be fixed. We do not want to tear down the server though
    # (DoS). We just log the exception, hoping that someone
    # will eventually file a bug. References:
    # - http://code.google.com/p/pyftpdlib/issues/detail?id=143
    # - http://code.google.com/p/pyftpdlib/issues/detail?id=166
    # - https://groups.google.com/forum/#!topic/pyftpdlib/h7pPybzAx14
    ftpserver.logerror(traceback.format_exc())
    if handler is not None:
        handler.close()
    else:
        if ip is not None and ip in self.ip_map:
            self.ip_map.remove(ip)

print "Connection accepted."
self.conns.append((handler.remote_ip, handler.remote_port))
self.handlers.append(handler)

# The FTP commands the server understands.
# (A sketch of the handler method for the custom VLEN command follows below.)
proto_cmds = ftpserver.proto_cmds
proto_cmds['VLEN'] = dict(
    perm='l', auth=True, arg=True,
    help='Syntax: VLEN (video length: number of frames total).')
proto_cmds['CNKS'] = dict(
    perm='l', auth=True, arg=None,
    help='Syntax: CNKS (list available chunk nums).')
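# A hedged sketch (an assumption, not the original implementation) of the
# handler method pyftpdlib dispatches the custom VLEN command to: a command
# registered in proto_cmds as 'VLEN' is routed to an ftp_VLEN() method on the
# FTPHandler subclass, which replies via self.respond(). The frame-count
# lookup (self.fs.get_video_length) is a hypothetical helper.
def ftp_VLEN(self, line):
    """Reply with the total number of frames in the requested video."""
    try:
        frames = self.fs.get_video_length(line)  # hypothetical helper
        self.respond("213 %d" % frames)
    except OSError:
        self.respond("550 Could not determine video length.")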