def do_client_list_files(host_obj, connection, address, msg_obj, client, cloud):
    """Handle a client's `ls` request for a path inside `cloud`.

    Validates the requested relative path, checks READ access for the
    session, and replies on `connection` with either a
    ListFilesResponseMessage (stat + ls of the target directory) or an
    appropriate error message. Every outcome is recorded via
    host_obj.log_client. Returns None; all results go over the wire.
    """
    _log = get_mylog()
    cloudname = cloud.name
    # Anonymous/public sessions have no uuid and use the public user id.
    session_id = client.uuid if client is not None else None
    client_uid = client.user_id if client else PUBLIC_USER_ID
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        host_obj.log_client(client, 'ls', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    # todo: I believe this should be more complicated.
    # Say a person has permission to read some children of the directory,
    # but not the directory itself. ls returns ACCESS_ERROR currently.
    # Perhaps it should return the children it can access?
    # though, is this process recursive? What if I ls "/", but only have
    # access to "/foo/bar/..."?
    rel_path = RelativePath()
    rd = rel_path.from_relative(msg_obj.fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(msg_obj.fpath)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'ls', cloud, rel_path, 'error')
        return
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, READ_ACCESS)
    if rd.success:
        full_path = rel_path.to_absolute(cloud.root_directory)
        if not os.path.exists(full_path):
            resp = FileDoesNotExistErrorMessage()
            host_obj.log_client(client, 'ls', cloud, rel_path, 'error')
        elif not os.path.isdir(full_path):
            # Fixed: use the module logger acquired above instead of bare
            # mylog() for consistent logging.
            _log.debug('Responding to ClientListFiles with error - {} is a file, not dir.'
                       .format(rel_path.to_string()))
            resp = FileIsNotDirErrorMessage()
            host_obj.log_client(client, 'ls', cloud, rel_path, 'error')
        else:
            _log.debug('Responding successfully to ClientListFiles')
            resp = ListFilesResponseMessage(cloudname, session_id, rel_path.to_string())
            resp.stat = make_stat_dict(rel_path, private_data, cloud, client_uid)
            resp.ls = make_ls_array(rel_path, private_data, cloud, client_uid)
            host_obj.log_client(client, 'ls', cloud, rel_path, 'success')
        connection.send_obj(resp)
    else:
        # client_access_check_or_close already sent the error and closed
        # the connection; just record the failure. (Dead `pass` removed.)
        host_obj.log_client(client, 'ls', cloud, rel_path, 'error')
def recv_file_transfer(host_obj, msg, cloud, socket_conn, db, is_client):
    # type: (HostController, BaseMessage, Cloud, AbstractConnection, SimpleDB, bool) -> ResultAndData
    """Receive a single file (or directory) transfer described by `msg`.

    Validates the message's relative path, delegates the byte transfer to
    do_recv_file_transfer, and on success stamps the matching DB node with
    the file's on-disk mtime so the local watcher does not treat the write
    as a fresh local change. Returns the ResultAndData of the transfer
    (or of the failed path validation).
    """
    _log = get_mylog()
    msg_file_isdir = msg.isdir
    msg_file_size = msg.fsize
    msg_rel_path = msg.fpath
    rel_path = RelativePath()
    rd = rel_path.from_relative(msg_rel_path)
    if not rd.success:
        # Fixed: don't shadow the `msg` parameter with the error text.
        err_text = '{} is not a valid cloud path'.format(msg_rel_path)
        err = InvalidStateMessage(err_text)
        _log.debug(err)
        send_error_and_close(err, socket_conn)
        return rd
    full_path = rel_path.to_absolute(cloud.root_directory)
    rd = do_recv_file_transfer(host_obj, cloud, socket_conn, rel_path,
                               msg_file_isdir, msg_file_size)
    if rd.success:
        # if it wasn't a client file transfer, update our node.
        # We don't want to see that it was updated and send updates to the other hosts.
        # else (this came from a client):
        # We DO want to tell other mirrors about this change, so don't change the DB>
        # The local thread will find the change and alert the other mirrors.
        # if not is_client:
        updated_node = cloud.create_or_update_node(rel_path.to_string(), db)
        if updated_node is not None:
            updated_node.last_modified = datetime.utcfromtimestamp(os.path.getmtime(full_path))
            db.session.commit()
    # Fixed: previously fell off the end and returned None on the success
    # path despite the declared ResultAndData return type.
    return rd
def do_client_remove_dir(host_obj, connection, address, msg_obj, client, cloud):
    """Handle a client's `rmdir` request.

    Checks path validity and WRITE access, then removes the directory:
    recursively (shutil.rmtree) when msg_obj.recursive is set, otherwise
    only if the directory is empty. Replies on `connection` with a
    ClientDeleteResponseMessage or a specific error message, and records
    the outcome via host_obj.log_client.
    """
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    fpath = msg_obj.path
    recurse = msg_obj.recursive
    rel_path = RelativePath()
    rd = rel_path.from_relative(fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(fpath)
        err = InvalidStateMessage(msg)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
        return Error(err)
    # TODO Does the client need access to write the file, or the parent directory?
    # Technically they're modifying the parent dir
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, WRITE_ACCESS)
    if not rd.success:
        # conn was closed by client_access_check_or_close
        return
    full_path = rel_path.to_absolute(cloud.root_directory)
    if not os.path.exists(full_path):
        resp = FileDoesNotExistErrorMessage()
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
    elif not os.path.isdir(full_path):
        # Fixed: the target is NOT a directory here; the old code replied
        # with FileIsDirErrorMessage, the opposite condition (the ls handler
        # uses FileIsNotDirErrorMessage for this same check).
        resp = FileIsNotDirErrorMessage()
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
    else:
        subdirs = os.listdir(full_path)
        if len(subdirs) > 0 and not recurse:
            resp = DirIsNotEmptyMessage()
        elif len(subdirs) > 0 and recurse:
            try:
                shutil.rmtree(full_path)
                resp = ClientDeleteResponseMessage()
            except OSError as e:
                # Fixed: actually format the message; the old code built a
                # tuple ('error deleting {}, "{}"', path, msg) instead.
                msg = 'error deleting {}, "{}"'.format(rel_path.to_string(), e.message)
                _log.error(msg)
                resp = UnknownIoErrorMessage(msg)
        else:  # len(subdirs) == 0
            try:
                os.rmdir(full_path)
                resp = ClientDeleteResponseMessage()
            except OSError as e:
                # Fixed: os.rmdir raises OSError, which `except IOError`
                # never caught; also format the message (was a tuple).
                msg = 'error deleting {}, "{}"'.format(rel_path.to_string(), e.message)
                _log.error(msg)
                resp = UnknownIoErrorMessage(msg)
    host_obj.log_client(client, 'rmdir', cloud, rel_path,
                        'success' if resp.type == CLIENT_DELETE_RESPONSE else 'error')
    connection.send_obj(resp)
def do_client_stat_files(host_obj, connection, address, msg_obj, client, cloud):
    """Handle a client's `stat` request for a single path in `cloud`.

    Validates the relative path and READ access, then replies with a
    StatFileResponseMessage carrying the stat dict of the target, or an
    error message. Returns a ResultAndData (Error on the failure paths,
    otherwise success iff a STAT_FILE_RESPONSE was sent).
    """
    _log = get_mylog()
    cloudname = cloud.name
    # Anonymous/public sessions have no uuid and use the public user id.
    session_id = client.uuid if client is not None else None
    client_uid = client.user_id if client else PUBLIC_USER_ID
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        host_obj.log_client(client, 'stat', cloud, None, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    rel_path = RelativePath()
    rd = rel_path.from_relative(msg_obj.fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(msg_obj.fpath)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
        return Error(err)
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, READ_ACCESS)
    if not rd.success:
        # client_access_check_or_close already sent the error and closed
        # the connection; just record the failure.
        host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
        return rd
    full_path = rel_path.to_absolute(cloud.root_directory)
    if not os.path.exists(full_path):
        resp = FileDoesNotExistErrorMessage()
        host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
    else:
        # NOTE: stat is valid for both files and directories, so there is
        # deliberately no isdir error branch here (unlike `ls`).
        # Fixed: use the module logger instead of bare mylog() for consistency.
        _log.debug('Responding successfully to ClientStatFile')
        resp = StatFileResponseMessage(cloudname, session_id, rel_path.to_string())
        resp.stat = make_stat_dict(rel_path, private_data, cloud, client_uid)
        host_obj.log_client(client, 'stat', cloud, rel_path, 'success')
    connection.send_obj(resp)
    return ResultAndData(resp.type == STAT_FILE_RESPONSE, resp)
def do_recv_file_from_client(host_obj, connection, address, msg_obj, client, cloud):
    """Receive a file upload from a client and write it into `cloud`.

    Validates the target path, rejects writes to the cloud's private-data
    file, enforces WRITE (or, for brand-new paths, APPEND) permission, then
    delegates the byte transfer to do_recv_file_transfer. When a new path
    is created under append-only permission, the uploading user is granted
    WRITE access on it afterwards. Replies to the client with rd.data and
    logs the outcome. Returns Error(...) on the failure paths; falls
    through (returning None) after a completed transfer attempt.
    """
    db = host_obj.get_db()
    _log = get_mylog()
    # Public/anonymous clients have no session uuid and use the public user id.
    client_uuid = client.uuid if client is not None else None
    client_uid = client.user_id if client else PUBLIC_USER_ID
    file_isdir = msg_obj.isdir
    file_size = msg_obj.fsize
    file_path = msg_obj.fpath
    # todo: maybe add a quick response to tell the client the transfer is okay.
    # Originally, there was a ClientFilePut followed by a ClientFileTransfer.
    rel_path = RelativePath()
    rd = rel_path.from_relative(file_path)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(file_path)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        host_obj.log_client(client, 'write', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    # make sure that it's not the private data file
    # make sure we have appropriate permissions
    # if it's a existing file, we'll need to make sure we have write access on that file
    # otherwise, the next existing parent must have either write or append permission
    # if we're creating a new file, and the parent only has append permission, then we should
    #   SHOULD WE? make the child write access?
    #   or should it be the responsibility of the client to also set that permission?
    #   The user will likely not be able to modify the permissions of the cloud,
    #   so they likely wont be able to append the file then chmod the file to
    #   have write-access. However, there'd also be no way for the owner (of the
    #   append-only dir) to pre-authorize any changes in ownership.
    #   lets give them write permissions. Yes. Lets.
    #   but then they could append a new dir, and then that would have write
    #   access, and then they could go and write whatever the hell they want
    #   But I guess that's not technically any worse than letting them append as
    #   many children as they want.
    full_path = rel_path.to_absolute(cloud.root_directory)
    if host_obj.is_private_data_file(full_path, cloud):
        # The private-data file holds the cloud's permission records; clients
        # must never be able to overwrite it.
        msg = 'Clients are not allowed to modify the {} file'.format(
            rel_path.to_string())
        err = SystemFileWriteErrorMessage(msg)
        _log.debug(err)
        host_obj.log_client(client, 'write', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    # True when this upload creates a brand-new path under append-only
    # permission; the uploader is granted WRITE on it after the transfer.
    appending_new = False
    if os.path.exists(full_path):
        # Existing target: a plain WRITE access check (closes the conn itself
        # on failure).
        rd = host_obj.client_access_check_or_close(connection, client_uuid,
                                                   cloud, rel_path, WRITE_ACCESS)
    else:
        # get_client_permissions will call private_data.get_permissions, which
        # will traverse top-down to build the users's permissions. If there
        # are intermediate paths that dont exist, but at least one parent has
        # append access, get_client_permisssions will know
        permissions = host_obj.get_client_permissions(client_uuid, cloud, rel_path)
        if not permissions_are_sufficient(permissions, WRITE_ACCESS):
            if not permissions_are_sufficient(permissions, APPEND_ACCESS):
                # Neither WRITE nor APPEND: reject the upload.
                msg = 'Session does not have sufficient permission to access <{}>'.format(
                    rel_path.to_string())
                _log.debug(msg)
                err = InvalidPermissionsMessage(msg)
                host_obj.log_client(client, 'write', cloud, rel_path, 'error')
                send_error_and_close(err, connection)
                return Error(err)
            else:
                # user does have append permission
                appending_new = True
                rd = Success()
        else:
            # user does have write access, this is good
            pass
            rd = Success()
    if rd.success:
        rd = do_recv_file_transfer(host_obj, cloud, connection, rel_path,
                                   file_isdir, file_size)
        if rd.success and appending_new:
            private_data = host_obj.get_private_data(cloud)
            if private_data is None:
                msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
                err = InvalidStateMessage(msg)
                _log.debug(err)
                host_obj.log_client(client, 'write', cloud, rel_path, 'error')
                send_error_and_close(err, connection)
                return Error(err)
            # Grant the uploader write access on the path they just created
            # under append-only permission (see the discussion comment above).
            private_data.add_user_permission(client_uid, rel_path, WRITE_ACCESS)
            private_data.commit()
            _log.debug('Added permission {} for user [{}] to file {}:{} while appending_new'
                       .format(WRITE_ACCESS, client_uid, cloud.cname(), rel_path.to_string()))
    host_obj.log_client(client, 'write', cloud, rel_path,
                        'success' if rd.success else 'error')
    connection.send_obj(rd.data)
def do_client_read_file(host_obj, connection, address, msg_obj, client, cloud, lookup_permissions=True):
    """Stream the contents of a cloud file back to a client.

    Validates the path, optionally checks READ access, then sends a
    ReadFileResponseMessage followed by the raw file bytes in chunks via
    connection.send_next_data. Directories (including the root '/')
    produce a FileIsDirErrorMessage. Outcomes are recorded via
    host_obj.log_client. Returns None; all results go over the wire.
    """
    db = host_obj.get_db()
    _log = get_mylog()
    # Anonymous/public sessions have no uuid; never dereference client directly.
    client_uuid = client.uuid if client is not None else None
    cloudname = cloud.name
    requested_path = msg_obj.fpath
    rel_path = RelativePath()
    rd = rel_path.from_relative(requested_path)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(requested_path)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'read', cloud, rel_path, 'error')
        return
    requesting_all = requested_path == '/'
    # if the root is '/', send all of the children of the root
    if requesting_all:
        filepath = cloud.root_directory
        # todo: if they're requesting all, it's definitely a dir,
        # which is an error
    else:
        filepath = rel_path.to_absolute(cloud.root_directory)
    try:
        req_file_stat = os.stat(filepath)
    except OSError:
        # Fixed: narrowed from a blanket `except Exception` - only the stat
        # failure (missing/inaccessible path) should take this branch.
        err_msg = FileDoesNotExistErrorMessage()
        connection.send_obj(err_msg)
        host_obj.log_client(client, 'read', cloud, rel_path, 'error')
        return
    if lookup_permissions:
        rd = host_obj.client_access_check_or_close(connection, client_uuid,
                                                   cloud, rel_path, READ_ACCESS)
        if not rd.success:
            # the access check sent the error and closed the connection
            host_obj.log_client(client, 'read', cloud, rel_path, 'error')
            return
    req_file_is_dir = S_ISDIR(req_file_stat.st_mode)
    if req_file_is_dir:
        err_msg = FileIsDirErrorMessage()
        connection.send_obj(err_msg)
        host_obj.log_client(client, 'read', cloud, rel_path, 'error')
    else:
        # send RFP - ReadFileResponse, then stream the raw bytes.
        req_file_size = req_file_stat.st_size
        response = ReadFileResponseMessage(client_uuid, rel_path.to_string(), req_file_size)
        connection.send_obj(response)
        _log.debug('sent RFRp:{}, now sending file bytes'.format(response.serialize()))
        # Chunk size grows with file size: 1KiB base + 10KiB per whole MiB.
        num_MB = int(math.floor(req_file_size / (1024 * 1024)))
        transfer_size = 1024 + (10 * 1024 * num_MB)
        total_len = 0
        num_transfers = 0
        # Fixed: a distinct name for the file object (the old code reused
        # `requested_file` for both the path string and the handle).
        src_file = open(filepath, 'rb')
        try:
            sent_len = 1
            while sent_len > 0:
                new_data = src_file.read(transfer_size)
                sent_len = connection.send_next_data(new_data)
                total_len += sent_len
                num_transfers += 1
                if num_transfers % 127 == 1:
                    _log.debug('sent {} blobs of <{}> ({}/{}B total)'.format(
                        num_transfers, filepath, total_len, req_file_size))
                    # Brief pause so we don't saturate the connection.
                    time.sleep(.1)
        finally:
            # Fixed: close the handle even if the connection dies mid-stream
            # (previously leaked on any send error).
            src_file.close()
        # Fixed: use client_uuid - `client.uuid` crashed for public sessions.
        _log.debug('(RFQ)[{}]Sent <{}> data to [{}]'.format(
            cloud.my_id_from_remote, filepath, client_uuid))
        host_obj.log_client(client, 'read', cloud, rel_path, 'success')
    _log.debug('[{}]bottom of handle_read_file_request(...,{})'.format(
        client_uuid, msg_obj))