def send_file_to_local(db, src_mirror, tgt_mirror, relative_pathname):
    # type: (SimpleDB, Cloud, Cloud, str) -> ResultAndData
    """Copy one file (or create one directory) from src_mirror to tgt_mirror.

    On success, creates/updates the target mirror's FileNode for the path
    and stamps it with the copied file's on-disk mtime.

    :param db: SimpleDB handle used to commit the FileNode update
    :param src_mirror: mirror to copy from
    :param tgt_mirror: mirror to copy into
    :param relative_pathname: path relative to both mirrors' roots
    :return: Success() or Error(exception)
    """
    full_src_path = os.path.join(src_mirror.root_directory, relative_pathname)
    full_tgt_path = os.path.join(tgt_mirror.root_directory, relative_pathname)
    src_file_stat = os.stat(full_src_path)
    src_file_is_dir = S_ISDIR(src_file_stat.st_mode)
    rd = Success()
    try:
        if src_file_is_dir and not os.path.exists(full_tgt_path):
            os.mkdir(full_tgt_path)
        else:
            shutil.copy2(full_src_path, full_tgt_path)
    except (IOError, OSError) as e:
        # BUGFIX: os.mkdir raises OSError, which `except IOError` did not
        # catch (they are distinct types here) - a failed mkdir would have
        # propagated instead of being reported as an Error.
        rd = Error(e)
    if rd.success:
        updated_node = tgt_mirror.create_or_update_node(relative_pathname, db)
        if updated_node is not None:
            old_modified_on = updated_node.last_modified
            updated_node.last_modified = datetime.utcfromtimestamp(
                os.path.getmtime(full_tgt_path))
            mylog('update mtime {}=>{}'.format(old_modified_on,
                                               updated_node.last_modified))
            db.session.commit()
        else:
            mylog('ERROR: Failed to create a FileNode for the new file {}'.
                  format(full_tgt_path))
    return rd
def remove_user_from_link(self, link_str, user_id):
    # type: (str, int) -> ResultAndData
    """Remove user_id from the link identified by link_str.

    :return: Error() when no such link exists, otherwise Success().
    """
    link = self._find_link(link_str)
    if link is None:
        return Error()
    link.remove_user(user_id)
    return Success()
def do_client_get_cloud_hosts(db, session_id, cloud_uname, cname):
    # type: (SimpleDB, str, str, str) -> ResultAndData
    # type: (SimpleDB, str, str, str) -> ResultAndData(True, [dict])
    # type: (SimpleDB, str, str, str) -> ResultAndData(False, BaseMessage)
    """Return the list of hosts (as dicts) for the named cloud, after
    verifying the session's user exists and can access that cloud."""
    _log = get_mylog()
    rd = get_user_from_session(db, session_id)
    if not rd.success:
        return ResultAndData(False, InvalidStateMessage(rd.data))
    user = rd.data
    # todo: also use uname to lookup cloud
    cloud = get_cloud_by_name(db, cloud_uname, cname)
    if cloud is None:
        msg = 'Cloud {}/{} does not exist'.format(cloud_uname, cname)
        _log.debug(msg)
        return Error(InvalidStateMessage(msg))
    if not cloud.can_access(user):
        msg = 'You do not have permission to access {}/{} '.format(
            cloud_uname, cname)
        _log.debug(msg)
        return Error(InvalidStateMessage(msg))
    # todo:37 maybe this should be an option in the API, to get all or only
    # active. For now defaulting to all, because all mirror attempts make a
    # host, which is bad todo:38
    host_dicts = [h.to_dict() for h in cloud.all_hosts()]
    return Success(host_dicts)
def _do_tree(instance, output_all=False, cloudname=None, use_json=False):
    """Print the on-disk file tree for one cloud, or for all clouds.

    :param instance: Instance whose DB is queried for Cloud rows
    :param output_all: print every cloud instead of a single named one
    :param cloudname: 'uname/cname' string; required when output_all is False
    :param use_json: accepted but currently unused
    :return: Success() or Error(str)
    """
    if not output_all and cloudname is None:
        return Error(
            'error: must input a cloudname or use --all to print all clouds')
    db = instance.get_db()
    matches = []
    if output_all:
        matches = db.session.query(Cloud).all()
    else:
        rd = validate_cloudname(cloudname)
        if rd.success:
            uname, cname = rd.data
            matches = get_clouds_by_name(db, uname, cname)
    if len(matches) == 0:
        return Error('No clouds on this host with name {}'.format(cloudname))

    def print_filename(filename, depth):
        # BUGFIX: was `print('--' * depth) + (filename)`, which only worked
        # via the py2 print-statement parse; as a print() call it would add
        # `filename` to print's None return. Emit one combined line instead.
        print('--' * depth + filename)

    for match in matches:
        # print converted to function form for consistency with the other
        # commands in this file.
        print('tree for {}[{}]<{}>'.format(match.name, match.my_id_from_remote,
                                           match.root_directory))
        root_dir = match.root_directory
        walktree(root_dir, 1, print_filename)
    return Success()
def do_create_cloud(db, creator, cloudname, max_size=INFINITE_SIZE):
    # type: (SimpleDB, User, str, int) -> ResultAndData
    # type: (SimpleDB, User, str, int) -> ResultAndData(True, Cloud)
    # type: (SimpleDB, User, str, int) -> ResultAndData(False, str)
    """Create a new cloud owned by `creator` and commit it to the DB.

    This doesn't actually check any user credentials. We kinda just make a
    cloud for that user.

    :param db: DB to add the new Cloud row to
    :param creator: owning User; must not be None
    :param cloudname: name for the new cloud; must be unique per creator
    :param max_size: size cap for the cloud, defaults to unlimited
    :return: Success(Cloud) or Error(str)
    """
    if creator is None:
        return Error('Cloud creator was None')
    existing = creator.created_clouds.filter_by(name=cloudname).first()
    if existing is not None:
        return Error('Another cloud with the name {} already exists'.format(
            existing.full_name()))
    cloud = Cloud(creator)
    cloud.name = cloudname
    cloud.max_size = max_size
    db.session.add(cloud)
    db.session.commit()
    return Success(cloud)
def set_link_permissions(self, link_str, permissions):
    # type: (str, int) -> ResultAndData
    """Set the access bits on the link identified by link_str.

    :return: Error() when no such link exists, otherwise Success().
    """
    link = self._find_link(link_str)
    if link is None:
        return Error()
    link.set_access(permissions)
    return Success()
def _do_db_tree(instance, output_all=False, cloudname=None, use_json=False):
    """Print the FileNode tree stored in the DB for one cloud, or for all.

    :param instance: Instance whose DB is queried for Cloud rows
    :param output_all: print every cloud instead of a single named one
    :param cloudname: 'uname/cname' string; required when output_all is False
    :param use_json: accepted but currently unused
    :return: Success() or Error(str)
    """
    if not output_all and cloudname is None:
        return Error(
            'error: must input a cloudname or use --all to print all clouds')
    db = instance.get_db()
    matches = []
    if output_all:
        matches = db.session.query(Cloud).all()
    else:
        rd = validate_cloudname(cloudname)
        if rd.success:
            uname, cname = rd.data
            matches = get_clouds_by_name(db, uname, cname)
    if len(matches) == 0:
        return Error('No clouds on this host with name {}'.format(cloudname))

    def print_filename(file_node, depth):
        # BUGFIX: was `print('--' * depth) + (file_node.name)` - only valid
        # under the py2 print-statement parse. Emit one combined line.
        print('--' * depth + file_node.name)

    def walk_db_recursive(file_node, depth, callback):
        callback(file_node, depth)
        for child in file_node.children.all():
            # BUGFIX: recursion previously hard-coded print_filename,
            # ignoring the `callback` parameter.
            walk_db_recursive(child, depth + 1, callback)

    for match in matches:
        print('db-tree for {}[{}]<{}>'.format(
            match.name, match.my_id_from_remote, match.root_directory))
        for top_level_node in match.children.all():
            walk_db_recursive(top_level_node, 1, print_filename)
    return Success()
def retrieve_client_session(uname, password):
    """Log in as a client and wrap the session id in a HostSession.

    :return: Success(HostSession) on login, otherwise the failed rd from
        get_client_session unchanged.
    """
    rd = get_client_session(uname, password)
    if rd.success:
        log_success('Created good session')
        return Success(HostSession(rd.data.sid))
    log_fail('Failed to create session')
    return rd
def do_client_add_owner(host_obj, connection, address, msg_obj, client, cloud):
    """Handle a client's ADD_OWNER request for a cloud.

    Checks that the cloud has private data, that the new owner is not the
    public pseudo-user, and that the requesting user is already an owner.
    The request is then forwarded to the remote for validation before the
    owner is persisted locally. On any failure, an error message is sent to
    the client and the connection is closed.
    """
    cloudname = cloud.cname()
    session_id = client.uuid if client else None
    # An anonymous client acts as the public pseudo-user.
    client_uid = client.user_id if client else PUBLIC_USER_ID
    new_owner_id = msg_obj.new_user_id
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    if new_owner_id == PUBLIC_USER_ID:
        # The public pseudo-user can never own a cloud.
        msg = 'The public can\'t be a owner of a cloud'
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    if not private_data.has_owner(client_uid):
        # Only existing owners may add new owners.
        msg = 'User [{}] is not an owner of the cloud "{}"'.format(
            client_uid, cloudname)
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    # Ask the remote to validate the request before changing local state.
    rd = cloud.get_remote_conn()
    if rd.success:
        remote_conn = rd.data
        # The client's message is forwarded as-is.
        request = msg_obj  # todo:24 too lazy to do now
        remote_conn.send_obj(request)
        response = remote_conn.recv_obj()
        if response.type == ADD_OWNER_SUCCESS:
            rd = Success()
        else:
            rd = Error(response.message)
    if not rd.success:
        msg = 'failed to validate the ADD_OWNER request with the remote, msg={}'.format(
            rd.data)
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
    else:
        private_data.add_owner(new_owner_id)
        private_data.commit()
        mylog('Added user [{}] to the owners of {}'.format(
            new_owner_id, cloudname))
        # todo:15
        host_obj.log_client(client, 'add-owner', cloud, None, 'success')
        response = AddOwnerSuccessMessage(session_id, new_owner_id,
                                          cloud.uname(), cloudname)
        connection.send_obj(response)
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Print every Cloud mirror row plus the names of its top-level children."""
    db = instance.get_db()
    for mirror in db.session.query(Cloud).all():
        child_names = [child.name for child in mirror.children.all()]
        print('[{:3}]\t{}/{}, {}, {}, {}\n\t{}'.format(
            mirror.id, mirror.username, mirror.name, mirror.created_on,
            mirror.last_update, mirror.completed_mirroring, child_names))
    return Success()
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Print every FileNode row, padded for rough column alignment."""
    db = instance.get_db()
    for node in db.session.query(FileNode).all():
        # Pad the name out to 16 columns so the timestamps line up.
        padding = ' ' * (16 - len(node.name))
        child_ids = [child.id for child in node.children.all()]
        print('[{:3}]<{}>{}({},{})\t[{:4},{:4}]\t{}'.format(
            node.id, node.name, padding, node.created_on, node.last_modified,
            node.parent_id, node.cloud_id, child_ids))
    return Success()
def add_owner(self, new_owner_id):
    """Ask the host to add new_owner_id as an owner of this session's cloud.

    :return: Success() on ADD_OWNER_SUCCESS, otherwise Error(response).
    """
    msg = ClientAddOwnerMessage(self.sid, new_owner_id, self.cloud_uname,
                                self.cname)
    conn = self.connect()
    conn.send_obj(msg)
    resp = conn.recv_obj()
    if resp.type != ADD_OWNER_SUCCESS:
        return Error(resp)
    return Success()
def share(self, new_owner_id, path, permissions):
    """Ask the host to grant `permissions` on `path` to new_owner_id.

    :return: Success() on ADD_CONTRIBUTOR_SUCCESS, otherwise Error(response).
    """
    msg = ClientAddContributorMessage(self.sid, new_owner_id,
                                      self.cloud_uname, self.cname, path,
                                      permissions)
    conn = self.connect()
    conn.send_obj(msg)
    resp = conn.recv_obj()
    if resp.type != ADD_CONTRIBUTOR_SUCCESS:
        return Error(resp)
    return Success()
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Print a table of all users: id, username, name, email.

    print statements converted to the function form already used by the
    other list commands in this file.
    """
    db = instance.get_db()
    users = db.session.query(User).all()
    print('There are {} users.'.format(len(users)))
    print('[{:4}] {:16} {:16} {:16}'.format('id', 'username', 'name', 'email'))
    for user in users:
        print('[{:4}] {:16} {:16} {:16}'.format(user.id, user.username,
                                                user.name, user.email))
    return Success()
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Print a table of all cloud mirrors and their remote addresses.

    print statements converted to the function form already used by the
    other list commands in this file.
    """
    db = instance.get_db()
    clouds = db.session.query(Cloud).all()
    print('There are {} clouds.'.format(len(clouds)))
    print('[{}] {:5} {:16} {:24} {:16}'.format('id', 'my_id', 'name', 'root',
                                               'address'))
    for cloud in clouds:
        print('[{}] {:5} {}/{}\t\t{:24} {}:{}'.format(
            cloud.id, cloud.my_id_from_remote, cloud.uname(), cloud.cname(),
            cloud.root_directory, cloud.remote.remote_address,
            cloud.remote.remote_port))
    return Success()
def do_remove_file(host_obj, mirror, relative_path, db):
    # type: (HostController, Cloud, str, SimpleDB) -> ResultAndData
    """Delete relative_path and all of its children from the mirror,
    removing both the on-disk files and their FileNode rows.

    Things to do:
    - remove all children nodes from DB older than timestamp
    - remove same set of child files
    - DON'T clean up .nebs - the host who sent this delete should also send
      that update.

    :return: Success([(rel_child_path, node), ...]) listing what was
        deleted, or Error(str).
    """
    # Removed the dead `rd = Error()` placeholder - every path below either
    # returns directly or builds its own result.
    timestamp = datetime.utcnow()
    full_path = os.path.join(mirror.root_directory, relative_path)
    file_node = mirror.get_child_node(relative_path)
    if file_node is None:
        err = 'There was no node in the tree for path:{}'.format(relative_path)
        return Error(err)
    if file_node.is_root():
        err = 'Deleting the root of the cloud is not allowed.'
        return Error(err)
    deletables = find_deletable_children(file_node, full_path, timestamp)
    # deletables should be in reverse BFS order, so as they are deleted they
    # should have no children
    for rel_child_path, node in deletables:
        full_child_path = os.path.join(full_path, rel_child_path)
        db.session.delete(node)
        if os.path.isdir(full_child_path):
            os.rmdir(full_child_path)
        else:
            os.remove(full_child_path)
    db.session.delete(file_node)
    if os.path.exists(full_path):
        if os.path.isdir(full_path):
            os.rmdir(full_path)
        else:
            os.remove(full_path)
    else:
        mylog('The file doesn\'t exist - may have already been deleted')
    db.session.commit()
    return Success(deletables)
def read_file(self, path):
    """Request `path` from the host and return its contents.

    :return: Success(str contents) when the host answers with
        READ_FILE_RESPONSE, otherwise Error(response).
    """
    msg = ReadFileRequestMessage(self.sid, self.cloud_uname, self.cname, path)
    conn = self.connect()
    conn.send_obj(msg)
    resp = conn.recv_obj()
    if resp.type != READ_FILE_RESPONSE:
        return Error(resp)
    total_size = resp.fsize
    chunks = []
    bytes_read = 0
    while bytes_read < total_size:
        chunk = conn.recv_next_data(total_size)
        if len(chunk) == 0:
            # Connection stopped producing data early; return what we have.
            break
        bytes_read += len(chunk)
        chunks.append(chunk)
    return Success(''.join(chunks))
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Print each cloud with its privacy, size limit, owners and contributors."""
    db = instance.get_db()
    clouds = db.session.query(Cloud).all()
    print('There are {} clouds.'.format(len(clouds)))
    # BUGFIX: the header had 5 placeholders for 6 arguments, so str.format
    # silently dropped the 'contributors' column label.
    print('[{}], {}, {}, {}, {}, {}'.format('id', 'uname/cname', 'privacy',
                                            'max_size', 'owners',
                                            'contributors'))
    for cloud in clouds:
        owners = [u.username for u in cloud.owners]
        contributors = [u.username for u in cloud.contributors]
        print('[{}], {}, {}, {}B, {}, {}'.format(cloud.id, cloud.full_name(),
                                                 cloud.privacy, cloud.max_size,
                                                 owners, contributors))
    return Success()
def do_recv_file_from_client(host_obj, connection, address, msg_obj, client,
                             cloud):
    """Receive a file write from a client into the cloud's directory tree.

    Validates the relative path, forbids touching the private-data file,
    checks WRITE (or, for new files, APPEND) permission, then delegates the
    actual byte transfer to do_recv_file_transfer. A client who appended a
    new file is granted WRITE on it afterwards. On failure an error is sent
    and the connection closed; the transfer result is sent back otherwise.
    """
    db = host_obj.get_db()
    _log = get_mylog()
    client_uuid = client.uuid if client is not None else None
    # Anonymous clients act as the public pseudo-user.
    client_uid = client.user_id if client else PUBLIC_USER_ID
    file_isdir = msg_obj.isdir
    file_size = msg_obj.fsize
    file_path = msg_obj.fpath
    # todo: maybe add a quick response to tell the client the transfer is okay.
    # Originally, there was a ClientFilePut followed by a ClientFileTransfer.
    rel_path = RelativePath()
    rd = rel_path.from_relative(file_path)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(file_path)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        host_obj.log_client(client, 'write', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    # make sure that it's not the private data file
    # make sure we have appropriate permissions
    # if it's a existing file, we'll need to make sure we have write access on
    # that file. Otherwise, the next existing parent must have either write or
    # append permission.
    # Design note: if we're creating a new file and the parent only has append
    # permission, we give the new file write access for this user. The user
    # likely can't chmod files on the cloud themselves, so append-then-chmod
    # isn't an option, and the owner of the append-only dir has no way to
    # pre-authorize ownership changes. A client could append a new dir and
    # then write freely inside it, but that's no worse than appending
    # arbitrarily many children.
    full_path = rel_path.to_absolute(cloud.root_directory)
    if host_obj.is_private_data_file(full_path, cloud):
        msg = 'Clients are not allowed to modify the {} file'.format(
            rel_path.to_string())
        err = SystemFileWriteErrorMessage(msg)
        _log.debug(err)
        host_obj.log_client(client, 'write', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    appending_new = False
    if os.path.exists(full_path):
        # Existing file: plain WRITE check (closes the conn itself on deny).
        rd = host_obj.client_access_check_or_close(connection, client_uuid,
                                                   cloud, rel_path,
                                                   WRITE_ACCESS)
    else:
        # get_client_permissions will call private_data.get_permissions, which
        # will traverse top-down to build the user's permissions. If there
        # are intermediate paths that don't exist, but at least one parent has
        # append access, get_client_permissions will know.
        permissions = host_obj.get_client_permissions(client_uuid, cloud,
                                                      rel_path)
        if not permissions_are_sufficient(permissions, WRITE_ACCESS):
            if not permissions_are_sufficient(permissions, APPEND_ACCESS):
                msg = 'Session does not have sufficient permission to access <{}>'.format(
                    rel_path.to_string())
                _log.debug(msg)
                err = InvalidPermissionsMessage(msg)
                host_obj.log_client(client, 'write', cloud, rel_path, 'error')
                send_error_and_close(err, connection)
                return Error(err)
            else:
                # user does have append permission
                appending_new = True
                rd = Success()
        else:
            # user does have write access, this is good
            pass
            rd = Success()
    if rd.success:
        rd = do_recv_file_transfer(host_obj, cloud, connection, rel_path,
                                   file_isdir, file_size)
    if rd.success and appending_new:
        # Grant the appender WRITE on the file they just created.
        private_data = host_obj.get_private_data(cloud)
        if private_data is None:
            msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
            err = InvalidStateMessage(msg)
            _log.debug(err)
            host_obj.log_client(client, 'write', cloud, rel_path, 'error')
            send_error_and_close(err, connection)
            return Error(err)
        private_data.add_user_permission(client_uid, rel_path, WRITE_ACCESS)
        private_data.commit()
        _log.debug(
            'Added permission {} for user [{}] to file {}:{} '
            'while appending_new'.format(WRITE_ACCESS, client_uid,
                                         cloud.cname(), rel_path.to_string()))
    host_obj.log_client(client, 'write', cloud, rel_path,
                        'success' if rd.success else 'error')
    connection.send_obj(rd.data)
def do_recv_file_transfer(host_obj, cloud, socket_conn, rel_path, is_dir,
                          fsize):
    # type: (HostController, Cloud, AbstractConnection, RelativePath, bool, int) -> ResultAndData
    """Receive one file (or mkdir one directory) from socket_conn into the
    cloud's root, creating intermediate directories as needed.

    :param is_dir: True to create a directory instead of reading file bytes
    :param fsize: number of bytes to read from the connection when not a dir
    :return: Success(FileTransferSuccessMessage) or Error(<error message>)
    """
    _log = get_mylog()
    if host_obj is None:
        return Error(InvalidStateMessage(
            'Did not supply a host_obj to do_recv_file_transfer'))
    full_path = rel_path.to_absolute(cloud.root_directory)
    is_private_data_file = host_obj.is_private_data_file(full_path, cloud)
    full_dir_path = os.path.dirname(full_path)
    _log.debug('full_dir_path={}'.format(full_dir_path))
    # Create the path to this file, if it doesn't exist
    if not os.path.exists(full_dir_path):
        _log.debug('had to make dirs for {}'.format(full_dir_path))
        try:
            os.makedirs(full_dir_path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                err = 'I/O Error creating path {}'.format(full_dir_path)
                resp = UnknownIoErrorMessage(err)
                return Error(resp)
            else:
                err = 'Path {} already exists'.format(full_dir_path)
                resp = FileAlreadyExistsMessage(full_dir_path)
                return Error(resp)
    if not os.path.isdir(full_dir_path):
        err = '{} is not a directory'.format(full_dir_path)
        resp = FileIsNotDirErrorMessage()
        return Error(resp)
    if is_dir:
        if not os.path.exists(full_path):
            try:
                os.mkdir(full_path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    err = 'I/O Error creating path {}'.format(full_dir_path)
                    resp = UnknownIoErrorMessage(err)
                    return Error(resp)
                else:
                    err = 'Path {} already exists'.format(full_dir_path)
                    resp = FileAlreadyExistsMessage(full_dir_path)
                    return Error(resp)
    else:
        # normal file: pull fsize bytes off the connection in 1KiB chunks.
        chunks = []
        total_read = 0
        while total_read < fsize:
            new_data = socket_conn.recv_next_data(min(1024,
                                                      (fsize - total_read)))
            # BUGFIX: the old guard tested `total_read is None` (never true)
            # and only broke on `new_data is None`, so an empty read from a
            # closed connection made this loop spin forever. Break whenever
            # the read doesn't advance.
            if new_data is None or len(new_data) == 0:
                break
            total_read += len(new_data)
            chunks.append(new_data)
        try:
            # `with` guarantees the handle is closed even if write() raises.
            with open(full_path, mode='wb') as file_handle:
                file_handle.write(''.join(chunks))
        except (OSError, IOError) as e:
            err = 'I/O Error writing file path {} - ERRNO:{}'.format(
                rel_path.to_string(), e.errno)
            resp = UnknownIoErrorMessage(err)
            return Error(resp)
    resp = FileTransferSuccessMessage(cloud.uname(), cloud.cname(),
                                      rel_path.to_string())
    if is_private_data_file:
        # The client just rewrote the cloud's private data; pick it up.
        host_obj.reload_private_data(cloud)
    return Success(resp)
def do_client_add_contributor(host_obj, connection, address, msg_obj, client,
                              cloud):
    """Handle a client's request to share a path with another user.

    Validates the path, that the cloud has private data, that the requester
    has SHARE access and holds at least the permissions being granted, then
    asks the remote to approve before persisting the grant locally. On any
    failure an error message is sent and the connection is closed.
    """
    _log = get_mylog()
    cloudname = cloud.name
    session_id = client.uuid if client else None
    new_user_id = msg_obj.new_user_id
    fpath = msg_obj.fpath
    new_permissions = msg_obj.permissions
    rel_path = RelativePath()
    rd = rel_path.from_relative(fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(fpath)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        host_obj.log_client(client, 'share', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'share', cloud, rel_path, 'error')
        return
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, SHARE_ACCESS)
    if not rd.success:
        # conn was closed by client_access_check_or_close
        return
    perms = rd.data
    # You can't grant permissions you don't hold yourself.
    if not permissions_are_sufficient(perms, new_permissions):
        msg = 'Client doesn\'t have permission to give to other user'
        err = AddContributorFailureMessage(msg)
        mylog(err.message, '31')
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'share', cloud, rel_path, 'error')
        return
    mylog('Client has sharing permission')
    # Ask the remote to validate before changing local state.
    rd = cloud.get_remote_conn()
    if rd.success:
        remote_conn = rd.data
        request = AddContributorMessage(cloud.my_id_from_remote, new_user_id,
                                        cloud.uname(), cloudname)
        # todo:24 too lazy to do now
        remote_conn.send_obj(request)
        response = remote_conn.recv_obj()
        if response.type == ADD_CONTRIBUTOR_SUCCESS:
            rd = Success()
        else:
            rd = Error(response.message)
    mylog('completed talking to remote, {}'.format(rd))
    if not rd.success:
        # BUGFIX: error message previously read 'ADD_ADD_CONTRIBUTOR'.
        msg = 'failed to validate the ADD_CONTRIBUTOR request with the remote, msg={}'.format(
            rd.data)
        err = AddContributorFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'share', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
    else:
        # PrivateData will be able to handle the public_user_id
        private_data.add_user_permission(new_user_id, rel_path,
                                         new_permissions)
        private_data.commit()
        mylog('Added permission {} for user [{}] to file {}:{}'.format(
            new_permissions, new_user_id, cloudname, fpath))
        host_obj.log_client(client, 'share', cloud, rel_path, 'success')
        response = AddContributorSuccessMessage(new_user_id, cloud.uname(),
                                                cloudname)
        connection.send_obj(response)
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    """Run the instance's DB migration; always reports Success."""
    instance.migrate()
    return Success()
def get_link_full_permissions(self, link_str):
    """Return Success((access, users)) for the named link, or Error() if it
    doesn't exist."""
    link = self._find_link(link_str)
    if link is None:
        return Error()
    return Success((link.get_access(), link.get_users()))