def _do_tree(instance, output_all=False, cloudname=None, use_json=False):
    if not output_all and cloudname is None:
        return Error(
            'error: must input a cloudname or use --all to print all clouds')
    db = instance.get_db()
    matches = []
    if output_all:
        matches = db.session.query(Cloud).all()
    else:
        rd = validate_cloudname(cloudname)
        if rd.success:
            uname, cname = rd.data
            matches = get_clouds_by_name(db, uname, cname)
        if len(matches) == 0:
            return Error('No clouds on this host with name {}'.format(cloudname))

    def print_filename(filename, depth):
        print('--' * depth + filename)

    for match in matches:
        print 'tree for {}[{}]<{}>'.format(match.name, match.my_id_from_remote,
                                           match.root_directory)
        root_dir = match.root_directory
        walktree(root_dir, 1, print_filename)
    return Success()
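# _do_tree relies on a filesystem walker named walktree that is not shown in
# this section. A minimal sketch of one possible implementation is below,
# assuming the callback signature (filename, depth) used by print_filename
# above; the project's real helper may differ.
import os

def walktree(directory, depth, callback):
    # Visit every entry under <directory>, calling callback(name, depth) and
    # recursing into subdirectories with depth + 1.
    for entry in sorted(os.listdir(directory)):
        full_path = os.path.join(directory, entry)
        callback(entry, depth)
        if os.path.isdir(full_path):
            walktree(full_path, depth + 1, callback)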
def send_file_to_local(db, src_mirror, tgt_mirror, relative_pathname):
    # type: (SimpleDB, Cloud, Cloud, str) -> ResultAndData
    rd = Error()
    full_src_path = os.path.join(src_mirror.root_directory, relative_pathname)
    full_tgt_path = os.path.join(tgt_mirror.root_directory, relative_pathname)
    src_file_stat = os.stat(full_src_path)
    src_file_is_dir = S_ISDIR(src_file_stat.st_mode)
    rd = Success()
    try:
        if src_file_is_dir and not os.path.exists(full_tgt_path):
            os.mkdir(full_tgt_path)
        else:
            shutil.copy2(full_src_path, full_tgt_path)
    except IOError as e:
        rd = Error(e)
    if rd.success:
        updated_node = tgt_mirror.create_or_update_node(relative_pathname, db)
        if updated_node is not None:
            old_modified_on = updated_node.last_modified
            updated_node.last_modified = datetime.utcfromtimestamp(
                os.path.getmtime(full_tgt_path))
            mylog('update mtime {}=>{}'.format(old_modified_on,
                                               updated_node.last_modified))
            db.session.commit()
        else:
            mylog('ERROR: Failed to create a FileNode for the new file {}'.format(
                full_tgt_path))
    return rd
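# Nearly every function in this section returns ResultAndData and uses the
# Success/Error helpers without defining them here. The sketch below shows one
# plausible shape for that pattern, assuming only the .success/.data attributes
# exercised above; the project's real class may carry more.
class ResultAndData(object):
    def __init__(self, success, data=None):
        # success is a bool; data is either the payload or an error description
        self.success = success
        self.data = data

def Success(data=None):
    return ResultAndData(True, data)

def Error(data=None):
    return ResultAndData(False, data)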
def do_create_cloud(db, creator, cloudname, max_size=INFINITE_SIZE):
    # type: (SimpleDB, User, str, int) -> ResultAndData
    # type: (SimpleDB, User, str, int) -> ResultAndData(True, Cloud)
    # type: (SimpleDB, User, str, int) -> ResultAndData(False, str)
    """
    This doesn't actually check any user credentials; it simply creates a
    cloud owned by the given user.
    :param db:
    :param creator:
    :param cloudname:
    :param max_size:
    :return:
    """
    if creator is None:
        return Error('Cloud creator was None')
    created_clouds = creator.created_clouds
    duplicate_cloud = created_clouds.filter_by(name=cloudname).first()
    if duplicate_cloud is not None:
        return Error('Another cloud with the name {} already exists'.format(
            duplicate_cloud.full_name()))
    new_cloud = Cloud(creator)
    new_cloud.name = cloudname
    new_cloud.max_size = max_size
    db.session.add(new_cloud)
    db.session.commit()
    return Success(new_cloud)
def _do_db_tree(instance, output_all=False, cloudname=None, use_json=False):
    if not output_all and cloudname is None:
        return Error(
            'error: must input a cloudname or use --all to print all clouds')
    db = instance.get_db()
    matches = []
    if output_all:
        matches = db.session.query(Cloud).all()
    else:
        rd = validate_cloudname(cloudname)
        if rd.success:
            uname, cname = rd.data
            matches = get_clouds_by_name(db, uname, cname)
        if len(matches) == 0:
            return Error('No clouds on this host with name {}'.format(cloudname))

    def print_filename(file_node, depth):
        print('--' * depth + file_node.name)

    def walk_db_recursive(file_node, depth, callback):
        callback(file_node, depth)
        for child in file_node.children.all():
            # recurse with the supplied callback rather than hard-coding print_filename
            walk_db_recursive(child, depth + 1, callback)

    for match in matches:
        print 'db-tree for {}[{}]<{}>'.format(match.name, match.my_id_from_remote,
                                              match.root_directory)
        for top_level_node in match.children.all():
            walk_db_recursive(top_level_node, 1, print_filename)
    return Success()
def do_client_get_cloud_hosts(db, session_id, cloud_uname, cname):
    # type: (SimpleDB, str, str, str) -> ResultAndData
    # type: (SimpleDB, str, str, str) -> ResultAndData(True, [dict])
    # type: (SimpleDB, str, str, str) -> ResultAndData(False, BaseMessage)
    _log = get_mylog()
    rd = get_user_from_session(db, session_id)
    if not rd.success:
        return ResultAndData(False, InvalidStateMessage(rd.data))
    else:
        user = rd.data
    # todo: also use uname to lookup cloud
    cloud = get_cloud_by_name(db, cloud_uname, cname)
    if cloud is None:
        msg = 'Cloud {}/{} does not exist'.format(cloud_uname, cname)
        _log.debug(msg)
        return Error(InvalidStateMessage(msg))
    if not cloud.can_access(user):
        msg = 'You do not have permission to access {}/{}'.format(
            cloud_uname, cname)
        _log.debug(msg)
        return Error(InvalidStateMessage(msg))
    # todo:37 maybe this should be an option in the API, to get all or only active
    # For now I'm defaulting to active, because all mirror attempts make a host,
    # which is bad todo:38
    hosts = [host.to_dict() for host in cloud.all_hosts()]
    # hosts = [host.to_dict() for host in cloud.hosts.all()]
    return Success(hosts)
def do_client_create_link(host_obj, connection, address, msg_obj, client, cloud):
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    rel_path = RelativePath()
    rel_path.from_relative(msg_obj.path)
    _log.debug('Creating a link to {}'.format(rel_path.to_string()))
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'link', cloud, rel_path, 'error')
        send_error_and_close(err, connection)
        return Error(msg)
    # how do we want to gate this? Owners only? or sharers only?
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, SHARE_ACCESS)
    if not rd.success:
        # conn was closed by client_access_check_or_close
        return rd
    # We'll ask the remote to give us a link id
    remote_req = HostReserveLinkRequestMessage(cloud.uname(), cloud.cname())
    rd = cloud.get_remote_conn()
    if not rd.success:
        msg = 'Failed to connect to remote for {}: {}'.format(
            cloud.full_name(), rd.data)
        _log.error(msg)
        host_obj.log_client(client, 'link', cloud, rel_path, 'error')
        connection.send_obj(InvalidStateMessage(msg))
        connection.close()
        return Error(msg)
    remote_conn = rd.data
    _log.debug('Got remote connection')
    remote_conn.send_obj(remote_req)
    remote_resp = remote_conn.recv_obj()
    if remote_resp.type != HOST_RESERVE_LINK_RESPONSE:
        msg = 'Remote failed to reserve link for us'
        _log.error(msg)
        host_obj.log_client(client, 'link', cloud, rel_path, 'error')
        connection.send_obj(InvalidStateMessage(msg))
        connection.close()
        return Error(msg)
    _log.debug('Got link from remote')
    link_str = remote_resp.link_string
    # Create the link in the private data
    private_data.add_link(rel_path, link_str)
    _log.debug('Committing .nebs to add link {}->{}'.format(
        link_str, rel_path.to_string()))
    private_data.commit()
    resp = ClientCreateLinkResponseMessage(link_str)
    connection.send_obj(resp)
    host_obj.log_client(client, 'link', cloud, rel_path, 'success')
def do_client_stat_files(host_obj, connection, address, msg_obj, client, cloud):
    _log = get_mylog()
    cloudname = cloud.name
    session_id = client.uuid if client is not None else None
    client_uid = client.user_id if client else PUBLIC_USER_ID
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        host_obj.log_client(client, 'stat', cloud, None, 'error')
        send_error_and_close(err, connection)
        return Error(err)
    rel_path = RelativePath()
    rd = rel_path.from_relative(msg_obj.fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(msg_obj.fpath)
        err = InvalidStateMessage(msg)
        _log.debug(err)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
        return Error(err)
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, READ_ACCESS)
    if rd.success:
        full_path = rel_path.to_absolute(cloud.root_directory)
        if not os.path.exists(full_path):
            resp = FileDoesNotExistErrorMessage()
            host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
        # elif not os.path.isdir(full_path):
        #     mylog('Responding to ClientListFiles with error - {} is a file, not dir.'.format(rel_path.to_string()))
        #     resp = FileIsNotDirErrorMessage()
        #     host_obj.log_client(client, 'ls', cloud, rel_path, 'error')
        else:
            mylog('Responding successfully to ClientStatFile')
            resp = StatFileResponseMessage(cloudname, session_id,
                                           rel_path.to_string())
            resp.stat = make_stat_dict(rel_path, private_data, cloud, client_uid)
            # resp.ls = make_ls_array(rel_path, private_data, cloud, client_uid)
            host_obj.log_client(client, 'stat', cloud, rel_path, 'success')
        connection.send_obj(resp)
        return ResultAndData(resp.type == STAT_FILE_RESPONSE, resp)
    else:
        # the access check will send error
        host_obj.log_client(client, 'stat', cloud, rel_path, 'error')
        return rd
def ripDVD(device, destDir, tmpDir, extraOptions=[], ejectDisk=True,
           procMgr=DFT_MGR):
    """Use HandBrakeCLI to rip the main feature of a DVD from the given device.
    <destDir> is the folder finished rips are moved into; <tmpDir> holds the
    rip while it is in progress. Returns the path of the ripped media file,
    or a false value on failure."""
    Msg("Reading metadata from %s" % device)
    dvd_data = dvdDiscProperties(device, procMgr)
    if dvd_data is None:
        return False
    name1 = dvd_data['dvd_title']
    name2 = dvd_data['dvd_alt_title']
    # find main_feature title
    main_title = 'unknown'
    for title, props in dvd_data['titles'].iteritems():
        if 'main_feature' in props and props['main_feature']:
            main_title = title
    if len(name1) > 0:
        name = name1
    elif len(name2) > 0:
        name = name2
    else:
        name = "Unknown DVD"
    tmpfile = uniquePath(os.path.join(tmpDir, "%s.mp4" % name))
    Msg("Ripping title %s of %s to %s" % (main_title, name, tmpDir))
    retcode, sout, serr = procMgr.call(
        ['HandBrakeCLI', '-i', device, '-o', tmpfile] + extraOptions)
    if retcode != 0:
        Error("HandBrake failed to rip title '%s' of disc '%s'" % (main_title, name))
        Error("HandBrake output:\n %s" % serr)
        # autoripd will clear up the temp directory
        return None
    else:
        # move movie back to destination
        final_file = uniquePath(os.path.join(destDir, "%s.mp4" % name))
        os.rename(tmpfile, final_file)
        if ejectDisk:
            # not process logged, but probably safe.
            subp.call(['eject', device])
        return os.path.abspath(final_file)
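# ripDVD and ripBluRay both call uniquePath, which is not defined in this
# section. A minimal sketch, assuming it simply appends a counter until the
# path no longer collides; the real helper may choose names differently.
import os

def uniquePath(path):
    # Return <path> unchanged if it is free, otherwise "<base> (n)<ext>" for
    # the first n that does not collide with an existing file.
    if not os.path.exists(path):
        return path
    base, ext = os.path.splitext(path)
    n = 1
    while os.path.exists("%s (%d)%s" % (base, n, ext)):
        n += 1
    return "%s (%d)%s" % (base, n, ext)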
def ripBluRay(device, destDir, workingDir, ejectDisc=True, procManager=DFT_MGR):
    """Use makemkvcon to rip a blu-ray movie from the given device.
    <destDir> is the path of the folder into which finished ripped movies will
    be moved. <workingDir> is the path of a folder where unfinished rips will
    reside until they are complete. Returns path of the ripped media file, or
    None."""
    properties = bluRayDiscProperties(device, procManager)
    if properties is None:
        # failure. brdProperties() will have reported the error.
        return None
    disc, titles = properties
    feature_title_id = detectBluRayMainFeature(titles)
    name = disc['name'] if 'name' in disc else 'Unknown Blu-Ray'
    Msg("Ripping title %s of %s to %s" % (feature_title_id, name, workingDir))
    retcode, sout, serr = procManager.call([
        "makemkvcon", "mkv", "dev:%s" % device, str(feature_title_id), workingDir
    ])
    if retcode != 0:
        Error("Failed to rip from '%s' %s" % (disc['title'], device))
        Error("makemkvcon output:\n%s" % serr)
        # unfinished mkv laying around for debugging. autoripd will delete the
        # working directory if the user has chosen so with a config setting
        return None
    else:
        # move tmp mkv to final location
        f_output = titles[feature_title_id]['outputFileName']
        f_output = os.path.join(workingDir, f_output)
        final_filename = "%s.mkv" % name
        final_path = uniquePath(os.path.join(destDir, final_filename))
        os.rename(f_output, final_path)
        if ejectDisc:
            # not process logged, but probably safe.
            subp.call(['eject', device])
        Msg("Ripped %s successfully" % name)
        return os.path.abspath(final_path)
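# ripBluRay also calls detectBluRayMainFeature, which is not shown here. One
# common heuristic is to pick the title with the longest duration; the sketch
# below assumes each title dict carries a numeric 'duration' field, which may
# not match the real makemkvcon-derived layout.
def detectBluRayMainFeature(titles):
    best_id = None
    best_duration = -1
    for title_id, props in titles.items():
        duration = props.get('duration', 0)
        if duration > best_duration:
            best_duration = duration
            best_id = title_id
    return best_id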
def remove_user_from_link(self, link_str, user_id):
    # type: (str, int) -> ResultAndData
    matching_link = self._find_link(link_str)
    if matching_link is None:
        return Error()
    matching_link.remove_user(user_id)
    return Success()
def set_link_permissions(self, link_str, permissions):
    # type: (str, int) -> ResultAndData
    matching_link = self._find_link(link_str)
    if matching_link is None:
        return Error()
    matching_link.set_access(permissions)
    return Success()
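# remove_user_from_link and set_link_permissions above (and
# get_link_full_permissions later in this section) all go through _find_link
# and a small per-link record. A minimal sketch of the backing structures
# these methods assume, covering only the accessors used here; the project's
# real classes and their serialization may differ.
class Link(object):
    def __init__(self, link_str, rel_path, access=0):
        self.link_str = link_str
        self.rel_path = rel_path
        self._access = access
        self._user_ids = []

    def set_access(self, permissions):
        self._access = permissions

    def get_access(self):
        return self._access

    def get_users(self):
        return list(self._user_ids)

    def remove_user(self, user_id):
        if user_id in self._user_ids:
            self._user_ids.remove(user_id)

class PrivateData(object):
    # only the pieces exercised above; the real class also tracks owners,
    # handles commit(), etc.
    def __init__(self):
        self.links = []

    def _find_link(self, link_str):
        for link in self.links:
            if link.link_str == link_str:
                return link
        return None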
def do_command_with_args(self, instance, args): # type: (Instance, Namespace) -> ResultAndData """ Execute this command, with the Namespace as parsed by argparse. """ return Error()
def do_client_remove_dir(host_obj, connection, address, msg_obj, client, cloud):
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    fpath = msg_obj.path
    recurse = msg_obj.recursive
    rel_path = RelativePath()
    rd = rel_path.from_relative(fpath)
    if not rd.success:
        msg = '{} is not a valid cloud path'.format(fpath)
        err = InvalidStateMessage(msg)
        send_error_and_close(err, connection)
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
        return Error(err)
    # TODO Does the client need access to write the file, or the parent directory?
    # Technically they're modifying the parent dir
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, WRITE_ACCESS)
    if not rd.success:
        # conn was closed by client_access_check_or_close
        return
    full_path = rel_path.to_absolute(cloud.root_directory)
    if not os.path.exists(full_path):
        resp = FileDoesNotExistErrorMessage()
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
    elif not os.path.isdir(full_path):
        resp = FileIsDirErrorMessage()
        host_obj.log_client(client, 'rmdir', cloud, rel_path, 'error')
    else:
        subdirs = os.listdir(full_path)
        if len(subdirs) > 0 and not recurse:
            resp = DirIsNotEmptyMessage()
        elif len(subdirs) > 0 and recurse:
            try:
                shutil.rmtree(full_path)
                resp = ClientDeleteResponseMessage()
            except OSError as e:
                msg = 'error deleting {}, "{}"'.format(rel_path.to_string(),
                                                       e.message)
                _log.error(msg)
                resp = UnknownIoErrorMessage(msg)
        else:  # len subdirs == 0
            try:
                os.rmdir(full_path)
                resp = ClientDeleteResponseMessage()
            except OSError as e:
                msg = 'error deleting {}, "{}"'.format(rel_path.to_string(),
                                                       e.message)
                _log.error(msg)
                resp = UnknownIoErrorMessage(msg)
    host_obj.log_client(
        client, 'rmdir', cloud, rel_path,
        'success' if resp.type == CLIENT_DELETE_RESPONSE else 'error')
    connection.send_obj(resp)
def do_client_add_owner(host_obj, connection, address, msg_obj, client, cloud):
    cloudname = cloud.cname()
    session_id = client.uuid if client else None
    client_uid = client.user_id if client else PUBLIC_USER_ID
    new_owner_id = msg_obj.new_user_id
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    if new_owner_id == PUBLIC_USER_ID:
        msg = 'The public can\'t be an owner of a cloud'
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    if not private_data.has_owner(client_uid):
        msg = 'User [{}] is not an owner of the cloud "{}"'.format(
            client_uid, cloudname)
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
        return
    rd = cloud.get_remote_conn()
    if rd.success:
        remote_conn = rd.data
        request = msg_obj  # todo:24 too lazy to do now
        remote_conn.send_obj(request)
        response = remote_conn.recv_obj()
        if response.type == ADD_OWNER_SUCCESS:
            rd = Success()
        else:
            rd = Error(response.message)
    if not rd.success:
        msg = 'failed to validate the ADD_OWNER request with the remote, msg={}'.format(
            rd.data)
        err = AddOwnerFailureMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'add-owner', cloud, None, 'error')
        send_error_and_close(err, connection)
    else:
        private_data.add_owner(new_owner_id)
        private_data.commit()
        mylog('Added user [{}] to the owners of {}'.format(
            new_owner_id, cloudname))
        # todo:15
        host_obj.log_client(client, 'add-owner', cloud, None, 'success')
        response = AddOwnerSuccessMessage(session_id, new_owner_id,
                                          cloud.uname(), cloudname)
        connection.send_obj(response)
def get_user(self):
    # type: () -> ResultAndData
    print 'Session get_user, {}'.format(self.uuid)
    if self.user is None:
        rd = Error('No user exists on remote\'s session, sid:{}'.format(
            self.uuid))
    else:
        rd = ResultAndData(True, self.user)
    return rd
def check_file_contents(root, path, data):
    try:
        handle = open(os.path.join(root, path))
        contents = handle.read()
        handle.close()
        return ResultAndData(data == contents,
                             'Checking {} file contents'.format(path))
    except Exception, e:
        return Error(e)
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    output_all = args.all
    cloudname = args.cloud_name
    if not output_all and cloudname is None:
        return Error(
            'error: must input a cloudname or use --all to print all clouds')
    return do_list_hosts(instance, output_all, cloudname)
def call(self, args):
    try:
        with self.Popen(args, stdout=subp.PIPE, stderr=subp.PIPE) as pipe:
            sout, serr = pipe.communicate()
            Babble("%s output:\n%s\n%s\n" % (args[0], sout, serr))
            return pipe.returncode, sout, serr
    except OSError, err:
        if err.errno == errno.ENOENT:
            Error("%s could not be found. Is it installed?" % args[0])
        raise
def add_owner(self, new_owner_id):
    msg = ClientAddOwnerMessage(self.sid, new_owner_id, self.cloud_uname,
                                self.cname)
    conn = self.connect()
    conn.send_obj(msg)
    resp = conn.recv_obj()
    if resp.type == ADD_OWNER_SUCCESS:
        return Success()
    else:
        return Error(resp)
def do_remove_file(host_obj, mirror, relative_path, db):
    # type: (HostController, Cloud, str, SimpleDB) -> ResultAndData
    rd = Error()
    timestamp = datetime.utcnow()
    # Things to do:
    # - remove all children nodes from DB older than timestamp
    # - remove same set of child files
    # - DON'T clean up .nebs - The host who sent this delete should also send that update.
    full_path = os.path.join(mirror.root_directory, relative_path)
    file_node = mirror.get_child_node(relative_path)
    if file_node is None:
        err = 'There was no node in the tree for path:{}'.format(relative_path)
        return Error(err)
    is_root = file_node.is_root()
    if is_root:
        err = 'Deleting the root of the cloud is not allowed.'
        return Error(err)
    deletables = find_deletable_children(file_node, full_path, timestamp)
    # deletables should be in reverse BFS order, so as they are deleted they
    # should have no children
    for rel_child_path, node in deletables:
        full_child_path = os.path.join(full_path, rel_child_path)
        db.session.delete(node)
        if os.path.isdir(full_child_path):
            os.rmdir(full_child_path)
        else:
            os.remove(full_child_path)
        # mylog('Deleted node, file for {}'.format(full_child_path), '34')
    db.session.delete(file_node)
    if os.path.exists(full_path):
        if os.path.isdir(full_path):
            os.rmdir(full_path)
        else:
            os.remove(full_path)
    else:
        mylog('The file doesn\'t exist - may have already been deleted')
    db.session.commit()
    rd = Success(deletables)
    return rd
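# do_remove_file expects find_deletable_children to return (relative_path, node)
# pairs in reverse BFS order, so every node is deleted before its parent. A
# sketch of how that ordering could be produced, assuming each node exposes
# .name, .children and .last_modified like the FileNodes used elsewhere in this
# section; the real helper may filter on the timestamp differently.
import os

def find_deletable_children(file_node, full_path, timestamp):
    ordered = []
    queue = [('', file_node)]
    while queue:
        rel_path, node = queue.pop(0)
        for child in node.children.all():
            child_rel = os.path.join(rel_path, child.name)
            # only nodes not modified after the delete was issued are eligible
            if child.last_modified <= timestamp:
                ordered.append((child_rel, child))
                queue.append((child_rel, child))
    # reverse the BFS order so leaves come before their parents
    ordered.reverse()
    return ordered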
def share(self, new_owner_id, path, permissions):
    msg = ClientAddContributorMessage(self.sid, new_owner_id, self.cloud_uname,
                                      self.cname, path, permissions)
    conn = self.connect()
    conn.send_obj(msg)
    resp = conn.recv_obj()
    if resp.type == ADD_CONTRIBUTOR_SUCCESS:
        return Success()
    else:
        return Error(resp)
def do_command_with_args(self, instance, args):
    # type: (Instance, Namespace) -> ResultAndData
    session_id = args.session_id
    size = args.size
    cloud_name = args.cloud_name
    db = instance.get_db()
    creator = None
    if session_id is not None:
        # TODO: Lookup the user based on their session_id, and validate the session
        pass
    else:
        # The password prompt and user lookup below reconstruct a scrubbed
        # block; getpass and get_user_by_username are assumed helpers.
        owner_uname = raw_input('Enter the owner\'s username: ')
        owner_pass = getpass('Enter the owner\'s password: ')
        creator = get_user_by_username(db, owner_uname)
        if creator is None:
            return Error('Specified user does not exist')
        if not (check_password_hash(creator.password, owner_pass)):
            return Error('Owner username/password combination invalid.')
    return do_create_cloud(db, creator, cloud_name, size)
def do_client_read_link(host_obj, connection, address, msg_obj, client, cloud):
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    link_str = msg_obj.link_string
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'read-link', cloud, link_str, 'error')
        send_error_and_close(err, connection)
        return Error(msg)
    # TODO: how do we handle separate link permissions here?
    # We're just translating it straight to a normal readfile message
    rd = host_obj.client_link_access_check_or_close(connection, session_id,
                                                    cloud, link_str, READ_ACCESS)
    if not rd.success:
        # conn was closed by client_access_check_or_close
        return
    # get the path from the link
    rel_path = RelativePath()
    path = private_data.get_path_from_link(link_str)
    if path is None:
        msg = 'The link {} is not valid for this cloud'.format(link_str)
        err = LinkDoesNotExistMessage(msg)
        _log.error(err.message)
        host_obj.log_client(client, 'read-link', cloud, link_str, 'error')
        send_error_and_close(err, connection)
        return
    rel_path.from_relative(path)
    # construct a ReadFile message, using the path from the link
    translated = ReadFileRequestMessage(session_id, cloud.uname(), cloud.cname(),
                                        rel_path.to_string())
    return do_client_read_file(host_obj, connection, address, translated, client,
                               cloud, lookup_permissions=False)
def get_client_host(sid, cloud_uname, cname):
    try:
        rem_sock = setup_test_remote_socket()
        if rem_sock is None:
            log_warn('Failed to get client host socket')
            return Error()
        rem_conn = RawConnection(rem_sock)
        msg = ClientGetCloudHostRequestMessage(sid, cloud_uname, cname)
        rem_conn.send_obj(msg)
        response = rem_conn.recv_obj()
        if not (response.type == CLIENT_GET_CLOUD_HOST_RESPONSE):
            raise Exception('remote did not respond with success CGCR')
        return ResultAndData(True, response)
    except Exception, e:
        return ResultAndData(False, e)
def do_client_set_link_permissions(host_obj, connection, address, msg_obj,
                                   client, cloud):
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    link_str = msg_obj.link_string
    permissions = msg_obj.permissions
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        host_obj.log_client(client, 'chmod-link', cloud, RelativeLink(link_str),
                            'error')
        send_error_and_close(err, connection)
        return Error(msg)
    # get the path from the link
    rel_path = RelativePath()
    path = private_data.get_path_from_link(link_str)
    if path is None:
        msg = 'The link {} is not valid for this cloud'.format(link_str)
        err = LinkDoesNotExistMessage(msg)
        _log.error(err.message)
        host_obj.log_client(client, 'chmod-link', cloud, RelativeLink(link_str),
                            'error')
        send_error_and_close(err, connection)
        return
    rel_path.from_relative(path)
    # Using the actual file, check if the client has access to share the file.
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, SHARE_ACCESS)
    if not rd.success:
        return rd
    rd = private_data.set_link_permissions(link_str, permissions)
    if rd.success:
        private_data.commit()
    response = ClientSetLinkPermissionsSuccessMessage() if rd.success \
        else LinkDoesNotExistMessage()
    host_obj.log_client(client, 'chmod-link', cloud, RelativeLink(link_str),
                        'success' if rd.success else 'error')
    connection.send_obj(response)
def do_client_get_link_permissions(host_obj, connection, address, msg_obj,
                                   client, cloud):
    _log = get_mylog()
    user_id = client.user_id if client else PUBLIC_USER_ID
    session_id = client.uuid if client else None
    link_str = msg_obj.link_string
    private_data = host_obj.get_private_data(cloud)
    if private_data is None:
        msg = 'Somehow the cloud doesn\'t have a privatedata associated with it'
        err = InvalidStateMessage(msg)
        mylog(err.message, '31')
        # host_obj.log_client(client, 'chown+link', cloud, RelativeLink(link_str), 'error')
        send_error_and_close(err, connection)
        return Error(msg)
    # get the path from the link
    rel_path = RelativePath()
    path = private_data.get_path_from_link(link_str)
    if path is None:
        msg = 'The link {} is not valid for this cloud'.format(link_str)
        err = LinkDoesNotExistMessage(msg)
        _log.error(err.message)
        # host_obj.log_client(client, 'chown+link', cloud, RelativeLink(link_str), 'error')
        send_error_and_close(err, connection)
        return
    rel_path.from_relative(path)
    # TODO: Get the permissions of the backing file too, and OR them with the permissions on the link.
    rd = host_obj.client_access_check_or_close(connection, session_id, cloud,
                                               rel_path, NO_ACCESS)
    if not rd.success:
        return rd
    file_perms = rd.data
    rd = private_data.get_link_full_permissions(link_str)
    if rd.success:
        link_perms = rd.data[0]
        users = rd.data[1]
        response = ClientGetLinkPermissionsResponseMessage(
            link_perms | file_perms, users)
    else:
        response = LinkDoesNotExistMessage()
    # host_obj.log_client(client, 'chown+link', cloud, RelativeLink(link_str), 'success' if rd.success else 'error')
    connection.send_obj(response)
def read_file(self, path):
    msg = ReadFileRequestMessage(self.sid, self.cloud_uname, self.cname, path)
    conn = self.connect()
    conn.send_obj(msg)
    data_buffer = ''
    resp = conn.recv_obj()
    if resp.type == READ_FILE_RESPONSE:
        fsize = resp.fsize
        received = 0
        while received < fsize:
            data = conn.recv_next_data(fsize)
            # log_text('Read "{}"'.format(data))
            if len(data) == 0:
                break
            received += len(data)
            data_buffer += data
        return Success(data_buffer)
    else:
        return Error(resp)
def get_link_full_permissions(self, link_str):
    # type: (str) -> ResultAndData
    matching_link = self._find_link(link_str)
    if matching_link is None:
        return Error()
    return Success((matching_link.get_access(), matching_link.get_users()))
def dvdDiscProperties(device, procMgr=DFT_MGR):
    """Return the on-disc title, duration, chapters, audio tracks, subtitle
    tracks, etc. by parsing HandBrakeCLI output. Note that the reported disc
    title may not reflect the actual movie title (e.g., "SONY")."""
    properties = {'titles': {}}
    retcode, sout, serr = procMgr.call(
        ["HandBrakeCLI", "-t", "0", "-i", device])
    if retcode != 0:
        Error("Unable to obtain DVD info from %s" % device)
        Error("HandBrake output: %s \n\n %s" % (sout, serr))
        return None
    # data is hierarchical, delimited by indent.
    # handbrake's data formatting is absolutely abysmal, thus parsing is also
    # ugly and complex.
    # top of this stack is current indent amt
    indentStack = [0]
    # top of this stack is what we add properties to
    dictStack = [properties['titles']]
    # top of this stack is the key (i.e. name) of our parent node
    keyStack = ['']
    # why yes, it *does* print valid, normal-operations data to stderr!
    for line in serr.splitlines():
        if len(line.strip()) == 0:
            # blank line
            continue
        if len(dictStack) <= 1:
            # root-level properties
            if 'DVD Title:' in line:
                # example:
                # libdvdnav: DVD Title: AMADEUS_SIDE_A_16X9_LB
                properties['dvd_title'] = line.rsplit(":", 1)[1].strip()
            elif 'DVD Title (Alternative):' in line:
                properties['dvd_alt_title'] = line.rsplit(":", 1)[1].strip()
            elif 'DVD Serial Number' in line:
                properties['dvd_serial_number'] = line.rsplit(":", 1)[1].strip()
        if line.strip().startswith('+'):
            # a property tree node
            curIndent = getIndent(line)
            if curIndent > indentStack[-1]:
                # we've descended into a child node
                indentStack.append(curIndent)
            else:
                # we've popped back up to a parent node
                while curIndent < indentStack[-1]:
                    # pop stuff off the stacks
                    del dictStack[-1]
                    del keyStack[-1]
                    del indentStack[-1]
            # try to get a key : value pair
            # strip off the leading ' +'
            trimline = line.lstrip(' +\t\r\n')
            pair = map(str.strip, trimline.split(":", 1))
            if len(pair) == 1 and 'Main Feature' in pair[0]:
                dictStack[-1]['main_feature'] = True
                continue
            elif 'track' in keyStack[-1]:
                # a special case. this data is like:
                # 1, English (AC3) (5.1 ch) (iso639-2: eng), 48000Hz, 384000bps
                # seriously, what is the logic behind this crap
                key, val = map(str.strip, trimline.split(',', 1))
            elif len(pair) == 2:
                key, val = pair
            else:
                # I don't care what this stupid node is, it's not even labeled.
                continue
            if len(val) == 0:
                # a parent node. like: 'title 1:'
                # create a new dict for us to add to
                newDict = {}
                dictStack[-1][key] = newDict
                dictStack.append(newDict)
                keyStack.append(key)
                continue
            # now handle the actual data
            # special cases all over the place
            if keyStack[-1] == 'chapters':
                # data like:
                # cells 0->0, 93287 blocks, duration 00:04:48
                datachunks = map(str.strip, val.split(','))
                datapairs = map(str.split, datachunks)
                val = {
                    'cells': datapairs[0][1],
                    'blocks': datapairs[1][0],
                    'duration': datapairs[2][1]
                }
            elif key == 'size':
                # data like:
                # size: 720x480, pixel aspect: 853/720, display aspect: 1.78, 23.976 fps
                datachunks = trimline.split(",")
                datapairs = [map(str.strip, x.split(':')) for x in datachunks]
                dictStack[-1][datapairs[0][0]] = datapairs[0][1]
                dictStack[-1][datapairs[1][0]] = datapairs[1][1]
                dictStack[-1][datapairs[2][0]] = datapairs[2][1]
                dictStack[-1]['fps'] = datapairs[3][0]
                continue
            # store data
            dictStack[-1][key] = val
        else:
            # garbage data
            continue
    return properties
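# dvdDiscProperties leans on getIndent to measure how deep a '+' node sits in
# HandBrake's output. A minimal sketch, assuming indentation is plain leading
# whitespace; tabs or other quirks may need extra handling in practice.
def getIndent(line):
    # number of leading whitespace characters before the '+' marker
    return len(line) - len(line.lstrip(' \t'))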
def mediaInfoData(filename, procManager=DFT_MGR):
    fpath = os.path.abspath(filename)
    # get the media info
    # -f means "full", which outputs lots of redundant data in lots of different
    # text formats. This is the only way to get integer data (e.g. for file
    # sizes, durations, resolution, etc.). We will ignore redundant textual data
    # if there is numerical data available.
    retcode, sout, serr = procManager.call(['mediainfo', '-f', fpath])
    if retcode != 0:
        Error("Could not obtain media info for %s" % fpath)
        return None
    else:
        mode = None
        track = None
        properties = {'tracks': []}
        for line in sout.split('\n'):
            if ':' not in line:
                # push the last track onto the stack
                if mode != 'General' and track != None and mode != None:
                    # 'General' isn't a track
                    # don't push its (empty) track dictionary
                    properties['tracks'].append(track)
                ctgs = ('General', 'Video', 'Audio', 'Text', 'Chapter', 'Menu')
                match = map(line.startswith, ctgs)
                if any(match):
                    # beginning of a new track/category
                    newmode = ctgs[match.index(True)]
                    track = {'type': newmode.lower()}
                    if newmode == 'Menu':
                        track['items'] = {}
                    mode = newmode
                else:
                    # blank line, e.g.
                    # don't attempt to parse or push tracks until we encounter
                    # a new category
                    mode = None
                    continue
            else:
                if mode == 'Menu':
                    a, b = map(str.strip, line.split(' :', 1))
                    if ':' in a:
                        dest_dict = track['items']
                        key, val = b.lower(), a
                    else:
                        dest_dict = track
                        key, val = a.lower(), b
                elif mode == None:
                    continue
                else:
                    key, val = map(str.strip, line.split(':', 1))
                    key = key.lower()
                    if mode == 'General':
                        dest_dict = properties
                    else:
                        dest_dict = track
                try:
                    val = int(val)
                    dest_dict[key] = val
                except ValueError:
                    # not a number
                    if key == 'language':
                        # hackity hack
                        if len(val) < 2:
                            continue
                        elif len(val) == 3 and val.islower():
                            # use only 3-letter language code
                            dest_dict[key] = val
                        else:
                            dest_dict['language name'] = val
                    # replace only if original is a string
                    elif key not in dest_dict or type(dest_dict[key]) == str:
                        dest_dict[key] = val
        return properties