def delete(self, request, sname, export_id):
    """Delete the NFS export of the given share from the given export group.

    Tears down the kernel export point for every export of the share whose
    group shares this group's host string, removes the NFSExport row, drops
    the NFSExportGroup when it has no exports left, and refreshes the
    remaining exports.

    :param sname: share name as it appears in the URL.
    :param export_id: id of the NFSExportGroup to remove the share from.
    """
    try:
        share = validate_share(sname, request)
        eg = validate_export_group(export_id, request)
        cur_exports = list(NFSExport.objects.all())
        export = NFSExport.objects.get(export_group=eg, share=share)
        for e in NFSExport.objects.filter(share=share):
            if (e.export_group.host_str == eg.host_str):
                export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, share.name))
                if (e.export_group.nohide):
                    # nohide exports mount a snapshot one level below the
                    # share's export root; derive its name from the mount.
                    snap_name = e.mount.split(e.share.name + '_')[-1]
                    export_pt = ('%s%s/%s' % (settings.NFS_EXPORT_ROOT,
                                              e.share.name, snap_name))
                teardown_wrapper(export_pt, request, logger)
                cur_exports.remove(e)
        exports = create_nfs_export_input(cur_exports)
        export.delete()
        if (NFSExport.objects.filter(export_group=eg).count() == 0):
            # delete only when this is the only share in the group
            eg.delete()
        refresh_wrapper(exports, request, logger)
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request):
    """Create an NFS export group covering one or more shares.

    Validates the posted shares, rejects duplicate exports for the same
    host string, persists the group plus one NFSExport per share (mounting
    each unmounted share first), then refreshes the kernel export table.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request, sname):
    """Create an NFS export group exporting the single named share.

    Mounts the share if needed, persists the group and its NFSExport row,
    then refreshes the kernel export table and returns the serialized group.
    """
    try:
        share = validate_share(sname, request)
        options = parse_options(request)
        dup_export_check(share, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, share.name))
        if (not is_share_mounted(share.name)):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(share.subvol_name, pool_device, mnt_pt)
        export = NFSExport(export_group=eg, share=share, mount=export_pt)
        export.full_clean()
        export.save()
        cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request):
    """Export the posted shares via NFS as a new export group."""
    with self._handle_exception(request):
        if 'shares' not in request.data:
            handle_exception(
                Exception('Cannot export without specifying shares'),
                request)
        share_objs = [validate_share(name, request)
                      for name in request.data['shares']]
        options = self.parse_options(request)
        for share in share_objs:
            self.dup_export_check(share, options['host_str'], request)
        all_exports = list(NFSExport.objects.all())
        group = NFSExportGroup(**options)
        group.save()
        for share in share_objs:
            mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
            export_pt = '%s%s' % (settings.NFS_EXPORT_ROOT, share.name)
            mount_share(share, mnt_pt)
            entry = NFSExport(export_group=group, share=share,
                              mount=export_pt)
            entry.full_clean()
            entry.save()
            all_exports.append(entry)
        exports = self.create_nfs_export_input(all_exports)
        # fold in any advanced (free-form) export entries as well
        adv = [a.export_str for a in AdvancedNFSExport.objects.all()]
        exports.update(self.create_adv_nfs_export_input(adv, request))
        self.refresh_wrapper(exports, request, logger)
        return Response(NFSExportGroupSerializer(group).data)
def create_adv_nfs_export_input(exports, request):
    """Parse advanced export lines into a dict keyed by export path.

    Each line is '<path> client(opts) [client(opts) ...]'; every client
    section becomes a dict with client string, option list and mount point.
    """
    exports_d = {}
    for line in exports:
        fields = line.split()
        if len(fields) < 2:
            e_msg = ('Invalid exports input -- %s' % line)
            handle_exception(Exception(e_msg), request)
        share_name = fields[0].split('/')[-1]
        share = validate_share(share_name, request)
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        if not is_share_mounted(share.name):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(share, pool_device, mnt_pt)
        entries = []
        for token in fields[1:]:
            pieces = token.split('(')
            if len(pieces) != 2 or pieces[1][-1] != ')':
                e_msg = ('Invalid exports input -- %s. offending '
                         'section: %s' % (line, token))
                handle_exception(Exception(e_msg), request)
            entries.append({
                'client_str': pieces[0],
                'option_list': pieces[1][:-1],
                'mnt_pt': ('%s%s' % (settings.MNT_PT, share_name)),
            })
        exports_d[fields[0]] = entries
    return exports_d
def create_adv_nfs_export_input(exports, request):
    """Build the exports dict from raw advanced-export lines."""
    parsed = {}
    for line in exports:
        fields = line.split()
        if len(fields) < 2:
            handle_exception(
                Exception("Invalid exports input -- ({}).".format(line)),
                request)
        share_name = fields[0].split("/")[-1]
        share = validate_share(share_name, request)
        mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
        if not share.is_mounted:
            mount_share(share, mnt_pt)
        entries = []
        for chunk in fields[1:]:
            halves = chunk.split("(")
            # a valid section is exactly 'client(options)'
            if len(halves) != 2 or halves[1][-1] != ")":
                e_msg = (
                    "Invalid exports input -- ({}). Offending section: ({})."
                ).format(line, chunk)
                handle_exception(Exception(e_msg), request)
            entries.append({
                "client_str": halves[0],
                "option_list": halves[1][:-1],
                "mnt_pt": ("%s%s" % (settings.MNT_PT, share_name)),
            })
        parsed[fields[0]] = entries
    return parsed
def post(self, request):
    """Create a new NFS export group for the posted shares."""
    with self._handle_exception(request):
        if 'shares' not in request.data:
            handle_exception(
                Exception('Cannot export without specifying shares'),
                request)
        share_list = [validate_share(name, request)
                      for name in request.data['shares']]
        options = parse_options(request)
        for share in share_list:
            dup_export_check(share, options['host_str'], request)
        current = list(NFSExport.objects.all())
        group = NFSExportGroup(**options)
        group.save()
        for share in share_list:
            mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
            export_pt = '%s%s' % (settings.NFS_EXPORT_ROOT, share.name)
            if not is_share_mounted(share.name):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share, pool_device, mnt_pt)
            entry = NFSExport(export_group=group, share=share,
                              mount=export_pt)
            entry.full_clean()
            entry.save()
            current.append(entry)
        exports = create_nfs_export_input(current)
        # merge in advanced export entries before refreshing the kernel table
        adv = [a.export_str for a in AdvancedNFSExport.objects.all()]
        exports.update(create_adv_nfs_export_input(adv, request))
        refresh_wrapper(exports, request, logger)
        return Response(NFSExportGroupSerializer(group).data)
def create_adv_nfs_export_input(exports, request):
    """Translate advanced export strings into the exports dictionary."""
    result = {}
    for line in exports:
        fields = line.split()
        if len(fields) < 2:
            handle_exception(
                Exception('Invalid exports input -- ({}).'.format(line)),
                request)
        share_name = fields[0].split('/')[-1]
        share = validate_share(share_name, request)
        mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
        if not share.is_mounted:
            mount_share(share, mnt_pt)
        result[fields[0]] = []
        for section in fields[1:]:
            pieces = section.split('(')
            # each section must look like 'client(options)'
            if len(pieces) != 2 or pieces[1][-1] != ')':
                e_msg = ('Invalid exports input -- ({}). Offending '
                         'section: ({}).').format(line, section)
                handle_exception(Exception(e_msg), request)
            result[fields[0]].append({
                'client_str': pieces[0],
                'option_list': pieces[1][:-1],
                'mnt_pt': ('%s%s' % (settings.MNT_PT, share_name)),
            })
    return result
def get_queryset(self, *args, **kwargs):
    """Return export group(s) for the share named in the URL kwargs.

    When 'export_id' is present, pagination is disabled and the single
    matching NFSExportGroup is returned (or an empty list when it does not
    exist). Otherwise, all visible (nohide=False) groups exporting the
    share are returned.
    """
    share = validate_share(kwargs['sname'], self.request)
    if ('export_id' in kwargs):
        self.paginate_by = 0
        try:
            return NFSExportGroup.objects.get(id=kwargs['export_id'])
        except (NFSExportGroup.DoesNotExist, ValueError):
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit and hid programming errors.
            # ValueError covers a non-integer export_id.
            return []
    exports = NFSExport.objects.filter(share=share)
    ids = [e.export_group.id for e in exports]
    return NFSExportGroup.objects.filter(nohide=False, id__in=ids)
def put(self, request, sname, export_id):
    """Apply new options to an export group and refresh NFS exports."""
    with self._handle_exception(request):
        share = validate_share(sname, request)
        eg = validate_export_group(export_id, request)
        options = parse_options(request)
        dup_export_check(share, options["host_str"], request,
                         export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        # NOTE: .update() bypasses Model.save(); the explicit save() on the
        # refreshed row presumably triggers save-time behavior -- confirm
        # against the model definition.
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        current = list(NFSExport.objects.all())
        refresh_wrapper(create_nfs_export_input(current), request, logger)
        return Response(NFSExportGroupSerializer(eg).data)
def post(self, request):
    """Export shares via SFTP under per-owner chroot directories.

    Rejects shares already exported or owned by root, then creates SFTP
    rows, mounts/bind-mounts each share, and rewrites the SFTP config to
    cover both the new and the pre-existing exports.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    editable = 'rw'
    if ('read_only' in request.DATA and
            request.DATA['read_only'] is True):
        editable = 'ro'
    try:
        mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
        input_list = []
        for share in shares:
            if (SFTP.objects.filter(share=share).exists()):
                e_msg = ('Share(%s) is already exported via SFTP' %
                         share.name)
                handle_exception(Exception(e_msg), request)
            if (share.owner == 'root'):
                e_msg = ('Share(%s) is owned by root. It cannot be '
                         'exported via SFTP with root ownership' %
                         share.name)
                handle_exception(Exception(e_msg), request)
        for share in shares:
            sftpo = SFTP(share=share, editable=editable)
            sftpo.save()
            # mount if not already mounted
            helper_mount_share(share)
            # bindmount if not already
            sftp_mount(share, settings.MNT_PT, settings.SFTP_MNT_ROOT,
                       mnt_map, editable)
            sftp_snap_toggle(share)
            chroot_loc = ('%s%s' % (settings.SFTP_MNT_ROOT, share.owner))
            rsync_for_sftp(chroot_loc)
            input_list.append({
                'user': share.owner,
                'dir': chroot_loc,
            })
        for sftpo in SFTP.objects.all():
            if (sftpo.share not in shares):
                input_list.append({
                    'user': sftpo.share.owner,
                    'dir': ('%s%s' % (settings.SFTP_MNT_ROOT,
                                      sftpo.share.owner)),
                })
        update_sftp_config(input_list)
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def put(self, request, sname, export_id):
    """Update options on an existing export group for the given share."""
    with self._handle_exception(request):
        share = validate_share(sname, request)
        group = validate_export_group(export_id, request)
        opts = parse_options(request)
        dup_export_check(share, opts['host_str'], request,
                         export_id=int(export_id))
        matches = NFSExportGroup.objects.filter(id=export_id)
        matches.update(**opts)
        # re-save the updated row; .update() alone bypasses Model.save()
        matches[0].save()
        exports = create_nfs_export_input(list(NFSExport.objects.all()))
        refresh_wrapper(exports, request, logger)
        return Response(NFSExportGroupSerializer(group).data)
def post(self, request):
    """Export shares over SFTP, chrooted under each share owner's home."""
    with self._handle_exception(request):
        if "shares" not in request.data:
            handle_exception(Exception("Must provide share names."),
                             request)
        share_objs = [validate_share(name, request)
                      for name in request.data["shares"]]
        read_only = request.data.get("read_only") is True
        editable = "ro" if read_only else "rw"
        mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
        input_map = {}
        for sh in share_objs:
            if SFTP.objects.filter(share=sh).exists():
                e_msg = (
                    "Share ({}) is already exported via SFTP.").format(
                        sh.name)
                handle_exception(Exception(e_msg), request)
            if sh.owner == "root":
                e_msg = ("Share ({}) is owned by root. It cannot be "
                         "exported via SFTP with "
                         "root ownership.").format(sh.name)
                handle_exception(Exception(e_msg), request)
        for sh in share_objs:
            SFTP(share=sh, editable=editable).save()
            # mount if not already mounted
            helper_mount_share(sh)
            # bindmount if not already
            sftp_mount(sh, settings.MNT_PT, settings.SFTP_MNT_ROOT,
                       mnt_map, editable)
            sftp_snap_toggle(sh)
            chroot_loc = "{}{}".format(settings.SFTP_MNT_ROOT, sh.owner)
            rsync_for_sftp(chroot_loc)
            input_map[sh.owner] = chroot_loc
        # keep previously exported shares in the config as well
        for existing in SFTP.objects.all():
            if existing.share not in share_objs:
                input_map[existing.share.owner] = "{}{}".format(
                    settings.SFTP_MNT_ROOT, existing.share.owner)
        update_sftp_config(input_map)
        return Response()
def post(self, request):
    """Export shares via SFTP (duplicate of the dict-literal variant above).

    Validates ownership and duplicate-export constraints, persists SFTP
    rows, mounts and bind-mounts each share, then regenerates the SFTP
    configuration for all current exports.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    editable = 'rw'
    if ('read_only' in request.DATA and
            request.DATA['read_only'] is True):
        editable = 'ro'
    try:
        mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
        input_list = []
        for share in shares:
            if (SFTP.objects.filter(share=share).exists()):
                e_msg = ('Share(%s) is already exported via SFTP' %
                         share.name)
                handle_exception(Exception(e_msg), request)
            if (share.owner == 'root'):
                e_msg = ('Share(%s) is owned by root. It cannot be '
                         'exported via SFTP with root ownership' %
                         share.name)
                handle_exception(Exception(e_msg), request)
        for share in shares:
            sftpo = SFTP(share=share, editable=editable)
            sftpo.save()
            # mount if not already mounted
            helper_mount_share(share)
            # bindmount if not already
            sftp_mount(share, settings.MNT_PT, settings.SFTP_MNT_ROOT,
                       mnt_map, editable)
            sftp_snap_toggle(share)
            chroot_loc = ('%s%s' % (settings.SFTP_MNT_ROOT, share.owner))
            rsync_for_sftp(chroot_loc)
            input_list.append({'user': share.owner,
                               'dir': chroot_loc, })
        for sftpo in SFTP.objects.all():
            if (sftpo.share not in shares):
                input_list.append(
                    {'user': sftpo.share.owner,
                     'dir': ('%s%s' % (settings.SFTP_MNT_ROOT,
                                       sftpo.share.owner)), })
        update_sftp_config(input_list)
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def put(self, request, export_id):
    """Replace the share set and options of an existing NFS export group.

    Exports no longer listed are deleted; new shares are mounted (if
    needed) and exported; advanced export entries are merged in before the
    kernel export table is refreshed.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        eg = validate_export_group(export_id, request)
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request,
                             export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        # .update() bypasses save(); re-save so save-time behavior runs.
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                # share removed from the group: drop its export
                cur_exports.remove(e)
                e.delete()
            else:
                # already exported: no need to re-create below
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        adv_entries = [
            e.export_str for e in AdvancedNFSExport.objects.all()
        ]
        exports_d = create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request):
    """Enable SFTP access for the posted shares."""
    with self._handle_exception(request):
        if 'shares' not in request.data:
            handle_exception(Exception('Must provide share names'),
                             request)
        share_objs = [validate_share(name, request)
                      for name in request.data['shares']]
        editable = 'ro' if request.data.get('read_only') is True else 'rw'
        mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
        input_map = {}
        for share in share_objs:
            if SFTP.objects.filter(share=share).exists():
                handle_exception(
                    Exception('Share(%s) is already exported via SFTP' %
                              share.name), request)
            if share.owner == 'root':
                e_msg = ('Share(%s) is owned by root. It cannot be '
                         'exported via SFTP with root ownership' %
                         share.name)
                handle_exception(Exception(e_msg), request)
        for share in share_objs:
            SFTP(share=share, editable=editable).save()
            # mount if not already mounted
            helper_mount_share(share)
            # bindmount if not already
            sftp_mount(share, settings.MNT_PT, settings.SFTP_MNT_ROOT,
                       mnt_map, editable)
            sftp_snap_toggle(share)
            chroot_loc = '%s%s' % (settings.SFTP_MNT_ROOT, share.owner)
            rsync_for_sftp(chroot_loc)
            input_map[share.owner] = chroot_loc
        for existing in SFTP.objects.all():
            if existing.share not in share_objs:
                input_map[existing.share.owner] = (
                    '%s%s' % (settings.SFTP_MNT_ROOT,
                              existing.share.owner))
        update_sftp_config(input_map)
        return Response()
def post(self, request):
    """Export the posted shares via AFP (netatalk).

    Validates the time_machine flag and duplicate exports, creates a
    NetatalkShare row per share (mounting unmounted shares), then rewrites
    the AFP configuration and reloads/restarts netatalk.
    """
    if ('shares' not in request.data):
        e_msg = 'Must provide share names.'
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.data['shares']]
    description = request.data.get('description', '')
    if (description == ''):
        description = self.def_description
    time_machine = request.data.get('time_machine', 'yes')
    if (time_machine != 'yes' and time_machine != 'no'):
        e_msg = ('Time_machine must be yes or no. '
                 'Not ({}).').format(time_machine)
        handle_exception(Exception(e_msg), request)
    for share in shares:
        if (NetatalkShare.objects.filter(share=share).exists()):
            e_msg = ('Share ({}) is already exported '
                     'via AFP.').format(share.name)
            handle_exception(Exception(e_msg), request)
    try:
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            cur_description = '%s' % share.name
            # NOTE(review): 'share.name == ""' below looks unreachable
            # (share names appear to be non-empty), which would make
            # cur_description always the share name and the user-supplied
            # description dead -- confirm intent before changing.
            if (len(shares) == 1 and
                    description != self.def_description and
                    share.name == ''):
                cur_description = description
            afpo = NetatalkShare(share=share, path=mnt_pt,
                                 description=cur_description,
                                 time_machine=time_machine)
            afpo.save()
            if not share.is_mounted:
                mount_share(share, mnt_pt)
        refresh_afp_config(list(NetatalkShare.objects.all()))
        systemctl('netatalk', 'reload-or-restart')
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)
def post(self, request):
    """Create SFTP exports for the given shares and rebuild the config."""
    with self._handle_exception(request):
        if 'shares' not in request.data:
            handle_exception(Exception('Must provide share names'),
                             request)
        requested = [validate_share(sn, request)
                     for sn in request.data['shares']]
        mode = 'rw'
        if request.data.get('read_only') is True:
            mode = 'ro'
        mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
        config_map = {}
        # validate everything up front before touching the system
        for sh in requested:
            if SFTP.objects.filter(share=sh).exists():
                e_msg = ('Share(%s) is already exported via SFTP' %
                         sh.name)
                handle_exception(Exception(e_msg), request)
            if sh.owner == 'root':
                e_msg = ('Share(%s) is owned by root. It cannot be '
                         'exported via SFTP with root ownership' %
                         sh.name)
                handle_exception(Exception(e_msg), request)
        for sh in requested:
            record = SFTP(share=sh, editable=mode)
            record.save()
            # mount if not already mounted
            helper_mount_share(sh)
            # bindmount if not already
            sftp_mount(sh, settings.MNT_PT, settings.SFTP_MNT_ROOT,
                       mnt_map, mode)
            sftp_snap_toggle(sh)
            chroot_loc = '%s%s' % (settings.SFTP_MNT_ROOT, sh.owner)
            rsync_for_sftp(chroot_loc)
            config_map[sh.owner] = chroot_loc
        for record in SFTP.objects.all():
            if record.share in requested:
                continue
            config_map[record.share.owner] = (
                '%s%s' % (settings.SFTP_MNT_ROOT, record.share.owner))
        update_sftp_config(config_map)
        return Response()
def put(self, request, export_id):
    """Replace the share set and options of an existing NFS export group.

    Exports no longer listed are removed; newly listed shares are mounted
    (if needed) and exported; advanced export entries are merged before
    the kernel export table is refreshed.
    """
    with self._handle_exception(request):
        if ('shares' not in request.data):
            e_msg = 'Cannot export without specifying shares.'
            handle_exception(Exception(e_msg), request)
        shares = [
            validate_share(s, request) for s in request.data['shares']
        ]
        eg = self.validate_export_group(export_id, request)
        options = self.parse_options(request)
        for s in shares:
            self.dup_export_check(s, options['host_str'], request,
                                  export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        # NOTE(review): .update() bypasses Model.save(); the explicit
        # save() on the refreshed row presumably restores save-time
        # behavior -- confirm against the model definition.
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                # share dropped from the group: remove its export
                cur_exports.remove(e)
                e.delete()
            else:
                # already exported: skip re-creation below
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if not s.is_mounted:
                mount_share(s, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = self.create_nfs_export_input(cur_exports)
        adv_entries = [
            e.export_str for e in AdvancedNFSExport.objects.all()
        ]
        exports_d = self.create_adv_nfs_export_input(adv_entries,
                                                     request)
        exports.update(exports_d)
        self.refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
def put(self, request, export_id):
    """Update an NFS export group's options and membership (legacy variant).

    Removes exports for shares no longer listed, creates exports for newly
    listed shares (mounting them if needed), merges advanced entries, and
    refreshes the kernel export table.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        eg = validate_export_group(export_id, request)
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request,
                             export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        # .update() bypasses save(); re-save so save-time behavior runs.
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                cur_exports.remove(e)
                e.delete()
            else:
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request):
    """Export the posted shares via AFP (netatalk), legacy variant.

    Validates the time_machine flag and duplicate exports, creates a
    NetatalkShare row per share (mounting unmounted shares first), then
    rewrites the AFP config and reloads netatalk.
    """
    if ('shares' not in request.data):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.data['shares']]
    description = request.data.get('description', '')
    if (description == ''):
        description = self.def_description
    time_machine = request.data.get('time_machine', 'yes')
    if (time_machine != 'yes' and time_machine != 'no'):
        # fixed user-facing typo: 'yes or now' -> 'yes or no'
        e_msg = ('time_machine must be yes or no. not %s' % time_machine)
        handle_exception(Exception(e_msg), request)
    for share in shares:
        if (NetatalkShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via AFP' % share.name)
            handle_exception(Exception(e_msg), request)
    try:
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            cur_description = '%s %s' % (share.name, description)
            if (len(shares) == 1 and
                    description != self.def_description):
                cur_description = description
            afpo = NetatalkShare(share=share, path=mnt_pt,
                                 description=cur_description,
                                 time_machine=time_machine)
            afpo.save()
            if (not is_share_mounted(share.name)):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share, pool_device, mnt_pt)
        refresh_afp_config(list(NetatalkShare.objects.all()))
        systemctl('netatalk', 'reload')
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request, sname):
    """Create an export group exporting the single named share."""
    with self._handle_exception(request):
        share = validate_share(sname, request)
        options = parse_options(request)
        dup_export_check(share, options["host_str"], request)
        existing = list(NFSExport.objects.all())
        group = NFSExportGroup(**options)
        group.save()
        mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
        export_pt = "%s%s" % (settings.NFS_EXPORT_ROOT, share.name)
        if not is_share_mounted(share.name):
            mount_share(share, mnt_pt)
        entry = NFSExport(export_group=group, share=share,
                          mount=export_pt)
        entry.full_clean()
        entry.save()
        existing.append(entry)
        refresh_wrapper(create_nfs_export_input(existing), request, logger)
        return Response(NFSExportGroupSerializer(group).data)
def delete(self, request, sname, export_id):
    """Remove a share's NFS export from the given export group."""
    with self._handle_exception(request):
        share = validate_share(sname, request)
        group = validate_export_group(export_id, request)
        remaining = list(NFSExport.objects.all())
        target = NFSExport.objects.get(export_group=group, share=share)
        for exp in NFSExport.objects.filter(share=share):
            if exp.export_group.host_str != group.host_str:
                continue
            export_pt = "%s%s" % (settings.NFS_EXPORT_ROOT, share.name)
            if exp.export_group.nohide:
                # nohide exports mount a snapshot below the share root;
                # derive the snapshot name from the stored mount path
                snap_name = exp.mount.split(exp.share.name + "_")[-1]
                export_pt = "%s%s/%s" % (settings.NFS_EXPORT_ROOT,
                                         exp.share.name, snap_name)
            nfs4_mount_teardown(export_pt)
            remaining.remove(exp)
        exports = create_nfs_export_input(remaining)
        target.delete()
        if NFSExport.objects.filter(export_group=group).count() == 0:
            # the group has no exports left; drop it too
            group.delete()
        refresh_wrapper(exports, request, logger)
        return Response()
def put(self, request, export_id):
    """Replace the share set and options of an existing NFS export group.

    Deletes exports for shares no longer listed, creates exports for newly
    listed shares (mounting them if needed), merges advanced export
    entries, then refreshes the kernel export table.
    """
    with self._handle_exception(request):
        if ('shares' not in request.data):
            e_msg = 'Cannot export without specifying shares.'
            handle_exception(Exception(e_msg), request)
        shares = [validate_share(s, request)
                  for s in request.data['shares']]
        eg = self.validate_export_group(export_id, request)
        options = self.parse_options(request)
        for s in shares:
            self.dup_export_check(s, options['host_str'], request,
                                  export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        # NOTE(review): .update() bypasses Model.save(); the follow-up
        # save() on the refreshed row presumably restores save-time
        # behavior -- confirm against the model definition.
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                # share dropped from the group: remove its export
                cur_exports.remove(e)
                e.delete()
            else:
                # already exported: skip re-creation below
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if not s.is_mounted:
                mount_share(s, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = self.create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = self.create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        self.refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
def create_adv_nfs_export_input(exports, request):
    """Parse advanced export strings into the exports dictionary."""
    exports_d = {}
    for raw in exports:
        fields = raw.split()
        if len(fields) < 2:
            handle_exception(
                Exception('Invalid exports input -- %s' % raw), request)
        name = fields[0].split('/')[-1]
        share = validate_share(name, request)
        mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
        if not is_share_mounted(share.name):
            mount_share(share, mnt_pt)
        exports_d[fields[0]] = []
        for part in fields[1:]:
            tokens = part.split('(')
            # each section must be of the form 'client(options)'
            if len(tokens) != 2 or tokens[1][-1] != ')':
                e_msg = ('Invalid exports input -- %s. offending '
                         'section: %s' % (raw, part))
                handle_exception(Exception(e_msg), request)
            exports_d[fields[0]].append(
                {'client_str': tokens[0],
                 'option_list': tokens[1][:-1],
                 'mnt_pt': ('%s%s' % (settings.MNT_PT, name))})
    return exports_d
def create_adv_nfs_export_input(exports, request):
    """Parse advanced export strings into a dict keyed by export path.

    Each input line is '<path> client(opts) [client(opts) ...]'. Invalid
    lines are rejected via handle_exception.
    """
    exports_d = {}
    for e in exports:
        fields = e.split()
        if len(fields) < 2:
            e_msg = "Invalid exports input -- %s" % e
            handle_exception(Exception(e_msg), request)
        share = fields[0].split("/")[-1]
        s = validate_share(share, request)
        mnt_pt = "%s%s" % (settings.MNT_PT, s.name)
        if not is_share_mounted(s.name):
            pool_device = Disk.objects.filter(pool=s.pool)[0].name
            # NOTE(review): sibling variants of this helper call
            # mount_share(s, mnt_pt) with two args; this three-arg call
            # with pool_device looks inconsistent -- confirm against the
            # mount_share signature in use.
            mount_share(s, pool_device, mnt_pt)
        exports_d[fields[0]] = []
        for f in fields[1:]:
            cf = f.split("(")
            if len(cf) != 2 or cf[1][-1] != ")":
                # adjacent literals concatenate before the % formatting
                # applies, so this builds one message string
                e_msg = "Invalid exports input -- %s. offending " "section: %s" % (e, f)
                handle_exception(Exception(e_msg), request)
            exports_d[fields[0]].append(
                {"client_str": cf[0], "option_list": cf[1][:-1], "mnt_pt": ("%s%s" % (settings.MNT_PT, share))}
            )
    return exports_d
def post(self, request):
    """Export the posted shares via Samba."""
    if 'shares' not in request.data:
        handle_exception(Exception('Must provide share names'), request)
    share_objs = [validate_share(name, request)
                  for name in request.data['shares']]
    options = self._validate_input(request)
    custom_config = options.pop('custom_config')
    # reject duplicates before touching the database
    for share in share_objs:
        if SambaShare.objects.filter(share=share).exists():
            handle_exception(
                Exception('Share(%s) is already exported via Samba' %
                          share.name), request)
    with self._handle_exception(request):
        for share in share_objs:
            mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
            options['share'] = share
            options['path'] = mnt_pt
            smb_share = SambaShare(**options)
            smb_share.save()
            for cc in custom_config:
                SambaCustomConfig(smb_share=smb_share,
                                  custom_config=cc).save()
            if not is_share_mounted(share.name):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share, pool_device, mnt_pt)
            admin_users = request.data.get('admin_users', None)
            if admin_users is None:
                admin_users = []
            for au in admin_users:
                User.objects.get(username=au).smb_shares.add(smb_share)
        refresh_smb_config(list(SambaShare.objects.all()))
        self._restart_samba()
        return Response(SambaShareSerializer(smb_share).data)
def post(self, request):
    """Export shares via Samba with validated share-level options.

    Builds the options dict from defaults overridden by validated request
    values, rejects shares already exported, creates SambaShare rows
    (mounting unmounted shares), rewrites smb.conf and restarts samba.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    options = {
        'comment': 'samba export',
        'browsable': 'yes',
        'guest_ok': 'no',
        'read_only': 'no',
        'create_mask': '0755',
    }
    if ('comment' in request.DATA):
        options['comment'] = request.DATA['comment']
    if ('browsable' in request.DATA):
        if (request.DATA['browsable'] != 'yes' and
                request.DATA['browsable'] != 'no'):
            e_msg = ('Invalid choice for browsable. Possible '
                     'choices are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['browsable'] = request.DATA['browsable']
    if ('guest_ok' in request.DATA):
        if (request.DATA['guest_ok'] != 'yes' and
                request.DATA['guest_ok'] != 'no'):
            e_msg = ('Invalid choice for guest_ok. Possible '
                     'options are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['guest_ok'] = request.DATA['guest_ok']
    if ('read_only' in request.DATA):
        if (request.DATA['read_only'] != 'yes' and
                request.DATA['read_only'] != 'no'):
            e_msg = ('Invalid choice for read_only. Possible '
                     'options are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['read_only'] = request.DATA['read_only']
    if ('create_mask' in request.DATA):
        if (request.DATA['create_mask'] not in self.CREATE_MASKS):
            # fixed: string literal previously broken across a line
            e_msg = ('Invalid choice for create_mask. Possible '
                     'options are: %s' % self.CREATE_MASKS)
            handle_exception(Exception(e_msg), request)
        # fixed: the validated value was previously discarded, leaving
        # the '0755' default in force regardless of the request.
        options['create_mask'] = request.DATA['create_mask']
    for share in shares:
        if (SambaShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via Samba' %
                     share.name)
            handle_exception(Exception(e_msg), request)
    try:
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            smb_share = SambaShare(share=share, path=mnt_pt,
                                   comment=options['comment'],
                                   browsable=options['browsable'],
                                   read_only=options['read_only'],
                                   guest_ok=options['guest_ok'],
                                   create_mask=options['create_mask'])
            smb_share.save()
            if (not is_share_mounted(share.name)):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share.subvol_name, pool_device, mnt_pt)
        refresh_smb_config(list(SambaShare.objects.all()),
                           settings.SMB_CONF)
        restart_samba()
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def post(self, request):
    """Export shares via Samba, including admin_users support.

    Like the sibling Samba post handler but also validates and persists
    the admin_users option. Creates SambaShare rows (mounting unmounted
    shares), rewrites smb.conf and restarts samba.
    """
    if ('shares' not in request.DATA):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    options = {
        'comment': 'samba export',
        'browsable': 'yes',
        'guest_ok': 'no',
        'read_only': 'no',
        'create_mask': '0755',
        'admin_users': 'Administrator',
    }
    if ('comment' in request.DATA):
        options['comment'] = request.DATA['comment']
    if ('browsable' in request.DATA):
        if (request.DATA['browsable'] != 'yes' and
                request.DATA['browsable'] != 'no'):
            e_msg = ('Invalid choice for browsable. Possible '
                     'choices are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['browsable'] = request.DATA['browsable']
    if ('guest_ok' in request.DATA):
        if (request.DATA['guest_ok'] != 'yes' and
                request.DATA['guest_ok'] != 'no'):
            e_msg = ('Invalid choice for guest_ok. Possible '
                     'options are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['guest_ok'] = request.DATA['guest_ok']
    if ('read_only' in request.DATA):
        if (request.DATA['read_only'] != 'yes' and
                request.DATA['read_only'] != 'no'):
            e_msg = ('Invalid choice for read_only. Possible '
                     'options are yes or no.')
            handle_exception(Exception(e_msg), request)
        options['read_only'] = request.DATA['read_only']
    if ('create_mask' in request.DATA):
        if (request.DATA['create_mask'] not in self.CREATE_MASKS):
            # fixed: string literal previously broken across a line
            e_msg = ('Invalid choice for create_mask. Possible '
                     'options are: %s' % self.CREATE_MASKS)
            handle_exception(Exception(e_msg), request)
        # fixed: the validated value was previously discarded, leaving
        # the '0755' default in force regardless of the request.
        options['create_mask'] = request.DATA['create_mask']
    if ('admin_users' in request.DATA):
        options['admin_users'] = request.DATA['admin_users']
    for share in shares:
        if (SambaShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via Samba' %
                     share.name)
            handle_exception(Exception(e_msg), request)
    try:
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            smb_share = SambaShare(share=share, path=mnt_pt,
                                   comment=options['comment'],
                                   browsable=options['browsable'],
                                   read_only=options['read_only'],
                                   guest_ok=options['guest_ok'],
                                   create_mask=options['create_mask'],
                                   admin_users=options['admin_users'])
            smb_share.save()
            if (not is_share_mounted(share.name)):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share.subvol_name, pool_device, mnt_pt)
        refresh_smb_config(list(SambaShare.objects.all()))
        restart_samba()
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        # fixed: 'except Exception, e' is Python-2-only syntax.
        handle_exception(e, request)
def get_queryset(self, *args, **kwargs):
    """Return the visible (nohide=False) export groups for the named share."""
    share = validate_share(self.kwargs['sname'], self.request)
    group_ids = [exp.export_group.id
                 for exp in NFSExport.objects.filter(share=share)]
    return NFSExportGroup.objects.filter(nohide=False, id__in=group_ids)