def post(self, request, sname):
    try:
        share = validate_share(sname, request)
        options = parse_options(request)
        dup_export_check(share, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, share.name))
        if (not is_share_mounted(share.name)):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(share.subvol_name, pool_device, mnt_pt)
        export = NFSExport(export_group=eg, share=share, mount=export_pt)
        export.full_clean()
        export.save()
        cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def post(self, request, command):
    if (command == 'bootstrap'):
        try:
            for share in Share.objects.all():
                if (not is_share_mounted(share.name)):
                    mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
                    pool_device = Disk.objects.filter(
                        pool=share.pool)[0].name
                    mount_share(share.subvol_name, pool_device, mnt_pt)
        except Exception as e:
            # N.B. pool_device and mnt_pt are only bound once the loop has
            # reached its first unmounted share, so this message assumes
            # the failure happened in mount_share() itself.
            e_msg = ('Unable to mount a share(%s, %s) during bootstrap.' %
                     (pool_device, mnt_pt))
            logger.error(e_msg)
            logger.exception(e)
            handle_exception(Exception(e_msg), request)
        try:
            mnt_map = sftp_mount_map(settings.SFTP_MNT_ROOT)
            for sftpo in SFTP.objects.all():
                sftp_mount(sftpo.share, settings.MNT_PT,
                           settings.SFTP_MNT_ROOT, mnt_map, sftpo.editable)
                sftp_snap_toggle(sftpo.share)
        except Exception as e:
            e_msg = ('Unable to export all sftp shares due to a system'
                     ' error')
            logger.error(e_msg)
            logger.exception(e)
            handle_exception(Exception(e_msg), request)

def post(self, request):
    with self._handle_exception(request):
        if ('shares' not in request.data):
            e_msg = ('Cannot export without specifying shares')
            handle_exception(Exception(e_msg), request)
        shares = [validate_share(s, request)
                  for s in request.data['shares']]
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)

def post(self, request, command):
    if (command == 'bootstrap'):
        self._refresh_pool_state()
        for p in Pool.objects.all():
            import_shares(p, request)
        for share in Share.objects.all():
            try:
                if (share.pqgroup == settings.MODEL_DEFS['pqgroup']):
                    share.pqgroup = qgroup_create(share.pool)
                    share.save()
                if (not is_share_mounted(share.name)):
                    mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
                    mount_share(share, mnt_pt)
            except Exception as e:
                e_msg = ('Exception while mounting a share(%s) during '
                         'bootstrap: %s' % (share.name, e.__str__()))
                logger.error(e_msg)
                logger.exception(e)
            try:
                import_snapshots(share)
            except Exception as e:
                e_msg = ('Exception while importing Snapshots of '
                         'Share(%s): %s' % (share.name, e.__str__()))
                logger.error(e_msg)
                logger.exception(e)

def vol_ops(container):
    ops_list = []
    for v in DVolume.objects.filter(container=container):
        share_mnt = ('%s%s' % (settings.MNT_PT, v.share.name))
        mount_share(v.share, share_mnt)
        ops_list.extend(['-v', '%s:%s' % (share_mnt, v.dest_dir)])
    return ops_list

def post(self, request):
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def post(self, request):
    if ('shares' not in request.data):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [self._validate_share(request, s)
              for s in request.data['shares']]
    options = self._validate_input(request)
    custom_config = options['custom_config']
    del options['custom_config']
    for share in shares:
        if (SambaShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via Samba' %
                     share.name)
            handle_exception(Exception(e_msg), request)
    with self._handle_exception(request):
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            options['share'] = share
            options['path'] = mnt_pt
            smb_share = SambaShare(**options)
            smb_share.save()
            for cc in custom_config:
                cco = SambaCustomConfig(smb_share=smb_share,
                                        custom_config=cc)
                cco.save()
            if (not is_share_mounted(share.name)):
                mount_share(share, mnt_pt)
            admin_users = request.data.get('admin_users', [])
            if (admin_users is None):
                admin_users = []
            self._set_admin_users(admin_users, smb_share)
        refresh_smb_config(list(SambaShare.objects.all()))
        self._restart_samba()
        return Response(SambaShareSerializer(smb_share).data)

def post(self, request, sname):
    with self._handle_exception(request):
        share = Share.objects.get(name=sname)
        options = {
            'owner': 'root',
            'group': 'root',
            'perms': '755',
            'orecursive': True,
            'precursive': True,
        }
        options['owner'] = request.data.get('owner', options['owner'])
        options['group'] = request.data.get('group', options['group'])
        options['perms'] = request.data.get('perms', options['perms'])
        options['orecursive'] = request.data.get('orecursive',
                                                 options['orecursive'])
        options['precursive'] = request.data.get('precursive',
                                                 options['precursive'])
        share.owner = options['owner']
        share.group = options['group']
        share.perms = options['perms']
        share.save()
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        force_mount = False
        if (not is_share_mounted(share.name)):
            mount_share(share, mnt_pt)
            force_mount = True
        chown(mnt_pt, options['owner'], options['group'],
              options['orecursive'])
        chmod(mnt_pt, options['perms'], options['precursive'])
        if (force_mount is True):
            umount_root(mnt_pt)
        return Response(ShareSerializer(share).data)

def post(self, request, sname): with self._handle_exception(request): share = Share.objects.get(name=sname) options = {"owner": "root", "group": "root", "perms": "755", "orecursive": True, "precursive": True} options["owner"] = request.data.get("owner", options["owner"]) options["group"] = request.data.get("group", options["group"]) options["perms"] = request.data.get("perms", options["perms"]) options["orecursive"] = request.data.get("orecursive", options["orecursive"]) options["precursive"] = request.data.get("precursive", options["precursive"]) share.owner = options["owner"] share.group = options["group"] share.perms = options["perms"] share.save() mnt_pt = "%s%s" % (settings.MNT_PT, share.name) force_mount = False if not is_share_mounted(share.name): pool_device = Disk.objects.filter(pool=share.pool)[0].name mount_share(share, pool_device, mnt_pt) force_mount = True chown(mnt_pt, options["owner"], options["group"], options["orecursive"]) chmod(mnt_pt, options["perms"], options["precursive"]) if force_mount is True: umount_root(mnt_pt) return Response(ShareSerializer(share).data)
def create_adv_nfs_export_input(exports, request): exports_d = {} for e in exports: fields = e.split() if len(fields) < 2: e_msg = "Invalid exports input -- ({}).".format(e) handle_exception(Exception(e_msg), request) share = fields[0].split("/")[-1] s = validate_share(share, request) mnt_pt = "%s%s" % (settings.MNT_PT, s.name) if not s.is_mounted: mount_share(s, mnt_pt) exports_d[fields[0]] = [] for f in fields[1:]: cf = f.split("(") if len(cf) != 2 or cf[1][-1] != ")": e_msg = ( "Invalid exports input -- ({}). Offending section: ({})." ).format(e, f) handle_exception(Exception(e_msg), request) exports_d[fields[0]].append({ "client_str": cf[0], "option_list": cf[1][:-1], "mnt_pt": ("%s%s" % (settings.MNT_PT, share)), }) return exports_d
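# Illustrative sketch (not part of the source above): the parser expects each
# advanced-exports entry to look like an /etc/exports line, e.g.
# "/export/myshare 192.168.1.0/24(rw,async) 10.0.0.5(ro)". The hypothetical
# helper below reproduces just the split logic, with no Django/Rockstor
# dependencies, so the expected input shape can be exercised in isolation.
def demo_parse_export_line(line, mnt_root="/mnt2/"):
    fields = line.split()
    if len(fields) < 2:
        raise ValueError("Invalid exports input -- ({}).".format(line))
    share = fields[0].split("/")[-1]
    clients = []
    for f in fields[1:]:
        cf = f.split("(")
        if len(cf) != 2 or cf[1][-1] != ")":
            raise ValueError("Offending section: ({}).".format(f))
        clients.append({"client_str": cf[0],
                        "option_list": cf[1][:-1],
                        "mnt_pt": "{}{}".format(mnt_root, share)})
    return {fields[0]: clients}

# demo_parse_export_line("/export/myshare 192.168.1.0/24(rw,async)")
# -> {'/export/myshare': [{'client_str': '192.168.1.0/24',
#                          'option_list': 'rw,async',
#                          'mnt_pt': '/mnt2/myshare'}]}
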
def _toggle_visibility(self, share, snap_name, on=True):
    cur_exports = list(NFSExport.objects.all())
    snap_short_name = snap_name.split(share.name)[-1][1:]
    snap_mnt_pt = ('%s%s/.%s' % (settings.MNT_PT, share.name,
                                 snap_short_name))
    export_pt = snap_mnt_pt.replace(settings.MNT_PT,
                                    settings.NFS_EXPORT_ROOT)
    if (on):
        pool_device = Disk.objects.filter(pool=share.pool)[0].name
        if (not is_share_mounted(share.name)):
            share_mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            mount_share(share.subvol_name, pool_device, share_mnt_pt)
        mount_share(snap_name, pool_device, snap_mnt_pt)
        if (NFSExport.objects.filter(share=share).exists()):
            se = NFSExport.objects.filter(share=share)[0]
            export_group = NFSExportGroup(
                host_str=se.export_group.host_str, nohide=True)
            export_group.save()
            export = NFSExport(share=share, export_group=export_group,
                               mount=snap_mnt_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
    else:
        try:
            export = NFSExport.objects.get(share=share, mount=snap_mnt_pt)
            cur_exports.remove(export)
            export.export_group.delete()
            export.delete()
        except Exception as e:
            logger.exception(e)
        finally:
            # drop the snapshot mount whether or not the export records
            # could be cleaned up.
            umount_root(snap_mnt_pt)

def post(self, request):
    if ('shares' not in request.data):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [self._validate_share(request, s)
              for s in request.data['shares']]
    options = self._validate_input(request)
    custom_config = options['custom_config']
    del options['custom_config']
    for share in shares:
        if (SambaShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via Samba' %
                     share.name)
            handle_exception(Exception(e_msg), request)
    with self._handle_exception(request):
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            options['share'] = share
            options['path'] = mnt_pt
            smb_share = SambaShare(**options)
            smb_share.save()
            for cc in custom_config:
                cco = SambaCustomConfig(smb_share=smb_share,
                                        custom_config=cc)
                cco.save()
            if (not is_share_mounted(share.name)):
                mount_share(share, mnt_pt)
            admin_users = request.data.get('admin_users', [])
            if (admin_users is None):
                admin_users = []
            self._set_admin_users(admin_users, smb_share)
        refresh_smb_config(list(SambaShare.objects.all()))
        self._restart_samba()
        return Response(SambaShareSerializer(smb_share).data)

def create_samba_share(self, rdata):
    if "shares" not in rdata:
        e_msg = "Must provide share names."
        handle_exception(Exception(e_msg), rdata)
    shares = [self._validate_share(rdata, s) for s in rdata["shares"]]
    options = self._validate_input(rdata)
    custom_config = options["custom_config"]
    del options["custom_config"]
    with self._handle_exception(rdata):
        for share in shares:
            if SambaShare.objects.filter(share=share).exists():
                e_msg = ("Share ({}) is already exported via "
                         "Samba.").format(share.name)
                logger.error(e_msg)
                smb_share = SambaShare.objects.get(share=share)
                # handle_exception(Exception(e_msg), rdata)
                continue
            mnt_pt = "{}{}".format(settings.MNT_PT, share.name)
            options["share"] = share
            options["path"] = mnt_pt
            smb_share = SambaShare(**options)
            smb_share.save()
            for cc in custom_config:
                cco = SambaCustomConfig(smb_share=smb_share,
                                        custom_config=cc)
                cco.save()
            if not share.is_mounted:
                mount_share(share, mnt_pt)
            admin_users = rdata.get("admin_users", [])
            if admin_users is None:
                admin_users = []
            self._set_admin_users(admin_users, smb_share)
        return smb_share

def create_adv_nfs_export_input(exports, request): exports_d = {} for e in exports: fields = e.split() if (len(fields) < 2): e_msg = ('Invalid exports input -- %s' % e) handle_exception(Exception(e_msg), request) share = fields[0].split('/')[-1] s = validate_share(share, request) mnt_pt = ('%s%s' % (settings.MNT_PT, s.name)) if (not is_share_mounted(s.name)): pool_device = Disk.objects.filter(pool=s.pool)[0].name mount_share(s, pool_device, mnt_pt) exports_d[fields[0]] = [] for f in fields[1:]: cf = f.split('(') if (len(cf) != 2 or cf[1][-1] != ')'): e_msg = ('Invalid exports input -- %s. offending ' 'section: %s' % (e, f)) handle_exception(Exception(e_msg), request) exports_d[fields[0]].append({ 'client_str': cf[0], 'option_list': cf[1][:-1], 'mnt_pt': ('%s%s' % (settings.MNT_PT, share)) }) return exports_d
def post(self, request, sid):
    with self._handle_exception(request):
        share = Share.objects.get(id=sid)
        options = {
            "owner": "root",
            "group": "root",
            "perms": "755",
            "orecursive": True,
            "precursive": True,
        }
        options["owner"] = request.data.get("owner", options["owner"])
        options["group"] = request.data.get("group", options["group"])
        options["perms"] = request.data.get("perms", options["perms"])
        options["orecursive"] = request.data.get(
            "orecursive", options["orecursive"]
        )
        options["precursive"] = request.data.get(
            "precursive", options["precursive"]
        )
        share.owner = options["owner"]
        share.group = options["group"]
        share.perms = options["perms"]
        share.save()
        mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
        force_mount = False
        if not share.is_mounted:
            mount_share(share, mnt_pt)
            force_mount = True
        chown(mnt_pt, options["owner"], options["group"],
              options["orecursive"])
        chmod(mnt_pt, options["perms"], options["precursive"])
        if force_mount is True:
            umount_root(mnt_pt)
        return Response(ShareSerializer(share).data)

def post(self, request, sname):
    with self._handle_exception(request):
        share = Share.objects.get(name=sname)
        options = {
            'owner': 'root',
            'group': 'root',
            'perms': '755',
            'orecursive': True,
            'precursive': True,
        }
        options['owner'] = request.data.get('owner', options['owner'])
        options['group'] = request.data.get('group', options['group'])
        options['perms'] = request.data.get('perms', options['perms'])
        options['orecursive'] = request.data.get('orecursive',
                                                 options['orecursive'])
        options['precursive'] = request.data.get('precursive',
                                                 options['precursive'])
        share.owner = options['owner']
        share.group = options['group']
        share.perms = options['perms']
        share.save()
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        force_mount = False
        if (not is_share_mounted(share.name)):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(share, pool_device, mnt_pt)
            force_mount = True
        chown(mnt_pt, options['owner'], options['group'],
              options['orecursive'])
        chmod(mnt_pt, options['perms'], options['precursive'])
        if (force_mount is True):
            umount_root(mnt_pt)
        return Response(ShareSerializer(share).data)

def create_adv_nfs_export_input(exports, request): exports_d = {} for e in exports: fields = e.split() if (len(fields) < 2): e_msg = 'Invalid exports input -- ({}).'.format(e) handle_exception(Exception(e_msg), request) share = fields[0].split('/')[-1] s = validate_share(share, request) mnt_pt = ('%s%s' % (settings.MNT_PT, s.name)) if not s.is_mounted: mount_share(s, mnt_pt) exports_d[fields[0]] = [] for f in fields[1:]: cf = f.split('(') if (len(cf) != 2 or cf[1][-1] != ')'): e_msg = ('Invalid exports input -- ({}). Offending ' 'section: ({}).').format(e, f) handle_exception(Exception(e_msg), request) exports_d[fields[0]].append({ 'client_str': cf[0], 'option_list': cf[1][:-1], 'mnt_pt': ('%s%s' % (settings.MNT_PT, share)) }) return exports_d
def post(self, request):
    with self._handle_exception(request):
        if ('shares' not in request.data):
            e_msg = ('Cannot export without specifying shares')
            handle_exception(Exception(e_msg), request)
        shares = [validate_share(s, request)
                  for s in request.data['shares']]
        options = self.parse_options(request)
        for s in shares:
            self.dup_export_check(s, options['host_str'], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            mount_share(s, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = self.create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = self.create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        self.refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)

def post(self, request, sname):
    with self._handle_exception(request):
        share = Share.objects.get(name=sname)
        try:
            samba_o = SambaShare.objects.get(share=share)
            samba_serializer = SambaShareSerializer(samba_o)
            return Response(samba_serializer.data)
        except:
            options = {
                'comment': ('samba for %s' % sname),
                'browsable': 'yes',
                'guest_ok': 'no',
                'read_only': 'no',
                'create_mask': '0755',
            }
            if ('comment' in request.DATA):
                options['comment'] = request.DATA['comment']
            if ('browsable' in request.DATA):
                if (request.DATA['browsable'] != 'yes' and
                        request.DATA['browsable'] != 'no'):
                    e_msg = ('Invalid choice for browsable. Possible '
                             'choices are yes or no.')
                    handle_exception(Exception(e_msg), request)
                options['browsable'] = request.DATA['browsable']
            if ('guest_ok' in request.DATA):
                if (request.DATA['guest_ok'] != 'yes' and
                        request.DATA['guest_ok'] != 'no'):
                    e_msg = ('Invalid choice for guest_ok. Possible '
                             'options are yes or no.')
                    handle_exception(Exception(e_msg), request)
                options['guest_ok'] = request.DATA['guest_ok']
            if ('read_only' in request.DATA):
                if (request.DATA['read_only'] != 'yes' and
                        request.DATA['read_only'] != 'no'):
                    e_msg = ('Invalid choice for read_only. Possible '
                             'options are yes or no.')
                    handle_exception(Exception(e_msg), request)
                options['read_only'] = request.DATA['read_only']
            if ('create_mask' in request.DATA):
                if (request.DATA['create_mask'] not in self.CREATE_MASKS):
                    e_msg = ('Invalid choice for create_mask. Possible '
                             'options are: %s' % self.CREATE_MASKS)
                    handle_exception(Exception(e_msg), request)
                options['create_mask'] = request.DATA['create_mask']
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            smb_share = SambaShare(share=share, path=mnt_pt,
                                   comment=options['comment'],
                                   browsable=options['browsable'],
                                   read_only=options['read_only'],
                                   guest_ok=options['guest_ok'],
                                   create_mask=options['create_mask'])
            smb_share.save()
            if (not is_share_mounted(share.name)):
                pool_device = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share.subvol_name, pool_device, mnt_pt)
            refresh_smb_config(list(SambaShare.objects.all()))
            restart_samba()
            samba_serializer = SambaShareSerializer(smb_share)
            return Response(samba_serializer.data)

def post(self, request, command): service = Service.objects.get(name=self.name) if command == 'config': config = request.data.get('config', None) root_share = config['root_share'] self._validate_root(request, root_share) self._save_config(service, config) elif command == 'start': try: config = self._get_config(service) except Exception as e: logger.exception(e) e_msg = ('Cannot start without configuration. ' 'Please configure (System->Services) and try again.') handle_exception(Exception(e_msg), request) share = self._validate_root(request, config['root_share']) mnt_pt = '{}{}'.format(settings.MNT_PT, share.name) if not share.is_mounted: mount_share(share, mnt_pt) docker_wrapper = '{}bin/docker-wrapper'.format(settings.ROOT_DIR) distro_id = distro.id() # for Leap 15 <--> Tumbleweed moves. if distro_id not in KNOWN_DISTRO_IDS: distro_id = 'generic' # If openSUSE, source conf file from docker package itself if re.match('opensuse', distro_id) is not None: inf = '/usr/lib/systemd/system/docker.service' else: inf = '{}/docker-{}.service'.format(settings.CONFROOT, distro_id) outf = '/etc/systemd/system/docker.service' with open(inf) as ino, open(outf, 'w') as outo: for l in ino.readlines(): if re.match('ExecStart=', l) is not None: outo.write('{} {}\n'.format( l.strip().replace(DOCKERD, docker_wrapper, 1), mnt_pt)) elif re.match('Type=notify', l) is not None: # Our docker wrapper use need NotifyAccess=all: avoids # "Got notification message from PID ####1, but # reception only permitted for main PID ####2" outo.write(l) outo.write('NotifyAccess=all\n') elif re.match('After=', l) is not None: outo.write('{} {}\n'.format( l.strip(), 'rockstor-bootstrap.service')) else: outo.write(l) if distro_id == 'rockstor': socket_file = '{}/docker.socket'.format(settings.CONFROOT) shutil.copy(socket_file, '/etc/systemd/system/docker.socket') systemctl(self.name, 'enable') systemctl(self.name, 'start') elif command == 'stop': systemctl(self.name, 'stop') systemctl(self.name, 'disable') return Response()
def main():
    mnt_pt = sys.argv[1]
    sname = mnt_pt.split('/')[-1]
    try:
        so = Share.objects.get(name=sname)
        mount_share(so, mnt_pt)
    except Exception as e:
        sys.exit('Failed to mount Docker root(%s). Exception: %s' %
                 (mnt_pt, e.__str__()))

def vol_ops(container):
    ops_list = []
    for v in DVolume.objects.filter(container=container):
        share_mnt = ('%s%s' % (settings.MNT_PT, v.share.name))
        mount_share(v.share, share_mnt)
        ops_list.extend(['-v', '%s:%s' % (share_mnt, v.dest_dir)])
    # map /etc/localtime for consistency across base rockstor and apps.
    ops_list.extend(['-v', '/etc/localtime:/etc/localtime:ro'])
    return ops_list

def vol_ops(container): ops_list = [] for v in DVolume.objects.filter(container=container): share_mnt = "{}{}".format(settings.MNT_PT, v.share.name) mount_share(v.share, share_mnt) ops_list.extend(["-v", "{}:{}".format(share_mnt, v.dest_dir)]) # map /etc/localtime for consistency across base rockstor and apps. ops_list.extend(["-v", "/etc/localtime:/etc/localtime:ro"]) return ops_list
def import_shares(pool, request):
    disk = Disk.objects.filter(pool=pool)[0].name
    shares = [s.name for s in Share.objects.filter(pool=pool)]
    shares_d = shares_info('%s%s' % (settings.MNT_PT, pool.name))
    for s in shares:
        if (s not in shares_d):
            Share.objects.get(pool=pool, name=s).delete()
    for s in shares_d:
        if (s in shares):
            share = Share.objects.get(name=s)
            share.qgroup = shares_d[s]
            rusage, eusage = share_usage(pool, share.qgroup)
            ts = datetime.utcnow().replace(tzinfo=utc)
            if (rusage != share.rusage or eusage != share.eusage):
                share.rusage = rusage
                share.eusage = eusage
                su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
                                ts=ts)
                su.save()
            else:
                try:
                    su = ShareUsage.objects.filter(name=s).latest('id')
                    su.ts = ts
                    su.count += 1
                except ShareUsage.DoesNotExist:
                    su = ShareUsage(name=s, r_usage=rusage,
                                    e_usage=eusage, ts=ts)
                finally:
                    su.save()
            share.save()
            continue
        try:
            cshare = Share.objects.get(name=s)
            cshares_d = shares_info('%s%s' % (settings.MNT_PT,
                                              cshare.pool.name))
            if (s in cshares_d):
                e_msg = ('Another pool(%s) has a Share with this same '
                         'name(%s) as this pool(%s). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with this command: '
                         'btrfs subvol delete %s[pool name]/%s' %
                         (cshare.pool.name, s, pool.name,
                          settings.MNT_PT, s))
                handle_exception(Exception(e_msg), request)
            else:
                cshare.pool = pool
                cshare.qgroup = shares_d[s]
                cshare.size = pool.size
                cshare.subvol_name = s
                cshare.rusage, cshare.eusage = share_usage(pool,
                                                           cshare.qgroup)
                cshare.save()
        except Share.DoesNotExist:
            pqid = qgroup_create(pool)
            update_quota(pool, pqid, pool.size * 1024)
            nso = Share(pool=pool, qgroup=shares_d[s], pqgroup=pqid,
                        name=s, size=pool.size, subvol_name=s)
            nso.save()
            mount_share(nso, '%s%s' % (settings.MNT_PT, s))

def post(self, request, command): service = Service.objects.get(name=self.name) if command == 'config': config = request.data.get('config', None) root_share = config['root_share'] self._validate_root(request, root_share) self._save_config(service, config) elif command == 'start': try: config = self._get_config(service) except Exception as e: logger.exception(e) e_msg = ('Cannot start without configuration. ' 'Please configure (System->Services) and try again.') handle_exception(Exception(e_msg), request) share = self._validate_root(request, config['root_share']) mnt_pt = '{}{}'.format(settings.MNT_PT, share.name) if not share.is_mounted: mount_share(share, mnt_pt) docker_wrapper = '{}bin/docker-wrapper'.format(settings.ROOT_DIR) distro_id = distro.id() # for Leap 15 <--> Tumbleweed moves. if distro_id not in KNOWN_DISTRO_IDS: distro_id = 'generic' # TODO: Consider sourcing /usr/lib/systemd/system/docker.service inf = '{}/docker-{}.service'.format(settings.CONFROOT, distro_id) outf = '/etc/systemd/system/docker.service' with open(inf) as ino, open(outf, 'w') as outo: for l in ino.readlines(): if re.match('ExecStart=', l) is not None: outo.write('{} {}\n'.format( l.strip().replace(DOCKERD, docker_wrapper, 1), mnt_pt)) elif re.match('Type=notify', l) is not None: # Our docker wrapper use need NotifyAccess=all: avoids # "Got notification message from PID ####1, but # reception only permitted for main PID ####2" outo.write(l) outo.write('NotifyAccess=all\n') elif re.match('After=', l) is not None: outo.write('{} {}\n'.format( l.strip(), 'rockstor-bootstrap.service')) else: outo.write(l) if distro_id == 'rockstor': socket_file = '{}/docker.socket'.format(settings.CONFROOT) shutil.copy(socket_file, '/etc/systemd/system/docker.socket') systemctl(self.name, 'enable') systemctl(self.name, 'start') elif command == 'stop': systemctl(self.name, 'stop') systemctl(self.name, 'disable') return Response()
def import_shares(pool, request):
    disk = Disk.objects.filter(pool=pool)[0].name
    shares = [s.name for s in Share.objects.filter(pool=pool)]
    shares_d = shares_info(pool)
    for s in shares:
        if (s not in shares_d):
            Share.objects.get(pool=pool, name=s).delete()
    for s in shares_d:
        if (s in shares):
            share = Share.objects.get(name=s)
            share.qgroup = shares_d[s]
            rusage, eusage = share_usage(pool, share.qgroup)
            ts = datetime.utcnow().replace(tzinfo=utc)
            if (rusage != share.rusage or eusage != share.eusage):
                share.rusage = rusage
                share.eusage = eusage
                su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
                                ts=ts)
                su.save()
            else:
                try:
                    su = ShareUsage.objects.filter(name=s).latest('id')
                    su.ts = ts
                    su.count += 1
                except ShareUsage.DoesNotExist:
                    su = ShareUsage(name=s, r_usage=rusage,
                                    e_usage=eusage, ts=ts)
                finally:
                    su.save()
            share.save()
            continue
        try:
            cshare = Share.objects.get(name=s)
            cshares_d = shares_info('%s%s' % (settings.MNT_PT,
                                              cshare.pool.name))
            if (s in cshares_d):
                e_msg = ('Another pool(%s) has a Share with this same '
                         'name(%s) as this pool(%s). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with this command: '
                         'btrfs subvol delete %s[pool name]/%s' %
                         (cshare.pool.name, s, pool.name,
                          settings.MNT_PT, s))
                handle_exception(Exception(e_msg), request)
            else:
                cshare.pool = pool
                cshare.qgroup = shares_d[s]
                cshare.size = pool.size
                cshare.subvol_name = s
                cshare.rusage, cshare.eusage = share_usage(pool,
                                                           cshare.qgroup)
                cshare.save()
        except Share.DoesNotExist:
            pqid = qgroup_create(pool)
            update_quota(pool, pqid, pool.size * 1024)
            nso = Share(pool=pool, qgroup=shares_d[s], pqgroup=pqid,
                        name=s, size=pool.size, subvol_name=s)
            nso.save()
            mount_share(nso, '%s%s' % (settings.MNT_PT, s))

def sftp_snap_toggle(share, mount=True):
    pool_device = Disk.objects.filter(pool=share.pool)[0].name
    for snap in Snapshot.objects.filter(share=share, uvisible=True):
        mnt_pt = ('%s/%s/%s/.%s' % (settings.SFTP_MNT_ROOT, share.owner,
                                    share.name, snap.name))
        if (mount and not is_mounted(mnt_pt)):
            mount_share(snap.real_name, pool_device, mnt_pt)
        elif (is_mounted(mnt_pt) and not mount):
            umount_root(mnt_pt)

def post(self, request, sname, command):
    with self._handle_exception(request):
        share = self._validate_share(request, sname)
        if command == "clone":
            new_name = request.DATA.get("name", "")
            return create_clone(share, new_name, request, logger)
        if command == "rollback":
            snap = self._validate_snapshot(request, share)
            if NFSExport.objects.filter(share=share).exists():
                e_msg = ("Share(%s) cannot be rolled back as it is "
                         "exported via nfs. Delete nfs exports and "
                         "try again" % sname)
                handle_exception(Exception(e_msg), request)
            if SambaShare.objects.filter(share=share).exists():
                e_msg = ("Share(%s) cannot be rolled back as it is shared"
                         " via Samba. Unshare and try again" % sname)
                handle_exception(Exception(e_msg), request)
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            rollback_snap(snap.real_name, share.name, share.subvol_name,
                          share.pool, pool_device)
            update_quota(share.pool, pool_device, snap.qgroup,
                         share.size * 1024)
            share.qgroup = snap.qgroup
            share.save()
            snap.delete()
            return Response()
        if command == "compress":
            algo = request.DATA.get("compress", None)
            if algo is None:
                e_msg = ("Compression algorithm must be specified. Valid "
                         "options are: %s" %
                         (settings.COMPRESSION_TYPES,))
                handle_exception(Exception(e_msg), request)
            if algo not in settings.COMPRESSION_TYPES:
                e_msg = ("Compression algorithm(%s) is invalid. Valid "
                         "options are: %s" %
                         (algo, settings.COMPRESSION_TYPES))
                handle_exception(Exception(e_msg), request)
            mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
            if not is_share_mounted(share.name):
                disk = Disk.objects.filter(pool=share.pool)[0].name
                mount_share(share, disk, mnt_pt)
            share.compression_algo = algo
            share.save()
            if algo == "no":
                algo = ""
            set_property(mnt_pt, "compression", algo)
            return Response(ShareSerializer(share).data)

def post(self, request):
    with self._handle_exception(request):
        pool_name = request.data.get('pool', None)
        try:
            pool = Pool.objects.get(name=pool_name)
        except:
            e_msg = ('Pool(%s) does not exist.' % pool_name)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        size = self._validate_share_size(request, pool)
        sname = request.data.get('sname', None)
        if (sname is None or
                re.match('%s$' % settings.SHARE_REGEX, sname) is None):
            e_msg = ('Share name must start with an alphanumeric(a-z0-9) '
                     'character and can be followed by any of the '
                     'following characters: letter(a-z), digits(0-9), '
                     'hyphen(-), underscore(_) or a period(.).')
            handle_exception(Exception(e_msg), request)
        if (Share.objects.filter(name=sname).exists()):
            e_msg = ('Share(%s) already exists. Choose a different name' %
                     sname)
            handle_exception(Exception(e_msg), request)
        if (Pool.objects.filter(name=sname).exists()):
            e_msg = ('A Pool with this name(%s) exists. Share and Pool '
                     'names must be distinct. Choose a different name' %
                     sname)
            handle_exception(Exception(e_msg), request)
        disk = Disk.objects.filter(pool=pool)[0]
        replica = False
        if ('replica' in request.data):
            replica = request.data['replica']
            if (type(replica) != bool):
                e_msg = ('replica must be a boolean, not %s' %
                         type(replica))
                handle_exception(Exception(e_msg), request)
        add_share(pool, disk.name, sname)
        qid = qgroup_id(pool, disk.name, sname)
        update_quota(pool, disk.name, qid, size * 1024)
        s = Share(pool=pool, qgroup=qid, name=sname, size=size,
                  subvol_name=sname, replica=replica,
                  compression_algo=compression)
        s.save()
        mnt_pt = '%s%s' % (settings.MNT_PT, sname)
        if (not is_share_mounted(sname)):
            disk = Disk.objects.filter(pool=pool)[0].name
            mount_share(s, disk, mnt_pt)
        if (compression != 'no'):
            set_property(mnt_pt, 'compression', compression)
        return Response(ShareSerializer(s).data)

def main():
    mnt_pt = sys.argv[1]
    sname = mnt_pt.split('/')[-1]
    try:
        so = Share.objects.get(name=sname)
        mount_share(so, mnt_pt)
    except Exception as e:
        sys.exit('Failed to mount Docker root(%s). Exception: %s' %
                 (mnt_pt, e.__str__()))
    run_command([DOCKER, 'daemon', '--log-driver=journald', '-s', 'btrfs',
                 '-g', mnt_pt])

def main():
    mnt_pt = sys.argv[1]
    sname = mnt_pt.split('/')[-1]
    try:
        so = Share.objects.get(name=sname)
        mount_share(so, mnt_pt)
    except Exception as e:
        sys.exit('Failed to mount Docker root(%s). Exception: %s' %
                 (mnt_pt, e.__str__()))
    run_command([DOCKERD, '--log-driver=journald',
                 '--storage-driver', 'btrfs',
                 '--storage-opt', 'btrfs.min_space=1G',
                 '--data-root', mnt_pt])

def run(self):
    # 0. mount destination share
    dest_mnt_pt = ('%s/%s' % (settings.MNT_PT, self.dest_share))
    msg = ('Destination share(%s) not mounted' % self.dest_share)
    with self._update_trail_and_quit(msg):
        if (not is_share_mounted(self.dest_share)):
            mount_share(self.dest_share, self.dest_pool, dest_mnt_pt)
            if (not is_share_mounted(self.dest_share)):
                raise Exception(msg)
    # 1. mount source share
    msg = ('Failed to mount source(%s:%s)' %
           (self.source_ip, self.source_path))
    with self._update_trail_and_quit(msg):
        mount_source(self.source_ip, self.source_path)
    # 2. create a snapshot
    snap_name = ('snap-%d' % self.tid)
    msg = ('Failed to create snapshot(%s) for share(%s)' %
           (snap_name, self.dest_share))
    data = {
        'status': 'snapshot created',
    }
    with self._update_trail_and_quit(msg, data=data):
        create_snapshot(self.dest_share, snap_name, logger)
    # 3. rsync
    src_mnt = ('/mnt/backup/%s_%s' % (self.source_ip, self.source_path))
    cmd = [RSYNC, '-az', src_mnt, dest_mnt_pt]
    msg = ('Unable to start sync')
    data = {
        'status': 'sync started',
    }
    with self._update_trail_and_quit(msg, data=data):
        rp = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    while True:
        if (os.getppid() != self.ppid):
            msg = ('Backup plugin scheduler exited. Aborting...')
            with self._update_trail_and_quit(msg):
                rp.terminate()
        if (rp.poll() is not None):
            msg = ('sync finished. But failed to update trail status')
            data = {
                'status': 'succeeded',
            }
            with self._update_trail_and_quit(msg, data):
                logger.debug('sync finished')
            break
        time.sleep(1)

def mount_share():
    try:
        name = sys.argv[1]
    except IndexError:
        sys.exit('%s <share_name>' % sys.argv[0])
    try:
        so = Share.objects.get(name=name)
    except Share.DoesNotExist:
        sys.exit('Share(%s) does not exist' % name)
    mnt_pt = ('%s%s' % (settings.MNT_PT, so.name))
    btrfs.mount_share(so, mnt_pt)

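# Illustrative invocation (the console-script name is hypothetical; the
# function above is the entry point): given an existing share "myshare" and
# a settings.MNT_PT of '/mnt2/', running
#   $ mnt-share myshare
# ends in btrfs.mount_share(<Share: myshare>, '/mnt2/myshare'), while a
# missing argument or unknown share name exits with the usage or lookup
# message instead.
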
def toggle_sftp_visibility(share, snap_name, on=True):
    if (not SFTP.objects.filter(share=share).exists()):
        return
    snap_short_name = snap_name.split(share.name)[-1][1:]
    mnt_pt = ('%s/%s/%s/.%s' % (settings.SFTP_MNT_ROOT, share.owner,
                                share.name, snap_short_name))
    if (on):
        if (not is_mounted(mnt_pt)):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(snap_name, pool_device, mnt_pt)
    else:
        umount_root(mnt_pt)

def post(self, request, command): service = Service.objects.get(name=self.name) if command == "config": config = request.data.get("config", None) root_share = config["root_share"] self._validate_root(request, root_share) self._save_config(service, config) elif command == "start": try: config = self._get_config(service) except Exception as e: logger.exception(e) e_msg = ("Cannot start without configuration. " "Please configure (System->Services) and try again.") handle_exception(Exception(e_msg), request) share = self._validate_root(request, config["root_share"]) mnt_pt = "{}{}".format(settings.MNT_PT, share.name) if not share.is_mounted: mount_share(share, mnt_pt) distro_id = distro.id() # for Leap 15 <--> Tumbleweed moves. if distro_id not in KNOWN_DISTRO_IDS: distro_id = "generic" # Write a custom daemon.json file (openSUSE only) conf_file = "{}/docker-daemon.json".format(settings.CONFROOT) if re.match("opensuse", distro_id) is not None: # Write them to file self._write_docker_daemon_conf(conf_file, mnt_pt, request) # Then write the docker.service file try: self._write_docker_service(distro_id, mnt_pt, conf_file) except Exception as e: logger.exception(e) e_msg = "An error occurred while writing the docker.service file" handle_exception(Exception(e_msg), request) if distro_id == "rockstor": socket_file = "{}/docker.socket".format(settings.CONFROOT) shutil.copy(socket_file, "/etc/systemd/system/docker.socket") systemctl(self.name, "enable") systemctl(self.name, "start") elif command == "stop": systemctl(self.name, "stop") systemctl(self.name, "disable") return Response()
def post(self, request):
    with self._handle_exception(request):
        pool_name = request.DATA.get('pool', None)
        try:
            pool = Pool.objects.get(name=pool_name)
        except:
            e_msg = ('Pool(%s) does not exist.' % pool_name)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        size = self._validate_share_size(request, pool)
        sname = request.DATA.get('sname', None)
        if (sname is None or
                re.match('%s$' % settings.SHARE_REGEX, sname) is None):
            e_msg = ('Share name must start with a letter(a-z) and can '
                     'be followed by any of the following characters: '
                     'letter(a-z), digits(0-9), hyphen(-), underscore'
                     '(_) or a period(.).')
            handle_exception(Exception(e_msg), request)
        if (Share.objects.filter(name=sname).exists()):
            e_msg = ('Share(%s) already exists.' % sname)
            handle_exception(Exception(e_msg), request)
        try:
            disk = Disk.objects.filter(pool=pool)[0]
        except:
            e_msg = ('Pool(%s) does not have any disks in it.' %
                     pool_name)
            handle_exception(Exception(e_msg), request)
        replica = False
        if ('replica' in request.DATA):
            replica = request.DATA['replica']
            if (type(replica) != bool):
                e_msg = ('replica must be a boolean, not %s' %
                         type(replica))
                handle_exception(Exception(e_msg), request)
        add_share(pool, disk.name, sname)
        qgroup_id = self._update_quota(pool, disk.name, sname, size)
        s = Share(pool=pool, qgroup=qgroup_id, name=sname, size=size,
                  subvol_name=sname, replica=replica,
                  compression_algo=compression)
        s.save()
        mnt_pt = '%s%s' % (settings.MNT_PT, sname)
        if (not is_share_mounted(sname)):
            disk = Disk.objects.filter(pool=pool)[0].name
            mount_share(s, disk, mnt_pt)
        if (compression != 'no'):
            set_property(mnt_pt, 'compression', compression)
        return Response(ShareSerializer(s).data)

def put(self, request, smb_id):
    with self._handle_exception(request):
        try:
            smbo = SambaShare.objects.get(id=smb_id)
        except:
            e_msg = ('Samba export for the id(%s) does not exist' %
                     smb_id)
            handle_exception(Exception(e_msg), request)
        options = self._validate_input(request)
        custom_config = options['custom_config']
        del options['custom_config']
        smbo.__dict__.update(**options)
        admin_users = request.data.get('admin_users', None)
        if (admin_users is None):
            admin_users = []
        for uo in User.objects.filter(smb_shares=smbo):
            if (uo.username not in admin_users):
                uo.smb_shares.remove(smbo)
        for u in admin_users:
            if (not User.objects.filter(username=u,
                                        smb_shares=smbo).exists()):
                auo = User.objects.get(username=u)
                auo.smb_shares.add(smbo)
        smbo.save()
        for cco in SambaCustomConfig.objects.filter(smb_share=smbo):
            if (cco.custom_config not in custom_config):
                cco.delete()
            else:
                custom_config.remove(cco.custom_config)
        for cc in custom_config:
            cco = SambaCustomConfig(smb_share=smbo, custom_config=cc)
            cco.save()
        for smb_o in SambaShare.objects.all():
            if (not is_share_mounted(smb_o.share.name)):
                pool_device = Disk.objects.filter(
                    pool=smb_o.share.pool)[0].name
                mnt_pt = ('%s%s' % (settings.MNT_PT, smb_o.share.name))
                try:
                    mount_share(smb_o.share, pool_device, mnt_pt)
                except Exception as e:
                    logger.exception(e)
                    if (smb_o.id == smbo.id):
                        e_msg = ('Failed to mount share(%s) due to a low '
                                 'level error.' % smb_o.share.name)
                        handle_exception(Exception(e_msg), request)
        refresh_smb_config(list(SambaShare.objects.all()))
        self._restart_samba()
        return Response(SambaShareSerializer(smbo).data)

def post(self, request):
    with self._handle_exception(request):
        pool_name = request.data.get('pool', None)
        try:
            pool = Pool.objects.get(name=pool_name)
        except:
            e_msg = ('Pool(%s) does not exist.' % pool_name)
            handle_exception(Exception(e_msg), request)
        compression = self._validate_compression(request)
        size = self._validate_share_size(request, pool)
        sname = request.data.get('sname', None)
        if (sname is None or
                re.match('%s$' % settings.SHARE_REGEX, sname) is None):
            e_msg = ('Share name must start with an alphanumeric(a-z0-9) '
                     'character and can be followed by any of the '
                     'following characters: letter(a-z), digits(0-9), '
                     'hyphen(-), underscore(_) or a period(.).')
            handle_exception(Exception(e_msg), request)
        if (Share.objects.filter(name=sname).exists()):
            e_msg = ('Share(%s) already exists. Choose a different name' %
                     sname)
            handle_exception(Exception(e_msg), request)
        if (Pool.objects.filter(name=sname).exists()):
            e_msg = ('A Pool with this name(%s) exists. Share and Pool '
                     'names must be distinct. Choose a different name' %
                     sname)
            handle_exception(Exception(e_msg), request)
        disk = Disk.objects.filter(pool=pool)[0]
        replica = False
        if ('replica' in request.data):
            replica = request.data['replica']
            if (type(replica) != bool):
                e_msg = ('replica must be a boolean, not %s' %
                         type(replica))
                handle_exception(Exception(e_msg), request)
        add_share(pool, disk.name, sname)
        qid = qgroup_id(pool, disk.name, sname)
        update_quota(pool, disk.name, qid, size * 1024)
        s = Share(pool=pool, qgroup=qid, name=sname, size=size,
                  subvol_name=sname, replica=replica,
                  compression_algo=compression)
        s.save()
        mnt_pt = '%s%s' % (settings.MNT_PT, sname)
        if (not is_share_mounted(sname)):
            disk = Disk.objects.filter(pool=pool)[0].name
            mount_share(s, disk, mnt_pt)
        if (compression != 'no'):
            set_property(mnt_pt, 'compression', compression)
        return Response(ShareSerializer(s).data)

def put(self, request, smb_id):
    with self._handle_exception(request):
        try:
            smbo = SambaShare.objects.get(id=smb_id)
        except:
            e_msg = ('Samba export for the id(%s) does not exist' %
                     smb_id)
            handle_exception(Exception(e_msg), request)
        options = self._validate_input(request, smbo=smbo)
        custom_config = options['custom_config']
        del options['custom_config']
        smbo.__dict__.update(**options)
        admin_users = request.data.get('admin_users', None)
        if (admin_users is None):
            admin_users = []
        for uo in User.objects.filter(smb_shares=smbo):
            if (uo.username not in admin_users):
                uo.smb_shares.remove(smbo)
        for u in admin_users:
            if (not User.objects.filter(username=u,
                                        smb_shares=smbo).exists()):
                auo = User.objects.get(username=u)
                auo.smb_shares.add(smbo)
        smbo.save()
        for cco in SambaCustomConfig.objects.filter(smb_share=smbo):
            if (cco.custom_config not in custom_config):
                cco.delete()
            else:
                custom_config.remove(cco.custom_config)
        for cc in custom_config:
            cco = SambaCustomConfig(smb_share=smbo, custom_config=cc)
            cco.save()
        for smb_o in SambaShare.objects.all():
            if (not is_share_mounted(smb_o.share.name)):
                pool_device = Disk.objects.filter(
                    pool=smb_o.share.pool)[0].name
                mnt_pt = ('%s%s' % (settings.MNT_PT, smb_o.share.name))
                try:
                    mount_share(smb_o.share, pool_device, mnt_pt)
                except Exception as e:
                    logger.exception(e)
                    if (smb_o.id == smbo.id):
                        e_msg = ('Failed to mount share(%s) due to a low '
                                 'level error.' % smb_o.share.name)
                        handle_exception(Exception(e_msg), request)
        refresh_smb_config(list(SambaShare.objects.all()))
        self._restart_samba()
        return Response(SambaShareSerializer(smbo).data)

def run(self):
    # 0. mount destination share
    dest_mnt_pt = ('%s/%s' % (settings.MNT_PT, self.dest_share))
    msg = ('Destination share(%s) not mounted' % self.dest_share)
    with self._update_trail_and_quit(msg):
        if (not is_share_mounted(self.dest_share)):
            mount_share(self.dest_share, self.dest_pool, dest_mnt_pt)
            if (not is_share_mounted(self.dest_share)):
                raise Exception(msg)
    # 1. mount source share
    msg = ('Failed to mount source(%s:%s)' %
           (self.source_ip, self.source_path))
    with self._update_trail_and_quit(msg):
        mount_source(self.source_ip, self.source_path)
    # 2. create a snapshot
    snap_name = ('snap-%d' % self.tid)
    msg = ('Failed to create snapshot(%s) for share(%s)' %
           (snap_name, self.dest_share))
    data = {'status': 'snapshot created'}
    with self._update_trail_and_quit(msg, data=data):
        create_snapshot(self.dest_share, snap_name, logger)
    # 3. rsync
    src_mnt = ('/mnt/backup/%s_%s' % (self.source_ip, self.source_path))
    cmd = [RSYNC, '-az', src_mnt, dest_mnt_pt]
    msg = ('Unable to start sync')
    data = {'status': 'sync started'}
    with self._update_trail_and_quit(msg, data=data):
        rp = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    while True:
        if (os.getppid() != self.ppid):
            msg = ('Backup plugin scheduler exited. Aborting...')
            with self._update_trail_and_quit(msg):
                rp.terminate()
        if (rp.poll() is not None):
            msg = ('sync finished. But failed to update trail status')
            data = {'status': 'succeeded'}
            with self._update_trail_and_quit(msg, data):
                logger.debug('sync finished')
            break
        time.sleep(1)

def put(self, request, smb_id): with self._handle_exception(request): try: smbo = SambaShare.objects.get(id=smb_id) except: e_msg = ("Samba export for the id ({}) does not exist." ).format(smb_id) handle_exception(Exception(e_msg), request) options = self._validate_input(request.data, smbo=smbo) custom_config = options["custom_config"] del options["custom_config"] smbo.__dict__.update(**options) admin_users = request.data.get("admin_users", []) if admin_users is None: admin_users = [] for uo in User.objects.filter(smb_shares=smbo): if uo.username not in admin_users: uo.smb_shares.remove(smbo) self._set_admin_users(admin_users, smbo) smbo.save() for cco in SambaCustomConfig.objects.filter(smb_share=smbo): if cco.custom_config not in custom_config: cco.delete() else: custom_config.remove(cco.custom_config) for cc in custom_config: cco = SambaCustomConfig(smb_share=smbo, custom_config=cc) cco.save() for smb_o in SambaShare.objects.all(): if not smb_o.share.is_mounted: mnt_pt = "%s%s" % (settings.MNT_PT, smb_o.share.name) try: mount_share(smb_o.share, mnt_pt) except Exception as e: logger.exception(e) if smb_o.id == smbo.id: e_msg = ("Failed to mount share ({}) due to a low " "level error.").format(smb_o.share.name) handle_exception(Exception(e_msg), request) refresh_smb_config(list(SambaShare.objects.all())) refresh_smb_discovery(list(SambaShare.objects.all())) self._restart_samba() return Response(SambaShareSerializer(smbo).data)
def put(self, request, export_id):
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        eg = validate_export_group(export_id, request)
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request,
                             export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                cur_exports.remove(e)
                e.delete()
            else:
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def post(self, request, sname):
    try:
        share = Share.objects.get(name=sname)
        options = {
            'owner': 'root',
            'group': 'root',
            'perms': '755',
            'orecursive': False,
            'precursive': False,
        }
        if ('owner' in request.DATA):
            options['owner'] = request.DATA['owner']
            if ('group' in request.DATA):
                options['group'] = request.DATA['group']
            else:
                options['group'] = options['owner']
            if ('orecursive' in request.DATA):
                options['orecursive'] = True
        if ('perms' in request.DATA):
            options['perms'] = request.DATA['perms']
            if ('precursive' in request.DATA):
                options['precursive'] = True
        share.owner = options['owner']
        share.group = options['group']
        share.perms = options['perms']
        share.save()
        mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
        force_mount = False
        if (not is_share_mounted(share.name)):
            pool_device = Disk.objects.filter(pool=share.pool)[0].name
            mount_share(share.subvol_name, pool_device, mnt_pt)
            force_mount = True
        chown(mnt_pt, options['owner'], options['group'],
              options['orecursive'])
        chmod(mnt_pt, options['perms'], options['precursive'])
        if (force_mount is True):
            umount_root(mnt_pt)
        return Response(ShareSerializer(share).data)
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def create_clone(share, new_name, request, logger, snapshot=None):
    # if snapshot is None, create clone of the share.
    # If it's not, then clone it.
    if re.match(settings.SHARE_REGEX + "$", new_name) is None:
        e_msg = ("Clone name is invalid. It must start with a letter and "
                 "can contain letters, digits, _, . and - characters.")
        handle_exception(Exception(e_msg), request)
    if Share.objects.filter(name=new_name).exists():
        e_msg = "Another share with name ({}) already exists.".format(
            new_name)
        handle_exception(Exception(e_msg), request)
    if Snapshot.objects.filter(share=share, name=new_name).exists():
        e_msg = ("Snapshot with name ({}) already exists for the "
                 "share ({}). Choose a different name.").format(
                     new_name, share.name)
        handle_exception(Exception(e_msg), request)
    try:
        share_name = share.subvol_name
        snap = None
        if snapshot is not None:
            snap = snapshot.real_name
        add_clone(share.pool, share_name, new_name, snapshot=snap)
        snap_id = share_id(share.pool, new_name)
        qgroup_id = "0/{}".format(snap_id)
        pqid = qgroup_create(share.pool)
        new_share = Share(
            pool=share.pool,
            qgroup=qgroup_id,
            pqgroup=pqid,
            name=new_name,
            size=share.size,
            subvol_name=new_name,
        )
        new_share.save()
        if pqid != PQGROUP_DEFAULT:
            update_quota(new_share.pool, pqid, new_share.size * 1024)
            share_pqgroup_assign(pqid, new_share)
        # Mount our new clone share.
        # We independently mount all shares, data pool or system pool,
        # in /mnt2/name.
        mnt_pt = "{}{}".format(settings.MNT_PT, new_name)
        mount_share(new_share, mnt_pt)
        return Response(ShareSerializer(new_share).data)
    except Exception as e:
        handle_exception(e, request)

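# Note on the qgroup strings above: btrfs level-0 qgroup ids take the form
# "0/<subvolume id>", so the clone's own qgroup follows directly from the id
# of the newly created subvolume, while pqid names a separately created
# parent qgroup used for quota accounting. PQGROUP_DEFAULT appears to act as
# the "no parent qgroup allocated" sentinel (e.g. when quotas are disabled),
# which is why quota assignment is skipped when qgroup_create() returns it.
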
def put(self, request, export_id):
    if ('shares' not in request.DATA):
        e_msg = ('Cannot export without specifying shares')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.DATA['shares']]
    try:
        eg = validate_export_group(export_id, request)
        options = parse_options(request)
        for s in shares:
            dup_export_check(s, options['host_str'], request,
                             export_id=int(export_id))
        NFSExportGroup.objects.filter(id=export_id).update(**options)
        NFSExportGroup.objects.filter(id=export_id)[0].save()
        cur_exports = list(NFSExport.objects.all())
        for e in NFSExport.objects.filter(export_group=eg):
            if (e.share not in shares):
                cur_exports.remove(e)
                e.delete()
            else:
                shares.remove(e.share)
        for s in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, s.name))
            export_pt = ('%s%s' % (settings.NFS_EXPORT_ROOT, s.name))
            if (not is_share_mounted(s.name)):
                pool_device = Disk.objects.filter(pool=s.pool)[0].name
                mount_share(s.subvol_name, pool_device, mnt_pt)
            export = NFSExport(export_group=eg, share=s, mount=export_pt)
            export.full_clean()
            export.save()
            cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        adv_entries = [e.export_str
                       for e in AdvancedNFSExport.objects.all()]
        exports_d = create_adv_nfs_export_input(adv_entries, request)
        exports.update(exports_d)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def post(self, request, sname):
    try:
        share = Share.objects.get(name=sname)
        if (SambaShare.objects.filter(share=share).exists()):
            raise Exception('Already exported via Samba')
        if (NFSExport.objects.filter(share=share).exists()):
            raise Exception('Already exported via nfs')
        if (IscsiTarget.objects.filter(share=share).exists()):
            raise Exception('Already exported via iscsi')
        options = {
            'tname': 'fooscsi',
            'tid': -1,
            'dev_name': 'iscsi.img',
            'dev_size': 10,
        }
        if ('tname' in request.DATA):
            options['tname'] = request.DATA['tname']
        if ('tid' in request.DATA):
            try:
                options['tid'] = int(request.DATA['tid'])
            except:
                raise Exception('tid must be an integer')
        pool_device = Disk.objects.filter(pool=share.pool)[0].name
        mnt_pt = '/mnt2/' + share.name
        mount_share(share.name, pool_device, mnt_pt)
        dev_name = mnt_pt + '/' + options['dev_name']
        export_iscsi(options['tid'], options['tname'], options['tid'],
                     dev_name, options['dev_size'])
        iscsi_target = IscsiTarget(share=share, tid=options['tid'],
                                   tname=options['tname'],
                                   dev_name=dev_name,
                                   dev_size=options['dev_size'])
        iscsi_target.save()
        iscsi_serializer = IscsiSerializer(iscsi_target)
        return Response(iscsi_serializer.data)
    except Exception as e:
        handle_exception(e, request)

def post(self, request):
    if ('shares' not in request.data):
        e_msg = ('Must provide share names')
        handle_exception(Exception(e_msg), request)
    shares = [validate_share(s, request) for s in request.data['shares']]
    description = request.data.get('description', '')
    if (description == ''):
        description = self.def_description
    time_machine = request.data.get('time_machine', 'yes')
    if (time_machine != 'yes' and time_machine != 'no'):
        e_msg = ('time_machine must be yes or no. not %s' % time_machine)
        handle_exception(Exception(e_msg), request)
    for share in shares:
        if (NetatalkShare.objects.filter(share=share).exists()):
            e_msg = ('Share(%s) is already exported via AFP' % share.name)
            handle_exception(Exception(e_msg), request)
    try:
        for share in shares:
            mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
            cur_description = '%s %s' % (share.name, description)
            if (len(shares) == 1 and description != self.def_description):
                cur_description = description
            afpo = NetatalkShare(share=share, path=mnt_pt,
                                 description=cur_description,
                                 time_machine=time_machine)
            afpo.save()
            if (not is_share_mounted(share.name)):
                pool_device = Disk.objects.filter(
                    pool=share.pool)[0].name
                mount_share(share, pool_device, mnt_pt)
        refresh_afp_config(list(NetatalkShare.objects.all()))
        systemctl('netatalk', 'reload')
        return Response()
    except RockStorAPIException:
        raise
    except Exception as e:
        handle_exception(e, request)

def btrfs_add_share(share):
    pools = btrfs_pool_scan()
    share_s = {}
    for pool in pools:
        if pool["name"] == share["pool"]:
            pqid = qgroup_create(pool)
            add_share(pool, share["sname"], pqid)
            qid = qgroup_id(pool, share["sname"])
            update_quota(pool, pqid, share["size"] * 1024)
            mnt_pt = '%s%s' % (MNT_PT, share["sname"])
            replica = False
            share_s["name"] = share["sname"]
            share_s["subvol_name"] = share["sname"]
            share_s["size"] = share["size"]
            share_s["qgroup"] = qid
            share_s["pqgroup"] = pqid
            share_s["pool"] = pool
            share_s["replica"] = replica
            share_s["compression_algo"] = share["compression"]
            mount_share(share_s, mnt_pt)
    return share_s

def main():
    # We expect the last element of our argument list to be the mount
    # point, as docker_service.py formats it that way.
    mnt_pt = sys.argv[-1]
    # N.B. sys.argv[0] is the name of the script itself and always present.
    system_docker_opts = []
    if len(sys.argv) > 2:
        # We have at least 1 additional argument passed, so extract it or
        # them, ie: [script-name, additional-arg, mount-point]
        # we extract additional-arg (or its plural counterpart) as a list.
        system_docker_opts = sys.argv[1:-1]
    sname = mnt_pt.split('/')[-1]
    try:
        so = Share.objects.get(name=sname)
        mount_share(so, mnt_pt)
    except Exception as e:
        sys.exit('Failed to mount Docker root(%s). Exception: %s' %
                 (mnt_pt, e.__str__()))
    cmd = ([DOCKERD] + ROCKSTOR_DOCKER_OPTS + system_docker_opts +
           ['--data-root', mnt_pt])
    run_command(cmd)

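# Illustrative argv handling for the wrapper above (values hypothetical):
# invoked as "docker-wrapper --log-driver=journald /mnt2/docker-root",
#   sys.argv           -> ['docker-wrapper', '--log-driver=journald',
#                          '/mnt2/docker-root']
#   mnt_pt             -> '/mnt2/docker-root'
#   system_docker_opts -> ['--log-driver=journald']
# so dockerd receives the pass-through options plus the Rockstor defaults
# and '--data-root /mnt2/docker-root'.
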
def post(self, request, sname):
    with self._handle_exception(request):
        share = validate_share(sname, request)
        options = parse_options(request)
        dup_export_check(share, options["host_str"], request)
        cur_exports = list(NFSExport.objects.all())
        eg = NFSExportGroup(**options)
        eg.save()
        mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
        export_pt = "%s%s" % (settings.NFS_EXPORT_ROOT, share.name)
        if not is_share_mounted(share.name):
            mount_share(share, mnt_pt)
        export = NFSExport(export_group=eg, share=share, mount=export_pt)
        export.full_clean()
        export.save()
        cur_exports.append(export)
        exports = create_nfs_export_input(cur_exports)
        refresh_wrapper(exports, request, logger)
        nfs_serializer = NFSExportGroupSerializer(eg)
        return Response(nfs_serializer.data)