def create_clone(share, new_name, request, logger):
    """Clone ``share`` into a new Share named ``new_name``.

    Takes a btrfs snapshot of the share's subvolume and registers it as a
    new Share of the same size on the same pool. Errors are reported to the
    API caller via handle_exception().
    """
    if (Share.objects.filter(name=new_name).exists()):
        e_msg = ('Share with name: %s already exists.' % new_name)
        handle_exception(Exception(e_msg), request)
    # First disk of the pool is used to address the btrfs filesystem.
    pool_device = Disk.objects.filter(pool=share.pool)[0].name
    snap_name = ('%s-clone-%s-snapshot' % (share.name, new_name))
    if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
        e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s' % (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    try:
        add_snap(share.pool.name, pool_device, share.subvol_name, snap_name,
                 share_prepend=False)
        snap_id = share_id(share.pool.name, pool_device, snap_name)
        qgroup_id = ('0/%s' % snap_id)
        # Quota appears to be in bytes while share.size is KiB -- TODO confirm.
        update_quota(share.pool.name, pool_device, qgroup_id,
                     share.size * 1024)
        new_share = Share(pool=share.pool, qgroup=qgroup_id, name=new_name,
                          size=share.size, subvol_name=snap_name)
        new_share.save()
        return Response(ShareSerializer(new_share).data)
    # Bug fix: "except Exception, e" is Python-2-only syntax (SyntaxError on
    # Python 3); use the version-agnostic "as" form.
    except Exception as e:
        e_msg = ('Failed to create clone due to a system error.')
        logger.error(e_msg)
        logger.exception(e)
        handle_exception(Exception(e_msg), request)
def create_clone(share, new_name, request, logger, snapshot=None):
    """Create a new share named ``new_name`` as a clone.

    When ``snapshot`` is None the share itself is cloned; otherwise the
    given snapshot of the share is cloned instead.
    """
    # Reject invalid clone names before doing any filesystem work.
    if re.match(settings.SHARE_REGEX + '$', new_name) is None:
        e_msg = ('Clone name is invalid. It must start with a letter and can'
                 ' contain letters, digits, _, . and - characters')
        handle_exception(Exception(e_msg), request)
    if Share.objects.filter(name=new_name).exists():
        e_msg = ('Another Share with name: %s already exists.' % new_name)
        handle_exception(Exception(e_msg), request)
    if Snapshot.objects.filter(share=share, name=new_name).exists():
        e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s. Choose a different name' % (new_name, share.name))
        handle_exception(Exception(e_msg), request)
    try:
        origin_snap = snapshot.real_name if snapshot is not None else None
        add_clone(share.pool, share.subvol_name, new_name,
                  snapshot=origin_snap)
        subvol_id = share_id(share.pool, new_name)
        clone_qgroup = ('0/%s' % subvol_id)
        update_quota(share.pool, clone_qgroup, share.size * 1024)
        cloned = Share(pool=share.pool, qgroup=clone_qgroup, name=new_name,
                       size=share.size, subvol_name=new_name)
        cloned.save()
        return Response(ShareSerializer(cloned).data)
    except Exception as e:
        handle_exception(e, request)
def _create(self, share, snap_name, request, uvisible, snap_type, writable):
    """Create a btrfs snapshot of ``share`` and persist a Snapshot record.

    Replication snapshots are forced read-only. When the share has a
    non-default parent qgroup, the new snapshot's qgroup is assigned to it.
    """
    if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
        # Note e_msg is consumed by replication/util.py create_snapshot()
        e_msg = ('Snapshot ({}) already exists for '
                 'the share ({}).').format(snap_name, share.name)
        handle_exception(Exception(e_msg), request)
    snap_size = 0
    qgroup_id = '0/na'
    if (snap_type == 'replication'):
        writable = False
    add_snap(share.pool, share.subvol_name, snap_name, writable)
    snap_id = share_id(share.pool, snap_name)
    qgroup_id = ('0/%s' % snap_id)
    # Bug fix: "is not" compares object identity, which is unreliable for
    # strings; compare by value so a non-default pqgroup is always detected.
    if share.pqgroup != settings.MODEL_DEFS['pqgroup']:
        pool_mnt_pt = '{}{}'.format(settings.MNT_PT, share.pool.name)
        qgroup_assign(qgroup_id, share.pqgroup, pool_mnt_pt)
    snap_size, eusage = volume_usage(share.pool, qgroup_id)
    s = Snapshot(share=share, name=snap_name, real_name=snap_name,
                 size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                 snap_type=snap_type, writable=writable)
    # The following share.save() was informed by test_snapshot.py
    share.save()
    s.save()
    return Response(SnapshotSerializer(s).data)
def _create(self, share, snap_name, pool_device, request, uvisible,
            snap_type, writable):
    """Persist a Snapshot record, taking a btrfs snapshot unless receiving.

    For snap_type 'receiver' no local snapshot is taken (size/qgroup stay
    at their placeholder values); 'replication' snapshots are read-only.
    """
    if Snapshot.objects.filter(share=share, name=snap_name).exists():
        e_msg = ('Snapshot(%s) already exists for the Share(%s).' %
                 (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    real_name = snap_name
    snap_size = 0
    qgroup_id = '0/na'
    if snap_type != 'receiver':
        # Replication snapshots must never be writable.
        if snap_type == 'replication':
            writable = False
        add_snap(share.pool, pool_device, share.subvol_name, real_name,
                 readonly=not writable)
        qgroup_id = ('0/%s' % share_id(share.pool, pool_device, real_name))
        snap_size = share_usage(share.pool, pool_device, qgroup_id)
    new_snap = Snapshot(share=share, name=snap_name, real_name=real_name,
                        size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                        snap_type=snap_type, writable=writable)
    new_snap.save()
    return Response(SnapshotSerializer(new_snap).data)
def create_clone(share, new_name, request, logger, snapshot=None):
    """Create a clone of ``share`` named ``new_name``.

    If ``snapshot`` is None, the share itself is cloned; otherwise the
    given snapshot is cloned instead.
    """
    if (re.match(settings.SHARE_REGEX + '$', new_name) is None):
        e_msg = ('Clone name is invalid. It must start with a letter and can'
                 ' contain letters, digits, _, . and - characters')
        handle_exception(Exception(e_msg), request)
    if (Share.objects.filter(name=new_name).exists()):
        e_msg = ('Another Share with name: %s already exists.' % new_name)
        handle_exception(Exception(e_msg), request)
    if (Snapshot.objects.filter(share=share, name=new_name).exists()):
        e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s. Choose a different name' % (new_name, share.name))
        handle_exception(Exception(e_msg), request)
    try:
        share_name = share.subvol_name
        snap = None
        if (snapshot is not None):
            snap = snapshot.real_name
        add_clone(share.pool, share_name, new_name, snapshot=snap)
        snap_id = share_id(share.pool, new_name)
        qgroup_id = ('0/%s' % snap_id)
        update_quota(share.pool, qgroup_id, share.size * 1024)
        new_share = Share(pool=share.pool, qgroup=qgroup_id, name=new_name,
                          size=share.size, subvol_name=new_name)
        new_share.save()
        return Response(ShareSerializer(new_share).data)
    # Bug fix: "except Exception, e" is Python-2-only syntax (SyntaxError on
    # Python 3); use the version-agnostic "as" form.
    except Exception as e:
        handle_exception(e, request)
def _create(self, share, snap_name, pool_device, request, uvisible,
            snap_type, writable):
    """Create a snapshot of ``share`` and persist the Snapshot record.

    For snap_type 'receiver' no local btrfs snapshot is taken; the record
    keeps placeholder size/qgroup values.
    """
    if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
        e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s' % (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    try:
        real_name = ('%s_%s' % (share.name, snap_name))
        snap_size = 0
        qgroup_id = '0/na'
        if (snap_type != 'receiver'):
            add_snap(share.pool.name, pool_device, share.subvol_name,
                     real_name, share_prepend=False, readonly=not writable)
            snap_id = share_id(share.pool.name, pool_device, real_name)
            qgroup_id = ('0/%s' % snap_id)
            snap_size = share_usage(share.pool.name, pool_device, qgroup_id)
        s = Snapshot(share=share, name=snap_name, real_name=real_name,
                     size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                     snap_type=snap_type, writable=writable)
        s.save()
        return Response(SnapshotSerializer(s).data)
    # Bug fix: "except Exception, e" is Python-2-only syntax (SyntaxError on
    # Python 3); use the version-agnostic "as" form.
    except Exception as e:
        e_msg = ('Failed to create snapshot due to a system error.')
        logger.error(e_msg)
        logger.exception(e)
        handle_exception(Exception(e_msg), request)
def _create(self, share, snap_name, request, uvisible, snap_type, writable):
    """Take a btrfs snapshot of ``share`` and persist a Snapshot record.

    Replication snapshots are forced read-only; the snapshot qgroup is
    assigned to the share's parent qgroup when one is configured.
    """
    if Snapshot.objects.filter(share=share, name=snap_name).exists():
        # Note e_msg is consumed by replication/util.py create_snapshot()
        e_msg = ("Snapshot ({}) already exists for the share ({}).").format(
            snap_name, share.name
        )
        handle_exception(Exception(e_msg), request)
    if snap_type == "replication":
        writable = False
    add_snap(share, snap_name, writable)
    qgroup_id = "0/{}".format(share_id(share.pool, snap_name))
    if share.pqgroup != settings.MODEL_DEFS["pqgroup"]:
        qgroup_assign(qgroup_id, share.pqgroup, share.pool.mnt_pt)
    snap_size, _eusage = volume_usage(share.pool, qgroup_id)
    snap = Snapshot(
        share=share,
        name=snap_name,
        real_name=snap_name,
        size=snap_size,
        qgroup=qgroup_id,
        uvisible=uvisible,
        snap_type=snap_type,
        writable=writable,
    )
    # The following share.save() was informed by test_snapshot.py
    share.save()
    snap.save()
    return Response(SnapshotSerializer(snap).data)
def _create(self, share, snap_name, pool_device, request, uvisible,
            snap_type):
    """Create a snapshot of ``share`` and persist the Snapshot record.

    For snap_type 'receiver' no local btrfs snapshot is taken; the record
    keeps placeholder size/qgroup values.
    """
    if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
        e_msg = ('Snapshot with name: %s already exists for the '
                 'share: %s' % (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    try:
        real_name = ('%s_%s' % (share.name, snap_name))
        snap_size = 0
        qgroup_id = '0/na'
        if (snap_type != 'receiver'):
            add_snap(share.pool.name, pool_device, share.subvol_name,
                     real_name, share_prepend=False)
            snap_id = share_id(share.pool.name, pool_device, real_name)
            qgroup_id = ('0/%s' % snap_id)
            snap_size = share_usage(share.pool.name, pool_device, qgroup_id)
        s = Snapshot(share=share, name=snap_name, real_name=real_name,
                     size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                     snap_type=snap_type)
        s.save()
        return Response(SnapshotSerializer(s).data)
    # Bug fix: "except Exception, e" is Python-2-only syntax (SyntaxError on
    # Python 3); use the version-agnostic "as" form.
    except Exception as e:
        e_msg = ('Failed to create snapshot due to a system error.')
        logger.error(e_msg)
        logger.exception(e)
        handle_exception(Exception(e_msg), request)
def _create(self, share, snap_name, request, uvisible, snap_type, writable):
    """Snapshot ``share`` as ``snap_name`` and return the serialized record."""
    if Snapshot.objects.filter(share=share, name=snap_name).exists():
        e_msg = ('Snapshot(%s) already exists for the Share(%s).' %
                 (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    snap_size = 0
    qgroup_id = '0/na'
    # Replication snapshots are always created read-only.
    if snap_type == 'replication':
        writable = False
    add_snap(share.pool, share.subvol_name, snap_name, writable)
    qgroup_id = ('0/%s' % share_id(share.pool, snap_name))
    pool_mnt = ('%s/%s' % (settings.MNT_PT, share.pool.name))
    qgroup_assign(qgroup_id, share.pqgroup, pool_mnt)
    snap_size, eusage = share_usage(share.pool, qgroup_id)
    new_snap = Snapshot(share=share, name=snap_name, real_name=snap_name,
                        size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                        snap_type=snap_type, writable=writable)
    new_snap.save()
    return Response(SnapshotSerializer(new_snap).data)
def create_clone(share, new_name, request, logger, snapshot=None):
    """Clone ``share`` (or ``snapshot`` of it, when given) into a new share.

    Creates the clone subvolume, its qgroups, quota (when a real parent
    qgroup was created) and mounts the result under /mnt2/<new_name>.
    """
    # Validate the requested clone name before touching the filesystem.
    if re.match(settings.SHARE_REGEX + "$", new_name) is None:
        e_msg = ("Clone name is invalid. It must start with a letter and can "
                 "contain letters, digits, _, . and - characters.")
        handle_exception(Exception(e_msg), request)
    if Share.objects.filter(name=new_name).exists():
        e_msg = "Another share with name ({}) already exists.".format(new_name)
        handle_exception(Exception(e_msg), request)
    if Snapshot.objects.filter(share=share, name=new_name).exists():
        e_msg = ("Snapshot with name ({}) already exists for the "
                 "share ({}). Choose a different name.").format(
            new_name, share.name)
        handle_exception(Exception(e_msg), request)
    try:
        origin = snapshot.real_name if snapshot is not None else None
        add_clone(share.pool, share.subvol_name, new_name, snapshot=origin)
        qgroup_id = "0/{}".format(share_id(share.pool, new_name))
        pqid = qgroup_create(share.pool)
        clone = Share(
            pool=share.pool,
            qgroup=qgroup_id,
            pqgroup=pqid,
            name=new_name,
            size=share.size,
            subvol_name=new_name,
        )
        clone.save()
        if pqid != PQGROUP_DEFAULT:
            update_quota(clone.pool, pqid, clone.size * 1024)
            share_pqgroup_assign(pqid, clone)
        # Mount our new clone share.
        # We independently mount all shares, data pool or system pool, in /mnt2/name
        mount_share(clone, "{}{}".format(settings.MNT_PT, new_name))
        return Response(ShareSerializer(clone).data)
    except Exception as e:
        handle_exception(e, request)
def _create(self, share, snap_name, request, uvisible, snap_type, writable):
    """Snapshot ``share`` as ``snap_name``, record usage, return serialized data."""
    if Snapshot.objects.filter(share=share, name=snap_name).exists():
        e_msg = ('Snapshot(%s) already exists for the Share(%s).' %
                 (snap_name, share.name))
        handle_exception(Exception(e_msg), request)
    snap_size = 0
    qgroup_id = '0/na'
    # Replication snapshots are always created read-only.
    if snap_type == 'replication':
        writable = False
    add_snap(share.pool, share.subvol_name, snap_name, writable)
    qgroup_id = ('0/%s' % share_id(share.pool, snap_name))
    mnt_pt = ('%s/%s' % (settings.MNT_PT, share.pool.name))
    qgroup_assign(qgroup_id, share.pqgroup, mnt_pt)
    snap_size, eusage = volume_usage(share.pool, qgroup_id)
    new_snap = Snapshot(share=share, name=snap_name, real_name=snap_name,
                        size=snap_size, qgroup=qgroup_id, uvisible=uvisible,
                        snap_type=snap_type, writable=writable)
    new_snap.save()
    return Response(SnapshotSerializer(new_snap).data)
def create_clone(share, new_name, request, logger, snapshot=None):
    """Create a clone of ``share`` named ``new_name``.

    If ``snapshot`` is None, the share itself is cloned; otherwise the
    given snapshot is cloned. The clone is mounted under /mnt2/<new_name>.
    """
    if (re.match(settings.SHARE_REGEX + '$', new_name) is None):
        e_msg = ('Clone name is invalid. It must start with a letter and can '
                 'contain letters, digits, _, . and - characters.')
        handle_exception(Exception(e_msg), request)
    if (Share.objects.filter(name=new_name).exists()):
        e_msg = 'Another share with name ({}) already exists.'.format(new_name)
        handle_exception(Exception(e_msg), request)
    if (Snapshot.objects.filter(share=share, name=new_name).exists()):
        e_msg = ('Snapshot with name ({}) already exists for the '
                 'share ({}). Choose a different name.').format(new_name,
                                                                share.name)
        handle_exception(Exception(e_msg), request)
    try:
        share_name = share.subvol_name
        snap = None
        if (snapshot is not None):
            snap = snapshot.real_name
        add_clone(share.pool, share_name, new_name, snapshot=snap)
        snap_id = share_id(share.pool, new_name)
        qgroup_id = ('0/%s' % snap_id)
        pqid = qgroup_create(share.pool)
        new_share = Share(pool=share.pool, qgroup=qgroup_id, pqgroup=pqid,
                          name=new_name, size=share.size,
                          subvol_name=new_name)
        new_share.save()
        # Bug fix: "is not" compares identity, which is unreliable for
        # strings and could silently skip quota/pqgroup assignment; compare
        # by value instead.
        if pqid != PQGROUP_DEFAULT:
            update_quota(new_share.pool, pqid, new_share.size * 1024)
            share_pqgroup_assign(pqid, new_share)
        # Mount our new clone share.
        mnt_pt = '{}{}'.format(settings.MNT_PT, new_name)
        mount_share(new_share, mnt_pt)
        return Response(ShareSerializer(new_share).data)
    except Exception as e:
        handle_exception(e, request)
def post(self, request, sname, snap_name, command=None):
    """Create a snapshot of share ``sname``; with command='rollback',
    roll the share back onto the named snapshot instead.
    """
    share = self._validate_share(sname, request)
    # First disk of the pool is used to address the btrfs filesystem.
    pool_device = Disk.objects.filter(pool=share.pool)[0].name
    if (command is None):
        if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
            e_msg = ('Snapshot with name: %s already exists for the '
                     'share: %s' % (snap_name, sname))
            handle_exception(Exception(e_msg), request)
        add_snap(share.pool.name, pool_device, share.subvol_name, snap_name)
        snap_id = share_id(share.pool.name, pool_device, snap_name)
        qgroup_id = ('0/%s' % snap_id)
        s = Snapshot(share=share, name=snap_name, qgroup=qgroup_id)
        s.save()
        return Response(SnapshotSerializer(s).data)
    if (command == 'rollback'):
        try:
            snap = Snapshot.objects.get(share=share, name=snap_name)
        # Bug fix: a bare "except:" also swallows KeyboardInterrupt and
        # SystemExit; catch the specific Django lookup failure.
        except Snapshot.DoesNotExist:
            e_msg = ('Snapshot with name: %s does not exist for the '
                     'share: %s' % (snap_name, sname))
            handle_exception(Exception(e_msg), request)
        try:
            rollback_snap(snap_name, sname, share.subvol_name,
                          share.pool.name, pool_device)
            share.subvol_name = snap_name
            update_quota(share.pool.name, pool_device, snap.qgroup,
                         share.size * 1024)
            share.qgroup = snap.qgroup
            share.save()
            snap.delete()
            return Response()
        # Bug fix: "except Exception, e" is Python-2-only syntax.
        except Exception as e:
            logger.exception(e)
            handle_exception(e, request)
def post(self, request):
    """Create a new share on the requested pool with the given size quota."""
    try:
        pool_name = request.DATA['pool']
        share_name = request.DATA['name']
        size = int(request.DATA['size'])
        pool = None
        for p in Pool.objects.all():
            if (p.name == pool_name):
                pool = p
                break
        # Bug fix: previously queried Disk with the loop variable "p",
        # which references the *last* iterated pool when no name matched;
        # use the matched pool instead.
        disk = Disk.objects.filter(pool=pool)[0]
        add_share(pool_name, disk.name, share_name)
        sid = share_id(pool_name, disk.name, share_name)
        qgroup_id = '0/' + sid
        update_quota(pool_name, disk.name, qgroup_id, str(size))
        cur_usage = int(share_usage(pool_name, disk.name, qgroup_id))
        qgroup = Qgroup(uuid=qgroup_id)
        qgroup.save()
        s = Share(pool=pool, qgroup=qgroup, name=share_name, size=size,
                  free=(size - cur_usage))
        s.save()
        return Response(ShareSerializer(s).data)
    # Bug fix: "except Exception, e" is Python-2-only syntax (SyntaxError
    # on Python 3); use the version-agnostic "as" form.
    except Exception as e:
        handle_exception(e, request)
def _update_quota(self, pool_name, disk_name, share_name, size):
    """Set the quota on ``share_name``'s qgroup and return that qgroup id.

    ``size`` is multiplied by 1024 before being applied -- presumably it
    arrives in KiB and the quota is in bytes; TODO confirm against callers.
    """
    subvol_id = share_id(pool_name, disk_name, share_name)
    qgroup = '0/' + subvol_id
    update_quota(pool_name, disk_name, qgroup, size * 1024)
    return qgroup
def test_share_id(self):
    """
    Test to see if share_id() successfully returns existing subvolume id's
    :return:
    """
    pool = Pool(raid='raid0', name='test-pool')
    # Typical output from subvol_list_helper(), a simple wrapper around
    # run_command with re-try's
    out = [
        'ID 257 gen 13616 top level 5 path rock-ons-root',
        'ID 259 gen 13616 top level 5 path plex-data',
        'ID 260 gen 13616 top level 5 path plex-config',
        'ID 261 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/83e4dde6b9cfddf46b75a07ec8d65ad87a748b98cf27de7d5b3298c1f3455ae4',  # noqa E501
        'ID 262 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/b670fb0c7ecd3d2c401fbfd1fa4d7a872fbada0a4b8c2516d0be18911c6b25d6',  # noqa E501
        'ID 263 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/29460ac934423a55802fcad24856827050697b4a9f33550bd93c82762fb6db8f',  # noqa E501
        'ID 264 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/d2a0ecffe6fa4ef3de9646a75cc629bbd9da7eead7f767cb810f9808d6b3ecb6',  # noqa E501
        'ID 265 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a3a20fd46b6d67fdab1af5e4b1ce148d87b8012d8187edfea6b04b3704cba6c0',  # noqa E501
        'ID 266 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/1ed1f43a22cbe1f8380e0cef729e461f6b344be78e2d3723cbd1231d8cc562de',  # noqa E501
        'ID 267 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bc9bc5a736c631cbc43d64c0d03392477ca06a2ac2bb2e68cabc511166525e1c',  # noqa E501
        'ID 268 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/600b3b23bb1613bd694a547865f8dbbf6118749300f846372f182d33b6cc7039',  # noqa E501
        'ID 269 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a55eda61cffe86f4bef7a308ded5b9a64daed3db625ae33cf8c2b4926dfa4da6',  # noqa E501
        'ID 270 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bd06bc691e41ca2e6ebbfb2e49f0dcea815f17f2575915cc16bd948398fe198f',  # noqa E501
        'ID 271 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5dfde44c18f7bcac81e497b56b25e06af239999305c44970346ef2316479cddf',  # noqa E501
        'ID 272 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/26f44488da244735b4b0f4f5d1fd269f46f45959f8d32d53c58ce7f6566625db',  # noqa E501
        'ID 273 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e6121e4ddc6bf59dc4cfabed58366f3c9d97a6477b0357f12dea89b20e61a194',  # noqa E501
        'ID 274 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e2ebadbfcdb671f5de00f47470e435e7c73fc691e303bc15f5087a11f24439bc',  # noqa E501
        'ID 275 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/90549afba45a31f090522e483a39e5afc6c4c7129455636572d88534dd368fe6',  # noqa E501
        'ID 276 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5b066908aceaaacb952253905f1e20ba42735439544fb91a4a5e594f1c705e08',  # noqa E501
        'ID 283 gen 13631 top level 5 path sftpdata',
        'ID 284 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508011700',  # noqa E501
        'ID 285 gen 13616 top level 5 path .snapshots/sftpdata/sftp-monthly-snapshot_201508011730',  # noqa E501
        'ID 286 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508021700',  # noqa E501
        'ID 287 gen 13616 top level 5 path .snapshots/sftpdata/sftp-weekly-snapshot_201508021715',  # noqa E501
        'ID 288 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508051700',  # noqa E501
        'ID 289 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508251700',  # noqa E501
        'ID 400 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/01a44504c48d528cf64d2691e5d362f328962560eb0427c6f53fb2300df87bd9',  # noqa E501
        'ID 401 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/97e9fc98a8bdb50045400594330b50d79ae6e8b3bf90bb7b63c34751f4c495e0',  # noqa E501
        'ID 402 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/b54b867d760328df6f7aca9934ddbbb5b2afebbbd9e228d86bede93324bcd0d2',  # noqa E501
        'ID 403 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/fe5a39fcbbb23a05a3478944d5ad5143b01d0b63362d935c86c03a9a38fa3006',  # noqa E501
        'ID 404 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/2b3b8ebd68c3baeb685d76e1f87ccd667b43ee7b6587a3beff4797ca70321bf1',  # noqa E501
        'ID 405 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/86c9daa3d5aa144423daf15d87bd20a2e9f133903893f7178871751f0c96051e',  # noqa E501
        'ID 406 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/fd9afcfa5754be3fa26d6a811717661e4cf7c42163216b8e2e076729b5397429',  # noqa E501
        'ID 407 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/cd8ea80e21c405a5a3db583d91f8d459a12f1dfb0a912af413cf52eca9b18bf1',  # noqa E501
        'ID 408 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/ebea89404d903a8fdbb8ae6ecc18e1a6cb63af0d4821b87385854310741b2679',  # noqa E501
        'ID 409 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/595cc965de9c2d4b2d909a2663d7f34eb3659a50cfab04455b5408883a2d0e4c',  # noqa E501
        'ID 410 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/68a23a078a69b225107bd75a3f53e4c10b5cc2e22a1bb9911c6666a0bd938734',  # noqa E501
        'ID 411 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5c873fdd5c4eb8b0b4ec43b0e52620a8ced984675949132789870b3789d6f236',  # noqa E501
        'ID 412 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/8f201c360d8a0fba5fd9282814484f0709567aa4b7e34755855419c0de27f2cb',  # noqa E501
        'ID 413 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e62fa2fe0b8877602f2ec1f41ced2e1ef20733b95f6f2dc95b44d6ce1e3a78a5',  # noqa E501
        'ID 414 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bc8085f96802edf614fd1fc66bb28108bbd1e700bb96779fa977e7ac6d59e527',  # noqa E501
        'ID 415 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/2a8355cf96789fda77fa67ab99ca14e40fd9210b29635b363bf20ced53c22aa2',  # noqa E501
        'ID 416 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/ed6561db61c857c9ff9a63f578961a6f7619089191ab373ec81bede37f3c1426',  # noqa E501
        'ID 417 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bcc04bfdc35f0b7174b67f9778354c7f14e73425ba054d39d52e7d8ad70c2e69',  # noqa E501
        'ID 418 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/0c680ed4d54df71ec6bd4a61a62e6ce4e9fb3c8a2bb84f299e30aea7dd99ef52',  # noqa E501
        'ID 419 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a8090d90a27208860585f2e1abb823e365e078d4d5ec0ef5e9114f103d8b3cde',  # noqa E501
        'ID 420 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/0717197731662beb1812fced93b463c772036f9c849b913a4d830e26c72a7222',  # noqa E501
        'ID 792 gen 13627 top level 5 path .snapshots/sftpdata/test-share-snaphot',  # noqa E501
        'ID 793 gen 13629 top level 5 path .snapshots/sftpdata/another-test-snapshot',  # noqa E501
        'ID 794 gen 13631 top level 5 path .snapshots/sftpdata/snapshot-name',  # noqa E501
        ''
    ]
    err = ['']
    rc = 0
    existing_share = 'snapshot-name'
    existing_share2 = 'sftpdata'
    nonexistent_share = 'abcdef'
    # if queried for the last entry "snapshot-name" we would expect:
    expected_result = '794'
    expected_result2 = '283'
    # setup run_command mock to return the above test data
    self.mock_run_command.return_value = (out, err, rc)
    self.mock_mount_root.return_value = '/mnt2/test-mount'
    self.assertEqual(share_id(pool, existing_share), expected_result,
                     msg=("Failed to get existing share_id snapshot "
                          "example"))
    self.assertEqual(share_id(pool, existing_share2), expected_result2,
                     msg="Failed to get existing share_id regular example")
    # share_id() is expected to raise when the subvolume is absent from
    # the listing above.
    with self.assertRaises(Exception):
        share_id(pool, nonexistent_share)
def test_share_id(self):
    """
    Test to see if share_id() successfully returns existing subvolume id's
    :return:
    """
    pool = Pool(raid='raid0', name='test-pool')
    # Typical output from subvol_list_helper(), a simple wrapper around
    # run_command with re-try's
    out = [
        'ID 257 gen 13616 top level 5 path rock-ons-root',
        'ID 259 gen 13616 top level 5 path plex-data',
        'ID 260 gen 13616 top level 5 path plex-config',
        'ID 261 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/83e4dde6b9cfddf46b75a07ec8d65ad87a748b98cf27de7d5b3298c1f3455ae4',
        'ID 262 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/b670fb0c7ecd3d2c401fbfd1fa4d7a872fbada0a4b8c2516d0be18911c6b25d6',
        'ID 263 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/29460ac934423a55802fcad24856827050697b4a9f33550bd93c82762fb6db8f',
        'ID 264 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/d2a0ecffe6fa4ef3de9646a75cc629bbd9da7eead7f767cb810f9808d6b3ecb6',
        'ID 265 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a3a20fd46b6d67fdab1af5e4b1ce148d87b8012d8187edfea6b04b3704cba6c0',
        'ID 266 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/1ed1f43a22cbe1f8380e0cef729e461f6b344be78e2d3723cbd1231d8cc562de',
        'ID 267 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bc9bc5a736c631cbc43d64c0d03392477ca06a2ac2bb2e68cabc511166525e1c',
        'ID 268 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/600b3b23bb1613bd694a547865f8dbbf6118749300f846372f182d33b6cc7039',
        'ID 269 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a55eda61cffe86f4bef7a308ded5b9a64daed3db625ae33cf8c2b4926dfa4da6',
        'ID 270 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bd06bc691e41ca2e6ebbfb2e49f0dcea815f17f2575915cc16bd948398fe198f',
        'ID 271 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5dfde44c18f7bcac81e497b56b25e06af239999305c44970346ef2316479cddf',
        'ID 272 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/26f44488da244735b4b0f4f5d1fd269f46f45959f8d32d53c58ce7f6566625db',
        'ID 273 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e6121e4ddc6bf59dc4cfabed58366f3c9d97a6477b0357f12dea89b20e61a194',
        'ID 274 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e2ebadbfcdb671f5de00f47470e435e7c73fc691e303bc15f5087a11f24439bc',
        'ID 275 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/90549afba45a31f090522e483a39e5afc6c4c7129455636572d88534dd368fe6',
        'ID 276 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5b066908aceaaacb952253905f1e20ba42735439544fb91a4a5e594f1c705e08',
        'ID 283 gen 13631 top level 5 path sftpdata',
        'ID 284 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508011700',
        'ID 285 gen 13616 top level 5 path .snapshots/sftpdata/sftp-monthly-snapshot_201508011730',
        'ID 286 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508021700',
        'ID 287 gen 13616 top level 5 path .snapshots/sftpdata/sftp-weekly-snapshot_201508021715',
        'ID 288 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508051700',
        'ID 289 gen 13616 top level 5 path .snapshots/sftpdata/sftp-daily-snapshot_201508251700',
        'ID 400 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/01a44504c48d528cf64d2691e5d362f328962560eb0427c6f53fb2300df87bd9',
        'ID 401 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/97e9fc98a8bdb50045400594330b50d79ae6e8b3bf90bb7b63c34751f4c495e0',
        'ID 402 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/b54b867d760328df6f7aca9934ddbbb5b2afebbbd9e228d86bede93324bcd0d2',
        'ID 403 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/fe5a39fcbbb23a05a3478944d5ad5143b01d0b63362d935c86c03a9a38fa3006',
        'ID 404 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/2b3b8ebd68c3baeb685d76e1f87ccd667b43ee7b6587a3beff4797ca70321bf1',
        'ID 405 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/86c9daa3d5aa144423daf15d87bd20a2e9f133903893f7178871751f0c96051e',
        'ID 406 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/fd9afcfa5754be3fa26d6a811717661e4cf7c42163216b8e2e076729b5397429',
        'ID 407 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/cd8ea80e21c405a5a3db583d91f8d459a12f1dfb0a912af413cf52eca9b18bf1',
        'ID 408 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/ebea89404d903a8fdbb8ae6ecc18e1a6cb63af0d4821b87385854310741b2679',
        'ID 409 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/595cc965de9c2d4b2d909a2663d7f34eb3659a50cfab04455b5408883a2d0e4c',
        'ID 410 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/68a23a078a69b225107bd75a3f53e4c10b5cc2e22a1bb9911c6666a0bd938734',
        'ID 411 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/5c873fdd5c4eb8b0b4ec43b0e52620a8ced984675949132789870b3789d6f236',
        'ID 412 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/8f201c360d8a0fba5fd9282814484f0709567aa4b7e34755855419c0de27f2cb',
        'ID 413 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/e62fa2fe0b8877602f2ec1f41ced2e1ef20733b95f6f2dc95b44d6ce1e3a78a5',
        'ID 414 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bc8085f96802edf614fd1fc66bb28108bbd1e700bb96779fa977e7ac6d59e527',
        'ID 415 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/2a8355cf96789fda77fa67ab99ca14e40fd9210b29635b363bf20ced53c22aa2',
        'ID 416 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/ed6561db61c857c9ff9a63f578961a6f7619089191ab373ec81bede37f3c1426',
        'ID 417 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/bcc04bfdc35f0b7174b67f9778354c7f14e73425ba054d39d52e7d8ad70c2e69',
        'ID 418 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/0c680ed4d54df71ec6bd4a61a62e6ce4e9fb3c8a2bb84f299e30aea7dd99ef52',
        'ID 419 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/a8090d90a27208860585f2e1abb823e365e078d4d5ec0ef5e9114f103d8b3cde',
        'ID 420 gen 13616 top level 257 path rock-ons-root/btrfs/subvolumes/0717197731662beb1812fced93b463c772036f9c849b913a4d830e26c72a7222',
        'ID 792 gen 13627 top level 5 path .snapshots/sftpdata/test-share-snaphot',
        'ID 793 gen 13629 top level 5 path .snapshots/sftpdata/another-test-snapshot',
        'ID 794 gen 13631 top level 5 path .snapshots/sftpdata/snapshot-name',
        ''
    ]
    err = ['']
    rc = 0
    existing_share = 'snapshot-name'
    existing_share2 = 'sftpdata'
    nonexistent_share = 'abcdef'
    # if queried for the last entry "snapshot-name" we would expect:
    expected_result = '794'
    expected_result2 = '283'
    # setup expected Exception when no share is found:
    # NOTE(review): expected_exception is never used below --
    # assertRaises(Exception) does not check the message.
    expected_exception = 'subvolume id for share: %s not found.' % nonexistent_share
    # setup run_command mock to return the above test data
    self.mock_run_command.return_value = (out, err, rc)
    self.mock_mount_root.return_value = '/mnt2/test-mount'
    self.assertEqual(share_id(pool, existing_share), expected_result,
                     msg="Failed to get existing share_id snapshot example")
    self.assertEqual(share_id(pool, existing_share2), expected_result2,
                     msg="Failed to get existing share_id regular example")
    # share_id() is expected to raise when the subvolume is absent from
    # the listing above.
    with self.assertRaises(Exception):
        share_id(pool, nonexistent_share)