def import_snapshots(share):
    snaps_d = snaps_info('%s%s' % (settings.MNT_PT, share.pool.name),
                         share.name)
    snaps = [s.name for s in Snapshot.objects.filter(share=share)]
    for s in snaps:
        if (s not in snaps_d):
            logger.debug('Removing, missing on disk, snapshot db entry ({}) '
                         'from share ({}).'.format(s, share.name))
            Snapshot.objects.get(share=share, name=s).delete()
    for s in snaps_d:
        if (s in snaps):
            so = Snapshot.objects.get(share=share, name=s)
        else:
            logger.debug('Adding, missing in db, on disk snapshot ({}) '
                         'against share ({}).'.format(s, share.name))
            so = Snapshot(share=share, name=s, real_name=s,
                          writable=snaps_d[s][1], qgroup=snaps_d[s][0])
        rusage, eusage = volume_usage(share.pool, snaps_d[s][0])
        if (rusage != so.rusage or eusage != so.eusage):
            so.rusage = rusage
            so.eusage = eusage
            update_shareusage_db(s, rusage, eusage)
        else:
            update_shareusage_db(s, rusage, eusage, UPDATE_TS)
        so.save()
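The update_shareusage_db() helper called above is not shown on this page, but the later variant of import_snapshots() below inlines what appears to be the same ShareUsage bookkeeping. A minimal sketch of such a helper, assuming UPDATE_TS is a boolean flag and inferring the signature from the call sites; this is not the project's actual code:

from datetime import datetime

from django.utils.timezone import utc  # pre Django 4.x location

from storageadmin.models import ShareUsage  # assumed model import path

UPDATE_TS = True


def update_shareusage_db(subvol_name, rusage, eusage, update_ts=False):
    """Record or refresh a ShareUsage data point (sketch, names assumed)."""
    ts = datetime.utcnow().replace(tzinfo=utc)
    if not update_ts:
        # Usage changed: store a new data point.
        ShareUsage(name=subvol_name, r_usage=rusage, e_usage=eusage,
                   ts=ts).save()
        return
    try:
        # Usage unchanged: bump the timestamp and count on the latest entry.
        su = ShareUsage.objects.filter(name=subvol_name).latest('id')
        su.ts = ts
        su.count += 1
    except ShareUsage.DoesNotExist:
        su = ShareUsage(name=subvol_name, r_usage=rusage, e_usage=eusage,
                        ts=ts)
    su.save()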
Example #2
    def _create(self, share, snap_name, request, uvisible,
                snap_type, writable):
        if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
            # Note e_msg is consumed by replication/util.py create_snapshot()
            e_msg = ('Snapshot ({}) already exists for '
                     'the share ({}).').format(snap_name, share.name)
            handle_exception(Exception(e_msg), request)

        snap_size = 0
        qgroup_id = '0/na'
        if (snap_type == 'replication'):
            writable = False
        add_snap(share.pool, share.subvol_name, snap_name, writable)
        snap_id = share_id(share.pool, snap_name)
        qgroup_id = ('0/%s' % snap_id)
        if share.pqgroup != settings.MODEL_DEFS['pqgroup']:
            pool_mnt_pt = '{}{}'.format(settings.MNT_PT, share.pool.name)
            qgroup_assign(qgroup_id, share.pqgroup, pool_mnt_pt)
        snap_size, eusage = volume_usage(share.pool, qgroup_id)
        s = Snapshot(share=share, name=snap_name, real_name=snap_name,
                     size=snap_size, qgroup=qgroup_id,
                     uvisible=uvisible, snap_type=snap_type,
                     writable=writable)
        # The following share.save() was informed by test_snapshot.py
        share.save()
        s.save()
        return Response(SnapshotSerializer(s).data)
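In _create() above, the new snapshot's own 0/<id> qgroup is assigned under the share's pqgroup so that snapshot usage is accounted against the share's quota. A plausible sketch of a qgroup_assign() wrapper around the stock btrfs CLI follows; the project actually routes commands through its own run_command() helper (mocked in the tests later on this page), but subprocess is used here to keep the sketch self-contained:

import subprocess

BTRFS = '/sbin/btrfs'  # binary path as seen in the mocked test cmd


def qgroup_assign(qid, pqid, mnt_pt):
    # Sketch only: make qgroup qid (e.g. '0/258') a member of pqid
    # (e.g. '2015/4') on the mounted pool, equivalent to running:
    #   btrfs qgroup assign 0/258 2015/4 /mnt2/pool-name
    return subprocess.check_call([BTRFS, 'qgroup', 'assign', qid, pqid,
                                  mnt_pt])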
def import_snapshots(share):
    snaps_d = snaps_info('%s%s' % (settings.MNT_PT, share.pool.name),
                         share.name)
    snaps = [s.name for s in Snapshot.objects.filter(share=share)]
    for s in snaps:
        if (s not in snaps_d):
            Snapshot.objects.get(share=share, name=s).delete()
    for s in snaps_d:
        if (s in snaps):
            so = Snapshot.objects.get(share=share, name=s)
        else:
            so = Snapshot(share=share, name=s, real_name=s,
                          writable=snaps_d[s][1], qgroup=snaps_d[s][0])
        rusage, eusage = volume_usage(share.pool, snaps_d[s][0])
        ts = datetime.utcnow().replace(tzinfo=utc)
        if (rusage != so.rusage or eusage != so.eusage):
            so.rusage = rusage
            so.eusage = eusage
            su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts)
            su.save()
        else:
            try:
                su = ShareUsage.objects.filter(name=s).latest('id')
                su.ts = ts
                su.count += 1
            except ShareUsage.DoesNotExist:
                su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
                                ts=ts)
            finally:
                su.save()
        so.save()
Example #4
 def put(self, request, sid):
     with self._handle_exception(request):
         share = self._validate_share(request, sid)
         if ('size' in request.data):
             new_size = self._validate_share_size(request, share.pool)
             qid = qgroup_id(share.pool, share.subvol_name)
             cur_rusage, cur_eusage = volume_usage(share.pool, qid)
             if (new_size < cur_rusage):
                 e_msg = ('Unable to resize because requested new '
                          'size {} KB is less than current usage {} KB '
                          'of the share.').format(new_size, cur_rusage)
                 handle_exception(Exception(e_msg), request)
             # quota maintenance
             if share.pool.quotas_enabled:
                 # Only try create / update quotas if they are enabled,
                 # pqgroup of PQGROUP_DEFAULT (-1/-1) indicates no pqgroup,
                 # ie quotas were disabled when update was requested.
                 if share.pqgroup == PQGROUP_DEFAULT or \
                         not share.pqgroup_exist:
                     # if quotas were disabled or pqgroup non-existent.
                     share.pqgroup = qgroup_create(share.pool)
                     share.save()
                 if share.pqgroup != PQGROUP_DEFAULT:
                     # Only update quota and assign if now non default as
                     # default can also indicate Read-only fs at this point.
                     update_quota(share.pool, share.pqgroup,
                                  new_size * 1024)
                     share_pqgroup_assign(share.pqgroup, share)
             else:
                 # Our pool's quotas are disabled so reset pqgroup to -1/-1.
                 if share.pqgroup != PQGROUP_DEFAULT:
                     # Only reset if necessary
                     share.pqgroup = PQGROUP_DEFAULT
                     share.save()
             share.size = new_size
         if ('compression' in request.data):
             new_compression = self._validate_compression(request)
             if (share.compression_algo != new_compression):
                 share.compression_algo = new_compression
                 mnt_pt = '%s%s' % (settings.MNT_PT, share.name)
                 if (new_compression == 'no'):
                     new_compression = ''
                 set_property(mnt_pt, 'compression', new_compression)
         share.save()
         return Response(ShareSerializer(share).data)
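Note the units in the resize path above: new_size and cur_rusage are compared in KB, while update_quota() receives new_size * 1024, i.e. a byte count for the underlying btrfs qgroup limit. A hypothetical update_quota() wrapper, purely to illustrate the assumed unit handling (the project's real helper is not shown on this page):

import subprocess


def update_quota(pool, pqgroup, size_bytes):
    # Sketch only: limit the parent qgroup to size_bytes on the mounted pool,
    # e.g. btrfs qgroup limit 1073741824 2015/4 /mnt2/test-pool
    mnt_pt = '/mnt2/{}'.format(pool.name)  # assumed mount point layout
    return subprocess.check_call(['/sbin/btrfs', 'qgroup', 'limit',
                                  str(size_bytes), pqgroup, mnt_pt])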
Example #5
    def _create(self, share, snap_name, request, uvisible,
                snap_type, writable):
        if (Snapshot.objects.filter(share=share, name=snap_name).exists()):
            e_msg = ('Snapshot(%s) already exists for the Share(%s).' %
                     (snap_name, share.name))
            handle_exception(Exception(e_msg), request)

        snap_size = 0
        qgroup_id = '0/na'
        if (snap_type == 'replication'):
            writable = False
        add_snap(share.pool, share.subvol_name, snap_name, writable)
        snap_id = share_id(share.pool, snap_name)
        qgroup_id = ('0/%s' % snap_id)
        qgroup_assign(qgroup_id, share.pqgroup, ('%s/%s' % (settings.MNT_PT,
                                                            share.pool.name)))
        snap_size, eusage = volume_usage(share.pool, qgroup_id)
        s = Snapshot(share=share, name=snap_name, real_name=snap_name,
                     size=snap_size, qgroup=qgroup_id,
                     uvisible=uvisible, snap_type=snap_type,
                     writable=writable)
        s.save()
        return Response(SnapshotSerializer(s).data)
def import_shares(pool, request):
    shares = [s.name for s in Share.objects.filter(pool=pool)]
    shares_d = shares_info(pool)
    for s in shares:
        if (s not in shares_d):
            Share.objects.get(pool=pool, name=s).delete()
    for s in shares_d:
        if (s in shares):
            share = Share.objects.get(name=s)
            share.qgroup = shares_d[s]
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, share.qgroup, share.pqgroup)
            ts = datetime.utcnow().replace(tzinfo=utc)
            if (rusage != share.rusage or eusage != share.eusage or
               pqgroup_rusage != share.pqgroup_rusage or
               pqgroup_eusage != share.pqgroup_eusage):
                share.rusage = rusage
                share.eusage = eusage
                share.pqgroup_rusage = pqgroup_rusage
                share.pqgroup_eusage = pqgroup_eusage
                su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
                                ts=ts)
                su.save()
            else:
                try:
                    su = ShareUsage.objects.filter(name=s).latest('id')
                    su.ts = ts
                    su.count += 1
                except ShareUsage.DoesNotExist:
                    su = ShareUsage(name=s, r_usage=rusage,
                                    e_usage=eusage, ts=ts)
                finally:
                    su.save()
            share.save()
            continue
        try:
            cshare = Share.objects.get(name=s)
            cshares_d = shares_info('%s%s' % (settings.MNT_PT,
                                              cshare.pool.name))
            if (s in cshares_d):
                e_msg = ('Another pool(%s) has a Share with this same '
                         'name(%s) as this pool(%s). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with this command: '
                         'btrfs subvol delete %s[pool name]/%s' %
                         (cshare.pool.name, s, pool.name, settings.MNT_PT, s))
                handle_exception(Exception(e_msg), request)
            else:
                cshare.pool = pool
                cshare.qgroup = shares_d[s]
                cshare.size = pool.size
                cshare.subvol_name = s
                (cshare.rusage, cshare.eusage, cshare.pqgroup_rusage,
                 cshare.pqgroup_eusage) = \
                    volume_usage(pool, cshare.qgroup, cshare.pqgroup)
                cshare.save()
        except Share.DoesNotExist:
            pqid = qgroup_create(pool)
            update_quota(pool, pqid, pool.size * 1024)
            nso = Share(pool=pool, qgroup=shares_d[s], pqgroup=pqid, name=s,
                        size=pool.size, subvol_name=s)
            nso.save()
            mount_share(nso, '%s%s' % (settings.MNT_PT, s))
def import_shares(pool, request):
    shares = [s.name for s in Share.objects.filter(pool=pool)]
    shares_d = shares_info(pool)
    for s in shares:
        if (s not in shares_d):
            Share.objects.get(pool=pool, name=s).delete()
    for s in shares_d:
        if (s in shares):
            share = Share.objects.get(name=s)
            share.qgroup = shares_d[s]
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, share.qgroup, share.pqgroup)
            ts = datetime.utcnow().replace(tzinfo=utc)
            if (rusage != share.rusage or eusage != share.eusage
                    or pqgroup_rusage != share.pqgroup_rusage
                    or pqgroup_eusage != share.pqgroup_eusage):
                share.rusage = rusage
                share.eusage = eusage
                share.pqgroup_rusage = pqgroup_rusage
                share.pqgroup_eusage = pqgroup_eusage
                su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts)
                su.save()
            else:
                try:
                    su = ShareUsage.objects.filter(name=s).latest('id')
                    su.ts = ts
                    su.count += 1
                except ShareUsage.DoesNotExist:
                    su = ShareUsage(name=s,
                                    r_usage=rusage,
                                    e_usage=eusage,
                                    ts=ts)
                finally:
                    su.save()
            share.save()
            continue
        try:
            cshare = Share.objects.get(name=s)
            cshares_d = shares_info('%s%s' %
                                    (settings.MNT_PT, cshare.pool.name))
            if (s in cshares_d):
                e_msg = ('Another pool(%s) has a Share with this same '
                         'name(%s) as this pool(%s). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with this command: '
                         'btrfs subvol delete %s[pool name]/%s' %
                         (cshare.pool.name, s, pool.name, settings.MNT_PT, s))
                handle_exception(Exception(e_msg), request)
            else:
                cshare.pool = pool
                cshare.qgroup = shares_d[s]
                cshare.size = pool.size
                cshare.subvol_name = s
                (cshare.rusage, cshare.eusage, cshare.pqgroup_rusage,
                 cshare.pqgroup_eusage) = \
                    volume_usage(pool, cshare.qgroup, cshare.pqgroup)
                cshare.save()
        except Share.DoesNotExist:
            pqid = qgroup_create(pool)
            update_quota(pool, pqid, pool.size * 1024)
            nso = Share(pool=pool,
                        qgroup=shares_d[s],
                        pqgroup=pqid,
                        name=s,
                        size=pool.size,
                        subvol_name=s)
            nso.save()
            mount_share(nso, '%s%s' % (settings.MNT_PT, s))
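shares_info() is not shown on this page; from its use above ('s in shares_d' membership tests and shares_d[s] being stored as share.qgroup) it appears to return a mapping of subvolume name to its 0/* qgroup id. A purely hypothetical example of that assumed shape:

# Hypothetical shares_info() return value, inferred from usage above.
shares_d = {
    'home': '0/260',
    'media': '0/261',
    'backups': '0/263',
}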
Example #8
 def test_volume_usage(self):
     """
     Mock the return value of "btrfs qgroup show share_mount_pt" and assert
     that rfer and excl usage are correctly extracted for the original 0/*
     qgroup and for the Rockstor ad hoc 2015/* qgroup.
     :return:
     """
     # volume_usage() called with pool name of=test-pool volume_id=0/261
     # and new Rockstor qgroup pvolume_id=2015/4
     # mount_root(Pool object) returned /mnt2/test-pool
     # cmd=['/sbin/btrfs', 'qgroup', 'show', u'/mnt2/test-pool']
     #
     # Setup our calling variables and mock the root pool as mounted.
     o = [
         'qgroupid         rfer         excl ',
         '--------         ----         ---- ',
         '0/5          16.00KiB     16.00KiB ',
         '0/259         2.04MiB      2.04MiB ',
         '0/260         7.37GiB      7.37GiB ',
         '0/261        63.65MiB     63.65MiB ',
         '0/263       195.32MiB    496.00KiB ',
         '0/264       195.34MiB    112.00KiB ',
         '0/265       195.34MiB     80.00KiB ',
         '0/266       195.34MiB     80.00KiB ',
         '0/267       195.34MiB     80.00KiB ',
         '0/268       195.38MiB    152.00KiB ',
         '0/269       229.06MiB     80.00KiB ',
         '0/270       229.06MiB     80.00KiB ',
         '0/271       229.06MiB     80.00KiB ',
         '0/272       229.06MiB     96.00KiB ',
         '0/273       229.06MiB    128.00KiB ',
         '0/274       236.90MiB     80.00KiB ',
         '0/275       236.90MiB     80.00KiB ',
         '0/276       236.90MiB     80.00KiB ',
         '0/277       450.54MiB    128.00KiB ',
         '0/278       450.54MiB    112.00KiB ',
         '0/279       450.54MiB    128.00KiB ',
         '0/280       450.54MiB     80.00KiB ',
         '0/281       450.54MiB     80.00KiB ',
         '0/282       450.54MiB     80.00KiB ',
         '0/283       450.54MiB     80.00KiB ',
         '0/284       450.54MiB    176.00KiB ',
         '0/285       450.59MiB      3.43MiB ',
         '2015/1          0.00B        0.00B ',
         '2015/2        2.04MiB      2.04MiB ',
         '2015/3        7.37GiB      7.37GiB ',
         '2015/4       63.00MiB     63.00MiB ', ''
     ]
     # The following is an example of a fresh clone of a snapshot post import.
     o2 = [
         'qgroupid         rfer         excl ',
         '--------         ----         ---- ',
         '0/5          16.00KiB     16.00KiB ',
         '0/258        16.00KiB     16.00KiB ',
         '0/261        16.00KiB     16.00KiB ',
         '0/262        16.00KiB     16.00KiB ',
         '0/263        16.00KiB     16.00KiB ',
         '2015/1          0.00B        0.00B ',
         '2015/2          0.00B        0.00B ',
         '2015/3          0.00B        0.00B ',
         '2015/4          0.00B        0.00B ',
         '2015/5          0.00B        0.00B ',
         '2015/6          0.00B        0.00B ',
         '2015/7          0.00B        0.00B ',
         '2015/8          0.00B        0.00B ',
         '2015/9          0.00B        0.00B ',
         '2015/10         0.00B        0.00B ',
         '2015/11         0.00B        0.00B ',
         '2015/12         0.00B        0.00B ',
         '2015/13         0.00B        0.00B ',
         '2015/14         0.00B        0.00B ',
         '2015/15         0.00B        0.00B ',
         '2015/16         0.00B        0.00B ',
         '2015/17         0.00B        0.00B ',
         '2015/18         0.00B        0.00B ',
         '2015/19      16.00KiB     16.00KiB ',
         '2015/20         0.00B        0.00B ',
         '2015/21      16.00KiB     16.00KiB ',
         '2015/22      16.00KiB     16.00KiB ', ''
     ]
     e = ['']
     rc = 0
     # is_mounted returning True avoids mount command calls in mount_root()
     mount_point = '/mnt2/test-pool'
     self.mock_mount_root.return_value = mount_point
     # setup the return values from our run_command wrapper
     # examples of output from /mnt2/test-pool from a real system install
     self.mock_run_command.return_value = (o, e, rc)
     # create a fake pool object
     pool = Pool(raid='raid0', name='test-pool')
     # fake volume_id / qgroupid
     volume_id = '0/261'
     # and fake pvolume_id
     pvolume_id = '2015/4'
     # As volume_usage() uses convert_to_kib(), all returned sizes are in KiB;
     # here 63.65MiB (0/261) and 63.00MiB (2015/4) become 65177 and 64512 KiB.
     expected_results_share = [65177, 65177, 64512, 64512]
     self.assertEqual(volume_usage(pool, volume_id, pvolume_id),
                      expected_results_share,
                      msg='Failed to retrieve share rfer and excl usage')
     # We also test with a snapshot volume, i.e. with pvolume_id of None.
     pvolume_id2 = None
     expected_results_snapshot = [65177, 65177]
     self.assertEqual(volume_usage(pool, volume_id, pvolume_id2),
                      expected_results_snapshot,
                      msg='Failed to retrieve snapshot rfer and excl usage')
     # We have observed a rogue db field entry of pvolume_id = '-1/-1', which
     # caused volume_usage() to return only 2 values when callers passing 3
     # arguments expect 4. Ensure a dependable argument-count to return-value
     # count behaviour whenever the 3rd argument is not None, even for a
     # non-existent pvolume_id such as '-1/-1'.
     self.mock_run_command.return_value = (o2, e, rc)
     pvolume_id3 = '-1/-1'
     expected_results_rogue_pvolume_id = [16, 16, 0, 0]
     # Here we choose to return 0, 0 in place of the usage values for the
     # non-existent pqgroup.
     self.assertEqual(volume_usage(pool, volume_id, pvolume_id3),
                      expected_results_rogue_pvolume_id,
                      msg='Failed to handle bogus pvolume_id')
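The expected values above depend on convert_to_kib() parsing the human readable sizes from the qgroup show output ('63.65MiB', '16.00KiB', '0.00B', ...) into integer KiB. A minimal sketch of such a parser, consistent with those expectations (63.65MiB -> 65177, 63.00MiB -> 64512); the project's real helper may handle additional suffixes and edge cases:

def convert_to_kib(size_str):
    # Sketch only: parse a btrfs size string into integer KiB.
    # Suffixes are checked longest first so 'KiB' is not mistaken for 'B'.
    for suffix, multiplier in (('TiB', 1024 ** 3), ('GiB', 1024 ** 2),
                               ('MiB', 1024), ('KiB', 1), ('B', 1.0 / 1024)):
        if size_str.endswith(suffix):
            return int(float(size_str[:-len(suffix)]) * multiplier)
    raise ValueError('Unrecognised size string: {}'.format(size_str))


# e.g. convert_to_kib('63.65MiB') == 65177 and convert_to_kib('0.00B') == 0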
def import_shares(pool, request):
    # Establish known shares/subvols within our db for the given pool:
    shares_in_pool_db = [s.name for s in Share.objects.filter(pool=pool)]
    # Find the actual/current shares/subvols within the given pool:
    # Limited to Rockstor relevant subvols ie shares and clones.
    shares_in_pool = shares_info(pool)
    # List of pool's share.pqgroups so we can remove inadvertent duplication.
    # All pqgroups are removed when quotas are disabled, combined with a part
    # refresh we could have duplicates within the db.
    share_pqgroups_used = []
    # Delete db Share object if it is no longer found on disk.
    for s_in_pool_db in shares_in_pool_db:
        if s_in_pool_db not in shares_in_pool:
            logger.debug('Removing, missing on disk, share db entry ({}) from '
                         'pool ({}).'.format(s_in_pool_db, pool.name))
            Share.objects.get(pool=pool, name=s_in_pool_db).delete()
    # Check if each share in pool also has a db counterpart.
    for s_in_pool in shares_in_pool:
        logger.debug('---- Share name = {}.'.format(s_in_pool))
        if s_in_pool in shares_in_pool_db:
            logger.debug('Updating pre-existing same pool db share entry.')
            # We have a pool db share counterpart so retrieve and update it.
            share = Share.objects.get(name=s_in_pool, pool=pool)
            # Initially default our pqgroup value to db default of '-1/-1'
            # This way, unless quotas are enabled, all pqgroups will be
            # returned to db default.
            pqgroup = PQGROUP_DEFAULT
            if share.pool.quotas_enabled:
                # Quotas are enabled on our pool so we can validate pqgroup.
                if share.pqgroup == pqgroup or not share.pqgroup_exist \
                        or share.pqgroup in share_pqgroups_used:
                    # we have a void '-1/-1' or non existent pqgroup or
                    # this pqgroup has already been seen / used in this pool.
                    logger.debug('#### replacing void, non-existent, or '
                                 'duplicate pqgroup.')
                    pqgroup = qgroup_create(pool)
                    if pqgroup != PQGROUP_DEFAULT:
                        update_quota(pool, pqgroup, share.size * 1024)
                        share_pqgroup_assign(pqgroup, share)
                else:
                    # Our share's pqgroup looks OK so use it.
                    pqgroup = share.pqgroup
                # Record our use of this pqgroup to spot duplicates later.
                share_pqgroups_used.append(deepcopy(share.pqgroup))
            if share.pqgroup != pqgroup:
                # we need to update our share.pqgroup
                share.pqgroup = pqgroup
                share.save()
            share.qgroup = shares_in_pool[s_in_pool]
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, share.qgroup, pqgroup)
            if (rusage != share.rusage or eusage != share.eusage or
               pqgroup_rusage != share.pqgroup_rusage or
               pqgroup_eusage != share.pqgroup_eusage):
                share.rusage = rusage
                share.eusage = eusage
                share.pqgroup_rusage = pqgroup_rusage
                share.pqgroup_eusage = pqgroup_eusage
                update_shareusage_db(s_in_pool, rusage, eusage)
            else:
                update_shareusage_db(s_in_pool, rusage, eusage, UPDATE_TS)
            share.save()
            continue
        try:
            logger.debug('No prior entries in scanned pool trying all pools.')
            # Test (Try) for an existing system wide Share db entry.
            cshare = Share.objects.get(name=s_in_pool)
            # Get a list of Rockstor relevant subvols (ie shares and clones)
            # for the prior existing db share entry's pool.
            cshares_d = shares_info(cshare.pool)
            if s_in_pool in cshares_d:
                e_msg = ('Another pool ({}) has a share with this same '
                         'name ({}) as this pool ({}). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with the following command: '
                         '"btrfs subvol delete {}[pool name]/{}" WARNING this '
                         'will remove the entire contents of that '
                         'subvolume.').format(cshare.pool.name, s_in_pool,
                                              pool.name, settings.MNT_PT,
                                              s_in_pool)
                handle_exception(Exception(e_msg), request)
            else:
                # Update the prior existing db share entry previously
                # associated with another pool.
                logger.debug('Updating prior db entry from another pool.')
                cshare.pool = pool
                cshare.qgroup = shares_in_pool[s_in_pool]
                cshare.size = pool.size
                cshare.subvol_name = s_in_pool
                (cshare.rusage, cshare.eusage, cshare.pqgroup_rusage,
                 cshare.pqgroup_eusage) = volume_usage(pool, cshare.qgroup,
                                                       cshare.pqgroup)
                cshare.save()
                update_shareusage_db(s_in_pool, cshare.rusage, cshare.eusage)
        except Share.DoesNotExist:
            logger.debug('Db share entry does not exist - creating.')
            # We have a share on disk that has no db counterpart so create one.
            # Retrieve new pool quota id for use in db Share object creation.
            # As the replication receive share is 'special' we tag it as such.
            replica = False
            share_name = s_in_pool
            if re.match('.snapshot', s_in_pool) is not None:
                # We have an initial replication share, non snap in .snapshots.
                # We could change its name here, but there is still some
                # mixing of name and subvol throughout the project.
                replica = True
                logger.debug('Initial receive quirk-subvol found: Importing '
                             'as share and setting replica flag.')
            qid = shares_in_pool[s_in_pool]
            pqid = qgroup_create(pool)
            if pqid != PQGROUP_DEFAULT:
                update_quota(pool, pqid, pool.size * 1024)
                pool_mnt_pt = '{}{}'.format(settings.MNT_PT, pool.name)
                qgroup_assign(qid, pqid, pool_mnt_pt)
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, qid, pqid)
            nso = Share(pool=pool, qgroup=qid, pqgroup=pqid, name=share_name,
                        size=pool.size, subvol_name=s_in_pool, rusage=rusage,
                        eusage=eusage, pqgroup_rusage=pqgroup_rusage,
                        pqgroup_eusage=pqgroup_eusage,
                        replica=replica)
            nso.save()
            update_shareusage_db(s_in_pool, rusage, eusage)
            mount_share(nso, '%s%s' % (settings.MNT_PT, s_in_pool))
Example #10
def import_shares(pool, request):
    # Establish known shares/subvols within our db for the given pool:
    shares_in_pool_db = [s.name for s in Share.objects.filter(pool=pool)]
    # Find the actual/current shares/subvols within the given pool:
    # Limited to Rockstor relevant subvols ie shares and clones.
    shares_in_pool = shares_info(pool)
    # List of pool's share.pqgroups so we can remove inadvertent duplication.
    # All pqgroups are removed when quotas are disabled, combined with a part
    # refresh we could have duplicates within the db.
    share_pqgroups_used = []
    # Delete db Share object if it is no longer found on disk.
    for s_in_pool_db in shares_in_pool_db:
        if s_in_pool_db not in shares_in_pool:
            logger.debug('Removing, missing on disk, share db entry ({}) from '
                         'pool ({}).'.format(s_in_pool_db, pool.name))
            Share.objects.get(pool=pool, name=s_in_pool_db).delete()
    # Check if each share in pool also has a db counterpart.
    for s_in_pool in shares_in_pool:
        logger.debug('---- Share name = {}.'.format(s_in_pool))
        if s_in_pool in shares_in_pool_db:
            logger.debug('Updating pre-existing same pool db share entry.')
            # We have a pool db share counterpart so retrieve and update it.
            share = Share.objects.get(name=s_in_pool, pool=pool)
            # Initially default our pqgroup value to db default of '-1/-1'
            # This way, unless quotas are enabled, all pqgroups will be
            # returned to db default.
            pqgroup = PQGROUP_DEFAULT
            if share.pool.quotas_enabled:
                # Quotas are enabled on our pool so we can validate pqgroup.
                if share.pqgroup == pqgroup or not share.pqgroup_exist \
                        or share.pqgroup in share_pqgroups_used:
                    # we have a void '-1/-1' or non existent pqgroup or
                    # this pqgroup has already been seen / used in this pool.
                    logger.debug('#### replacing void, non-existent, or '
                                 'duplicate pqgroup.')
                    pqgroup = qgroup_create(pool)
                    if pqgroup != PQGROUP_DEFAULT:
                        update_quota(pool, pqgroup, share.size * 1024)
                        share_pqgroup_assign(pqgroup, share)
                else:
                    # Our share's pqgroup looks OK so use it.
                    pqgroup = share.pqgroup
                # Record our use of this pqgroup to spot duplicates later.
                share_pqgroups_used.append(deepcopy(share.pqgroup))
            if share.pqgroup != pqgroup:
                # we need to update our share.pqgroup
                share.pqgroup = pqgroup
                share.save()
            share.qgroup = shares_in_pool[s_in_pool]
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, share.qgroup, pqgroup)
            if (rusage != share.rusage or eusage != share.eusage
                    or pqgroup_rusage != share.pqgroup_rusage
                    or pqgroup_eusage != share.pqgroup_eusage):
                share.rusage = rusage
                share.eusage = eusage
                share.pqgroup_rusage = pqgroup_rusage
                share.pqgroup_eusage = pqgroup_eusage
                update_shareusage_db(s_in_pool, rusage, eusage)
            else:
                update_shareusage_db(s_in_pool, rusage, eusage, UPDATE_TS)
            share.save()
            continue
        try:
            logger.debug('No prior entries in scanned pool trying all pools.')
            # Test (Try) for an existing system wide Share db entry.
            cshare = Share.objects.get(name=s_in_pool)
            # Get a list of Rockstor relevant subvols (ie shares and clones)
            # for the prior existing db share entry's pool.
            cshares_d = shares_info(cshare.pool)
            if s_in_pool in cshares_d:
                e_msg = ('Another pool ({}) has a share with this same '
                         'name ({}) as this pool ({}). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with the following command: '
                         '"btrfs subvol delete {}[pool name]/{}" WARNING this '
                         'will remove the entire contents of that '
                         'subvolume.').format(cshare.pool.name, s_in_pool,
                                              pool.name, settings.MNT_PT,
                                              s_in_pool)
                handle_exception(Exception(e_msg), request)
            else:
                # Update the prior existing db share entry previously
                # associated with another pool.
                logger.debug('Updating prior db entry from another pool.')
                cshare.pool = pool
                cshare.qgroup = shares_in_pool[s_in_pool]
                cshare.size = pool.size
                cshare.subvol_name = s_in_pool
                (cshare.rusage, cshare.eusage, cshare.pqgroup_rusage,
                 cshare.pqgroup_eusage) = volume_usage(pool, cshare.qgroup,
                                                       cshare.pqgroup)
                cshare.save()
                update_shareusage_db(s_in_pool, cshare.rusage, cshare.eusage)
        except Share.DoesNotExist:
            logger.debug('Db share entry does not exist - creating.')
            # We have a share on disk that has no db counterpart so create one.
            # Retrieve new pool quota id for use in db Share object creation.
            # As the replication receive share is 'special' we tag it as such.
            replica = False
            share_name = s_in_pool
            if re.match('.snapshot', s_in_pool) is not None:
                # We have an initial replication share, non snap in .snapshots.
                # We could change its name here, but there is still some
                # mixing of name and subvol throughout the project.
                replica = True
                logger.debug('Initial receive quirk-subvol found: Importing '
                             'as share and setting replica flag.')
            qid = shares_in_pool[s_in_pool]
            pqid = qgroup_create(pool)
            if pqid != PQGROUP_DEFAULT:
                update_quota(pool, pqid, pool.size * 1024)
                pool_mnt_pt = '{}{}'.format(settings.MNT_PT, pool.name)
                qgroup_assign(qid, pqid, pool_mnt_pt)
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, qid, pqid)
            nso = Share(pool=pool,
                        qgroup=qid,
                        pqgroup=pqid,
                        name=share_name,
                        size=pool.size,
                        subvol_name=s_in_pool,
                        rusage=rusage,
                        eusage=eusage,
                        pqgroup_rusage=pqgroup_rusage,
                        pqgroup_eusage=pqgroup_eusage,
                        replica=replica)
            nso.save()
            update_shareusage_db(s_in_pool, rusage, eusage)
            mount_share(nso, '%s%s' % (settings.MNT_PT, s_in_pool))
Example #11
def import_shares(pool, request):
    # Establish known shares/subvols within our db for the given pool:
    shares_in_pool_db = [s.name for s in Share.objects.filter(pool=pool)]
    # Find the actual/current shares/subvols within the given pool:
    # Limited to Rockstor relevant subvols ie shares and clones.
    shares_in_pool = shares_info(pool)
    # Delete db Share object if it is no longer found on disk.
    for s_in_pool_db in shares_in_pool_db:
        if s_in_pool_db not in shares_in_pool:
            Share.objects.get(pool=pool, name=s_in_pool_db).delete()
    # Check if each share in pool also has a db counterpart.
    for s_in_pool in shares_in_pool:
        logger.debug('Share name = {}.'.format(s_in_pool))
        if s_in_pool in shares_in_pool_db:
            logger.debug('Updating pre-existing same pool db share entry.')
            # We have a pool db share counterpart so retrieve and update it.
            share = Share.objects.get(name=s_in_pool, pool=pool)
            share.qgroup = shares_in_pool[s_in_pool]
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, share.qgroup, share.pqgroup)
            if (rusage != share.rusage or eusage != share.eusage
                    or pqgroup_rusage != share.pqgroup_rusage
                    or pqgroup_eusage != share.pqgroup_eusage):
                share.rusage = rusage
                share.eusage = eusage
                share.pqgroup_rusage = pqgroup_rusage
                share.pqgroup_eusage = pqgroup_eusage
                update_shareusage_db(s_in_pool, rusage, eusage)
            else:
                update_shareusage_db(s_in_pool, rusage, eusage, UPDATE_TS)
            share.save()
            continue
        try:
            logger.debug('No prior entries in scanned pool trying all pools.')
            # Test (Try) for an existing system wide Share db entry.
            cshare = Share.objects.get(name=s_in_pool)
            # Get a list of Rockstor relevant subvols (ie shares and clones)
            # for the prior existing db share entry's pool.
            cshares_d = shares_info(cshare.pool)
            if s_in_pool in cshares_d:
                e_msg = ('Another pool ({}) has a Share with this same '
                         'name ({}) as this pool ({}). This configuration '
                         'is not supported. You can delete one of them '
                         'manually with the following command: '
                         '"btrfs subvol delete {}[pool name]/{}" WARNING this '
                         'will remove the entire contents of that subvolume.'.
                         format(cshare.pool.name, s_in_pool, pool.name,
                                settings.MNT_PT, s_in_pool))
                handle_exception(Exception(e_msg), request)
            else:
                # Update the prior existing db share entry previously
                # associated with another pool.
                logger.debug('Updating prior db entry from another pool.')
                cshare.pool = pool
                cshare.qgroup = shares_in_pool[s_in_pool]
                cshare.size = pool.size
                cshare.subvol_name = s_in_pool
                (cshare.rusage, cshare.eusage, cshare.pqgroup_rusage,
                 cshare.pqgroup_eusage) = volume_usage(pool, cshare.qgroup,
                                                       cshare.pqgroup)
                cshare.save()
                update_shareusage_db(s_in_pool, cshare.rusage, cshare.eusage)
        except Share.DoesNotExist:
            logger.debug('Db share entry does not exist - creating.')
            # We have a share on disk that has no db counterpart so create one.
            # Retrieve pool quota id for use in db Share object creation.
            pqid = qgroup_create(pool)
            update_quota(pool, pqid, pool.size * 1024)
            rusage, eusage, pqgroup_rusage, pqgroup_eusage = \
                volume_usage(pool, shares_in_pool[s_in_pool], pqid)
            nso = Share(pool=pool,
                        qgroup=shares_in_pool[s_in_pool],
                        pqgroup=pqid,
                        name=s_in_pool,
                        size=pool.size,
                        subvol_name=s_in_pool,
                        rusage=rusage,
                        eusage=eusage,
                        pqgroup_rusage=pqgroup_rusage,
                        pqgroup_eusage=pqgroup_eusage)
            nso.save()
            update_shareusage_db(s_in_pool, rusage, eusage)
            mount_share(nso, '%s%s' % (settings.MNT_PT, s_in_pool))
 def test_volume_usage(self):
     """
     Mock the return value of "btrfs qgroup show share_mount_pt" and assert
     that rfer and excl usage are correctly extracted for the original 0/*
     qgroup and for the Rockstor ad hoc 2015/* qgroup.
     :return:
     """
     # volume_usage() called with pool name of=test-pool volume_id=0/261
     # and new Rockstor qgroup pvolume_id=2015/4
     # mount_root(Pool object) returned /mnt2/test-pool
     # cmd=['/sbin/btrfs', 'qgroup', 'show', u'/mnt2/test-pool']
     #
     # Setup our calling variables and mock the root pool as mounted.
     o = ['qgroupid         rfer         excl ',
          '--------         ----         ---- ',
          '0/5          16.00KiB     16.00KiB ',
          '0/259         2.04MiB      2.04MiB ',
          '0/260         7.37GiB      7.37GiB ',
          '0/261        63.65MiB     63.65MiB ',
          '0/263       195.32MiB    496.00KiB ',
          '0/264       195.34MiB    112.00KiB ',
          '0/265       195.34MiB     80.00KiB ',
          '0/266       195.34MiB     80.00KiB ',
          '0/267       195.34MiB     80.00KiB ',
          '0/268       195.38MiB    152.00KiB ',
          '0/269       229.06MiB     80.00KiB ',
          '0/270       229.06MiB     80.00KiB ',
          '0/271       229.06MiB     80.00KiB ',
          '0/272       229.06MiB     96.00KiB ',
          '0/273       229.06MiB    128.00KiB ',
          '0/274       236.90MiB     80.00KiB ',
          '0/275       236.90MiB     80.00KiB ',
          '0/276       236.90MiB     80.00KiB ',
          '0/277       450.54MiB    128.00KiB ',
          '0/278       450.54MiB    112.00KiB ',
          '0/279       450.54MiB    128.00KiB ',
          '0/280       450.54MiB     80.00KiB ',
          '0/281       450.54MiB     80.00KiB ',
          '0/282       450.54MiB     80.00KiB ',
          '0/283       450.54MiB     80.00KiB ',
          '0/284       450.54MiB    176.00KiB ',
          '0/285       450.59MiB      3.43MiB ',
          '2015/1          0.00B        0.00B ',
          '2015/2        2.04MiB      2.04MiB ',
          '2015/3        7.37GiB      7.37GiB ',
          '2015/4       63.00MiB     63.00MiB ', '']
     e = ['']
     rc = 0
     # is_mounted returning True avoids mount command calls in mount_root()
     mount_point = '/mnt2/test-pool'
     self.mock_mount_root.return_value = mount_point
     # setup the return values from our run_command wrapper
     # examples of output from /mnt2/test-pool from a real system install
     self.mock_run_command.return_value = (o, e, rc)
     # create a fake pool object
     pool = Pool(raid='raid0', name='test-pool')
     # fake volume_id / qgroupid
     volume_id = '0/261'
     # and fake pvolume_id
     pvolume_id = '2015/4'
     # As volume_usage() uses convert_to_kib(), all returned sizes are in KiB;
     # here 63.65MiB (0/261) and 63.00MiB (2015/4) become 65177 and 64512 KiB.
     expected_results_share = [65177, 65177, 64512, 64512]
     self.assertEqual(volume_usage(pool, volume_id, pvolume_id),
                      expected_results_share,
                      msg='Failed to retrieve share rfer and excl usage')
     # We also test with a snapshot volume, i.e. with pvolume_id of None.
     pvolume_id2 = None
     expected_results_snapshot = [65177, 65177]
     self.assertEqual(volume_usage(pool, volume_id, pvolume_id2),
                      expected_results_snapshot,
                      msg='Failed to retrieve snapshot rfer and excl usage')
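The tests above reference self.mock_mount_root and self.mock_run_command, but their setup is not included on this page. A sketch of how such fixtures might be wired with unittest.mock (or the mock backport on Python 2); the patch targets are placeholders for wherever run_command and mount_root are imported by the module under test, not the project's actual paths:

import unittest
from unittest.mock import patch


class BTRFSTests(unittest.TestCase):
    # Sketch only: 'fs.btrfs' below is a placeholder module path.
    def setUp(self):
        self.patch_run_command = patch('fs.btrfs.run_command')
        self.mock_run_command = self.patch_run_command.start()
        self.addCleanup(self.patch_run_command.stop)
        self.patch_mount_root = patch('fs.btrfs.mount_root')
        self.mock_mount_root = self.patch_mount_root.start()
        self.addCleanup(self.patch_mount_root.stop)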