Example #1
def update_storage_usage_cache(target_id, target_guid, per_page=500000):
    """Total a node's osfstorage usage with paginated raw SQL and cache
    the result under the node's GUID.
    """
    if not settings.ENABLE_STORAGE_USAGE_CACHE:
        return
    sql = """
        SELECT COUNT(size), SUM(size) FROM
        (SELECT size FROM osf_basefileversionsthrough AS obfnv
        LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id
        LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id
        LEFT JOIN django_content_type type ON file.target_content_type_id = type.id
        WHERE file.provider = 'osfstorage'
        AND type.model = 'abstractnode'
        AND file.deleted_on IS NULL
        AND file.target_object_id=%s
        ORDER BY version.id
        LIMIT %s OFFSET %s) file_page
    """
    # Page through the version rows; each pass returns the page's row
    # count and size sum, and the loop ends when a page comes back empty.
    count = per_page
    offset = 0
    storage_usage_total = 0
    with connection.cursor() as cursor:
        while count:
            cursor.execute(sql, [target_id, per_page, offset])
            result = cursor.fetchall()
            storage_usage_total += int(result[0][1]) if result[0][1] else 0
            count = int(result[0][0]) if result[0][0] else 0
            offset += count

    key = cache_settings.STORAGE_USAGE_KEY.format(target_id=target_guid)
    storage_usage_cache.set(key, storage_usage_total,
                            settings.STORAGE_USAGE_CACHE_TIMEOUT)
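
A minimal invocation sketch for the task above; the node lookup and the cache read-back are assumptions inferred from the key format the task writes, not part of the original example:

# Hypothetical caller: pass the node's database id and its public GUID,
# matching the (target_id, target_guid) signature above.
node = AbstractNode.objects.get(guids___id='abc12')  # 'abc12' is a made-up GUID
update_storage_usage_cache(node.id, node._id)

# Read the total back with the same key; get() is the standard Django
# cache API and is assumed to be available on storage_usage_cache.
key = cache_settings.STORAGE_USAGE_KEY.format(target_id=node._id)
cached_total = storage_usage_cache.get(key)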
Example #2
    def test_limit_default(self, node):
        assert node.storage_usage == 0

        key = cache_settings.STORAGE_USAGE_KEY.format(target_id=node._id)
        storage_usage_cache.set(key, 0)

        assert node.storage_limit_status == StorageLimits.DEFAULT
Example #3
def update_storage_usage_with_size(payload):
    BaseFileNode = apps.get_model('osf.basefilenode')
    AbstractNode = apps.get_model('osf.abstractnode')

    metadata = payload.get('metadata') or payload.get('destination')

    # Guard against payloads that carry neither key.
    if not metadata or not metadata.get('nid'):
        return
    target_node = AbstractNode.load(metadata['nid'])

    if target_node.is_quickfiles:
        return

    action = payload['action']
    provider = metadata.get('provider', 'osfstorage')

    target_file_id = metadata['path'].replace('/', '')  # path is '/<file id>'
    target_file_size = metadata.get('size', 0)

    current_usage = target_node.storage_usage
    target_file = BaseFileNode.load(target_file_id)

    if target_file and action in ['copy', 'delete', 'move']:
        # For whole-file operations, count every stored version of the
        # file rather than the single size reported in the payload.
        target_file_size = target_file.versions.aggregate(
            Sum('size'))['size__sum'] or target_file_size

    if action in ['create', 'update', 'copy'] and provider == 'osfstorage':
        current_usage += target_file_size

    elif action == 'delete' and provider == 'osfstorage':
        current_usage = max(current_usage - target_file_size, 0)

    elif action == 'move':
        source_node = AbstractNode.load(
            payload['source']['nid'])  # Getting the 'from' node

        source_provider = payload['source']['provider']
        if target_node == source_node and source_provider == provider:
            return  # It's not going anywhere.
        if source_provider == 'osfstorage' and not source_node.is_quickfiles:
            source_node_usage = source_node.storage_usage
            source_node_usage = max(source_node_usage - target_file_size, 0)

            key = cache_settings.STORAGE_USAGE_KEY.format(
                target_id=source_node._id)
            storage_usage_cache.set(key, source_node_usage,
                                    settings.STORAGE_USAGE_CACHE_TIMEOUT)

        current_usage += target_file_size

        if provider != 'osfstorage':
            return  # We don't want to update the destination node if the provider isn't osfstorage
    else:
        return

    key = cache_settings.STORAGE_USAGE_KEY.format(target_id=target_node._id)
    storage_usage_cache.set(key, current_usage,
                            settings.STORAGE_USAGE_CACHE_TIMEOUT)
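
For orientation, a sketch of the payload this handler consumes, inferred from the keys it reads ('action', 'metadata'/'destination', 'source'); every value below is illustrative:

# Illustrative 'move' payload; only the keys the function actually reads
# are shown, and all ids are made up.
payload = {
    'action': 'move',
    'destination': {
        'nid': 'dst12',           # destination node GUID
        'provider': 'osfstorage',
        'path': '/5f2e9abc',      # '/<file id>', slash stripped above
        'size': 1024,
    },
    'source': {'nid': 'src34', 'provider': 'osfstorage'},
}
update_storage_usage_with_size(payload)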
Example #4
def update_storage_usage_cache(target_id):
    AbstractNode = apps.get_model('osf.AbstractNode')

    storage_usage_total = AbstractNode.objects.get(
        guids___id=target_id,
    ).files.aggregate(sum=models.Sum('versions__size'))['sum'] or 0

    key = cache_settings.STORAGE_USAGE_KEY.format(target_id=target_id)
    storage_usage_cache.set(key, storage_usage_total, cache_settings.FIVE_MIN_TIMEOUT)
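
This variant computes the same total as Example #1 with a single ORM aggregate instead of paginated raw SQL; guids___id is the guids relation followed by its _id field. A read-with-fallback sketch, assuming storage_usage_cache exposes the standard Django cache get():

# Sketch: consult the cache first, recompute on a miss.
key = cache_settings.STORAGE_USAGE_KEY.format(target_id='abc12')  # made-up GUID
total = storage_usage_cache.get(key)
if total is None:
    update_storage_usage_cache('abc12')
    total = storage_usage_cache.get(key)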
Example #5
    def test_limit_custom(self, node):
        node.custom_storage_usage_limit_private = 7
        node.save()

        key = cache_settings.STORAGE_USAGE_KEY.format(target_id=node._id)

        storage_usage_cache.set(key,
                                node.custom_storage_usage_limit_private * GBs)

        assert node.storage_limit_status is StorageLimits.OVER_PRIVATE

        storage_usage_cache.set(
            key, node.custom_storage_usage_limit_private * GBs - 1)

        assert node.storage_limit_status is StorageLimits.APPROACHING_PRIVATE

        node.custom_storage_usage_limit_public = 142
        node.save()

        storage_usage_cache.set(key,
                                node.custom_storage_usage_limit_public * GBs)

        assert node.storage_limit_status is StorageLimits.OVER_PUBLIC

        storage_usage_cache.set(
            key, node.custom_storage_usage_limit_public * GBs - 1)

        assert node.storage_limit_status is StorageLimits.APPROACHING_PUBLIC
Example #6
    def test_node_storage_with_storage_usage(self, app, url, project,
                                             admin_contributor):

        # Test Node Storage with OSFStorage Usage
        storage_usage = (settings.STORAGE_LIMIT_PRIVATE + 1) * settings.GBs
        key = cache_settings.STORAGE_USAGE_KEY.format(target_id=project._id)
        storage_usage_cache.set(key, storage_usage,
                                settings.STORAGE_USAGE_CACHE_TIMEOUT)

        res = app.get(url, auth=admin_contributor.auth)
        assert res.status_code == 200
        data = res.json['data']
        assert data['attributes']['storage_limit_status'] == 'OVER_PRIVATE'
        assert data['attributes']['storage_usage'] == str(storage_usage)
Example #7
    def test_storage_limits(self, node):
        assert node.storage_limit_status is StorageLimits.NOT_CALCULATED

        key = cache_settings.STORAGE_USAGE_KEY.format(target_id=node._id)
        storage_usage_cache.set(
            key, int(STORAGE_LIMIT_PUBLIC * STORAGE_WARNING_THRESHOLD * GBs))

        assert node.storage_limit_status is StorageLimits.APPROACHING_PUBLIC

        storage_usage_cache.set(
            key, int(STORAGE_LIMIT_PRIVATE * STORAGE_WARNING_THRESHOLD * GBs))

        assert node.storage_limit_status is StorageLimits.APPROACHING_PRIVATE

        storage_usage_cache.set(key, int(STORAGE_LIMIT_PUBLIC * GBs))

        assert node.storage_limit_status is StorageLimits.OVER_PUBLIC
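
The boundary arithmetic this test exercises, with assumed constants for illustration (the real limits, warning threshold, and GBs unit come from settings and may differ):

# Assumed values: a 50 GB public limit and a 0.9 warning threshold.
STORAGE_LIMIT_PUBLIC = 50
STORAGE_WARNING_THRESHOLD = 0.9
GBs = 10 ** 9
approaching = int(STORAGE_LIMIT_PUBLIC * STORAGE_WARNING_THRESHOLD * GBs)  # warn at 45 GB
over = int(STORAGE_LIMIT_PUBLIC * GBs)                                     # over at 50 GB
assert approaching < over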