Example #1
def tasks(request, uuid):
    job = models.Job.objects.get(jobuuid=uuid)
    objects = job.task_set.all().order_by('-exitcode', '-endtime',
                                          '-starttime', '-createdtime')

    if len(objects) == 0:
        return tasks_subjobs(request, uuid)

    # Filenames can be any encoding - we want to be able to display
    # unicode, while just displaying unicode replacement characters
    # for any other encoding present.
    for item in objects:
        item.filename = escape(item.filename)
        item.arguments = escape(item.arguments)
        item.stdout = escape(item.stdout)
        item.stderror = escape(item.stderror)

    page = helpers.pager(objects, django_settings.TASKS_PER_PAGE,
                         request.GET.get('page', None))
    objects = page.object_list

    # figure out duration in seconds
    for object in objects:
        object.duration = helpers.task_duration_in_seconds(object)

    return render(request, 'main/tasks.html', locals())
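These snippets come from different revisions of the same codebase, so `helpers.pager` appears with two result shapes: newer call sites read `page.object_list` (a Django Paginator `Page`), while older ones read `page['objects']`. A minimal sketch of the newer variant, assuming Django's `Paginator`; the fallback behaviour for invalid page numbers is a guess, not the helper's confirmed logic:

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator


def pager(objects, items_per_page, current_page_number):
    # Sketch only: wrap any sliceable sequence in a Paginator and return a
    # Page, whose .object_list holds just the entries for the requested page.
    paginator = Paginator(objects, items_per_page)
    try:
        return paginator.page(current_page_number or 1)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page (assumed).
        return paginator.page(1)
    except EmptyPage:
        # Page number out of range: clamp to the last page (assumed).
        return paginator.page(paginator.num_pages)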
Example #2
def ingest_upload_atk_resource_component(request, uuid, resource_component_id):
    db = ingest_upload_atk_db_connection()
    try:
        query = request.GET.get('query', '').strip()
        resource_component_data = atk.get_resource_component_and_children(
            db,
            resource_component_id,
            'description',
            recurse_max_level=2,
            search_pattern=query)
        if resource_component_data['children']:
            page = helpers.pager(resource_component_data['children'], 10,
                                 request.GET.get('page', 1))
    except MySQLdb.ProgrammingError:
        return HttpResponseServerError(
            'Database error. Please contact an administrator.')

    resource_id = ingest_upload_atk_determine_resource_component_resource_id(
        resource_component_id)

    if not resource_component_data['children'] and query == '':
        return HttpResponseRedirect(
            reverse(
                'components.ingest.views_atk.ingest_upload_atk_match_dip_objects_to_resource_component_levels',
                args=[uuid, resource_component_id]))
    else:
        search_params = advanced_search.extract_url_search_params_from_request(
            request)
        return render(request, 'ingest/atk/resource_component.html', locals())
Example #3
def render_resource(
    client,
    request,
    resource_id,
    query,
    page,
    sort_by,
    search_params,
    match_redirect_target,
    resource_detail_template,
    reset_url,
    uuid,
):
    resource_data = client.get_resource_component_and_children(
        resource_id,
        "collection",
        recurse_max_level=2,
        search_pattern=query,
        sort_by=sort_by,
    )

    sort_direction = _determine_reverse_sort_direction(sort_by)

    if resource_data["children"]:
        page = helpers.pager(resource_data["children"], PAGE_SIZE, page)

    if not resource_data["children"] and query == "":
        return HttpResponseRedirect(
            reverse(match_redirect_target, args=[uuid, resource_id]))
    else:
        return render(request, resource_detail_template, locals())
Example #4
def list_records(
    client,
    request,
    query,
    identifier,
    page_number,
    sort_by,
    search_params,
    list_redirect_target,
    reset_url,
    uuid,
):
    resources = LazyPagedSequence(
        lambda page, page_size: client.find_collections(
            search_pattern=query,
            identifier=identifier,
            page=page,
            page_size=page_size,
            sort_by=sort_by,
        ),
        PAGE_SIZE,
        client.count_collections(query, identifier),
    )
    page = helpers.pager(resources, PAGE_SIZE, page_number)

    sort_direction = _determine_reverse_sort_direction(sort_by)

    return render(request, list_redirect_target, locals())
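`list_records` above, and the `list_display` and `search` views further down, avoid loading whole result sets by wrapping a page-fetching callable in `LazyPagedSequence` before handing it to `helpers.pager`. The class itself is not shown in these examples; a rough sketch of the idea, assuming only the constructor shape visible here (a callable taking `page` and `page_size`, plus a page size and a total length):

class LazyPagedSequence(object):
    # Illustrative sketch: a read-only sequence that fetches one backend page
    # at a time, so a Paginator slicing it only triggers the queries it needs.
    def __init__(self, fetch_page, page_size, length):
        self.fetch_page = fetch_page  # callable(page, page_size) -> list
        self.page_size = page_size
        self.length = length
        self._pages = {}  # cache: 1-indexed page number -> fetched entries

    def __len__(self):
        return self.length

    def _page(self, number):
        if number not in self._pages:
            self._pages[number] = self.fetch_page(number, self.page_size)
        return self._pages[number]

    def __getitem__(self, index):
        if isinstance(index, slice):
            return [self[i] for i in range(*index.indices(self.length))]
        page_number, offset = divmod(index, self.page_size)
        return self._page(page_number + 1)[offset]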
Example #5
def render_resource_component(client, request, resource_component_id, query,
                              page, sort_by, search_params,
                              match_redirect_target, resource_detail_template,
                              reset_url, uuid):
    resource_component_data = client.get_resource_component_and_children(
        resource_component_id,
        'description',
        recurse_max_level=2,
        search_pattern=query,
        sort_by=sort_by)

    sort_direction = _determine_reverse_sort_direction(sort_by)

    if resource_component_data['children']:
        page = helpers.pager(resource_component_data['children'], PAGE_SIZE,
                             page)

    if not resource_component_data['children'] and query == '':
        return HttpResponseRedirect(
            reverse(match_redirect_target, args=[uuid, resource_component_id]))
    else:
        return render(
            request, resource_detail_template, {
                'match_redirect_target': match_redirect_target,
                'page': page,
                'query': query,
                'reset_url': reset_url,
                'resource_component_data': resource_component_data,
                'resource_component_id': resource_component_id,
                'search_params': search_params,
                'sort_by': sort_by,
                'sort_direction': sort_direction,
                'uuid': uuid,
            })
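`_determine_reverse_sort_direction` is also not shown in these snippets. From its use alongside `sort_by`, it appears to compute the direction a column-header link should switch to next; a guess at its body, assuming the same 'up'/'down' values the other views read from `sort_by`:

def _determine_reverse_sort_direction(sort_by):
    # Assumed semantics: return the opposite direction for sort-toggle links.
    return 'down' if sort_by == 'up' else 'up'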
Example #6
def archival_storage_list_display(request, current_page_number=None):
    form = forms.StorageSearchForm()

    total_size = 0

    # get ElasticSearch stats
    aip_indexed_file_count = advanced_search.indexed_count('aips')

    # get AIPs
    conn = elasticSearchFunctions.connect_and_create_index('aips')
    aipResults = conn.search(pyes.StringQuery('*'), doc_types=['aip'])
    aips = []

    #if aipResults._total != None:
    if len(aipResults) > 0:
        for aip in aipResults:
            aips.append(aip)

    # handle pagination
    page = helpers.pager(aips, 10, current_page_number)

    sips = []
    for aip in page['objects']:
        sip = {}
        sip['href'] = aip.filePath.replace(AIPSTOREPATH + '/', "AIPsStore/")
        sip['name'] = aip.name
        sip['uuid'] = aip.uuid

        #sip['date'] = str(aip.date)[0:19].replace('T', ' ')
        sip['date'] = aip.created

        try:
            size = float(aip.size)
            total_size = total_size + size
            sip['size'] = '{0:.2f} MB'.format(size)
        except (TypeError, ValueError):
            sip['size'] = 'Removed'

        sips.append(sip)

    order_by = request.GET.get('order_by', 'name')
    sort_by = request.GET.get('sort_by', 'up')

    def sort_aips(sip):
        value = 0
        if 'name' == order_by:
            value = sip['name'].lower()
        else:
            value = sip[order_by]
        return value

    sips = sorted(sips, key=sort_aips)

    if sort_by == 'down':
        sips.reverse()

    total_size = '{0:.2f}'.format(total_size)

    return render(request, 'archival_storage/archival_storage.html', locals())
Example #8
def failure_report(request, report_id=None):
    if report_id is not None:
        report = models.Report.objects.get(pk=report_id)
        return render(request, 'administration/reports/failure_detail.html', locals())
    else:
        current_page_number = request.GET.get('page', '1')
        items_per_page = 10

        reports = models.Report.objects.all().order_by('-created')
        page = helpers.pager(reports, items_per_page, current_page_number)
        return render(request, 'administration/reports/failures.html', locals())
Example #9
def tasks(request, uuid):
    job = models.Job.objects.get(jobuuid=uuid)
    objects = job.task_set.all().order_by('-exitcode', '-endtime', '-starttime', '-createdtime')

    page = helpers.pager(objects, django_settings.TASKS_PER_PAGE, request.GET.get('page', None))
    objects = page['objects']

    # figure out duration in seconds
    for object in objects:
        object.duration = helpers.task_duration_in_seconds(object)

    return render(request, 'main/tasks.html', locals())
Example #11
def preservation_planning_fpr_search(request, current_page_number=None):
    if current_page_number is None:
        current_page_number = 1

    query = request.GET.get("query", "")

    if query == "":
        # No query in the URL parameters list, try to see if we've got an existing query going from a previous page...
        query = request.session["fpr_query"]

        # No query from a previous page either
        if query == "":
            query = "*"
            return HttpResponse("No query.")

    request.session["fpr_query"] = query  # Save this for pagination...
    conn = pyes.ES(elasticSearchFunctions.getElasticsearchServerHostAndPort())

    indexes = conn.get_indices()

    if "fpr_file" not in indexes:
        # Grab relevant FPR data from the DB
        results = get_fpr_table()
        request.session["fpr_results"] = results

        # Setup indexing for some Elastic Search action.
        for row in results:
            conn.index(row, "fpr_file", "fpr_files")
    else:
        results = request.session["fpr_results"]

    # do fulltext search
    q = pyes.StringQuery(query)
    s = pyes.Search(q)

    try:
        results = conn.search_raw(s, size=len(results), indices="fpr_file")
    except Exception:
        return HttpResponse("Error accessing index.")

    form = FPRSearchForm()

    search_hits = []

    for row in results.hits.hits:
        search_hits.append(row["_source"].copy())

    page = helpers.pager(search_hits, results_per_page, current_page_number)
    hit_count = len(search_hits)

    return render(request, "main/preservation_planning_fpr.html", locals())
Example #12
def archival_storage_sip_display(request, current_page_number=None):
    form = forms.StorageSearchForm()

    total_size = 0

    # get ElasticSearch stats
    aip_indexed_file_count = archival_storage_indexed_count('aips')

    # get AIPs from DB
    aips = models.AIP.objects.all()

    # handle pagination
    page = helpers.pager(aips, 10, current_page_number)

    sips = []
    for aip in page['objects']:
        sip = {}
        sip['href'] = aip.filepath.replace(AIPSTOREPATH + '/', "AIPsStore/")
        sip['name'] = aip.sipname
        sip['uuid'] = aip.sipuuid

        sip['date'] = aip.sipdate

        try:
            size = os.path.getsize(aip.filepath) / float(1024) / float(1024)
            total_size = total_size + size
            sip['size'] = '{0:.2f} MB'.format(size)
        except (OSError, TypeError):
            sip['size'] = 'Removed'

        sips.append(sip)

    order_by = request.GET.get('order_by', 'name')
    sort_by = request.GET.get('sort_by', 'up')

    def sort_aips(sip):
        value = 0
        if 'name' == order_by:
            value = sip['name'].lower()
        else:
            value = sip[order_by]
        return value
    sips = sorted(sips, key=sort_aips)

    if sort_by == 'down':
        sips.reverse()

    total_size = '{0:.2f}'.format(total_size)

    return render(request, 'archival_storage/archival_storage.html', locals())
Example #13
def ingest_normalization_report(request, uuid, current_page=None):
    jobs = models.Job.objects.filter(sipuuid=uuid, subjobof='')
    job = jobs[0]
    sipname = utils.get_directory_name(job)

    objects = getNormalizationReportQuery(sipUUID=uuid)

    results_per_page = 10

    if current_page is None:
        current_page = 1

    page = helpers.pager(objects, results_per_page, current_page)
    hit_count = len(objects)

    return render(request, 'ingest/normalization_report.html', locals())
Example #14
def preservation_planning_fpr_data(request, current_page_number=None):

    results = get_fpr_table()
    request.session["fpr_results"] = results

    if current_page_number is None:
        current_page_number = 1

    form = FPRSearchForm()

    page = helpers.pager(results, results_per_page, current_page_number)
    request.session["fpr_query"] = ""

    item_count = len(results)

    return render(request, "main/preservation_planning_fpr.html", locals())
Example #17
def render_resource_component(
    client,
    request,
    resource_component_id,
    query,
    page,
    sort_by,
    search_params,
    match_redirect_target,
    resource_detail_template,
    reset_url,
    uuid,
):
    resource_component_data = client.get_resource_component_and_children(
        resource_component_id,
        "description",
        recurse_max_level=2,
        search_pattern=query,
        sort_by=sort_by,
    )

    sort_direction = _determine_reverse_sort_direction(sort_by)

    if resource_component_data["children"]:
        page = helpers.pager(resource_component_data["children"], PAGE_SIZE,
                             page)

    if not resource_component_data["children"] and query == "":
        return HttpResponseRedirect(
            reverse(match_redirect_target, args=[uuid, resource_component_id]))
    else:
        return render(
            request,
            resource_detail_template,
            {
                "match_redirect_target": match_redirect_target,
                "page": page,
                "query": query,
                "reset_url": reset_url,
                "resource_component_data": resource_component_data,
                "resource_component_id": resource_component_id,
                "search_params": search_params,
                "sort_by": sort_by,
                "sort_direction": sort_direction,
                "uuid": uuid,
            },
        )
Example #18
def ingest_normalization_report(request, uuid, current_page=None):
    jobs = models.Job.objects.filter(sipuuid=uuid, subjobof='')
    sipname = utils.get_directory_name_from_job(jobs)

    objects = getNormalizationReportQuery(sipUUID=uuid)
    for o in objects:
        o['location'] = archivematicaFunctions.escape(o['location'])

    results_per_page = 10

    if current_page is None:
        current_page = 1

    page = helpers.pager(objects, results_per_page, current_page)
    hit_count = len(objects)

    return render(request, 'ingest/normalization_report.html', locals())
Example #19
def tasks(request, uuid):
    job = models.Job.objects.get(jobuuid=uuid)
    objects = job.task_set.all().order_by('-exitcode', '-endtime',
                                          '-starttime', '-createdtime')

    if len(objects) == 0:
        return tasks_subjobs(request, uuid)

    page = helpers.pager(objects, django_settings.TASKS_PER_PAGE,
                         request.GET.get('page', None))
    objects = page['objects']

    # figure out duration in seconds
    for object in objects:
        object.duration = helpers.task_duration_in_seconds(object)

    return render(request, 'main/tasks.html', locals())
Example #20
def ingest_normalization_report(request, uuid, current_page=None):
    jobs = models.Job.objects.filter(sipuuid=uuid, subjobof='')
    sipname = jobs.get_directory_name()

    objects = getNormalizationReportQuery(sipUUID=uuid)
    for o in objects:
        o['location'] = archivematicaFunctions.escape(o['location'])
        (o['preservation_derivative_validation_attempted'],
         o['preservation_derivative_validation_failed'],
         o['access_derivative_validation_attempted'],
         o['access_derivative_validation_failed']
         ) = derivative_validation_report(o)

    results_per_page = 10

    if current_page is None:
        current_page = 1

    page = helpers.pager(objects, results_per_page, current_page)
    hit_count = len(objects)

    return render(request, 'ingest/normalization_report.html', locals())
Example #21
def ingest_normalization_report(request, uuid, current_page=None):
    jobs = models.Job.objects.filter(sipuuid=uuid)
    sipname = jobs.get_directory_name()

    objects = getNormalizationReportQuery(sipUUID=uuid)
    for o in objects:
        o["location"] = escape(o["location"])
        (
            o["preservation_derivative_validation_attempted"],
            o["preservation_derivative_validation_failed"],
            o["access_derivative_validation_attempted"],
            o["access_derivative_validation_failed"],
        ) = derivative_validation_report(o)

    results_per_page = 10

    if current_page is None:
        current_page = 1

    page = helpers.pager(objects, results_per_page, current_page)
    hit_count = len(objects)

    return render(request, "ingest/normalization_report.html", locals())
Example #22
def ingest_upload_atk(request, uuid):
    try:
        query = request.GET.get('query', '').strip()

        db = ingest_upload_atk_db_connection()

        try:
            resources = ingest_upload_atk_get_collection_ids(db, query)
        except MySQLdb.OperationalError:
            return HttpResponseServerError(
                'Database connection error. Please contact an administrator.')

        page = helpers.pager(resources, 10, request.GET.get('page', 1))

        page.objects = augment_resource_data(db, page.object_list)

    except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as e:
        return HttpResponseServerError(
            'Database error {0}. Please contact an administrator.'.format(
                str(e)))

    search_params = advanced_search.extract_url_search_params_from_request(
        request)
    return render(request, 'ingest/atk/resource_list.html', locals())
Example #23
def search(request):
    # deal with transfer mode
    file_mode = False
    checked_if_in_file_mode = ''
    if request.GET.get('mode', '') != '':
        file_mode = True
        checked_if_in_file_mode = 'checked'

    # get search parameters from request
    queries, ops, fields, types = advanced_search.search_parameter_prep(
        request)

    # redirect if no search params have been set
    if 'query' not in request.GET:
        return helpers.redirect_with_get_params(
            'components.archival_storage.views.search',
            query='',
            field='',
            type='')

    # get string of URL parameters that should be passed along when paging
    search_params = advanced_search.extract_url_search_params_from_request(
        request)

    # set paging variables
    if not file_mode:
        items_per_page = 2
    else:
        items_per_page = 20

    page = advanced_search.extract_page_number_from_url(request)

    start = page * items_per_page + 1

    # perform search
    conn = pyes.ES(elasticSearchFunctions.getElasticsearchServerHostAndPort())

    try:
        query = advanced_search.assemble_query(queries, ops, fields, types)

        # use all results to pull transfer facets if not in file mode
        # pulling only one field (we don't need field data as we augment
        # the results using separate queries)
        if not file_mode:
            results = conn.search_raw(query=query,
                                      indices='aips',
                                      type='aipfile',
                                      fields='uuid')
        else:
            results = conn.search_raw(query=query,
                                      indices='aips',
                                      type='aipfile',
                                      start=start - 1,
                                      size=items_per_page,
                                      fields='AIPUUID,filePath,FILEUUID')
    except Exception:
        return HttpResponse('Error accessing index.')

    # take note of facet data
    aip_uuids = results['facets']['AIPUUID']['terms']

    if not file_mode:
        number_of_results = len(aip_uuids)

        page_data = helpers.pager(aip_uuids, items_per_page, page + 1)
        aip_uuids = page_data['objects']
        search_augment_aip_results(conn, aip_uuids)
    else:
        number_of_results = results.hits.total
        results = search_augment_file_results(results)

    # set remaining paging variables
    end, previous_page, next_page = advanced_search.paging_related_values_for_template_use(
        items_per_page, page, start, number_of_results)

    # make sure results is set
    try:
        if results:
            pass
    except NameError:
        results = False

    form = forms.StorageSearchForm(initial={'query': queries[0]})
    return render(request, 'archival_storage/archival_storage_search.html',
                  locals())
Example #25
def list_display(request):

    if "aips" not in settings.SEARCH_ENABLED:
        return render(request, "archival_storage/list.html")
    current_page_number = int(request.GET.get("page", 1))
    logger.debug("Current page: %s", current_page_number)

    # get count of AIP files
    es_client = elasticSearchFunctions.get_client()
    aip_indexed_file_count = aip_file_count(es_client)

    # get AIPs
    order_by = request.GET.get("order_by", "name")
    sort_by = request.GET.get("sort_by", "up")

    sort_params = "order_by=" + order_by + "&sort_by=" + sort_by

    # use raw subfield to sort by name
    if order_by == "name":
        order_by = order_by + ".raw"

    # change sort_by param to ES sort directions
    if sort_by == "down":
        sort_by = "desc"
    else:
        sort_by = "asc"

    sort_specification = order_by + ":" + sort_by

    # get list of UUIDs of AIPs that are deleted or pending deletion
    aips_deleted_or_pending_deletion = []
    should_haves = [{
        "match": {
            "status": "DEL_REQ"
        }
    }, {
        "match": {
            "status": "DELETED"
        }
    }]
    query = {"query": {"bool": {"should": should_haves}}}
    deleted_aip_results = es_client.search(body=query,
                                           index="aips",
                                           _source="uuid,status")
    for deleted_aip in deleted_aip_results["hits"]["hits"]:
        aips_deleted_or_pending_deletion.append(deleted_aip["_source"]["uuid"])

    # Fetch results and paginate
    def es_pager(page, page_size):
        """
        Fetch one page of normalized entries from Elasticsearch.

        :param page: 1-indexed page to fetch
        :param page_size: Number of entries on a page
        :return: List of dicts for each entry, where keys and values have been cleaned up
        """
        start = (page - 1) * page_size
        results = es_client.search(
            index="aips",
            body={"query": {
                "match_all": {}
            }},
            _source="origin,uuid,filePath,created,name,size,encrypted",
            sort=sort_specification,
            size=page_size,
            from_=start,
        )
        return [d["_source"] for d in results["hits"]["hits"]]

    items_per_page = 10
    count = es_client.count(index="aips", body={"query": {
        "match_all": {}
    }})["count"]
    results = LazyPagedSequence(es_pager,
                                page_size=items_per_page,
                                length=count)

    # Paginate
    page = helpers.pager(results, items_per_page, current_page_number)

    # process deletion, etc., and format results
    aips = []
    for aip in page.object_list:
        # If an AIP was deleted or is pending deletion, react if status changed
        if aip["uuid"] in aips_deleted_or_pending_deletion:
            # check with storage server to see current status
            api_results = storage_service.get_file_info(uuid=aip["uuid"])
            try:
                aip_status = api_results[0]["status"]
            except IndexError:
                # Storage service does not know about this AIP
                # TODO what should happen here?
                logger.info("AIP not found in storage service: {}".format(aip))
                continue

            # delete AIP metadata in ElasticSearch if AIP has been deleted from the
            # storage server
            # TODO: handle this asynchronously
            if aip_status == "DELETED":
                elasticSearchFunctions.delete_aip(es_client, aip["uuid"])
                elasticSearchFunctions.delete_aip_files(es_client, aip["uuid"])
            elif aip_status != "DEL_REQ":
                # update the status in ElasticSearch for this AIP
                elasticSearchFunctions.mark_aip_stored(es_client, aip["uuid"])
        else:
            aip_status = "UPLOADED"

        # Tweak AIP presentation and add to display array
        if aip_status != "DELETED":
            aip["status"] = AIP_STATUS_DESCRIPTIONS[aip_status]

            try:
                size = "{0:.2f} MB".format(float(aip["size"]))
            except (TypeError, ValueError):
                size = "Removed"

            aip["size"] = size

            aip["href"] = aip["filePath"].replace(AIPSTOREPATH + "/",
                                                  "AIPsStore/")
            aip["date"] = aip["created"]

            aips.append(aip)

    total_size = total_size_of_aips(es_client)
    # Find out which AIPs are encrypted

    return render(
        request,
        "archival_storage/list.html",
        {
            "total_size": total_size,
            "aip_indexed_file_count": aip_indexed_file_count,
            "aips": aips,
            "page": page,
            "search_params": sort_params,
        },
    )
Example #26
def list_display(request):
    current_page_number = request.GET.get('page', 1)

    form = forms.StorageSearchForm()

    # get ElasticSearch stats
    aip_indexed_file_count = advanced_search.indexed_count('aips')

    # get AIPs
    order_by = request.GET.get('order_by', 'name')
    sort_by = request.GET.get('sort_by', 'up')

    if sort_by == 'down':
        sort_direction = 'desc'
    else:
        sort_direction = 'asc'

    sort_specification = order_by + ':' + sort_direction

    conn = elasticSearchFunctions.connect_and_create_index('aips')

    items_per_page = 10
    start = (int(current_page_number) - 1) * items_per_page

    aipResults = conn.search(pyes.Search(pyes.MatchAllQuery(),
                                         start=start,
                                         size=items_per_page),
                             doc_types=['aip'],
                             fields='origin,uuid,filePath,created,name,size',
                             sort=sort_specification)

    try:
        len(aipResults)
    except pyes.exceptions.ElasticSearchException:
        # there will be an error if no mapping exists for AIPs due to no AIPs
        # having been created
        return render(request, 'archival_storage/archival_storage.html',
                      locals())

    # handle pagination
    page = helpers.pager(aipResults, items_per_page, current_page_number)

    if not page:
        raise Http404

    # augment data
    sips = []
    for aip in page['objects']:
        sip = {}
        sip['href'] = aip.filePath.replace(AIPSTOREPATH + '/', "AIPsStore/")
        sip['name'] = aip.name
        sip['uuid'] = aip.uuid

        sip['date'] = aip.created

        try:
            size = float(aip.size)
            sip['size'] = '{0:.2f} MB'.format(size)
        except (TypeError, ValueError):
            sip['size'] = 'Removed'

        sips.append(sip)

    # get total size of all AIPS from ElasticSearch
    q = pyes.MatchAllQuery().search()
    q.facet.add(pyes.facets.StatisticalFacet('total', field='size'))
    aipResults = conn.search(q, doc_types=['aip'])
    total_size = aipResults.facets.total.total
    total_size = '{0:.2f}'.format(total_size)

    return render(request, 'archival_storage/archival_storage.html', locals())
Example #27
def taxonomy(request):
    taxonomies = models.Taxonomy.objects.all().order_by('name')
    page = helpers.pager(taxonomies, 20, request.GET.get('page', 1))
    return render(request, 'administration/taxonomy.html', locals())
Example #28
def search(request):
    # FIXME there has to be a better way of handling checkboxes than parsing
    # them by hand here, and displaying 'checked' in
    # _archival_storage_search_form.html
    # Parse checkbox for file mode
    yes_options = ("checked", "yes", "true", "on")
    if request.GET.get("filemode", "") in yes_options:
        file_mode = True
        checked_if_in_file_mode = "checked"
        items_per_page = 20
    else:  # AIP list
        file_mode = False
        checked_if_in_file_mode = ""
        items_per_page = 10

    # Parse checkbox for show AICs
    show_aics = ""
    if request.GET.get("show_aics", "") in yes_options:
        show_aics = "checked"

    # get search parameters from request
    queries, ops, fields, types = advanced_search.search_parameter_prep(
        request)
    logger.debug("Queries: %s, Ops: %s, Fields: %s, Types: %s", queries, ops,
                 fields, types)

    # redirect if no search params have been set
    if "query" not in request.GET:
        return helpers.redirect_with_get_params(
            "components.archival_storage.views.search",
            query="",
            field="",
            type="")

    # get string of URL parameters that should be passed along when paging
    search_params = advanced_search.extract_url_search_params_from_request(
        request)

    current_page_number = int(request.GET.get("page", 1))

    # perform search
    es_client = elasticSearchFunctions.get_client()
    results = None
    query = advanced_search.assemble_query(queries, ops, fields, types)
    try:
        # Use all results to pull transfer facets if not in file mode
        # pulling only one field (we don't need field data as we augment
        # the results using separate queries).
        if not file_mode:
            # Fetch all unique AIP UUIDs in the returned set of files
            # ES query will limit to 10 aggregation results by default,
            # add size parameter in terms to override.
            # TODO: Use composite aggregation when it gets out of beta.
            query["aggs"] = {
                "aip_uuids": {
                    "terms": {
                        "field": "AIPUUID",
                        "size": "10000"
                    }
                }
            }
            # Don't return results, just the aggregation
            query["size"] = 0
            # Searching for AIPs still actually searches type 'aipfile', and
            # returns the UUID of the AIP the files are a part of.  To search
            # for an attribute of an AIP, the aipfile must index that
            # information about their AIP.
            results = es_client.search(body=query, index="aipfiles")
            # Given these AIP UUIDs, now fetch the actual information we want from aips/aip
            buckets = results["aggregations"]["aip_uuids"]["buckets"]
            uuids = [bucket["key"] for bucket in buckets]
            uuid_file_counts = {
                bucket["key"]: bucket["doc_count"]
                for bucket in buckets
            }
            query = {"query": {"terms": {"uuid": uuids}}}
            index = "aips"
            fields = (
                "name,uuid,size,created,status,AICID,isPartOf,countAIPsinAIC,encrypted"
            )
            sort = "name.raw:desc"
        else:
            index = "aipfiles"
            fields = "AIPUUID,filePath,FILEUUID,encrypted"
            sort = "sipName.raw:desc"

        # To reduce amount of data fetched from ES, use LazyPagedSequence
        def es_pager(page, page_size):
            """
            Fetch one page of normalized aipfile entries from Elasticsearch.

            :param page: 1-indexed page to fetch
            :param page_size: Number of entries on a page
            :return: List of dicts for each entry with additional information
            """
            start = (page - 1) * page_size
            results = es_client.search(
                body=query,
                from_=start,
                size=page_size,
                index=index,
                _source=fields,
                sort=sort,
            )
            if file_mode:
                return search_augment_file_results(es_client, results)
            else:
                return search_augment_aip_results(results, uuid_file_counts)

        count = es_client.count(index=index, body={"query":
                                                   query["query"]})["count"]
        results = LazyPagedSequence(es_pager, items_per_page, count)

    except ElasticsearchException:
        logger.exception("Error accessing index.")
        return HttpResponse("Error accessing index.")

    if not file_mode:
        aic_creation_form = forms.CreateAICForm(initial={"results": uuids})
    else:  # if file_mode
        aic_creation_form = None

    page_data = helpers.pager(results, items_per_page, current_page_number)

    return render(
        request,
        "archival_storage/search.html",
        {
            "file_mode": file_mode,
            "show_aics": show_aics,
            "checked_if_in_file_mode": checked_if_in_file_mode,
            "aic_creation_form": aic_creation_form,
            "results": page_data.object_list,
            "search_params": search_params,
            "page": page_data,
        },
    )
Example #29
def terms(request, taxonomy_uuid):
    taxonomy = models.Taxonomy.objects.get(pk=taxonomy_uuid)
    terms = taxonomy.taxonomyterm_set.order_by('term')
    page = helpers.pager(terms, 20, request.GET.get('page', 1))
    return render(request, 'administration/terms.html', locals())
Example #30
def transfer_backlog(request):
    # deal with transfer mode
    file_mode = False
    checked_if_in_file_mode = ''
    if request.GET.get('mode', '') != '':
        file_mode = True
        checked_if_in_file_mode = 'checked'

    # get search parameters from request
    queries, ops, fields, types = advanced_search.search_parameter_prep(
        request)

    # redirect if no search params have been set
    if 'query' not in request.GET:
        return helpers.redirect_with_get_params(
            'components.ingest.views.transfer_backlog',
            query='',
            field='',
            type='')

    # get string of URL parameters that should be passed along when paging
    search_params = advanced_search.extract_url_search_params_from_request(
        request)

    # set paging variables
    if not file_mode:
        items_per_page = 10
    else:
        items_per_page = 20

    page = advanced_search.extract_page_number_from_url(request)

    start = page * items_per_page + 1

    # perform search
    conn = elasticSearchFunctions.connect_and_create_index('transfers')

    try:
        query = advanced_search.assemble_query(
            queries,
            ops,
            fields,
            types,
            must_haves=[pyes.TermQuery('status', 'backlog')])

        # use all results to pull transfer facets if not in file mode
        if not file_mode:
            results = conn.search_raw(
                query,
                indices='transfers',
                type='transferfile',
            )
        else:
            # otherwise use pages results
            results = conn.search_raw(query,
                                      indices='transfers',
                                      type='transferfile',
                                      start=start - 1,
                                      size=items_per_page)
    except Exception:
        return HttpResponse('Error accessing index.')

    # take note of facet data
    file_extension_usage = results['facets']['fileExtension']['terms']
    transfer_uuids = results['facets']['sipuuid']['terms']

    if not file_mode:
        # run through transfers to see if they've been created yet
        awaiting_creation = {}
        for transfer_instance in transfer_uuids:
            try:
                awaiting_creation[transfer_instance.term] = (
                    transfer_awaiting_sip_creation_v2(transfer_instance.term))
                transfer = models.Transfer.objects.get(
                    uuid=transfer_instance.term)
                transfer_basename = os.path.basename(
                    transfer.currentlocation[:-1])
                transfer_instance.name = transfer_basename[:-37]
                transfer_instance.type = transfer.type
                if transfer.accessionid is not None:
                    transfer_instance.accession = transfer.accessionid
                else:
                    transfer_instance.accession = ''
            except Exception:
                awaiting_creation[transfer_instance.term] = False

        # page data
        number_of_results = len(transfer_uuids)
        page_data = helpers.pager(transfer_uuids, items_per_page, page + 1)
        transfer_uuids = page_data['objects']
    else:
        # page data
        number_of_results = results.hits.total
        results = transfer_backlog_augment_search_results(results)

    # set remaining paging variables
    end, previous_page, next_page = advanced_search.paging_related_values_for_template_use(
        items_per_page, page, start, number_of_results)

    # make sure results is set
    try:
        if results:
            pass
    except NameError:
        results = False

    form = StorageSearchForm(initial={'query': queries[0]})
    return render(request, 'ingest/backlog/search.html', locals())
Example #32
def list_display(request):

    if 'aips' not in settings.SEARCH_ENABLED:
        return render(request, 'archival_storage/list.html')
    current_page_number = int(request.GET.get('page', 1))
    logger.debug('Current page: %s', current_page_number)

    # get count of AIP files
    es_client = elasticSearchFunctions.get_client()
    aip_indexed_file_count = aip_file_count(es_client)

    # get AIPs
    order_by = request.GET.get('order_by', 'name_unanalyzed')
    sort_by = request.GET.get('sort_by', 'up')

    if sort_by == 'down':
        sort_direction = 'desc'
    else:
        sort_direction = 'asc'

    sort_specification = order_by + ':' + sort_direction
    sort_params = 'order_by=' + order_by + '&sort_by=' + sort_by

    # get list of UUIDs of AIPs that are deleted or pending deletion
    aips_deleted_or_pending_deletion = []
    should_haves = [
        {'match': {'status': 'DEL_REQ'}},
        {'match': {'status': 'DELETED'}},
    ]
    query = {
        "query": {
            "bool": {
                "should": should_haves
            }
        }
    }
    deleted_aip_results = es_client.search(
        body=query,
        index='aips',
        doc_type='aip',
        fields='uuid,status'
    )
    for deleted_aip in deleted_aip_results['hits']['hits']:
        aips_deleted_or_pending_deletion.append(deleted_aip['fields']['uuid'][0])

    # Fetch results and paginate
    def es_pager(page, page_size):
        """
        Fetch one page of normalized entries from Elasticsearch.

        :param page: 1-indexed page to fetch
        :param page_size: Number of entries on a page
        :return: List of dicts for each entry, where keys and values have been cleaned up
        """
        start = (page - 1) * page_size
        results = es_client.search(
            index='aips',
            doc_type='aip',
            body=elasticSearchFunctions.MATCH_ALL_QUERY,
            fields='origin,uuid,filePath,created,name,size,encrypted',
            sort=sort_specification,
            size=page_size,
            from_=start,
        )
        # normalize results - each of the fields contains a single value,
        # but is returned from the ES API as a single-length array
        # e.g. {"fields": {"uuid": ["abcd"], "name": ["aip"] ...}}
        return [elasticSearchFunctions.normalize_results_dict(d) for d in results['hits']['hits']]

    items_per_page = 10
    count = es_client.count(index='aips', doc_type='aip', body=elasticSearchFunctions.MATCH_ALL_QUERY)['count']
    results = LazyPagedSequence(es_pager, page_size=items_per_page, length=count)

    # Paginate
    page = helpers.pager(
        results,
        items_per_page,
        current_page_number
    )

    # process deletion, etc., and format results
    aips = []
    for aip in page.object_list:
        # If an AIP was deleted or is pending deletion, react if status changed
        if aip['uuid'] in aips_deleted_or_pending_deletion:
            # check with storage server to see current status
            api_results = storage_service.get_file_info(uuid=aip['uuid'])
            try:
                aip_status = api_results[0]['status']
            except IndexError:
                # Storage service does not know about this AIP
                # TODO what should happen here?
                logger.info("AIP not found in storage service: {}".format(aip))
                continue

            # delete AIP metadata in ElasticSearch if AIP has been deleted from the
            # storage server
            # TODO: handle this asynchronously
            if aip_status == 'DELETED':
                elasticSearchFunctions.delete_aip(es_client, aip['uuid'])
                elasticSearchFunctions.delete_aip_files(es_client, aip['uuid'])
            elif aip_status != 'DEL_REQ':
                # update the status in ElasticSearch for this AIP
                elasticSearchFunctions.mark_aip_stored(es_client, aip['uuid'])
        else:
            aip_status = 'UPLOADED'

        # Tweak AIP presentation and add to display array
        if aip_status != 'DELETED':
            aip['status'] = AIP_STATUS_DESCRIPTIONS[aip_status]

            try:
                size = '{0:.2f} MB'.format(float(aip['size']))
            except (TypeError, ValueError):
                size = 'Removed'

            aip['size'] = size

            aip['href'] = aip['filePath'].replace(AIPSTOREPATH + '/', "AIPsStore/")
            aip['date'] = aip['created']

            aips.append(aip)

    total_size = total_size_of_aips(es_client)
    # Find out which AIPs are encrypted

    return render(request, 'archival_storage/list.html',
                  {
                      'total_size': total_size,
                      'aip_indexed_file_count': aip_indexed_file_count,
                      'aips': aips,
                      'page': page,
                      'search_params': sort_params,
                  }
                  )
Example #33
def search(request):
    # FIXME there has to be a better way of handling checkboxes than parsing
    # them by hand here, and displaying 'checked' in
    # _archival_storage_search_form.html
    # Parse checkbox for file mode
    yes_options = ('checked', 'yes', 'true', 'on')
    if request.GET.get('filemode', '') in yes_options:
        file_mode = True
        checked_if_in_file_mode = 'checked'
        items_per_page = 20
    else:  # AIP list
        file_mode = False
        checked_if_in_file_mode = ''
        items_per_page = 10

    # Parse checkbox for show AICs
    show_aics = ''
    if request.GET.get('show_aics', '') in yes_options:
        show_aics = 'checked'

    # get search parameters from request
    queries, ops, fields, types = advanced_search.search_parameter_prep(request)
    logger.debug('Queries: %s, Ops: %s, Fields: %s, Types: %s', queries, ops, fields, types)

    # redirect if no search params have been set
    if 'query' not in request.GET:
        return helpers.redirect_with_get_params(
            'components.archival_storage.views.search',
            query='',
            field='',
            type=''
        )

    # get string of URL parameters that should be passed along when paging
    search_params = advanced_search.extract_url_search_params_from_request(request)

    current_page_number = int(request.GET.get('page', 1))

    # perform search
    es_client = elasticSearchFunctions.get_client()
    results = None
    query = advanced_search.assemble_query(es_client, queries, ops, fields, types, search_index='aips', doc_type='aipfile')
    try:
        # use all results to pull transfer facets if not in file mode
        # pulling only one field (we don't need field data as we augment
        # the results using separate queries)
        if not file_mode:
            # Fetch all unique AIP UUIDs in the returned set of files
            query['aggs'] = {'aip_uuids': {'terms': {'field': 'AIPUUID', 'size': 0}}}
            # Don't return results, just the aggregation
            query['size'] = 0
            # Searching for AIPs still actually searches type 'aipfile', and
            # returns the UUID of the AIP the files are a part of.  To search
            # for an attribute of an AIP, the aipfile must index that
            # information about their AIP in
            # elasticSearchFunctions.index_mets_file_metadata
            results = es_client.search(
                body=query,
                index='aips',
                doc_type='aipfile',
                sort='sipName:desc',
            )
            # Given these AIP UUIDs, now fetch the actual information we want from aips/aip
            buckets = results['aggregations']['aip_uuids']['buckets']
            uuids = [bucket['key'] for bucket in buckets]
            uuid_file_counts = {bucket['key']: bucket['doc_count'] for bucket in buckets}
            query = {
                'query': {
                    'terms': {
                        'uuid': uuids,
                    },
                },
            }
            index = 'aips'
            doc_type = 'aip'
            fields = 'name,uuid,size,created,status,AICID,isPartOf,countAIPsinAIC,encrypted'
            sort = 'name:desc'
        else:
            index = 'aips'
            doc_type = 'aipfile'
            fields = 'AIPUUID,filePath,FILEUUID,encrypted'
            sort = 'sipName:desc'

        # To reduce amount of data fetched from ES, use LazyPagedSequence
        def es_pager(page, page_size):
            """
            Fetch one page of normalized aipfile entries from Elasticsearch.

            :param page: 1-indexed page to fetch
            :param page_size: Number of entries on a page
            :return: List of dicts for each entry with additional information
            """
            start = (page - 1) * page_size
            results = es_client.search(
                body=query,
                from_=start,
                size=page_size,
                index=index,
                doc_type=doc_type,
                fields=fields,
                sort=sort,
            )
            if file_mode:
                return search_augment_file_results(es_client, results)
            else:
                return search_augment_aip_results(results, uuid_file_counts)
        count = es_client.count(index=index, doc_type=doc_type, body={'query': query['query']})['count']
        results = LazyPagedSequence(es_pager, items_per_page, count)

    except ElasticsearchException:
        logger.exception('Error accessing index.')
        return HttpResponse('Error accessing index.')

    if not file_mode:
        aic_creation_form = forms.CreateAICForm(initial={'results': uuids})
    else:  # if file_mode
        aic_creation_form = None

    page_data = helpers.pager(results, items_per_page, current_page_number)

    return render(request, 'archival_storage/search.html',
                  {
                      'file_mode': file_mode,
                      'show_aics': show_aics,
                      'checked_if_in_file_mode': checked_if_in_file_mode,
                      'aic_creation_form': aic_creation_form,
                      'results': page_data.object_list,
                      'search_params': search_params,
                      'page': page_data,
                  }
                  )