Exemplo n.º 1
0
def search_titles_results(request):
    """Render US Newspaper Directory search results.

    If the request asks for ``format=csv``, every matching title is pulled
    from Solr in a single query and returned as a CSV attachment (no
    pagination).  Otherwise execution falls through to the paginated HTML
    path, which continues beyond this excerpt (view is truncated here).

    NOTE(review): legacy syntax below (``except ValueError, e`` and
    ``request.REQUEST``) indicates Python 2 / pre-1.9 Django.
    """
    page_title = 'US Newspaper Directory Search Results'
    # Breadcrumb trail: site-wide base crumbs plus a link back to the search form.
    crumbs = list(settings.BASE_CRUMBS)
    crumbs.extend([{'label': 'Search Newspaper Directory',
                    'href': reverse('openoni_search_titles')},
                   ])

    def prep_title_for_return(t):
        # Merge the title's Solr document fields with its OCLC number into
        # a plain dict.  (Not called in this excerpt — presumably used in
        # the paginated path below; verify against the full file.)
        title = {}
        title.update(t.solr_doc)
        title['oclc'] = t.oclc
        return title

    # NOTE(review): `format` shadows the builtin; left as-is (doc-only change).
    format = request.GET.get('format', None)

    # check if requested format is CSV before building pages for response. CSV 
    # response does not make use of pagination, instead all matching titles from
    # SOLR are returned at once
    if format == 'csv':
        query = request.GET.copy()
        q, fields, sort_field, sort_order, facets = index.get_solr_request_params_from_query(query)
        
        # return all titles in csv format. * May hurt performance. Assumption is that this
        # request is not made often. 
        # TODO: revisit if assumption is incorrect
        # index.title_count() as rows + offset 0 => fetch every match at once.
        solr_response = index.execute_solr_query(q, fields, sort_field, 
                                                 sort_order, index.title_count(), 0)
        titles = index.get_titles_from_solr_documents(solr_response)

        csv_header_labels = ('lccn', 'title', 'place_of_publication', 'start_year',
                             'end_year', 'publisher', 'edition', 'frequency', 'subject', 
                             'state', 'city', 'country', 'language', 'oclc',
                             'holding_type',)
        # Stream the CSV straight into the HTTP response as a file download.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="openoni_titles.csv"'
        writer = csv.writer(response)
        writer.writerow(csv_header_labels)
        # One row per title; falsy values render as '--'.
        # NOTE(review): the nested map()/set() cells rely on Python 2 map()
        # returning lists — under Python 3 they would serialize as iterator
        # reprs; confirm before any 2->3 migration.
        for title in titles:
            writer.writerow(map(lambda val: smart_str(val or '--'),
                               (title.lccn, title.name, title.place_of_publication,
                                title.start_year, title.end_year, title.publisher, 
                                title.edition, title.frequency, 
                                map(str, title.subjects.all()), 
                                set(map(lambda p: p.state, title.places.all())), 
                                map(lambda p: p.city, title.places.all()),
                                str(title.country), map(str, title.languages.all()),
                                title.oclc, title.holding_types)))
        return response
 
    # Paginated (non-CSV) path: fall back to page 1 on a malformed ?page= value.
    try:
        curr_page = int(request.REQUEST.get('page', 1))
    except ValueError, e:
        curr_page = 1
Exemplo n.º 2
0
def status(request):
    """Render the system status report with database and Solr index counts.

    NOTE: the template context is built from ``locals()``, so every local
    variable name below is part of the contract with
    ``reports/status.html`` and must not be renamed.
    """
    page_title = 'System Status'
    # Counts from the relational database.
    batch_count = models.Batch.objects.count()
    title_count = models.Title.objects.count()
    issue_count = models.Issue.objects.count()
    page_count = models.Page.objects.count()
    holding_count = models.Holding.objects.count()
    essay_count = models.Essay.objects.count()
    # Counts from the Solr search index.
    pages_indexed = index.page_count()
    titles_indexed = index.title_count()
    return render_to_response('reports/status.html', dictionary=locals(),
                              context_instance=RequestContext(request))
Exemplo n.º 3
0
    def handle(self, **options):
        """Bootstrap an empty openoni install: load fixtures, title records,
        then run title_sync.

        Refuses to run (with a warning) unless both the database tables and
        the Solr index are completely empty, so an existing installation is
        never clobbered.

        Recognized option: ``pull_title_updates`` — forwarded to the
        ``title_sync`` management command.
        """
        # Guard: every table and both index counts must be zero.
        db_counts = (
            models.Title.objects.count(),
            models.Holding.objects.count(),
            models.Essay.objects.count(),
            models.Batch.objects.count(),
            models.Issue.objects.count(),
            models.Page.objects.count(),
            index.page_count(),
            index.title_count(),
        )
        if any(db_counts):
            # logger.warn() is deprecated; use warning()
            _logger.warning("Database or index not empty as expected.")
            return

        start = datetime.now()
        # Reference-data fixtures required before any titles can load.
        management.call_command("loaddata", "languages.json")
        management.call_command("loaddata", "institutions.json")
        management.call_command("loaddata", "ethnicities.json")
        management.call_command("loaddata", "labor_presses.json")
        management.call_command("loaddata", "countries.json")

        bib_in_settings = validate_bib_dir()
        if bib_in_settings:
            # look in BIB_STORAGE for original titles to load; indexing is
            # deferred (skip_index) — title_sync below rebuilds the index.
            for filename in os.listdir(bib_in_settings):
                if filename.startswith("titles-") and filename.endswith(".xml"):
                    filepath = os.path.join(bib_in_settings, filename)
                    management.call_command("load_titles", filepath, skip_index=True)

        management.call_command("title_sync", pull_title_updates=options["pull_title_updates"])

        end = datetime.now()
        total_time = end - start
        # Lazy %-style args avoid formatting when the level is disabled.
        _logger.info("start time: %s", start)
        _logger.info("end time: %s", end)
        _logger.info("total time: %s", total_time)
        _logger.info("openoni_sync done.")