Example #1
# Shared imports assumed by the view snippets below; the exact module paths for
# `models` and `sharkdata_core` are assumptions about the surrounding project:
# import json
# import sharkdata_core
# from app_datasets import models
# from django.http import HttpResponse

def datasetDataColumnsText(request, dataset_name):
    """ Returns data in text format for a specific dataset.
        Column format.
    """
    #
    header_language = request.GET.get('header_language', None)
    #
    dataset = models.Datasets.objects.get(dataset_name=dataset_name)
    dataset_file_name = dataset.dataset_file_name
    #
    data_as_text = sharkdata_core.DatasetUtils().getDataColumnsAsText(dataset_name)
    #
    response = HttpResponse(content_type='text/plain; charset=cp1252')
    response['Content-Disposition'] = \
        'attachment; filename=' + dataset_file_name.replace('.zip', '_COLUMNS.txt')
    if header_language:
        # Extract the first row, translate its headers, then write the rest unchanged.
        rows = data_as_text.split('\r\n')
        if len(rows) > 0:
            headerrow = sharkdata_core.DatasetUtils().translateDataHeaders(
                rows[0].split('\t'), language=header_language)
            response.write(('\t'.join(headerrow) + '\r\n').encode('cp1252'))
            response.write('\r\n'.join(rows[1:]).encode('cp1252'))
    else:
        response.write(data_as_text.encode('cp1252'))
    #
    return response
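Being a plain Django view, datasetDataColumnsText only becomes reachable once it is wired into a URL configuration. A minimal sketch of such a mapping, assuming Django 2+ path() routing; the route string and module layout are illustrative, not taken from the original project:

# urls.py -- hypothetical sketch, not the original project's routing.
from django.urls import path

from . import views  # assumes the view above lives in a views module

urlpatterns = [
    # The optional ?header_language=<language> query parameter selects
    # translated column headers, as handled inside the view.
    path('datasets/<dataset_name>/data_columns.txt',
         views.datasetDataColumnsText,
         name='dataset_data_columns_text'),
]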
Example #2

    def updateDatasetsAndResources(self, logfile_name, user):
        """ Writes info for the latest datasets and resources to the database,
            logging progress and closing the log with an error-aware status.
        """
        error_counter = 0
        try:
            sharkdata_core.SharkdataAdminUtils().log_write(
                logfile_name, log_row='\nDatasets:')
            error_counter += sharkdata_core.DatasetUtils().writeLatestDatasetsInfoToDb(
                logfile_name)

            sharkdata_core.SharkdataAdminUtils().log_write(
                logfile_name, log_row='\nResources:')
            error_counter += sharkdata_core.ResourcesUtils().writeResourcesInfoToDb(
                logfile_name)

            if error_counter > 0:
                sharkdata_core.SharkdataAdminUtils().log_close(
                    logfile_name,
                    new_status='FINISHED-' + str(error_counter) + '-errors')
            else:
                sharkdata_core.SharkdataAdminUtils().log_close(
                    logfile_name, new_status='FINISHED')

        except Exception as e:
            error_message = ('Failed when loading datasets or resources.' +
                             '\nException: ' + str(e) + '\n')
            sharkdata_core.SharkdataAdminUtils().log_write(
                logfile_name, log_row=error_message)
            sharkdata_core.SharkdataAdminUtils().log_close(logfile_name,
                                                           new_status='FAILED')
        #
        return None  # No error message.
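For completeness, a hedged sketch of how an admin method like this might be kicked off without blocking the calling thread; the wrapper object passed in and the origin of the log file name are assumptions, not taken from the original code:

# Hypothetical caller; assumes a logfile name has already been created elsewhere.
import threading

def start_update_job(admin_utils, logfile_name, user):
    """ Runs updateDatasetsAndResources(logfile_name, user) in a background thread. """
    thread = threading.Thread(
        target=admin_utils.updateDatasetsAndResources,
        args=(logfile_name, user))
    thread.daemon = True  # don't keep the process alive just for this job
    thread.start()
    return thread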
Example #3
def tableDatasetsText(request):
    """ Generates a text file containing a list of datasets and their properties. """
    header_language = request.GET.get('header_language', None)
    data_header = sharkdata_core.DatasetUtils().getDatasetListHeaders()
    translated_header = sharkdata_core.DatasetUtils().translateDatasetListHeaders(
        data_header, language=header_language)
    #
    data_rows = models.Datasets.objects.values_list(*data_header)
    #
    response = HttpResponse(content_type='text/plain; charset=cp1252')
    response['Content-Disposition'] = 'attachment; filename=sharkdata_datasets.txt'
    response.write('\t'.join(translated_header) + '\r\n')  # Tab separated.
    for row in data_rows:
        response.write('\t'.join(map(str, row)) + '\r\n')  # Tab separated.
    return response
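On the consuming side, the tab-separated table above can be read back into dictionaries with the standard csv module. A hedged client-side sketch; the URL is a placeholder, not the project's actual route:

# Hypothetical reader for the tab-separated dataset table.
import csv
import io

import requests  # third-party HTTP client; any other client would do

url = 'https://example.org/datasets/table.txt'  # placeholder URL
text = requests.get(url).content.decode('cp1252')
reader = csv.reader(io.StringIO(text), delimiter='\t')
header = next(reader)
datasets = [dict(zip(header, row)) for row in reader if row]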
Example #4
def datasetDataColumnsJson(request, dataset_name):
    """ Returns data in JSON format for a specific dataset. 
        Column format.
    """
    #
    header_language = request.GET.get('header_language', None)
    #
    dataset = models.Datasets.objects.get(dataset_name=dataset_name)
    dataset_file_name = dataset.dataset_file_name
    #
    data_as_text = sharkdata_core.DatasetUtils().getDataColumnsAsText(dataset_name)
    #
    response = HttpResponse(content_type='application/json; charset=cp1252')
    response['Content-Disposition'] = \
        'attachment; filename=' + dataset_file_name.replace('.zip', '_COLUMNS.json')
    response.write('{')
    row_delimiter = ''
    # Note: rows are split on '\n' here, while the text view above splits on '\r\n'.
    for index, row in enumerate(data_as_text.split('\n')):
        rowitems = row.strip().split('\t')
        if index == 0:
            response.write('"header": ["')
            if header_language:
                rowitems = sharkdata_core.DatasetUtils().translateDataHeaders(
                    rowitems, language=header_language)
            #
            outrow = '", "'.join(rowitems) + '"], '
            response.write(outrow.encode('cp1252'))
            response.write(' "rows": [')
        else:
            if len(rowitems) > 1:
                outrow = row_delimiter + '["' + '", "'.join(rowitems) + '"]'
                response.write(outrow.encode('cp1252'))
                row_delimiter = ', '
    response.write(']')
    response.write('}')
    #
    return response
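The JSON produced above is a compact header-plus-rows table rather than a list of objects. A hedged client-side sketch that turns it back into per-row dictionaries; the URL is a placeholder:

# Hypothetical consumer of the column-oriented JSON above.
import json

import requests  # third-party HTTP client

url = 'https://example.org/datasets/some_dataset/data_columns.json'  # placeholder
payload = json.loads(requests.get(url).content.decode('cp1252'))
rows_as_dicts = [dict(zip(payload['header'], row)) for row in payload['rows']]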
Example #5
def datasetMetadataText(request, dataset_name):
    """ Returns metadata in text format for a specific dataset. """
    dataset = models.Datasets.objects.get(dataset_name=dataset_name)
    dataset_file_name = dataset.dataset_file_name
    #
    metadata_as_text = sharkdata_core.DatasetUtils().getMetadataAsText(dataset_name)
    #
    response = HttpResponse(content_type='text/plain; charset=cp1252')
    response['Content-Disposition'] = \
        'attachment; filename=' + dataset_file_name.replace('.zip', '_METADATA.txt')
    response.write(metadata_as_text)
    return response
Example #6
def listDatasetsJson(request):
    """ Generates a JSON file containing a list of datasets and their properties. """
    data_header = sharkdata_core.DatasetUtils().getDatasetListHeaders()
    datasets_json = []
    #
    data_rows = models.Datasets.objects.values_list(*data_header)
    for data_row in data_rows:
        row_dict = dict(zip(data_header, map(str, data_row)))
        datasets_json.append(row_dict)
    #
    response = HttpResponse(content_type='application/json; charset=cp1252')
    response['Content-Disposition'] = 'attachment; filename=sharkdata_dataset_list.json'
    # Note: json.dumps() on Python 3 no longer accepts an 'encoding' argument.
    response.write(json.dumps(datasets_json))
    return response
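As a design note, django.http.JsonResponse (Django 1.7+) can take over serialization and the Content-Type header. A hedged alternative sketch of the same view; the function name is hypothetical and the charset becomes UTF-8 rather than cp1252:

# Alternative sketch using JsonResponse instead of manual json.dumps().
from django.http import JsonResponse

def listDatasetsJsonAlt(request):  # hypothetical name, not in the original code
    data_header = sharkdata_core.DatasetUtils().getDatasetListHeaders()
    data_rows = models.Datasets.objects.values_list(*data_header)
    datasets_json = [dict(zip(data_header, map(str, row))) for row in data_rows]
    response = JsonResponse(datasets_json, safe=False)  # safe=False allows a list
    response['Content-Disposition'] = 'attachment; filename=sharkdata_dataset_list.json'
    return response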
Example #7
def datasetMetadataJson(request, dataset_name):
    """ Returns metadata in JSON format for a specific dataset. """
    dataset = models.Datasets.objects.get(dataset_name=dataset_name)
    dataset_file_name = dataset.dataset_file_name
    #
    metadata_as_text = sharkdata_core.DatasetUtils().getMetadataAsText(dataset_name)
    metadata_dict = {}
    for row in metadata_as_text.split('\r\n'):
        if ':' in row:
            parts = row.split(':', 1)  # Split on the first occurrence only.
            key = parts[0].strip()
            value = parts[1].strip()
            metadata_dict[key] = value
    #
    response = HttpResponse(content_type='application/json; charset=utf-8')
    response['Content-Disposition'] = \
        'attachment; filename=' + dataset_file_name.replace('.zip', '_METADATA.json')
    response.write(json.dumps(metadata_dict))
    return response
Example #8
def tableDatasetsJson(request):
    """ Generates a text file containing a list of datasets and their properties. 
        Organised as header and rows.
    """
    data_header = sharkdata_core.DatasetUtils().getDatasetListHeaders()
    #
    data_rows = models.Datasets.objects.values_list(*data_header)
    #
    response = HttpResponse(content_type='application/json; charset=cp1252')
    response['Content-Disposition'] = 'attachment; filename=sharkdata_datasets.json'
    response.write('{')
    response.write('"header": ["')
    response.write('", "'.join(data_header) + '"], ')  # Tab separated.
    response.write('"rows": [')
    row_delimiter = ''
    for row in data_rows:
        response.write(row_delimiter + '["' + '", "'.join(map(str, row)) +
                       '"]')
        row_delimiter = ', '
    response.write(']')
    response.write('}')
    #
    return response
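The view above assembles JSON by string concatenation, which is compact but breaks as soon as a value contains a double quote or a backslash. A hedged alternative sketch that builds the same header-plus-rows structure with json.dumps, so quoting and escaping are handled for free; the function name is hypothetical:

# Alternative sketch: let the json module handle quoting and escaping.
import json

def tableDatasetsJsonAlt(request):  # hypothetical name, not in the original code
    data_header = sharkdata_core.DatasetUtils().getDatasetListHeaders()
    data_rows = models.Datasets.objects.values_list(*data_header)
    table = {
        'header': list(data_header),
        'rows': [[str(item) for item in row] for row in data_rows],
    }
    response = HttpResponse(content_type='application/json; charset=cp1252')
    response['Content-Disposition'] = 'attachment; filename=sharkdata_datasets.json'
    # json.dumps() escapes non-ASCII by default, so cp1252 encoding is safe here.
    response.write(json.dumps(table).encode('cp1252'))
    return response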