"id": "word_vectors id",
            "description": "word_vectors description",
            "date": "2020-12-31",
            "vector": {}
        }]
    }]
}
# Tags attached to the document dataset (list of {"tag": ...} records).
tags = [{"tag": "tag"}]

# External resources linked to the dataset.
files = [
    {
        "file_uri": "http://example.org/files/file.uri",  # file name or URL
        # The file format, physical medium, or dimensions of the resource.
        "format": "application/excel",
        "location": "100",  # page number or sheet name for the table
        "note": "file note text",  # free-text note about the file
    },
]

# Create (or overwrite) the document dataset in the catalog and report
# the raw API response.
document_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "published": published,
    "overwrite": overwrite,
    "metadata_information": metadata_information,
    "document_description": document_description,
    "tags": tags,
    "files": files,
}
response = create_and_import.create_document_dataset(**document_kwargs)

print(response)

# upload temporary thumbnail
thumbnail_path = utils.text_to_thumbnail("Document\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)

# Point the client at the NADA catalog API and authenticate.
create_and_import.set_api_url('http://training.ihsn.org/index.php/api/')
# SECURITY: do not commit API keys in source code. Prefer the NADA_API_KEY
# environment variable; the literal below is kept only as a fallback so this
# example keeps working unchanged when the variable is unset.
import os
api_key = os.environ.get('NADA_API_KEY', 'cf16a23a3cfc6a928f63dd3c8daf8796')
create_and_import.set_api_key(api_key)

#########################################
# create_survey_dataset_from_DDI example
#########################################

# Input DDI file and catalog placement options for the survey example.
file = "SURVEY_DATASET_SAMPLE_02.xml"  # DDI codebook to import
overwrite = "yes"                      # replace the entry if it already exists
repository_id = "central"              # target collection in the catalog
access_policy = "open"                 # data access level for the study
published = 1                          # 1 = visible in the catalog

# Import the survey metadata from the DDI codebook and report the response.
# Optional arguments (data_remote_url, rdf, published) are left at their
# server-side defaults, as in the original example.
survey_kwargs = dict(
    file=file,
    overwrite=overwrite,
    repository_id=repository_id,
    access_policy=access_policy,
)
response = create_and_import.create_survey_dataset_from_DDI(**survey_kwargs)

print(response)

# upload temporary thumbnail
# The import response carries the study's unique identifier (idno);
# use it to attach a generated placeholder thumbnail image to the entry.
dataset_id = response['survey']['idno']
thumbnail_path = utils.text_to_thumbnail("Survey\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)
# Example no. 3 (original scrape marker: "Esempio n. 3" / "0")
        "name": "album name",
        "description": "album description",
        "owner": "album owner",
        "uri": "album uri"
    }],
    "tags": [{
        "tag": "tag"
    }],
    "files": [{
        "file_uri":
        "http://example.org/image_description/files/file.uri",  # File name or URL
        "format":
        "file format",  # The file format, physical medium, or dimensions of the resource.
        "note": "file note",
        "show": True  # Show the image file on the page
    }]
}

# Create (or overwrite) the image dataset in the catalog and report
# the raw API response.
image_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "published": published,
    "overwrite": overwrite,
    "metadata_information": metadata_information,
    "image_description": image_description,
}
response = create_and_import.create_image_dataset(**image_kwargs)

print(response)

# Attach a generated placeholder thumbnail to the new entry.
thumbnail_path = utils.text_to_thumbnail("Image\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)
# Example no. 4 (original scrape marker: "Esempio n. 4" / "0")
        "name": "Related document name",
        "type":
        "isPartOf"  # "isPartOf""hasPart""isVersionOf""isFormatOf""hasFormat""references""isReferencedBy""isBasedOn""isBasisFor""requires""isRequiredBy"
    }]
}
# External resources, tags, and free-form extra metadata for the table.
files = [
    {
        "file_uri": "http://example.org/files/file.uri",  # file name or URL
        # The file format, physical medium, or dimensions of the resource.
        "format": "file format",
        "location": "file location",
        "note": "file note",
    },
]
tags = [{"tag": "tag"}]
additional = {"additional": "additional info"}

# Create (or overwrite) the table dataset in the catalog and report
# the raw API response.
table_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "published": published,
    "overwrite": overwrite,
    "metadata_information": metadata_information,
    "table_description": table_description,
    "files": files,
    "tags": tags,
    "additional": additional,
}
response = create_and_import.create_table_dataset(**table_kwargs)

print(response)

# Attach a generated placeholder thumbnail to the new entry.
thumbnail_path = utils.text_to_thumbnail("Table\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)
# Example no. 5 (original scrape marker: "Esempio n. 5" / "0")
        "data_quality_info lineage statement"  # Data quality lineage statement
    },
    "spatial_representation_info":
    {  # Resource spatial representation - Spatial representation information for the dataset (resource). Best practice is to include metadata for spatial representation if the described resource is a georeferenced dataset.
        "topology_level":
        "geometryOnly",  # Topology Level Code: {geometryOnly, topology1D, planarGraph, fullPlanarGraph, surfaceGraph, fullSurfaceGraph, topology3D, fullTopology3D, abstract}
        "Geometric_object_code":
        "complex"  # Geometric Object Type Code codes ={complex, composite, curve, point, solid, surface}
    },
    "reference_system_info":
    {  # Resource’s spatial reference system - Description of the spatial and/or temporal reference systems used in the dataset.
        "code": "EPSG:5701",  # reference_system Identifier Code
        "code_space": "urn:ogc:def:crs"  # spatial reference system code_space
    }
}
# Free-form extra metadata not covered by the schema.
additional = {"additional": "additional info"}

# Create (or overwrite) the geospatial dataset in the catalog and report
# the raw API response.
geospatial_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "published": published,
    "overwrite": overwrite,
    "metadata_maintenance": metadata_maintenance,
    "dataset_description": dataset_description,
    "additional": additional,
}
response = create_and_import.create_geospatial_dataset(**geospatial_kwargs)

print(response)

# Attach a generated placeholder thumbnail to the new entry.
thumbnail_path = utils.text_to_thumbnail("Geospatial\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)
# Example no. 6 (original scrape marker: "Esempio n. 6" / "0")
    "word_vectors": [{
        "id": "Vector Model ID",
        "description": "Vector Model Description",
        "date": "2020-12-31",
        "vector": {}
    }],
    "series_groups": [  # Series included in groups
        {
            "name": "series_group name",
            "version": "series_group version",
            "uri": "http://example.org/series_groups/uri"
        }
    ]
}
additional = {}  # Any other custom metadata not covered by the schema

# Create (or overwrite) the timeseries dataset in the catalog and report
# the raw API response.
timeseries_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "access_policy": access_policy,
    "published": published,
    "overwrite": overwrite,
    "metadata_creation": metadata_creation,
    "series_description": series_description,
    "additional": additional,
}
response = create_and_import.create_timeseries_dataset(**timeseries_kwargs)

print(response)

# Attach a generated placeholder thumbnail to the new entry.
thumbnail_path = utils.text_to_thumbnail("Timeseries\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)
        "isPartOf"  # Valid values: "isPartOf" "hasPart" "isVersionOf" "isFormatOf" "hasFormat" "references" "isReferencedBy" "isBasedOn" "isBasisFor" "requires" "isRequiredBy"
    }],
    "tags": [{
        "tag": "tag"
    }]
}
# External resources attached to the visualization.
files = [
    {
        # Provide file name, path or URL.
        "file_uri": "http://example.org/files/file.uri",
        # The file format, physical medium, or dimensions of the resource.
        "format": "file format",
        "location": "file location",
        "note": "file note",
    },
]
# Free-form extra metadata not covered by the schema.
additional = {"additional": "additional info"}

# Create (or overwrite) the visualization dataset in the catalog and report
# the raw API response.
visualization_kwargs = {
    "dataset_id": dataset_id,
    "repository_id": repository_id,
    "published": published,
    "overwrite": overwrite,
    "metadata_information": metadata_information,
    "visualization_description": visualization_description,
    "files": files,
    "additional": additional,
}
response = create_and_import.create_visualization_dataset(**visualization_kwargs)

print(response)

# Attach a generated placeholder thumbnail to the new entry.
thumbnail_path = utils.text_to_thumbnail("Visualization\nDataset")
create_and_import.upload_thumbnail(dataset_id, thumbnail_path)