예제 #1
0
def main():

    # instantiate the weosession and connect
    # market.weogeo.com gets us the whole public library
    weos = WeoGeoAPI.weoSession('http://market.weogeo.com', '*****@*****.**', 'avimordy')

    # Now we connect to the market
    weos.connectToMarket()

    # if you want to see your connection parameters use this line
    #print weos

    ## Get all the datasets that intersect this box in the public library
    dataSets = weos.getDatasets('JSON', '&north=37.1714&south=37.1012&west=-122.1728&east=-122.0614&per_page=25');

    # Here are a bunch of calls I used to dig into the structure of a returned dataset. Given it's XML origins it
    # has a pretty deeply nested structure. I use the playing around to get to the minimal information I needed to
    # decide which data I wanted

    #print type(dataSets)

    # Get the actual data
    #print json.dumps(dataSets, sort_keys=True, indent=4)

    # Get the actual dictionary of the data, not the metadata of the query
    dataDict = dataSets[1]

    # Now pull the actual array of datasets
    dataList = dataDict['items']
    #print json.dumps(dataList[0], sort_keys=True, indent=4)
    #for keys in dataList[0].keys():
    #	print keys
    	#print type(dataDict['items'])

    # We want token, name, data_type,layers and data coord type (we need this to create bounding box in the native
    # coordinate system.
    
    for item in dataList:

        #the token is the unique identifier which we use to create a job
        print item['token']
        print item['name']
        
        print item['data_type']
        print json.dumps(item['layers'])

        #
        print json.dumps(item['boundaries']['tiles'])
        
        print ('\n----/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/----\n')
    

    print "----------\n"
    
    datasetOSM = weos.getDataset('e0dd1ef6-df94-4e7f-a95f-f48327ba3467','JSON')
    #datasetsOSM = weos.getDatasets('JSON', '&north=89&south=89&west=-179&east=179')
    print json.dumps(datasetOSM, indent=4, sort_keys=True)
    
    print "Finished!"
예제 #2
0
def main():

    weos = WeoGeoAPI.weoSession('http://market.weogeo.com', '*****@*****.**', 'avimordy')

    if weos.connectToMarket() is True:

        print "\n Getting Download(s) \n"
        
        response = weos.getDownloadFile("921f2d6f-4f21-4238-aa0d-3cb6452a9883")
        print json.dumps(response, indent=4)
예제 #3
0
def get_connex(options):
    print "Trying to connect to library %s using username %s and password %s" % (options.library,options.username,options.password)
    weos = WeoGeoAPI.weoSession(options.library,options.username,options.password)
        
    result = weos.connect()        
    if not result:
        print "\nCould not connect to your library with the credentils that you supplied.\nPlease check #your credentials and the CLI syntax.\n"
        parser.print_help()
        sys.exit()

    return weos
예제 #4
0
"""
In this example we will demonstrate how obtain the download link for a job order using
information that was logged from the order_multiple.py example. Important note: download
links are only valid for 7 days after the job has been ordered.
"""

import WeoGeoAPI

# Establish connection to Trimble Data Marketplace
session = WeoGeoAPI.weoSession('market.trimbledata.com', 'username', 'password')
session.connect()
print session

# List of job tokens pulled from previous example
jobtokens = ['c6c89bce-c633-4fe3-814f-61265d3324a6',
             '3ea0b1c7-0c09-44b5-a7fe-b183b4e8ab76',
             '8d0a9080-3bcc-426a-b367-585ee8d01161']

# Get download link for each job
for token in jobtokens:
    response = session.getDownloadFile(token)
    for item in response.content:
        print item['url']

print "\nAll download links displayed."
예제 #5
0
def main():
	"""Build eight WeoGeo job orders for the Santa Cruz area, submit them
	to the cart, then place a single order for everything in the cart.

	Each order dict follows the "create a job" schema documented at
	http://www.weogeo.com/developer_doc/Jobs_API.html; "cart": "true"
	queues the job instead of ordering it immediately.
	"""

	print "making jobs"

	# Alright, we need to make an order for each dataset we want and we need to add it to our cart.
	# For the meaning of the different pieces of the Job please refer to "create a job" on the following
	# page:  http://www.weogeo.com/developer_doc/Jobs_API.html

	# DRGs are a raster images which are scans of the USGS Topo sheets - we will use this as a backdrop
	drgOrder = {

		# may be either true or false
		"cart" : "true",
		"job" :
			{
				# This corresponds to the token from the listing we did before
				"dataset_token" : "f1bca22c-013e-4e57-a78b-f9bbb3a918e8",

				# must be 1 or it will not be accepted
				"content_license_acceptance" : "1",

				"parameters":
					{
						"job_geocrop" : "Clip",
						# all of the coordinates must be in the native coords of the data layer,
						# NOT in our projected coord system
						"job_north": "4457478.778 ",
						"job_south": "4436707.6 ",
						"job_east" : "-13579988.662 ",
						"job_west" : "-13595996.424 ",
						"job_layers": "Layer_1",
						"job_datum_projection" : "EPSG:4326",
						#"job_datum_projection" : "Native",
						# FME name for file format
						"job_file_format": "JPEG",


						# raster only - this seems to correspond to the level of compression used on the image
						"job_spatial_resolution": "1"
					}
			}
	}

	# Add the job we just created to a List which will contain all orders
	jobOrders = [drgOrder]



	# Next job will be the vector data from the US Census - TIGER/Line - Santa Cruz County, California
	landmarkOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "63472b72-0f0d-2554-8120-67b445942ff7",
				"content_license_acceptance" : "1",
				"parameters":
					{
						# Spatial Selection will grab everything that touches the bounding box but not clip it to the box
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						# NOTE(review): job_east (-122.1728) lies west of job_west
						# (-122.0614); compared with the getDatasets query
						# (west=-122.1728&east=-122.0614) these look swapped.
						# The same pattern repeats in every order below -- verify.
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# We are grabbing all the landmarks from this census dataset
						"job_layers": "Point Landmark;Area Landmark",
						"job_datum_projection" : "EPSG:4326",
						# Most of this data is natively in Shapefile format but we could set this to SHAPE instead
						"job_file_format": "Native",
					}

			}

	}

	censusOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "85da9c3d-a58b-9b5f-9c59-f324f4d4d186",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# Census geography layers (block groups, blocks, tracts, tribal units)
						"job_layers": "Census Block Group;Census Block;Census Tract;Tribal Block Group;Tribal Census Tract",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}

	votingOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "1a9f8125-cd90-ef5a-9c16-c3329fb51497",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# Legislative-district and place layers
						"job_layers": "State Legislative District--Lower Chamber;State Legislative District--Upper Chamber;Place",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}

	zipOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "5c910ad3-fc69-4023-b180-8c862639eadb",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# Single layer in this dataset
						"job_layers": "Layer_1",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}

	schoolOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "1a4d4a22-d84b-4cb3-b874-1a99bf6d42eb",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# School-district layers
						"job_layers": "School District--Elementary;School District--Secondary;School District--Unified",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}

	transOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "fd732c1b-880b-4ea2-ac4e-60e8f95d677f",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# Road and rail layers
						"job_layers": "Primary Roads;Primary and Secondary Roads;All Roads;Rails",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}

	waterOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "e1f0bcce-30cb-4890-81e2-e5e768c7ddfa",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# Hydrography layers
						"job_layers": "Area Hydrography;Linear Hydrography",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "Native",
						}

			}

	}


	OSMOrder = {
		"cart" : "true",
		"job" :
			{
				"dataset_token" : "e0dd1ef6-df94-4e7f-a95f-f48327ba3467",
				"content_license_acceptance" : "1",
				"parameters":
					{
						"job_geocrop" : "Spatial_Selection",
						"job_north": "37.1714 ",
						"job_south": "37.1012 ",
						"job_east" : "-122.1728 ",
						"job_west" : "-122.0614 ",
						# OpenStreetMap feature layers
						"job_layers": "Highway;Man Made;Place;Leisure;Shop;Tourism;Amenity",
						"job_datum_projection" : "EPSG:4326",
						"job_file_format": "SHAPE",
						}

			}

	}

	jobOrders.append(landmarkOrder)
	jobOrders.append(censusOrder)
	jobOrders.append(votingOrder)
	jobOrders.append(zipOrder)
	jobOrders.append(schoolOrder)
	jobOrders.append(transOrder)
	jobOrders.append(waterOrder)
	jobOrders.append(OSMOrder)


	weos = WeoGeoAPI.weoSession('http://market.weogeo.com', '*****@*****.**', 'avimordy')

	if weos.connectToMarket() is True:

		print "\nSubmitting your jobs...\n"

		# Create each job (queuing it in the cart) and echo the server response.
		for job in jobOrders:
			response = weos.createJob(job, 'JSON')
			print "Here is your order info:\n"
			print job['job']['dataset_token'] + '\n'
			print json.dumps(response, indent=4)

		print "\nfinished creating jobs - now to order!\n"
		# Place a single order for everything queued in the cart.
		weoJobOrder = weos.orderJobsInCart('JSON')
		print json.dumps(weoJobOrder, indent=4)

		print "\n\n ---Finished---\n"
예제 #6
0
"""
In this example we will demonstrate how to order a raster job with customization using
information we logged from the getdatasets_by_area.py example. The dataset used in this
example is "Natural Earth Shaded Relief", which can be found here:
http://market.weogeo.com/datasets/5dbdb7db-1acf-4f19-b629-04b54f907552
"""

# Third Party Modules
import WeoGeoAPI

# Establish connection to WeoGeo Market
weos = WeoGeoAPI.weoSession("market.weogeo.com", "username", "password")
weos.connect()
print weos

# Set initial job parameters. The 'note' variable is optional. Here we only want one layer, '10m High Res'.
# Spatial resolution is 1 (native). Use 2, 3 or 4 to deliver as 2x/3x/4x coarser.
# Reproject our order to NAD83-Geo(EPSG:4269).
newJob = WeoGeoAPI.weoJob(
    datasetToken="5dbdb7db-1acf-4f19-b629-04b54f907552",
    layers=["10m High Res"],
    outputFormat="GeoTIFF",
    coordinateSystem="EPSG:4269",
    spatialResolution="1",
    note="Extract of area around Oregon.",
    acceptLicense=True,
)

# Set crop box around Oregon. EPSG must be GEO(EPSG:4326) or Spherical Mercator(EPSG:3857).
newJob.setBoxCropArea("EPSG:4326", 46.17, 42.13, -116.28, -124.33)
예제 #7
0
"""
In this example we will demonstrate how to use the cart to order multiple datasets using
information that was logged from the getdatasets_by_area.py example. We will use the vector,
raster and standard datasets found in previous examples. We will log the job tokens for use
in another example to download the jobs.
"""

# Third Party Modules
import WeoGeoAPI

# Establish connection to WeoGeo Market
weos = WeoGeoAPI.weoSession('market.weogeo.com', 'username', 'password')
weos.connect()
print weos

# Create text file to log tokens
outfile = open('job_tokens.txt', 'w')

# Create lists for job instances and another list for their tokens
jobs = []
jobtokens = []

# Create standard job object and append to jobs list
standardJob = WeoGeoAPI.weoJob( datasetToken = '9dc42e34-cbd0-6952-ad6c-fb39eb23fd0a',
                                acceptLicense = True)
jobs.append(standardJob)

# Create vector job object and append to jobs list
vectorJob = WeoGeoAPI.weoJob( datasetToken = 'bfc2b36e-3d0d-4a6d-935d-e9ab090aaa3c',
                              layers = ['Area Hydrography', 'Linear Hydrography'],
                              outputFormat = 'SHAPE',
예제 #8
0
import sys
import WeoGeoAPI

# define globals for authentication
# (taken from argv when supplied: script.py LIBRARY USERNAME PASSWORD;
# otherwise the placeholder defaults below are used)
if len(sys.argv) > 3:
    LIBRARY, USERNAME, PASSWORD = sys.argv[1],sys.argv[2],sys.argv[3]
else:
    LIBRARY, USERNAME, PASSWORD = ("YOUR-LIBRARY-SUBDOMAIN.weogeo.com",
                                   "YOUR-LIBRARY-USERNAME",
                                   "YOUR-LIBRARY-PASSWORD")

# instantiate the weosession and connect; bail out if the credentials fail
weos = WeoGeoAPI.weoSession(LIBRARY, USERNAME, PASSWORD)
if not weos.connect():
    print "Couldn't connect with the credentials provided."
    sys.exit()

# Each get* call below returns a (http_response, parsed_dict) pair; the
# dicts carry an "items" list whose records have "name" and "id" keys.
# Output is printed as simple CSV-ish "Name,ID" listings.
http_response, groups_dict = weos.getGroups("JSON")
print "="*20,"\n","Group Name,ID","\n","="*20
for item in groups_dict["items"]:
    print "%s,%s" % (item["name"], item["id"])
print "="*20,"\n"
    
http_response, roles_dict = weos.getRoles("JSON")
print "="*20,"\n","Role Name,ID","\n","="*20
for item in roles_dict["items"]:
    print "%s,%s" % (item["name"], item["id"])
print "="*20,"\n"
    
http_response, datasets_dict = weos.getDatasets("JSON")
print "="*20,"\n","Dataset Name,token","\n","="*20