Code example #1
File: mtcd.py Project: kkoperskidg/MTCD
def runMTCDmosaicPre(id, s3_location):
    gbdx = Interface()
    if (id == -2):
        # do the mosaicing of 1 and 2
        images1 = getS3location(gbdx, s3_location + "/1")
        images2 = getS3location(gbdx, s3_location + "/2")
        tsk = gbdx.Task('mtcdvrt', images1=images1, images2=images2, id="pre")
    elif (id == -3):
        # do the mosaicing of 1, 2 and 3
        images1 = getS3location(gbdx, s3_location + "/1")
        images2 = getS3location(gbdx, s3_location + "/2")
        images3 = getS3location(gbdx, s3_location + "/3")
        tsk = gbdx.Task('mtcdvrt',
                        images1=images1,
                        images2=images2,
                        images3=images3,
                        id="pre")
    else:
        print("Wrong id in runMTCDmosaicPre", id)
        return 1
    workflow = gbdx.Workflow([tsk])
    workflow.savedata(tsk.outputs.data, location=s3_location + "/image2image/")
    workflow.execute()
    print('MTCD mosaic start', id, images1, workflow.id)
    waitForWorkflow(workflow)
    print('MTCD mosaic done ' + images1 + ' ' + images2 + ' ' + str(id) + ' ' +
          str(workflow.status) + ' at ' + str(datetime.now()))
    if (not workflow.status["event"] == "succeeded"):
        print('MTCD mosaic failed')
        return 1
    return 0
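
These mtcd.py snippets call a waitForWorkflow helper that is not shown on this page. A minimal sketch of what it presumably does, assuming it simply polls the workflow until it reaches a terminal state (the poll interval is a guess):

import time

def waitForWorkflow(workflow, poll_seconds=60):
    # Workflow.complete becomes True once the GBDX workflow reaches a
    # terminal state; status then holds the final state/event pair.
    while not workflow.complete:
        time.sleep(poll_seconds)
    return workflow.status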
Code example #2
File: mtcd.py Project: kkoperskidg/MTCD
def aopImage(catalog_id,
             s3_location,
             local_dir=None,
             panPixelSize=0.5,
             clip=None):
    gbdx = Interface()
    isWV1 = catalog_id.startswith('102')
    if (isWV1):
        print("WARNING: this is a WV1 image; an MS or pansharpened product can't be ordered")
    isSWIR = catalog_id.startswith('104A')
    if (isSWIR):
        print("ERROR SWIR image can't be orthorectified")
        return
    order_id = order(gbdx, catalog_id)
    data = gbdx.ordering.status(order_id)[0]['location']
    gdalwarpOptions = "  -r near --config GDAL_CACHEMAX 4000 -wm 4000 -co TILED=TRUE -co COMPRESS=PACKBITS -co BIGTIFF=YES "
    aoptask = gbdx.Task("AOP_Strip_Processor",
                        data=data,
                        enable_acomp=True,
                        enable_pansharpen=False,
                        enable_dra=False,
                        ortho_epsg='UTM',
                        bands='MS',
                        ortho_pixel_size=str(4 * panPixelSize),
                        ortho_dem_specifier=dem)
    if (clip is not None):
        clipTask = gbdx.Task("gdalcrop",
                             image=aoptask.outputs.data,
                             crop=clip + ' -tr ' + str(4 * panPixelSize) +
                             ' ' + str(4 * panPixelSize) + gdalwarpOptions,
                             ship='false')
        workflow = gbdx.Workflow([aoptask, clipTask])
        workflow.savedata(clipTask.outputs.cropped,
                          location=s3_location + 'MS')
    else:
        workflow = gbdx.Workflow([aoptask])
        workflow.savedata(aoptask.outputs.data, location=s3_location + 'MS')
    workflow.execute()
    print('AOP is processing image ' + catalog_id + ' MS workflow id is ' +
          workflow.id)
    waitForWorkflow(workflow)
    print('MS      image ' + catalog_id + ' ' + str(workflow.status) + ' at ' +
          str(datetime.now()))
    if local_dir == '':
        return
    if (local_dir is not None):
        print('Downloading AOP images')
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        gbdx.s3.download(location=s3_location, local_dir=local_dir)
        print('Image downloaded ' + catalog_id + ' at ' + str(datetime.now()))

    return
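
aopImage also depends on two names defined elsewhere in mtcd.py: a module-level dem constant (the ortho_dem_specifier value) and an order helper. A plausible sketch, assuming the helper just orders the catalog id and blocks until delivery (the 'SRTM90' default and the poll interval are assumptions):

import time

dem = 'SRTM90'  # assumed module-level default for ortho_dem_specifier

def order(gbdx, catalog_id):
    # Place the order, then wait until the ordering API reports delivery.
    order_id = gbdx.ordering.order(catalog_id)
    while gbdx.ordering.status(order_id)[0]['state'] != 'delivered':
        time.sleep(60)
    return order_id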
Code example #3
File: mtcd.py Project: kkoperskidg/MTCD
def changePrep(catalog_id, s3_location, clip=None):
    size = '2'
    gbdx = Interface()
    isWV1 = catalog_id.startswith('102')
    if (isWV1):
        print("ERROR Image is a WV1 image and has no MS bands.")
        return
    isSWIR = catalog_id.startswith('104A')
    if (isSWIR):
        print("ERROR Image is a WV1 image.")
        return
    order_id = order(gbdx, catalog_id)
    data = gbdx.ordering.status(order_id)[0]['location']
    gdalwarpOptions = "  -r near --config GDAL_CACHEMAX 4000 -wm 4000 -co TILED=TRUE -co COMPRESS=PACKBITS -co BIGTIFF=YES "
    gdalwarpOptions = "  -r near -co TILED=TRUE -co COMPRESS=PACKBITS -co BIGTIFF=YES "
    aoptask = gbdx.Task("AOP_Strip_Processor",
                        data=data,
                        enable_acomp=True,
                        enable_pansharpen=False,
                        enable_dra=False,
                        ortho_epsg='UTM',
                        bands='MS',
                        ortho_pixel_size=size)
    #aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=False, enable_pansharpen=False, enable_dra=False, ortho_epsg='UTM', bands='MS', ortho_pixel_size='16')
    topoTask = gbdx.Task("topo-correction", image=aoptask.outputs.data)
    topoTask.impersonation_allowed = True
    cloudTask = gbdx.Task("CloudPuncher",
                          image=topoTask.outputs.data,
                          maskOnly='false')
    #cloudTask = gbdx.Task("CloudPuncher", image = aoptask.outputs.data, maskOnly = 'false')
    if (clip is not None):
        clipTask = gbdx.Task('gdalcrop',
                             image=cloudTask.outputs.mask,
                             crop=clip + ' -tr ' + size + ' ' + size +
                             gdalwarpOptions,
                             ship='false')
        workflow = gbdx.Workflow([aoptask, topoTask, cloudTask, clipTask])
        workflow.savedata(clipTask.outputs.cropped, location=s3_location)
    else:
        workflow = gbdx.Workflow([aoptask, topoTask, cloudTask])
        workflow.savedata(cloudTask.outputs.mask, location=s3_location)
    workflow.execute()
    print('AOP is processing image ' + catalog_id + ' MS workflow id is ' +
          workflow.id)
    waitForWorkflow(workflow)
    print('MS      image ' + catalog_id + ' ' + str(workflow.status) +
          ' wfl id ' + workflow.id + ' at ' + str(datetime.now()))
    if (not workflow.status["event"] == "succeeded"):
        print("workflow.status", workflow.status,
              workflow.status["event"] == "succeeded")
        return 1
    return 0
Code example #4
File: mtcd.py Project: kkoperskidg/MTCD
def image2image(ref_image_s3_dir,
                image_s3_dir,
                output_s3,
                source_filename=None,
                reference_filename=None,
                clip=None,
                pixelSize=2,
                fileName=None):
    gbdx = Interface()
    full_ref_image_s3_dir = getS3location(gbdx, ref_image_s3_dir)
    full_image_s3_dir = getS3location(gbdx, image_s3_dir)
    task = gbdx.Task("image2image",
                     reference_directory=full_ref_image_s3_dir,
                     source_directory=full_image_s3_dir,
                     source_filename=source_filename,
                     reference_filename=reference_filename)
    task.timeout = 36000
    if (clip is not None):
        clipTask1 = gbdx.Task("gdalcrop",
                              image=task.outputs.out,
                              crop=clip + ' -tr ' + str(pixelSize) + ' ' +
                              str(pixelSize),
                              ship='False',
                              updateName='False',
                              fileName=fileName)
        workflow = gbdx.Workflow([task, clipTask1])
        workflow.savedata(clipTask1.outputs.cropped, location=output_s3)
    else:
        workflow = gbdx.Workflow([task])
        workflow.savedata(task.outputs.out, location=output_s3)

    workflow.execute()

    print('image2Image ' + ref_image_s3_dir + ' ' + image_s3_dir + ' ' +
          output_s3 + ' ' + str(fileName) + ' workflow ' + str(workflow.id) +
          ' started at ' + str(datetime.now()))
    waitForWorkflow(workflow)
    print('image2Image ' + str(workflow.id) + ' ' + ref_image_s3_dir + ' ' +
          image_s3_dir + ' ' + output_s3 + ' ' + str(workflow.status) +
          ' at ' + str(datetime.now()))
    if (not workflow.status["event"] == "succeeded"):
        return 1
    return 0
Code example #5
File: mtcd.py Project: kkoperskidg/MTCD
def runMTCDmosaic(id, s3_location):
    gbdx = Interface()
    images = getS3location(gbdx, s3_location + "/" + str(id))
    tsk = gbdx.Task('mtcdvrt', images1=images, id=str(id))
    workflow = gbdx.Workflow([tsk])
    workflow.savedata(tsk.outputs.data, location=s3_location + "/image2image/")
    workflow.execute()
    print('MTCD mosaic start', id, images, workflow.id)
    waitForWorkflow(workflow)
    print('MTCD mosaic done ' + images + ' ' + str(id) + ' ' +
          str(workflow.status) + ' at ' + str(datetime.now()))
    if (not workflow.status["event"] == "succeeded"):
        print('MTCD mosaic failed')
        return 1
    return 0
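
getS3location, used throughout these mtcd.py examples, is another helper defined elsewhere in the file. A sketch under the assumption that it expands a path relative to the account's GBDX customer bucket into a full S3 URL (gbdx.s3.info exposes the bucket name and account prefix):

def getS3location(gbdx, location):
    # Build s3://<customer-bucket>/<account-prefix>/<location>
    info = gbdx.s3.info
    return 's3://' + info['bucket'] + '/' + info['prefix'] + '/' + location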
Code example #6
File: mtcd.py Project: kkoperskidg/MTCD
def runMTCD(s3_locationPre, s3_locationPost, out_s3_location):
    gbdx = Interface()
    preimage = getS3location(gbdx, s3_locationPre)
    postimage = getS3location(gbdx, s3_locationPost)
    tsk = gbdx.Task('mtcd', preimage=preimage, postimage=postimage)
    workflow = gbdx.Workflow([tsk])
    workflow.savedata(tsk.outputs.data, location=out_s3_location)
    workflow.execute()
    print('MTCD image pair start ' + s3_locationPre + ' ' + s3_locationPost +
          ' ' + out_s3_location + ' ' + str(workflow.id) + ' at ' +
          str(datetime.now()))
    waitForWorkflow(workflow)
    print('MTCD image pair done ' + s3_locationPre + ' ' + s3_locationPost +
          ' ' + out_s3_location + ' ' + str(workflow.status) + ' at ' +
          str(datetime.now()))
    if (not workflow.status["event"] == "succeeded"):
        return 1
    return 0
Code example #7
from gbdxtools import Interface
gbdx = Interface()

s3_flood_extents = 's3://egolden/floodwatch/intersect_inputs/flood_shp_one/'
s3_footprints = 's3://egolden/floodwatch/intersect_inputs/footprints_shp/'

intersect_task = gbdx.Task('classify_flooded_footprints',
                           footprints_shp=s3_footprints,
                           flood_shp=s3_flood_extents)

workflow = gbdx.Workflow([intersect_task])
workflow.savedata(intersect_task.outputs.footprints_flood_results, location='egolden/intersect_results')
workflow.execute()
print(workflow.id)
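
While the workflow runs, the same Workflow object can be polled: status returns the current state/event pair, and events lists the per-task event history.

print(workflow.status)         # e.g. {'state': 'running', 'event': 'started'}
for event in workflow.events:  # one entry per task state transition
    print(event)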

Code example #8
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/QB"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3"
GE = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/GE/055217125010_01"

#aoptask = gbdx.Task('AOP_Strip_Processor', data=QB, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask = gbdx.Task('AOP_Strip_Processor', data=WV1, bands='PAN', enable_acomp=False, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask = gbdx.Task('AOP_Strip_Processor', data=WV2, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask = gbdx.Task('AOP_Strip_Processor', data=WV3, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask = gbdx.Task('AOP_Strip_Processor', data=GE, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image

isodata = gbdx.Task("ENVI_ISODATAClassification")
isodata.inputs.input_raster = GE
isodata.inputs.file_types = "tif"

sieve = gbdx.Task("ENVI_ClassificationSieving")
sieve.inputs.input_raster = isodata.outputs.output_raster_uri.value
sieve.inputs.file_types = "hdr"

clump = gbdx.Task("ENVI_ClassificationClumping")
clump.inputs.input_raster = sieve.outputs.output_raster_uri.value
clump.inputs.file_types = "hdr"

workflow = gbdx.Workflow([isodata, sieve, clump])
'''
workflow.savedata(
	clump.outputs.output_raster_uri,
	location='Benchmark/clump'
)
'''
workflow.execute()
Code example #9
from gbdxtools import Interface
gbdx = Interface()

# To order the image with DG factory catalog id 10400100143FC900:
order_id = gbdx.ordering.order('10400100143FC900')
print(order_id)

# The order_id is unique to your image order and can be used to track the progress of your order. The ordered image sits in a directory on S3. The output of the following describes where:
status = gbdx.ordering.status(order_id)
# result:
# [{u'acquisition_id': u'10400100143FC900',
#   u'state': u'delivered',
#   u'location': u's3://receiving-dgcs-tdgplatform-com/055546367010_01_003'}]

# test a quick workflow on the item
data = str(status[0]['location'])

aoptask = gbdx.Task("AOP_Strip_Processor",
                    data=data,
                    enable_acomp=True,
                    enable_pansharpen=True)
workflow = gbdx.Workflow([aoptask])

data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003"  # WV02 Image over San Francisco
aoptask = gbdx.Task("AOP_Strip_Processor", data=data)

s3task = gbdx.Task("StageDataToS3")
s3task.inputs.data = aoptask.outputs.data.value
s3task.inputs.destination = "s3://gbd-customer-data"

workflow = gbdx.Workflow([aoptask, s3task])
workflow.execute()

Code example #10
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/QB"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3"
GE = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/GE/055217125010_01"


aop2envi = gbdx.Task("AOP_ENVI_HDR")
aop2envi.inputs.image = GE
envi_ndvi = gbdx.Task("ENVI_SpectralIndices")
envi_ndvi.inputs.input_raster = aop2envi.outputs.output_data.value
envi_ndvi.inputs.file_types = "hdr"
# Specify a string/list of indices to run on the input_raster variable.
envi_ndvi.inputs.index = '["Normalized Difference Vegetation Index", "Simple Ratio"]'

workflow = gbdx.Workflow([aop2envi, envi_ndvi])
'''
workflow.savedata(
	       envi_ndvi.outputs.output_raster_uri,
	          location='Benchmark/spectralindices/QB'
)

workflow.savedata(
	       envi_ndvi.outputs.output_raster_uri,
	          location='Benchmark/spectralindices/WV2'
)

workflow.savedata(
	       envi_ndvi.outputs.output_raster_uri,
	          location='Benchmark/spectralindices/WV3'
)
'''
workflow.execute()
Code example #11
from gbdxtools import Interface
gbdx = Interface()


QB = "s3://receiving-dgcs-tdgplatform-com/054876960040_01_003"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://receiving-dgcs-tdgplatform-com/054876618060_01_003"
WV3 = "s3://receiving-dgcs-tdgplatform-com/055605759010_01_003"
GE = "s3://receiving-dgcs-tdgplatform-com/055217125010_01_003"

#aoptask = gbdx.Task("AOP_Strip_Processor", data=GE, enable_acomp=True, enable_pansharpen=False, enable_dra=False, bands='MS')

aoptask2 = gbdx.Task('AOP_Strip_Processor', data=WV1, bands='PAN', enable_acomp=False, enable_pansharpen=False, enable_dra=False)     # creates orthorectified panchromatic image (no AComp)

# Capture AOP task outputs
#orthoed_output = aoptask.get_output('data')

aop2envi = gbdx.Task("Build_ENVI_HDR")
aop2envi.inputs.image = aoptask2.outputs.data.value

#hdr file used to compute spectral index

#envi_ndvi = gbdx.Task("ENVI_SpectralIndex")
#envi_ndvi.inputs.input_raster = aop2envi.outputs.output_data.value
#envi_ndvi.inputs.file_types = "hdr"
#envi_ndvi.inputs.index = "Normalized Difference Vegetation Index"

#spectral index file used in color slice classification task

envi_color = gbdx.Task('ENVI_ColorSliceClassification')
envi_color.inputs.input_raster = aoptask2.outputs.data.value
Code example #12
from gbdxtools import Interface
import boto3

gbdx = Interface()
sts = boto3.client('sts')

creds = sts.get_session_token()['Credentials']

ingest = gbdx.Task(
    'ingest-s3-data',
    data=
    's3://viper-projects/open_data/intermediate/PreprocessImage/1030010069650A00/059441867010_01/',
    aws_access_key_id=creds['AccessKeyId'],
    aws_secret_access_key=creds['SecretAccessKey'],
    aws_session_token=creds['SessionToken'])

cog = gbdx.Task(
    'CloudOptimizedGeoTIFF:2.0.1',
    data=ingest.outputs.data.value,
)

save = gbdx.Task(
    'SaveToS3',
    data=cog.outputs.data.value,
    destination='s3://jduckworth/cog-tests/1030010069650A00',
    access_key_id=creds['AccessKeyId'],
    secret_key=creds['SecretAccessKey'],
    session_token=creds['SessionToken'],
)

workflow = gbdx.Workflow([ingest, cog, save])
workflow.execute()
Code example #13
# First we'll run atmospheric compensation on Landsat8 data
from gbdxtools import Interface
gbdx = Interface()

acomp = gbdx.Task('AComp',
                  data='s3://landsat-pds/L8/033/032/LC80330322015035LGN00')

# Now we'll save the result to our own S3 bucket.  First we need to generate temporary AWS credentials
# (this assumes you have an AWS account and your IAM credentials are appropriately accessible via boto)
import boto3
client = boto3.client('sts')
response = client.get_session_token(DurationSeconds=86400)
access_key_id = response['Credentials']['AccessKeyId']
secret_key = response['Credentials']['SecretAccessKey']
session_token = response['Credentials']['SessionToken']

# Save the data to your s3 bucket using the SaveToS3 task:
savetask = gbdx.Task('SaveToS3')
savetask.inputs.data = acomp.outputs.data.value
savetask.inputs.destination = "s3://your-bucket/your-path/"
savetask.inputs.access_key_id = access_key_id
savetask.inputs.secret_key = secret_key
savetask.inputs.session_token = session_token

workflow = gbdx.Workflow([acomp, savetask])
workflow.execute()
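
Once the workflow succeeds, the staged output can be pulled back down with plain boto3; the bucket and key here are placeholders for wherever SaveToS3 wrote the data:

import boto3

s3 = boto3.client('s3')
s3.download_file('your-bucket', 'your-path/acomp-output.tif', 'acomp-output.tif')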
Code example #14
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/QB"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3"
GE = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/GE/055217125010_01"

envi_task = gbdx.Task("ENVI_RXAnomalyDetection")
envi_task.inputs.file_types = 'til'
envi_task.inputs.kernel_size = '3'
envi_task.inputs.input_raster = WV3

workflow = gbdx.Workflow([envi_task])
workflow.savedata(envi_task.outputs.task_meta_data,
                  location='envi_task_output')
workflow.savedata(envi_task.outputs.output_raster_uri,
                  location='envi_task_output')
'''
workflow.savedata(
  envi_task.outputs.task_meta_data,
    location='Benchmark/RX/QB'
)

workflow.savedata(
  envi_task.outputs.task_meta_data,
    location='Benchmark/RX/WV1'
)
'''
workflow.execute()
Code example #15
File: ENVI_RPCO.py Project: beevor/docs
from gbdxtools import Interface
gbdx = Interface()
image = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/RPCOrtho/image1"
dem = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/RPCOrtho/DEM/image1"


envi_RPCO = gbdx.Task("ENVI_RPCOrthorectification")
envi_RPCO.inputs.input_raster_metadata = '{"sensor type": "IKONOS"}'
envi_RPCO.inputs.input_raster_band_grouping = 'multispectral'
envi_RPCO.inputs.input_raster = image
envi_RPCO.inputs.dem_raster = dem



workflow = gbdx.Workflow([envi_RPCO])

workflow.savedata(
    envi_RPCO.outputs.output_raster_uri,
    location='Benchmark/ENVI_RPCO/results'
)

workflow.execute()
Code example #16
from gbdxtools.simple_answerfactory import Recipe, RecipeParameter, Project, RecipeConfig
from gbdxtools import Interface
gbdx = Interface()

## the workflow that must be defined in order to specify a recipe
aop = gbdx.Task('AOP_Strip_Processor')
aop.inputs.ortho_interpolation_type = 'Bilinear'
aop.inputs.bands = 'PAN+MS'
aop.inputs.ortho_epsg = 'UTM'
aop.inputs.enable_acomp = 'true'
aop.inputs.enable_pansharpen = 'true'
aop.inputs.enable_dra = 'true'
aop.inputs.ortho_pixel_size = '0.5'

# Answerfactory will automatically prepend an auto-ordering task and replace
# {raster_path} with the actual s3 path to the raster data
aop.inputs.data = '{raster_path}'

# remove xml files (causes a bug in skynet)
xmlfix = gbdx.Task('gdal-cli-multiplex')
xmlfix.inputs.data = aop.outputs.data.value
xmlfix.inputs.command = "find $indir/data/ -name '*XML' -type f -delete; mkdir -p $outdir; cp -R $indir/data/ $outdir/"

skynet = gbdx.Task('openskynet:0.0.10')
skynet.inputs.data = xmlfix.outputs.data.value
# AnswerFactory auto populates {model_location_s3} with the s3 location of the model referred to in
# the recipe property 'model_type'.  This model must be previously registered with the model catalog service.
# AF searches the model catalog for the closest model with the specified type to the input acquisition
skynet.inputs.model = '{model_location_s3}'
skynet.inputs.log_level = 'trace'
Code example #17
from gbdxtools import Interface

gbdx = Interface()

# set the output location (hardcoded here; it could come from a command-line arg)
out_data_loc = "dloomis/lulc_wf_test"

# set the input data location.  This could also be pulled from a catalog API response using a catalog_id
data = "s3://receiving-dgcs-tdgplatform-com/055186940010_01_003/"

# build the task used in the workflow
aoptask = gbdx.Task("AOP_Strip_Processor",
                    data=data,
                    enable_acomp=True,
                    enable_pansharpen=False,
                    enable_dra=False,
                    bands='MS')
pp_task = gbdx.Task(
    "ProtogenPrep", raster=aoptask.outputs.data.value
)  # ProtogenPrep task is used to get AOP output into proper format for protogen task
prot_lulc = gbdx.Task("protogenV2LULC", raster=pp_task.outputs.data.value)
# build the workflow ( AOP -> ProtogenPrep -> protogenV2LULC )
workflow = gbdx.Workflow([aoptask, pp_task, prot_lulc])
workflow.savedata(prot_lulc.outputs.data, location=out_data_loc)

# optional: print workflow tasks, to check the json
print()
print(aoptask.generate_task_workflow_json())
print()
print(pp_task.generate_task_workflow_json())

workflow.execute()
Code example #18
File: __init__.py Project: jbants/gbdxrun
from gbdxtools import Interface
import os
from gbdxrun.local_task import LocalTask
from gbdxrun.local_workflow import LocalWorkflow

HOST = os.environ.get('GBDXTOOLS_HOST', None)
CONFIG = os.environ.get('GBDXTOOLS_PROFILE', None)

config_kwargs = {}
if HOST:
    config_kwargs['host'] = HOST
elif CONFIG:
    config_kwargs['config_file'] = CONFIG

gbdx = Interface(**config_kwargs)

gbdx.Task = LocalTask
gbdx.Workflow = LocalWorkflow


class LocalWorkflowError(Exception):
    pass
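
With this module in place, local runs keep the gbdxtools calling conventions; a minimal sketch (the task name and paths are illustrative, and LocalTask/LocalWorkflow are assumed to mirror the Task/Workflow API):

from gbdxrun import gbdx

task = gbdx.Task('my-registered-task')     # hypothetical task name
task.inputs.data = '/local/path/to/input'  # local paths instead of S3
workflow = gbdx.Workflow([task])
workflow.execute()                         # runs the task's container locally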
Code example #19
# Run atmospheric compensation on Landsat8 data
from gbdxtools import Interface
gbdx = Interface()

acomp = gbdx.Task('AComp_New', data='s3://landsat-pds/L8/033/032/LC80330322015035LGN00')
workflow = gbdx.Workflow([acomp])
workflow.savedata(acomp.outputs.data, location='acomp_output_folder')
workflow.execute()
Code example #20
from gbdxtools import Interface
gbdx = Interface()

test = "s3://receiving-dgcs-tdgplatform-com/055777190010_01_003"
QB = "s3://receiving-dgcs-tdgplatform-com/054876960040_01_003"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://receiving-dgcs-tdgplatform-com/054876618060_01_003"
WV3 = "s3://receiving-dgcs-tdgplatform-com/055605759010_01_003"
GE = "s3://receiving-dgcs-tdgplatform-com/055217125010_01_003"

#aoptask1 = gbdx.Task('AOP_Strip_Processor', data=test, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
aoptask2 = gbdx.Task('AOP_Strip_Processor',
                     data=WV1,
                     bands='PAN',
                     enable_acomp=False,
                     enable_pansharpen=False,
                     enable_dra=False)  # creates orthorectified panchromatic image (no AComp)
#aoptask3 = gbdx.Task('AOP_Strip_Processor', data=WV2, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask4 = gbdx.Task('AOP_Strip_Processor', data=WV3, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask5 = gbdx.Task('AOP_Strip_Processor', data=GE, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image

envitask = gbdx.Task("ENVI_ISODATAClassification")
envitask.inputs.file_types = 'tif'
envitask.inputs.input_raster = aoptask2.outputs.data.value
envitask.outputs.output_raster = "ENVI"

shptask = gbdx.Task("ENVI_ClassificationToShapefile")
shptask.inputs.input_raster = envitask.outputs.output_raster_uri.value
shptask.inputs.file_types = "hdr"

workflow = gbdx.Workflow([aoptask2, envitask, shptask])
workflow.execute()
Code example #21
File: AOP_Runtime_data.py Project: spgriffin/docs
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://receiving-dgcs-tdgplatform-com/054876960040_01_003"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://receiving-dgcs-tdgplatform-com/054876618060_01_003"
WV3 = "s3://receiving-dgcs-tdgplatform-com/055605759010_01_003"
GE = "s3://receiving-dgcs-tdgplatform-com/055217125010_01_003"

aoptask1 = gbdx.Task('AOP_Strip_Processor', data=QB, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask2 = gbdx.Task('AOP_Strip_Processor', data=WV1, bands='PAN', enable_acomp=False, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask3 = gbdx.Task('AOP_Strip_Processor', data=WV2, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask4 = gbdx.Task('AOP_Strip_Processor', data=WV3, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image
#aoptask5 = gbdx.Task('AOP_Strip_Processor', data=GE, bands='MS', enable_acomp=True, enable_pansharpen=False, enable_dra=False)     # creates acomp'd multispectral image



workflow = gbdx.Workflow([aoptask1])

workflow.savedata(
    aoptask1.outputs.data,
        location='Benchmark/QB'
)
'''
workflow.savedata(
    aoptask2.outputs.data,
        location='Benchmark/WV1'
)

workflow.savedata(
    aoptask3.outputs.data,
        location='Benchmark/WV2'
)
'''
workflow.execute()
Code example #22
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/QB"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3"
GE = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/GE/055217125010_01"


# Capture AOP task outputs
#orthoed_output = aoptask.get_output('data')

task = gbdx.Task("ENVI_ImageThresholdToROI")
task.inputs.input_raster=WV1
task.inputs.file_types = "tif"
task.inputs.roi_name = "[\"Water\"]"
task.inputs.roi_color = "[[0,255,0]]"
task.inputs.threshold = "[138]"
task.inputs.output_roi_uri_filename = "roi.xml"

workflow = gbdx.Workflow([ task ])
'''
workflow.savedata(
    task.outputs.output_roi_uri,
        location='Benchmark/ImgToROI/QB'
)
'''
workflow.savedata(
    task.outputs.output_roi_uri,
        location='Benchmark/ImgToROI/WV1'
)
workflow.execute()
Code example #23
                    # and a few extra lines than it needs in the displayed example.
                    # This code removes the additional tabs and lines for the code that will be displayed in the markdown.
                    i['%s_text' % j] = i['%s_text' % j].replace(
                        '\n        ', '\n')
                    i['%s_text' % j] = i['%s_text' % j].replace('\n\n', '\n')

                # If the test failed, don't load the code into the docs. Load the default failed message.
                else:

                    i['%s_text' % j] = fail_string

    # For each task that is known to exist on GBDx already, hit the API and get the task details
    for i in list_of_tasks:
        if i['knowntask'] is True:
            # retrieve task info and store it in the task data object
            task = gbdx.Task(i['name'])
            i['description'] = task.definition['description']
            i['input_ports'] = task.input_ports
            i['output_ports'] = task.output_ports

        list_of_steps = {'create_md': True, 'run_tests': True}

    # Now that we have all of the information we need, we can write it to the markdown file for each task in the list.
    for i in list_of_tasks:
        # We don't create markdowns for tasks that are not on GBDx
        if i['knowntask'] is True:
            # If the markdown file already exists, read it in
            if i['markdown'] is True:
                with open(i['markdown_file_name']) as f:
                    s = f.read()
            # If not, we start with the template
Code example #24
from gbdxtools import Interface

gbdx = Interface(
)  # Instantiate gbdxtools, which logs you in using your .config file

s3path1 = 's3://receiving-dgcs-tdgplatform-com/055364007010_01_003'
aoptask1 = gbdx.Task("AOP_Strip_Processor",
                     data=s3path1,
                     enable_acomp=True,
                     enable_pansharpen=False,
                     enable_dra=False,
                     bands='MS')
s3task1 = gbdx.Task("StageDataToS3")
s3task1.inputs.data = aoptask1.outputs.data.value
s3task1.inputs.destination = "s3://change_detection/test_job/Steps/acomp_fastortho_step-pre_image_task/Output"

s3path2 = 's3://receiving-dgcs-tdgplatform-com/055364005010_01_003'
aoptask2 = gbdx.Task("AOP_Strip_Processor",
                     data=s3path2,
                     enable_acomp=True,
                     enable_pansharpen=False,
                     enable_dra=False,
                     bands='MS')
s3task2 = gbdx.Task("StageDataToS3")
s3task2.inputs.data = aoptask2.outputs.data.value
s3task2.inputs.destination = "s3://change_detection/test_job/Steps/acomp_fastortho_step-post_image_task/Output"

cdtask = gbdx.Task("change_detection")
cdtask.inputs.pre_image = aoptask1.outputs.data.value
cdtask.inputs.post_image = aoptask2.outputs.data.value

workflow = gbdx.Workflow([aoptask1, s3task1, aoptask2, s3task2, cdtask])
workflow.execute()
Code example #25
File: mtcd.py Project: kkoperskidg/MTCD
def processChangeSet(idsets,
                     s3_location,
                     clip=None,
                     deleteIntermediateFromS3=False):
    start_time = time.time()
    ids = []
    if (len(idsets) < 4 or len(idsets) > 6):
        print("FATAL number of idsets has to be 4, 5, or 6. it is " +
              str(len(idsets)))
        return
    for idset in idsets:
        for catid in idset:
            ids.append(catid)

    print("AOP, Topocorrection, Cloud punching")
    threads = []
    id = 0
    for idset in idsets:
        id += 1
        for catID in idset:
            if (catID is not None and len(catID) > 3):
                if catID.find(' ') > 0:
                    catID = catID[0:catID.index(' ')]
                thread = startChangePrepThread(catID,
                                               s3_location + "/" + str(id),
                                               clip)
                threads.append(thread)
                time.sleep(2)
    nThreads = len(threads)
    i = 0
    for thread in threads:
        ret = thread.join()
        i += 1
        print(str(i) + " out of " + str(nThreads) + " threads done")
        if (ret != 0):
            print("ChangePrep error", ret)
            return 1
    print('AOP/Topo/CloudDet done. Elapsed time: {} min'.format(
        round((time.time() - start_time) / 60)))

    print("mosaicking")
    id = 0
    threads = []
    if (len(idsets) == 4):
        thread = startMTCDmosaicThread(-2, s3_location)
    else:
        thread = startMTCDmosaicThread(-3, s3_location)
    threads.append(thread)
    time.sleep(2)

    for idset in idsets:
        id += 1
        thread = startMTCDmosaicThread(id, s3_location)
        threads.append(thread)
        time.sleep(2)
    nThreads = len(threads)
    i = 0
    for thread in threads:
        ret = thread.join()
        i += 1
        print(
            str(i) + " out of " + str(nThreads) + " threads done. Return ",
            ret)
        if (ret != 0):
            print("MTCDmosaic error", ret)
            return 1
    print('mtcdvrt done. Elapsed time: {} min'.format(
        round((time.time() - start_time) / 60)))

    threads = []
    id = 0
    for idset in idsets:
        id += 1
        if (deleteIntermediateFromS3):
            deleteFromS3(s3_location + "/" + str(id) + "/")
        fileName = str(id) + "_warped.tif"
        thread = startimage2imageThread(
            s3_location + "/image2image/pre.tif",
            s3_location + "/image2image/" + str(id) + ".tif",
            s3_location + "/image2imageFinal/" + str(id),
            clip=clip,
            fileName=fileName)
        threads.append(thread)
        time.sleep(2)
    print("Running image2image alignment")
    nThreads = len(threads)
    i = 0
    for thread in threads:
        ret = thread.join()
        i += 1
        print(
            str(i) + " out of " + str(nThreads) + " threads done. Return ",
            ret)
        if (ret != 0):
            print("image2image error", ret)
            return 1

    print('Image2image done. Elapsed time: {} min'.format(
        round((time.time() - start_time) / 60)))
    gbdx = Interface()
    watertsk = gbdx.Task('kk-watermask')
    watertsk.inputs.image = getS3location(gbdx,
                                          s3_location + "/image2imageFinal/1")
    waterwfl = gbdx.Workflow([watertsk])
    waterwfl.savedata(watertsk.outputs.mask,
                      location=s3_location + "/waterMask")
    waterwfl.execute()
    print('Water mask task start ' + str(waterwfl.id) + ' at ' +
          str(datetime.now()))

    nImages = len(idsets)
    threads = []
    for i in range(1, nImages + 1):
        for j in range(i + 1, nImages + 1):
            threads.append(
                startMTCDThread(
                    s3_location + "/image2imageFinal/" + str(i) + "/" +
                    str(i) + "_warped.tif",
                    s3_location + "/image2imageFinal/" + str(j) + "/" +
                    str(j) + "_warped.tif", s3_location + "/changePairs/"))

    nThreads = len(threads)
    i = 0
    for thread in threads:
        ret = thread.join()
        i += 1
        print(
            str(i) + " out of " + str(nThreads) + " threads done. Return ",
            ret)
        if (ret != 0):
            print("MTCDThread error", ret)
            return 1
    print('Image pair change done. Elapsed time: {} min'.format(
        round((time.time() - start_time) / 60)))

    if (deleteIntermediateFromS3):
        deleteFromS3(s3_location + "/image2imageFinal/")
        deleteFromS3(s3_location + "/image2image/")
    waitForWorkflow(waterwfl)
    print('Water mask task done ' + str(waterwfl.status) + ' at ' +
          str(datetime.now()))
    gbdx = Interface()
    changeImages = getS3location(gbdx, s3_location + "/changePairs/")
    maskFolder = getS3location(gbdx, s3_location + "/waterMask")
    tsk = gbdx.Task('mtcd2', image=changeImages, mask=maskFolder)
    wfl = gbdx.Workflow([tsk])
    wfl.savedata(tsk.outputs.data, location=s3_location + "/change/")
    wfl.execute()
    print('MTCD time filter start ' + str(wfl.id) + ' at ' +
          str(datetime.now()))
    waitForWorkflow(wfl)
    print('MTCD time filter done ' + str(wfl.status) + ' at ' +
          str(datetime.now()))

    if (deleteIntermediateFromS3):
        deleteFromS3(s3_location + "/changePairs/")
        deleteFromS3(s3_location + "/waterMask/")
    print('MTCD change ALL processes done. Elapsed time: {} min'.format(
        round((time.time() - start_time) / 60)))
    print("All done " + wfl.status['event'])
    return 0
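
processChangeSet leans on startChangePrepThread, startMTCDmosaicThread, startimage2imageThread and startMTCDThread, none of which are shown on this page. Since their join() calls hand back the wrapped function's exit code, which a plain threading.Thread cannot do, they presumably wrap a return-capturing thread along these lines (a sketch, not the project's actual code):

import threading

class ReturnThread(threading.Thread):
    # A thread whose join() returns the target function's return value.
    def __init__(self, target, args=()):
        super(ReturnThread, self).__init__()
        self._target_fn = target
        self._args = args
        self._ret = None

    def run(self):
        self._ret = self._target_fn(*self._args)

    def join(self, timeout=None):
        super(ReturnThread, self).join(timeout)
        return self._ret

def startChangePrepThread(catalog_id, s3_location, clip):
    # Wrap changePrep (code example #3) in a return-capturing thread.
    thread = ReturnThread(target=changePrep, args=(catalog_id, s3_location, clip))
    thread.start()
    return thread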
Code example #26
File: Protogen_runtime.py Project: spgriffin/docs
# Input data
from gbdxtools import Interface
gbdx = Interface()

WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2/"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3/"

prep2 = gbdx.Task('ProtogenPrep')
# ProtogenPrep will move the aoptask output to root where prototask can find it
prep2.inputs.raster = WV2

prep3 = gbdx.Task('ProtogenPrep')
# ProtogenPrep will move the aoptask output to root where prototask can find it
prep3.inputs.raster = WV3
'''
protoLULC = gbdx.Task("protogenV2LULC", raster=gluetask2.outputs.data.value)
protoPAN = gbdx.Task("protogenV2PANTEX10", raster=gluetask2.outputs.data.value)
protoRAC = gbdx.Task("protogenV2RAC", raster=gluetask2.outputs.data.value)
protoRAS = gbdx.Task("protogenV2RAS", raster=gluetask2.outputs.data.value)
protoRAV = gbdx.Task("protogenV2RAV", raster=gluetask2.outputs.data.value)
protoRAW = gbdx.Task("protogenV2RAW", raster=gluetask2.outputs.data.value)

'''
protoLULC3 = gbdx.Task("protogenV2LULC", raster=prep3.outputs.data.value)
protoPAN3 = gbdx.Task("protogenV2PANTEX10", raster=prep3.outputs.data.value)
protoRAC3 = gbdx.Task("protogenV2RAC", raster=prep3.outputs.data.value)
protoRAS3 = gbdx.Task("protogenV2RAS", raster=prep3.outputs.data.value)
protoRAV3 = gbdx.Task("protogenV2RAV", raster=prep3.outputs.data.value)
protoRAW3 = gbdx.Task("protogenV2RAW", raster=prep3.outputs.data.value)

workflow = gbdx.Workflow([prep2, prep3, protoLULC3, protoPAN3,
                          protoRAC3, protoRAS3, protoRAV3, protoRAW3])
workflow.execute()
Code example #27
from gbdxtools import Interface
gbdx = Interface()

QB = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/QB"
WV1 = "s3://receiving-dgcs-tdgplatform-com/054876516120_01_003"
WV2 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV2"
WV3 = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/WV3"
GE = "s3://gbd-customer-data/7d8cfdb6-13ee-4a2a-bf7e-0aff4795d927/Benchmark/GE/055217125010_01"


aop2envi = gbdx.Task("AOP_ENVI_HDR")
#aop2envi.inputs.image = QB
#aop2envi.inputs.image = WV2
#aop2envi.inputs.image = WV3
aop2envi.inputs.image = GE

envi_query = gbdx.Task("ENVI_QuerySpectralIndices")
envi_query.inputs.input_raster = aop2envi.outputs.output_data.value
envi_query.inputs.file_types = "hdr"

workflow = gbdx.Workflow([aop2envi, envi_query])

'''
workflow.savedata(
  envi_query.outputs.task_meta_data,
    location='Benchmark/QSI/QB'
)

workflow.savedata(
  envi_query.outputs.task_meta_data,
    location='Benchmark/QSI/WV2'
)
'''
workflow.execute()
Code example #28
File: batch_workflows.py Project: yxqd/gbdxtools
from gbdxtools import Interface
"""
Example using multiple inputs with 1 submission
"""

gbdx = Interface()

# note there are 2 inputs
data = [
    "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003",
    "http://test-tdgplatform-com/data/QB02/LV1B/053702625010_01_004/053702625010_01/053702625010_01_P013_MUL"
]

aoptask = gbdx.Task("AOP_Strip_Processor",
                    data=data,
                    enable_acomp=True,
                    enable_pansharpen=True)

workflow = gbdx.Workflow([aoptask])

workflow.savedata(aoptask.outputs.data, location='some_folder')

batch_workflow_id = workflow.execute()
Code example #29
from gbdxtools import Interface
gbdx = Interface()

scene_id = '10300100612DE400'  # big strip! cloudy
scene_id = '103001002B3BEF00'  # january, 2014 (lakewood)
scene_id1 = '103001001AAB3D00'  #8-5-2012 big strip
order_id = gbdx.ordering.order(scene_id1)
print(order_id)

# The order_id is unique to your image order and can be used to track the progress of your order. The ordered image sits in a directory on S3. The output of the following describes where:
status = gbdx.ordering.status(order_id)

# Make sure DRA is disabled if you are processing both the PAN+MS files
#Edit the following line(s) to reflect specific folder(s) for the output file (example location provided)
# data = str(status[0]['location'])
data = str(gbdx.ordering.status(order_id)[0]['location'])
aoptask = gbdx.Task("AOP_Strip_Processor",
                    data=data,
                    enable_acomp=True,
                    bands="MS",
                    enable_pansharpen=False,
                    enable_dra=False)

# Capture AOP task outputs
log = aoptask.get_output('log')
orthoed_output = aoptask.get_output('data')

# Stage AOP output for the Protogen Task using the Protogen Prep Task
pp_task = gbdx.Task("ProtogenPrep", raster=aoptask.outputs.data.value)

# Setup ProtogenV2LULC Task
prot_lulc = gbdx.Task("protogenV2LULC", raster=pp_task.outputs.data.value)

# Run Combined Workflow
workflow = gbdx.Workflow([aoptask, pp_task, prot_lulc])
workflow.execute()
Code example #30
#### RUNNING WORKFLOWS

workflowList = [
]  # We will keep track of our workflows by putting workflow IDs in a list as they are created

for catID in catalog_ids:  # Loop through all of the catIDs we found earlier to find their s3 location. This will be the input for the first task.
    s3path = gbdx.catalog.get_data_location(catalog_id=catID)
    print(s3path)

    if s3path is not None:
        s3path = '/'.join(s3path.split('/')[:-1]) + '/'
        print(s3path)

        aoptask = gbdx.Task("AOP_Strip_Processor",
                            data=s3path,
                            enable_acomp=True,
                            enable_pansharpen=False,
                            enable_dra=False,
                            bands='MS')
        s3task = gbdx.Task("StageDataToS3")
        s3task.inputs.data = aoptask.outputs.data.value
        s3task.inputs.destination = "s3://molly-g/seattleAOP/"

        pp_task = gbdx.Task(
            "ProtogenPrep", raster=aoptask.outputs.data.value
        )  # ProtogenPrep task is used to get AOP output into proper format for protogen task
        prot_ubfp = gbdx.Task("protogenV2UBFP",
                              raster=pp_task.outputs.data.value)
        workflow = gbdx.Workflow([pp_task, aoptask, prot_ubfp, s3task
                                  ])  # Cool tip: these can be in any order!
        workflow.savedata(
            prot_ubfp.outputs.data, location="/molly-g/seattle")
        workflow.execute()
        workflowList.append(workflow.id)
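
Once the loop has submitted everything, the collected ids can be polled until every workflow finishes; this sketch assumes gbdxtools' low-level gbdx.workflow.status(<id>) call, which returns the same state/event dict the Workflow object exposes:

import time

pending = list(workflowList)
while pending:
    for wf_id in list(pending):
        state = gbdx.workflow.status(wf_id)
        print(wf_id, state)
        if state.get('state') == 'complete':
            pending.remove(wf_id)
    if pending:
        time.sleep(60)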