Example #1
def proccess_farm_complete(farm,
                           aws_mode=False,
                           data_bucket_name=DATA_BUCKET_NAME,
                           upload_bucket_name=UPLOAD_BUCKET_NAME,
                           max_results=100):
    if aws_mode:
        import aws_api
        tran_api = aws_api.TransparentApi()
        all_data_sets = aws_api.ALL_DATA_SETS
    else:
        import earth_explorer_api
        tran_api = earth_explorer_api.TransparentApi()
        all_data_sets = earth_explorer_api.ALL_DATA_SETS
    # Assume success until a dataset fails; set before the loop so the
    # flag exists under both API branches.
    all_data_sets_proccesed = True
    for data_set in all_data_sets:
        try:
            proccess_farm(farm,
                          data_set,
                          tran_api,
                          data_bucket_name,
                          upload_bucket_name,
                          max_results=max_results)
        except Exception:
            # A failure on one dataset should not abort the rest.
            all_data_sets_proccesed = False
    return all_data_sets_proccesed
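
A minimal usage sketch for this function, assuming a farm record shaped like the debug entry in Example #5 (an id plus a ring of coordinates); the values below are illustrative only:

farm = {'id': 0,
        'coordinates': [[19.005, 105.005], [19.005, 105.009],
                        [19.009, 105.009]]}

# True only if every dataset in ALL_DATA_SETS processed cleanly.
ok = proccess_farm_complete(farm, aws_mode=False)
if not ok:
    print('at least one dataset failed for farm %d' % farm['id'])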
Example #2
import boto3


def proccess_farm_date_complete(farm,
                                dataset,
                                date,
                                raw_dir_group,
                                aws_mode=False,
                                data_bucket_name=DATA_BUCKET_NAME,
                                upload_bucket_name=UPLOAD_BUCKET_NAME):
    if aws_mode:
        import aws_api
        tran_api = aws_api.TransparentApi()
        all_data_sets = aws_api.ALL_DATA_SETS
    else:
        import earth_explorer_api
        tran_api = earth_explorer_api.TransparentApi()
        all_data_sets = earth_explorer_api.ALL_DATA_SETS

    # AWS credentials; KEY and SECRET_KEY are module-level values loaded
    # from config_file, as in Example #6.
    s3 = boto3.resource('s3',
                        aws_access_key_id=KEY,
                        aws_secret_access_key=SECRET_KEY)
    data_bucket = s3.Bucket(data_bucket_name)
    upload_bucket = s3.Bucket(upload_bucket_name)
    try:
        tran_api.process_raw_dir_groups(farm, dataset, data_bucket,
                                        upload_bucket, date, raw_dir_group)
        return True
    except Exception:
        # Report failure to the caller instead of propagating the error.
        return False
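
A hedged call sketch for this per-date variant; the 'sentinel' dataset name appears in Example #6, while the farm, date, and raw_dir_group values below are placeholders (the listings do not pin down the group structure, so None stands in for it here):

from datetime import datetime

farm = {'id': 0,
        'coordinates': [[19.005, 105.005], [19.005, 105.009],
                        [19.009, 105.009]]}
raw_dir_group = None  # placeholder; normally produced by get_raw_dirs_groups

ok = proccess_farm_date_complete(farm, 'sentinel', datetime(2018, 1, 1),
                                 raw_dir_group)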
Example #3
def get_raw_dirs_groups(farm, data_bucket, max_results=100, aws_mode=False):

    if aws_mode:
        import aws_api
        tran_api = aws_api.TransparentApi()
        all_data_sets = aws_api.ALL_DATA_SETS
    else:
        import earth_explorer_api
        tran_api = earth_explorer_api.TransparentApi()
        all_data_sets = earth_explorer_api.ALL_DATA_SETS

    raw_dirs_groups = {}
    for data_set in all_data_sets:
        raw_dirs_groups[data_set] = tran_api.get_raw_dirs_groups(
            farm['coordinates'],
            data_set,
            data_bucket,
            farm,
            max_results=max_results)

    return raw_dirs_groups
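
The returned mapping feeds the per-date processor from Example #2; a sketch under the assumption that each dataset's groups iterate as (date, raw_dir_group) pairs, which the listings do not spell out (farm and data_bucket are placeholders from the earlier examples):

raw_dirs_groups = get_raw_dirs_groups(farm, data_bucket, max_results=50)
for data_set, groups in raw_dirs_groups.items():
    for date, raw_dir_group in groups:  # assumed pair shape
        proccess_farm_date_complete(farm, data_set, date, raw_dir_group)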
Example #4
import json
import os

import requests


def main(aws_mode=False):

    if aws_mode:
        import aws_api
        tran_api = aws_api.TransparentApi()
        all_data_sets = aws_api.ALL_DATA_SETS
    else:
        import earth_explorer_api
        tran_api = earth_explorer_api.TransparentApi()
        all_data_sets = earth_explorer_api.ALL_DATA_SETS

    farms = requests.get('http://www.ricult.com/api/active_farms').json()

    if not os.path.exists('uploaded_farms.json'):
        uploaded_farms = []
    else:
        with open('uploaded_farms.json', 'r') as infile:
            uploaded_farms = json.load(infile)

    farms = {f['id']: f for f in farms}

    ## update the uploaded_farms dict with new and changed farms
    uploaded_farms = {f['id']: f for f in uploaded_farms}

    for farm_id, current in farms.items():
        # Add farms not seen before, and refresh any whose coordinates
        # changed since the last upload.
        if (farm_id not in uploaded_farms or
                current['coordinates'] != uploaded_farms[farm_id]['coordinates']):
            uploaded_farms[farm_id] = current

    for ii, farm in enumerate(uploaded_farms.values()):
        for data_set in all_data_sets:
            print("data_set:%s farm:%d %d out of %d" %
                  (data_set, farm['id'], ii + 1, len(uploaded_farms)))
            proccess_farm(farm, data_set, tran_api)

    with open('uploaded_farms.json', 'w') as outfile:
        json.dump(list(uploaded_farms.values()), outfile)
Example #5
import json


def debug_main(aws_mode=False):
    if aws_mode:
        import aws_api
        tran_api = aws_api.TransparentApi()
        all_data_sets = aws_api.ALL_DATA_SETS
    else:
        import earth_explorer_api
        tran_api = earth_explorer_api.TransparentApi()
        all_data_sets = earth_explorer_api.ALL_DATA_SETS

    # Single hard-coded farm (a small coordinate triangle) for debugging.
    uploaded_farms = {0: {'id': 0,
                          'coordinates': [[19.005, 105.005], [19.005, 105.009],
                                          [19.009, 105.009]]}}

    for ii, farm in enumerate(uploaded_farms.values()):
        for data_set in all_data_sets:
            print("data_set:%s farm:%d %d out of %d" %
                  (data_set, farm['id'], ii + 1, len(uploaded_farms)))
            proccess_farm(farm, data_set, tran_api)

    with open('uploaded_farms.json', 'w') as outfile:
        json.dump(list(uploaded_farms.values()), outfile)
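
Neither main nor debug_main is wired to an entry point in these listings; a conventional guard (an assumption, not shown in the source) would be:

if __name__ == '__main__':
    # debug_main() exercises one hard-coded farm; main() walks every
    # active farm returned by the API.
    main(aws_mode=False)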
Example #6
from shapely.geometry import Polygon
from shapely.ops import unary_union
import os
import time

import boto3

from multiprocessing import Process
from datetime import datetime, timedelta
import config_file

root_dir = 'temp'
## AWS params
KEY = config_file.KEY
SECRET_KEY = config_file.SECRET_KEY
REGION = config_file.REGION

import earth_explorer_api
tran_api = earth_explorer_api.TransparentApi()
all_data_sets = earth_explorer_api.ALL_DATA_SETS

# Triangle of (lat, lon) vertices defining the query footprint.
polygon = [[14.56, 100.453], [14.6, 100.453], [14.6, 100.457]]

# Query window: the two years up to now.
end_datetime = datetime.now()
start_datetime = end_datetime - timedelta(days=365 * 2)

all_scenes = tran_api.get_all_scenes(polygon=polygon,
                                     dataset='sentinel',
                                     start_datetime=start_datetime,
                                     end_datetime=end_datetime)

for scene in all_scenes:
    # Build a per-scene temp directory path under root_dir.
    temp_dir = root_dir + '/' + scene
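    # Assumed continuation (the listing gives only the path): create the
    # per-scene staging directory before anything is downloaded into it.
    os.makedirs(temp_dir, exist_ok=True)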