Example #1
def shapefile_upload(request, job_id, index_name):
    """
    Controller for uploading shapefiles.
    """
    context = {}
    user = str(request.user).lower()

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))

    # Clear the workspace
    gi_lib.clear_folder(userDir)

    # Create a session
    gsshapy_session = gsshapy_sessionmaker()

    # Get the params
    params = request.POST
    files = request.FILES.getlist('shapefile_files')

    shp_name = ''
    for file in files:
        if file.name.endswith('.shp'):
            shp_name = file.name[:-4]

    # Start Spatial Dataset Engine
    dataset_engine = get_spatial_dataset_engine(name='gsshaindex_geoserver',
                                                app_class=GSSHAIndex)

    # Check to see if Spatial Dataset Engine Exists
    workspace = gi_lib.check_workspace(dataset_engine)

    # Clear the store and create a new feature resource
    # store = gi_lib.clear_store(dataset_engine, user)
    feature_resource = dataset_engine.create_shapefile_resource(
        store_id='gsshaindex:' + user + '-' + shp_name,
        shapefile_upload=files,
        overwrite=True,
        debug=True)

    return redirect(
        reverse('gsshaindex:shapefile_index',
                kwargs={
                    'job_id': job_id,
                    'index_name': index_name,
                    'shapefile_name': shp_name
                }))
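Every controller in these examples starts by clearing a per-user workspace with gi_lib.clear_folder. The helper itself is not shown on this page; a minimal sketch consistent with how it is called (create the folder if missing, otherwise empty it) could look like this — the implementation is an assumption, not the app's actual code:

import os
import shutil

def clear_folder(directory):
    """Create `directory` if it is missing; otherwise delete its contents."""
    if not os.path.exists(directory):
        os.makedirs(directory)
    else:
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            # Assumption: subfolders are removed recursively, files directly
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)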
Example #2
def extract_existing_gssha(request, job_id):
    '''
    This takes the file name and id that were submitted and unzips the files.
    '''
    context = {}
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                     app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Get url for the resource and extract the GSSHA file
    url = job.original_url
    extract_path, unique_dir = gi_lib.extract_zip_from_url(user, url, userDir)

    # Create GSSHAPY Session
    gsshapy_session = gsshapy_sessionmaker()

    # Find the project file
    for root, dirs, files in os.walk(userDir):
        for file in files:
            if file.endswith(".prj"):
                project_name = file
                project_path = os.path.join(root, file)
                read_dir = os.path.dirname(project_path)

    context['job_id'] = job_id

    return redirect(
        reverse('gsshaindex:select_index', kwargs={'job_id': job_id}))
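gi_lib.extract_zip_from_url is called with a user name, a resource URL, and a target directory, and the callers unpack two return values (an extraction path and a unique directory). A plausible sketch under those assumptions — the real helper lives in the app's gi_lib module and may differ:

import os
import uuid
import zipfile
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2

def extract_zip_from_url(user, url, directory):
    """Download the zip at `url` and unpack it under a unique folder.

    `user` is kept only for parity with the signature used above.
    """
    unique_dir = uuid.uuid4().hex
    extract_path = os.path.join(directory, unique_dir)
    os.makedirs(extract_path)
    archive = os.path.join(extract_path, 'model.zip')
    urlretrieve(url, archive)
    with zipfile.ZipFile(archive, 'r') as gssha_zip:
        gssha_zip.extractall(extract_path)
    return extract_path, unique_dir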
Example #3
def shapefile_upload(request, job_id, index_name):
    """
    Controller for uploading shapefiles.
    """
    context = {}
    user = str(request.user).lower()

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))

    # Clear the workspace
    gi_lib.clear_folder(userDir)

    # Create a session
    gsshapy_session = gsshapy_sessionmaker()

    # Get the params
    params = request.POST
    files = request.FILES.getlist('shapefile_files')

    shp_name = ''
    for file in files:
        if file.name.endswith('.shp'):
            shp_name = file.name[:-4]

    # Start Spatial Dataset Engine
    dataset_engine = get_spatial_dataset_engine(name='gsshaindex_geoserver', app_class=GSSHAIndex)

    # Check to see if Spatial Dataset Engine Exists
    workspace = gi_lib.check_workspace(dataset_engine)

    # Clear the store and create a new feature resource
    # store = gi_lib.clear_store(dataset_engine, user)
    feature_resource = dataset_engine.create_shapefile_resource(
        store_id='gsshaindex:' + user + '-' + shp_name,
        shapefile_upload=files,
        overwrite=True,
        debug=True)

    return redirect(
        reverse('gsshaindex:shapefile_index',
                kwargs={
                    'job_id': job_id,
                    'index_name': index_name,
                    'shapefile_name': shp_name
                }))
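The redirect(reverse('gsshaindex:...')) calls resolve URL names that the app registers at startup. In a Tethys app that registration normally lives in the app class; a sketch of what the relevant entries might look like (the URL patterns and controller paths here are assumptions inferred from the reverse() calls, not the app's actual url_maps):

from tethys_sdk.base import TethysAppBase, url_map_maker

class GSSHAIndex(TethysAppBase):
    # ... name, index, icon, root_url, and other metadata omitted ...

    def url_maps(self):
        UrlMap = url_map_maker(self.root_url)
        return (
            UrlMap(name='shapefile_index',
                   url='gsshaindex/{job_id}/{index_name}/{shapefile_name}/shapefile-index',
                   controller='gsshaindex.controllers.shapefile_index'),
            UrlMap(name='select_index',
                   url='gsshaindex/{job_id}/select-index',
                   controller='gsshaindex.controllers.select_index'),
        )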
Example #4
def extract_existing_gssha(request, job_id):
    '''
    This takes the file name and id that were submitted and unzips the files.
    '''
    context = {}
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Get url for the resource and extract the GSSHA file
    url = job.original_url
    extract_path, unique_dir = gi_lib.extract_zip_from_url(user, url, userDir)

    # Create GSSHAPY Session
    gsshapy_session = gsshapy_sessionmaker()

    # Find the project file
    for root, dirs, files in os.walk(userDir):
        for file in files:
            if file.endswith(".prj"):
                project_name = file
                project_path = os.path.join(root, file)
                read_dir = os.path.dirname(project_path)

    context['job_id'] = job_id

    return redirect(reverse('gsshaindex:select_index', kwargs={'job_id':job_id}))
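Several controllers here locate the project file with a nested os.walk loop. Note two quirks of that pattern: it silently keeps the last .prj found, and it leaves project_name unbound if none exists. An illustrative helper (not part of gi_lib) that makes both behaviors explicit:

import os

def find_project_file(directory):
    """Return (file_name, containing_dir) for the first .prj under `directory`.

    Raises IOError when no project file exists instead of leaving
    variables undefined the way the inline loop does.
    """
    for root, dirs, files in os.walk(directory):
        for file_name in files:
            if file_name.endswith('.prj'):
                return file_name, root
    raise IOError('No .prj project file found under {0}'.format(directory))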
Example #5
def results(request, job_id, view_type):
    context = {}

    # Get the user id
    user = str(request.user)

    # Get the job from the database
    session = jobs_sessionmaker()
    job = session.query(Jobs).\
                    filter(Jobs.user_id == user).\
                    filter(Jobs.original_id == job_id).one()

    # Get the run result urls
    result_files = job.result_urls

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    jobDir = os.path.join(userDir, str(job_id))
    fakeDir = os.path.join(userDir, 'fake')
    newResultsPath = os.path.join(jobDir, 'new_results')
    originalResultsPath = os.path.join(jobDir, 'original_results')

    # Check to see if the otl folder for this job is already downloaded and has values and get a handle on them if it does
    if (os.path.exists(jobDir) and os.listdir(newResultsPath) != []
            and os.listdir(originalResultsPath) != []):
        # Get the otl files
        new_otl_file = gi_lib.find_otl(newResultsPath)
        original_otl_file = gi_lib.find_otl(originalResultsPath)
    # If it doesn't, get the otl files
    else:
        # Clear the results folder
        gi_lib.clear_folder(userDir)
        gi_lib.clear_folder(jobDir)
        gi_lib.clear_folder(newResultsPath)
        gi_lib.clear_folder(originalResultsPath)
        # Get the otl files
        new_otl_file = gi_lib.extract_otl(result_files['new'], newResultsPath)
        original_otl_file = gi_lib.extract_otl(result_files['original'],
                                               originalResultsPath)

    # Format the values for display with high charts
    new_values = []
    originalValues = []

    new_values = gi_lib.get_otl_values(new_otl_file, new_values)
    originalValues = gi_lib.get_otl_values(original_otl_file, originalValues)

    # Set up for high charts hydrograph
    highcharts_object = {
            'chart': {
                'type': 'spline'
            },
            'title': {
                'text': 'Comparison Hydrograph'
            },
            'subtitle': {
                'text': 'Display of the two model results'
            },
            'legend': {
                'layout': 'vertical',
                'align': 'right',
                'verticalAlign': 'middle',
                'borderWidth': 0
            },
            'xAxis': {
                'title': {
                    'enabled': True,
                    'text': 'Time (hours)'
                },
                'labels': {
                    'formatter': 'function () { return this.value + " hr"; }'
                }
            },
            'yAxis': {
                'title': {
                    'enabled': True,
                    'text': 'Discharge (cfs)'
                },
                'labels': {
                    'formatter': 'function () { return this.value + " cfs"; }'
                }
            },
            'tooltip': {
                'headerFormat': '{series.name}',
                'pointFormat': '{point.x} hours: {point.y} cfs'
             },
            'series': [{
                'name': job.original_name.replace("_", " "),
                'color': '#0066ff',
                'dashStyle': 'ShortDash',
                'marker' : {'enabled': False},
                'data': originalValues
                },{
                'name': job.new_name.replace("_", " "),
                'marker' : {'enabled': False},
                'color': '#ff6600',
                'data': new_values}
            ]}

    hydrograph = {'highcharts_object': highcharts_object,
                        'width': '500px',
                        'height': '500px'}

    google_map = {'height': '600px',
                  'width': '100%',
                  'reference_kml_action': ('/apps/gsshaindex/' + job_id +
                                           '/get-depth-map/' + view_type)}

    session.close()

    kml_link = ''
    title = ''

    if view_type == 'originalMax':
        title = job.original_name.replace("_", " ") + ' Maximum Depth'
        kml_link = job.originalMax
    elif view_type == 'newMax':
        title = job.new_name.replace("_", " ") + ' Maximum Depth'
        kml_link = job.newMax
    elif view_type == 'newTime':
        title = job.new_name.replace("_", " ") + ' Time Series'
        kml_link = job.newTime
    elif view_type == 'originalTime':
        title = job.original_name.replace("_", " ") + ' Time Series'
        kml_link = job.originalTime
    else:
        kml_link = ''
        title = ''

    context['map_title'] = title
    context['kml_link'] = kml_link
    context['hydrograph'] = hydrograph
    context['google_map'] = google_map
    context['map_type'] = view_type
    context['original_name'] = job.original_name.replace("_", " ")
    context['new_name'] = job.new_name.replace("_", " ")
    context['original_file'] = job.result_urls['original']
    context['new_file'] = job.result_urls['new']
    context['job_id'] = job_id

    return render(request, 'gsshaindex/results.html', context)
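gi_lib.get_otl_values fills the series arrays consumed by the Highcharts object above. GSSHA outlet (.otl) files are plain text with one time/discharge pair per line, so a plausible sketch of the helper is (the exact column handling is an assumption):

def get_otl_values(otl_path, values):
    """Append [time, discharge] pairs from a GSSHA .otl file to `values`."""
    with open(otl_path) as otl_file:
        for line in otl_file:
            parts = line.split()
            if len(parts) >= 2:
                values.append([float(parts[0]), float(parts[1])])
    return values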
Example #6
def fly(request, job_id):
    context = {}

    # Get the user id
    user = str(request.user)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    resultsPath = os.path.join(userDir, 'results')
    originalFileRunPath = os.path.join(userDir, "preRun")
    writeFile = os.path.join(userDir, "writeFile")
    zipPath = os.path.join(userDir, "zipPath")

    #Create session
    gsshapy_session = gsshapy_sessionmaker()

    # Clear the results folder
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(resultsPath)
    gi_lib.clear_folder(originalFileRunPath)
    gi_lib.clear_folder(writeFile)

    # Get the jobs from the database
    session = jobs_sessionmaker()
    job = session.query(Jobs).\
                    filter(Jobs.user_id == user).\
                    filter(Jobs.original_id == job_id).one()

    # Get the urls and names for the analysis
    run_urls = job.run_urls

    arguments = {
        'new': {
            'url': run_urls['new']['url'],
            'name': run_urls['new']['name']
        },
        'original': {
            'url': run_urls['original']['url'],
            'name': run_urls['original']['name']
        }
    }

    # Set up for fly GSSHA
    job.status = "processing"
    session.commit()

    status = 'complete'

    results = []
    # results_urls = []
    results_urls = {}
    count = 0

    GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

    # Try running the web service
    # try:
    for k in arguments:
        url = str(arguments[k]['url'])

        if k == 'original' and job.original_certification == "Certified":
            results_urls['original'] = url
            count += 1
            continue
        elif k == 'original' and job.original_certification == "Missing gfl":
            # Need to download from url, add gfl, zip, send to ckan, run, and save the url
            downloaded_project = gi_lib.extract_zip_from_url(user, url, originalFileRunPath)
            # Create an empty Project File Object
            # Find the project file
            for root, dirs, files in os.walk(originalFileRunPath):
                for file in files:
                    if file.endswith(".prj"):
                        project_name = file
                        project_path = os.path.join(root, file)
                        read_dir = os.path.dirname(project_path)
            project = ProjectFile()
            project.readInput(directory=read_dir,
                              projectFileName=project_name,
                              session=gsshapy_session,
                              spatial=True)

            if project.getCard("FLOOD_GRID") == None:
                max_depth_card = ProjectCard("FLOOD_GRID", '"{0}.gfl"'.format(project_name[:-4]))
                project_cards = project.projectCards.append(max_depth_card)
                gsshapy_session.commit()

            # Need to format so that it will work for the file I just did
            # Get all the project files
            project.writeInput(session=gsshapy_session, directory=writeFile, name=project_name[:-4])

            # Make a list of the project files
            writeFile_list = os.listdir(writeFile)

            # Add each project file to the zip folder
            with zipfile.ZipFile(zipPath, "w") as gssha_zip:
                for item in writeFile_list:
                    abs_path = os.path.join(writeFile, item)
                    archive_path = os.path.join(project_name, item)
                    gssha_zip.write(abs_path, archive_path)

            GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

            description = job.original_description + " with a gfl added"
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")

            # Add the zipped GSSHA file to the public ckan
            results, success = gi_lib.add_zip_GSSHA(
                GSSHA_dataset, zipPath, CKAN_engine,
                project_name[:-4] + " with gfl", description, pretty_date,
                user)

            job.original_url = results['url']

            url = job.original_url

            resultsFile = os.path.join(
                resultsPath, arguments[k]['name'].replace(" ", "_") +
                datetime.now().strftime('%Y%d%m%H%M%S'))

            gi_lib.flyGssha(str(url), resultsFile)

            # Push file to ckan dataset
            resource_name = ' '.join((arguments[k]['name'], '-Run',
                                      datetime.now().strftime('%b %d %y %H:%M:%S')))
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")
            result, success = gi_lib.add_zip_GSSHA(GSSHA_dataset, resultsFile,
                                                   CKAN_engine, resource_name,
                                                   "", pretty_date, user,
                                                   certification="Certified")

            # Save the new url as the original_url and run

            job.original_certification = "Certified"

            # Publish link to table
            results_urls['original'] = result['url']
            count += 1
        else:
            resultsFile = os.path.join(
                resultsPath, arguments[k]['name'].replace(" ", "_") +
                datetime.now().strftime('%Y%d%m%H%M%S'))
            gi_lib.flyGssha(url, resultsFile)

            # Push file to ckan dataset
            resource_name = ' '.join((arguments[k]['name'], '-Run',
                                      datetime.now().strftime('%b %d %y %H:%M:%S')))
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")
            result, success = gi_lib.add_zip_GSSHA(GSSHA_dataset, resultsFile,
                                                   CKAN_engine, resource_name,
                                                   "", pretty_date, user,
                                                   certification="Certified")

            # Publish link to table
            if k == 'original':
                results_urls['original'] = result['url']
                job.original_certification = "Certified"
            else:
                results_urls['new'] = result['url']
            count += 1

    if count == 2:
        print results_urls
    else:
        status = 'failed'

    # except:
    #     status = 'failed'

    job.status = status
    job.result_urls = results_urls
    session.commit()
    session.close()
    gsshapy_session.commit()
    gsshapy_session.close()

    return redirect(reverse('gsshaindex:status'))
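The write-then-zip pattern used here reappears verbatim in zip_file below. Pulling it into a helper would keep both call sites identical; this is an illustrative refactoring, not existing gi_lib code:

import os
import zipfile

def zip_directory(source_dir, zip_path, archive_root):
    """Write every file in `source_dir` into `zip_path`, nested under `archive_root`."""
    with zipfile.ZipFile(zip_path, 'w') as gssha_zip:
        for item in os.listdir(source_dir):
            gssha_zip.write(os.path.join(source_dir, item),
                            os.path.join(archive_root, item))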
Example #7
def zip_file(request, job_id):
    '''
    This zips up the GSSHA files in preparation of their being run
    '''
    context = {}

    # Get the user id and the pending job
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)

    project_file_id = job.new_model_id

    # Get the name and description from the submission
    params = request.POST
    not_clean_name = params['new_name']
    new_description = params['new_description']

    # Reformat the name by removing bad characters
    # bad_char = "',.<>()[]{}=+-/\"|:;\\^?!~`@#$%&* "
    bad_char = "',.<>[]{}=+-/\"|:;\\^?!~`@#$%&*"
    new_name = not_clean_name
    for char in bad_char:
        new_name = new_name.replace(char, "_")

    #Create session
    gsshapy_session = gsshapy_sessionmaker()

    # Get project from the database
    projectFileAll = gsshapy_session.query(ProjectFile).get(project_file_id)

    # Create name for files
    project_name = projectFileAll.name
    if project_name.endswith('.prj'):
        project_name = project_name[:-4]
    pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")

    # Set depth map
    if projectFileAll.getCard("FLOOD_GRID") is None:
        max_depth_card = ProjectCard("FLOOD_GRID", '"{0}.gfl"'.format(new_name))
        project_cards = projectFileAll.projectCards.append(max_depth_card)
        gsshapy_session.commit()
        job.original_certification = "Missing gfl"

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    newFileDir = os.path.join(userDir, 'newFile')
    writeFile = os.path.join(newFileDir, new_name)
    zipPath = os.path.join(newFileDir, new_name + "_zip")

    # Clear workspace folders
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(newFileDir)
    gi_lib.clear_folder(writeFile)

    # Get all the project files
    projectFileAll.writeInput(session=gsshapy_session, directory=writeFile, name=new_name)

    # Make a list of the project files
    writeFile_list = os.listdir(writeFile)

    # Add each project file to the zip folder
    with zipfile.ZipFile(zipPath, "w") as gssha_zip:
        for item in writeFile_list:
            abs_path = os.path.join(writeFile, item)
            archive_path = os.path.join(new_name, item)
            gssha_zip.write(abs_path, archive_path)

    GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

    # Add the zipped GSSHA file to the public ckan
    results, success = gi_lib.add_zip_GSSHA(GSSHA_dataset, zipPath,
                                            CKAN_engine, new_name,
                                            new_description, pretty_date, user)

    # If the file zips correctly, store its information in the database
    # (guarding the whole block avoids a NameError when the upload fails)
    if success == True:
        new_url = results['url']
        new_name = results['name']
        original_url = job.original_url
        original_name = job.original_name

        model_data = {'original': {'url': original_url, 'name': original_name},
                      'new': {'url': new_url, 'name': new_name}}
        job.run_urls = model_data
        job.new_name = new_name
        job.status = "ready to run"
    session.commit()

    return redirect(reverse('gsshaindex:status'))
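The character-stripping loop above has to reassign the partially cleaned name on every pass; replacing characters in not_clean_name each time would keep only the last substitution. As a small reusable helper:

def sanitize_name(name, bad_chars="',.<>[]{}=+-/\"|:;\\^?!~`@#$%&*"):
    """Replace each disallowed character in `name` with an underscore."""
    for char in bad_chars:
        name = name.replace(char, '_')
    return name

# Example: sanitize_name("run #1: test") -> 'run _1_ test'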
Example #8
def extract_gssha(request, job_id):
    '''
    This takes the file name and id that were submitted and unzips the files, finds the index maps, and creates kmls.
    '''
    context = {}
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Get url for the resource and extract the GSSHA file
    url = job.original_url
    extract_path, unique_dir = gi_lib.extract_zip_from_url(user, url, userDir)

    # Create GSSHAPY Session
    gsshapy_session = gsshapy_sessionmaker()

    # Find the project file
    for root, dirs, files in os.walk(userDir):
        for file in files:
            if file.endswith(".prj"):
                project_name = file
                project_path = os.path.join(root, file)
                read_dir = os.path.dirname(project_path)

    # Create an empty Project File Object
    project = ProjectFile()

    project.readInput(directory=read_dir,
                      projectFileName=project_name,
                      session=gsshapy_session,
                      spatial=True)

    # Create empty dictionary to hold the kmls from this session
    current_kmls = {}

    # Store model information
    job.new_model_name = project.name
    job.new_model_id = project.id
    job.created = datetime.now()

    # Get index maps
    index_list = gsshapy_session.query(IndexMap).filter(
        IndexMap.mapTableFile == project.mapTableFile).all()

    # Loop through the index
    for current_index in index_list:
        # Create kml file name and path
        current_time = time.strftime("%Y%m%dT%H%M%S")
        resource_name = current_index.name + "_" + str(user) + "_" + current_time
        kml_ext = resource_name + '.kml'
        clusterFile = os.path.join(indexMapDir, kml_ext)

        # Generate color ramp
        current_index.getAsKmlClusters(session=gsshapy_session,
                                       path=clusterFile,
                                       colorRamp=ColorRampEnum.COLOR_RAMP_HUE,
                                       alpha=0.6)

        index_map_dataset = gi_lib.check_dataset("index-maps", CKAN_engine)

        resource, status = gi_lib.add_kml_CKAN(index_map_dataset, CKAN_engine,
                                               clusterFile, resource_name)

        # If the kml is added correctly, create an entry for the current_kmls with the name as the index name
        if status == True:
            current_kmls[current_index.name] = {'url': resource['url'],
                                                'full_name': resource['name']}

    # Add the kmls with their url to the database
    job.current_kmls = json.dumps(current_kmls)
    session.commit()
    session.close()
    gsshapy_session.close()

    context['job_id'] = job_id

    return redirect(reverse('gsshaindex:select_index', kwargs={'job_id':job_id}))
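gi_lib.check_dataset appears throughout these controllers as a get-or-create wrapper: it makes sure a CKAN dataset such as "gssha-models" or "index-maps" exists on the engine and returns it. A sketch under that assumption — the engine method names follow the CKAN dataset engine API, but the real helper may differ:

def check_dataset(dataset_name, engine):
    """Return the CKAN dataset named `dataset_name`, creating it if needed."""
    response = engine.get_dataset(dataset_id=dataset_name)
    if response['success']:
        return response['result']
    # Dataset is missing: create it (assumed behavior)
    response = engine.create_dataset(name=dataset_name)
    return response['result']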
Example #9
def get_mask_map(request, file_id):
    """
    This action is used to pass the kml data to the google map.
    It must return a JSON response with a Python dictionary that
    has the key 'kml_links'.
    """
    kml_links = []
    session = jobs_sessionmaker()
    user = str(request.user)
    job, success = gi_lib.get_new_job(file_id, user, session)

    if job.kml_url is not None:
        kml_links.append(job.kml_url)
        #TODO Need some way to check and see if the link works or if it's broken
        return JsonResponse({'kml_links': kml_links})
    else:
        # Check that there's a package to store kmls
        CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)
        present = gi_lib.check_package('kmls', CKAN_engine)

        # Specify the workspace
        controllerDir = os.path.abspath(os.path.dirname(__file__))
        gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
        publicDir = os.path.join(gsshaindexDir, 'public')
        userDir = os.path.join(publicDir, str(user))

        # Clear the workspace
        gi_lib.clear_folder(userDir)

        url = job.original_url
        maskMapDir = os.path.join(userDir, 'mask_maps')
        extractPath = os.path.join(maskMapDir, file_id)
        mask_file = gi_lib.extract_mask(url, extractPath)
        if mask_file == "blank":
            job.kml_url = ''
            session.commit()
            return JsonResponse({'kml_links': ''})
        else:
            projection_file = gi_lib.extract_projection(url, extractPath)

            # Set up kml file name and save location
            name = job.original_name
            norm_name = name.replace(" ", "")
            current_time = time.strftime("%Y%m%dT%H%M%S")
            kml_name = norm_name + "_" + user + "_" + current_time
            kml_ext = kml_name + ".kml"
            kml_file = os.path.join(extractPath, kml_ext)

            colors = [(237, 9, 222), (92, 245, 61), (61, 184, 245),
                      (171, 61, 245), (250, 245, 105), (245, 151, 44),
                      (240, 37, 14), (88, 5, 232), (5, 232, 190),
                      (11, 26, 227)]
            color = [random.choice(colors)]

            # Extract mask map and create kml
            gsshapy_session = gsshapy_sessionmaker()
            if projection_file != "blank":
                srid = ProjectionFile.lookupSpatialReferenceID(extractPath, projection_file)
            else:
                srid = 4302
            mask_map = RasterMapFile()
            mask_map.read(directory=extractPath, filename=mask_file,
                          session=gsshapy_session, spatial=True,
                          spatialReferenceID=srid)
            mask_map.getAsKmlClusters(session=gsshapy_session, path=kml_file,
                                      colorRamp={'colors': color,
                                                 'interpolatedPoints': 1})

            mask_map_dataset = gi_lib.check_dataset("mask-maps", CKAN_engine)

            # Add mask kml to CKAN for viewing
            resource, success = gi_lib.add_kml_CKAN(mask_map_dataset, CKAN_engine, kml_file, kml_name)

            # Check to ensure the resource was added and save it to the database as "kml_url"
            if success == True:
                job.kml_url = resource['url']
                session.commit()
                kml_links.append(job.kml_url)
                return JsonResponse({'kml_links': kml_links})

            # Otherwise fall back to an empty response so the view always returns JSON
            return JsonResponse({'kml_links': []})
Example #10
def replace_index_with_shapefile(request, job_id, index_name, shapefile_name):
    """
    Controller to replace the index map with the selected shapefile.
    """
    context = {}
    user = str(request.user)

    geojson = get_geojson_from_geoserver(user, shapefile_name)

    # Get the job from the database
    job_session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, job_session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                     app_class=GSSHAIndex)

    # Get project file id
    project_file_id = job.new_model_id

    # Create a session
    gsshapy_session = gsshapy_sessionmaker()

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Use project id to link to original map table file
    project_file = gsshapy_session.query(ProjectFile).filter(
        ProjectFile.id == project_file_id).one()
    index_raster = gsshapy_session.query(IndexMap).filter(
        IndexMap.mapTableFile == project_file.mapTableFile).filter(
            IndexMap.name == index_name).one()

    mapTables = index_raster.mapTables

    if geojson['success'] != False:
        geojson_result = geojson['geojson']

        # Get existing indices
        index_raster_indices = index_raster.indices

        srid_name = geojson['crs']

        project_file_srid = project_file.srid

        id = 200

        # Loop through each geometry
        for object in geojson_result:
            index_present = False
            object_id = object['id']

            # Check to see if the index is present
            for index in index_raster_indices:
                if object_id == index.index:
                    index_present = True
                    break

            # Create new index value if it doesn't exist and add the number of ids
            if index_present == False:
                new_indice = MTIndex(id, object_id, "")
                new_indice.indexMap = index_raster
                for mapping_table in mapTables:
                    distinct_vars = gsshapy_session.query(distinct(MTValue.variable)).\
                                     filter(MTValue.mapTable == mapping_table).\
                                     order_by(MTValue.variable).\
                                     all()
                    variables = []
                    for var in distinct_vars:
                        variables.append(var[0])

                    for variable in variables:
                        new_value = MTValue(variable, 0)
                        new_value.mapTable = mapping_table
                        new_value.index = new_indice
                    gsshapy_session.commit()

            geom = object['geometry']
            geom['crs'] = srid_name
            geom_full = json.dumps(geom)

            # Change values in the index map
            change_index_values = "SELECT ST_SetValue(raster,1,ST_Transform(ST_GeomFromGeoJSON('{0}'), {1}),{2}) " \
                                  "FROM idx_index_maps " \
                                  "WHERE id = {3};".format(str(geom_full), project_file_srid, id, index_raster.id)
            result = gi_lib.timeout(gi_lib.draw_update_index,
                                    args=(change_index_values,
                                          index_raster.id),
                                    kwargs={},
                                    timeout=10,
                                    result_can_be_pickled=True,
                                    default=None)

            # If there is a timeout
            if result == None:
                messages.error(request,
                               'The submission timed out. Please try again.')
                job_session.close()
                gsshapy_session.close()
                context['index_name'] = index_name
                context['job_id'] = job_id

                return redirect(
                    reverse('gsshaindex:shapefile_index',
                            kwargs={
                                'job_id': job_id,
                                'index_name': index_name,
                                'shapefile_name': shapefile_name
                            }))

            id += 1

        # Get the values in the index map
        statement3 = '''SELECT (pvc).*
                        FROM (SELECT ST_ValueCount(raster,1,true) As pvc
                        FROM idx_index_maps WHERE id = ''' + unicode(
            index_raster.id) + ''') AS foo
                        ORDER BY (pvc).value;
                        '''
        result3 = gsshapy_engine.execute(statement3)

        numberIDs = 0
        ids = []
        for row in result3:
            numberIDs += 1
            ids.append(row.value)

        map_table_count = 0
        for mapping_table in mapTables:

            index_raster.mapTables[map_table_count].numIDs = numberIDs

            indices = gsshapy_session.query(distinct(MTIndex.index), MTIndex.id, MTIndex.description1, MTIndex.description2).\
                                   join(MTValue).\
                                   filter(MTValue.mapTable == mapping_table).\
                                   order_by(MTIndex.index).\
                                   all()

            for index in indices:
                if int(index[0]) not in ids:
                    stale_index = gsshapy_session.query(MTIndex).get(index.id)
                    for val in stale_index.values:
                        gsshapy_session.delete(val)
                    gsshapy_session.delete(stale_index)
            gsshapy_session.commit()
            map_table_count += 1

        index_raster = gsshapy_session.query(IndexMap).filter(
            IndexMap.mapTableFile == project_file.mapTableFile).filter(
                IndexMap.name == index_name).one()

        # Create kml file name and path
        current_time = time.strftime("%Y%m%dT%H%M%S")
        resource_name = index_raster.name + "_" + str(
            user) + "_" + current_time
        kml_ext = resource_name + '.kml'
        clusterFile = os.path.join(indexMapDir, kml_ext)

        # Generate color ramp
        index_raster.getAsKmlClusters(session=gsshapy_session,
                                      path=clusterFile,
                                      colorRamp=ColorRampEnum.COLOR_RAMP_HUE,
                                      alpha=0.6)

        index_map_dataset = gi_lib.check_dataset("index-maps", CKAN_engine)
        resource, status = gi_lib.add_kml_CKAN(index_map_dataset, CKAN_engine,
                                               clusterFile, resource_name)

        temp_list = json.loads(job.current_kmls)

        if status == True:
            for item in temp_list:
                if item == index_name:
                    del temp_list[item]
                    temp_list[index_name] = {
                        'url': resource['url'],
                        'full_name': resource['name']
                    }
                    break

        job.current_kmls = json.dumps(temp_list)
        job_session.commit()
        job_session.close()
        gsshapy_session.close()

    context['index_name'] = index_name
    context['job_id'] = job_id

    return redirect(
        reverse('gsshaindex:edit_index',
                kwargs={
                    'job_id': job_id,
                    'index_name': index_name
                }))
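gi_lib.timeout gives the raster update a 10-second budget and hands back default=None on expiry, which is why the controller treats a None result as a timeout. A minimal sketch of such a wrapper with multiprocessing (the result_can_be_pickled flag suggests the real one also crosses a process boundary; this version assumes fork-style process start):

import multiprocessing

def timeout(func, args=(), kwargs=None, timeout=10,
            result_can_be_pickled=True, default=None):
    """Run `func` in a child process; return `default` if it outlives `timeout` seconds."""
    def worker(queue):
        queue.put(func(*args, **(kwargs or {})))

    queue = multiprocessing.Queue()
    process = multiprocessing.Process(target=worker, args=(queue,))
    process.start()
    process.join(timeout)
    if process.is_alive():
        process.terminate()
        return default
    # If `func` raised, nothing was queued; fall back to the default
    return queue.get() if not queue.empty() else default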
Example #11
def replace_index_with_shapefile(request, job_id, index_name, shapefile_name):
    """
    Controller to replace the index map with the selected shapefile.
    """
    context = {}
    user = str(request.user)

    geojson = get_geojson_from_geoserver(user, shapefile_name)

    # Get the job from the database
    job_session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, job_session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb', app_class=GSSHAIndex)

    # Get project file id
    project_file_id = job.new_model_id

    # Create a session
    gsshapy_session = gsshapy_sessionmaker()

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Use project id to link to original map table file
    project_file = gsshapy_session.query(ProjectFile).filter(
        ProjectFile.id == project_file_id).one()
    index_raster = gsshapy_session.query(IndexMap).filter(
        IndexMap.mapTableFile == project_file.mapTableFile).filter(
            IndexMap.name == index_name).one()

    mapTables = index_raster.mapTables

    if geojson['success'] != False:
        geojson_result = geojson['geojson']

        # Get existing indices
        index_raster_indices = index_raster.indices

        srid_name = geojson['crs']

        project_file_srid = project_file.srid

        id = 200

        # Loop through each geometry
        for object in geojson_result:
            index_present = False
            object_id = object['id']

            # Check to see if the index is present
            for index in index_raster_indices:
                if object_id == index.index:
                    index_present = True
                    break

            # Create new index value if it doesn't exist and add the number of ids
            if index_present == False:
                new_indice = MTIndex(id, object_id, "")
                new_indice.indexMap = index_raster
                for mapping_table in mapTables:
                    distinct_vars = gsshapy_session.query(distinct(MTValue.variable)).\
                                     filter(MTValue.mapTable == mapping_table).\
                                     order_by(MTValue.variable).\
                                     all()
                    variables = []
                    for var in distinct_vars:
                        variables.append(var[0])

                    for variable in variables:
                        new_value = MTValue(variable, 0)
                        new_value.mapTable = mapping_table
                        new_value.index = new_indice
                    gsshapy_session.commit()

            geom = object['geometry']
            geom['crs'] = srid_name
            geom_full = json.dumps(geom)

            # Change values in the index map
            change_index_values = "SELECT ST_SetValue(raster,1,ST_Transform(ST_GeomFromGeoJSON('{0}'), {1}),{2}) " \
                                  "FROM idx_index_maps " \
                                  "WHERE id = {3};".format(str(geom_full), project_file_srid, id, index_raster.id)
            result = gi_lib.timeout(gi_lib.draw_update_index,
                                    args=(change_index_values, index_raster.id),
                                    kwargs={}, timeout=10,
                                    result_can_be_pickled=True, default=None)

            # If there is a timeout
            if result == None:
                messages.error(request, 'The submission timed out. Please try again.')
                job_session.close()
                gsshapy_session.close()
                context['index_name'] = index_name
                context['job_id'] = job_id

                return redirect(
                    reverse('gsshaindex:shapefile_index',
                            kwargs={'job_id': job_id,
                                    'index_name': index_name,
                                    'shapefile_name': shapefile_name}))

            id += 1

        # Get the values in the index map
        statement3 = '''SELECT (pvc).*
                        FROM (SELECT ST_ValueCount(raster,1,true) As pvc
                        FROM idx_index_maps WHERE id = '''+ unicode(index_raster.id) +''') AS foo
                        ORDER BY (pvc).value;
                        '''
        result3 = gsshapy_engine.execute(statement3)

        numberIDs = 0
        ids = []
        for row in result3:
            numberIDs += 1
            ids.append(row.value)

        map_table_count = 0
        for mapping_table in mapTables:

            index_raster.mapTables[map_table_count].numIDs = numberIDs

            indices = gsshapy_session.query(distinct(MTIndex.index), MTIndex.id, MTIndex.description1, MTIndex.description2).\
                                   join(MTValue).\
                                   filter(MTValue.mapTable == mapping_table).\
                                   order_by(MTIndex.index).\
                                   all()

            for index in indices:
                if int(index[0]) not in ids:
                    stale_index = gsshapy_session.query(MTIndex).get(index.id)
                    for val in stale_index.values:
                        gsshapy_session.delete(val)
                    gsshapy_session.delete(stale_index)
            gsshapy_session.commit()
            map_table_count += 1

        index_raster = gsshapy_session.query(IndexMap).filter(
            IndexMap.mapTableFile == project_file.mapTableFile).filter(
                IndexMap.name == index_name).one()

        # Create kml file name and path
        current_time = time.strftime("%Y%m%dT%H%M%S")
        resource_name = index_raster.name + "_" + str(user) + "_" + current_time
        kml_ext = resource_name + '.kml'
        clusterFile = os.path.join(indexMapDir, kml_ext)

        # Generate color ramp
        index_raster.getAsKmlClusters(session=gsshapy_session,
                                      path=clusterFile,
                                      colorRamp=ColorRampEnum.COLOR_RAMP_HUE,
                                      alpha=0.6)

        index_map_dataset = gi_lib.check_dataset("index-maps", CKAN_engine)
        resource, status = gi_lib.add_kml_CKAN(index_map_dataset, CKAN_engine,
                                               clusterFile, resource_name)

        temp_list = json.loads(job.current_kmls)

        if status == True:
            for item in temp_list:
                if item == index_name:
                    del temp_list[item]
                    temp_list[index_name] = {'url': resource['url'],
                                             'full_name': resource['name']}
                    break

        job.current_kmls = json.dumps(temp_list)
        job_session.commit()
        job_session.close()
        gsshapy_session.close()

    context['index_name'] = index_name
    context['job_id'] = job_id

    return redirect(reverse('gsshaindex:edit_index',
                            kwargs={'job_id': job_id,
                                    'index_name': index_name}))
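change_index_values interpolates raw GeoJSON straight into the SQL string, which breaks (or worse) if the text ever contains a quote. If the statement were executed directly with the same SQLAlchemy engine used for statement3, a bound-parameter version would be safer — a sketch, not the app's actual code path (the statement is normally routed through gi_lib.draw_update_index):

from sqlalchemy import text

change_index_values = text(
    "SELECT ST_SetValue(raster, 1, "
    "ST_Transform(ST_GeomFromGeoJSON(:geom), :srid), :val) "
    "FROM idx_index_maps WHERE id = :raster_id")

result = gsshapy_engine.execute(change_index_values,
                                geom=geom_full,
                                srid=project_file_srid,
                                val=id,
                                raster_id=index_raster.id)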
Example #12
def results(request, job_id, view_type):
    context = {}

    # Get the user id
    user = str(request.user)

    # Get the job from the database
    session = jobs_sessionmaker()
    job = session.query(Jobs).\
                    filter(Jobs.user_id == user).\
                    filter(Jobs.original_id == job_id).one()

    # Get the run result urls
    result_files = job.result_urls

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    jobDir = os.path.join(userDir, str(job_id))
    fakeDir = os.path.join(userDir, 'fake')
    newResultsPath = os.path.join(jobDir, 'new_results')
    originalResultsPath = os.path.join(jobDir, 'original_results')

    # Check to see if the otl folder for this job is already downloaded and has values and get a handle on them if it does
    if os.path.exists(jobDir) and os.listdir(
            newResultsPath) != [] and os.listdir(originalResultsPath) != []:
        # Get the otl files
        new_otl_file = gi_lib.find_otl(newResultsPath)
        original_otl_file = gi_lib.find_otl(originalResultsPath)
    # If it doesn't, get the otl files
    else:
        # Clear the results folder
        gi_lib.clear_folder(userDir)
        gi_lib.clear_folder(jobDir)
        gi_lib.clear_folder(newResultsPath)
        gi_lib.clear_folder(originalResultsPath)
        # Get the otl files
        new_otl_file = gi_lib.extract_otl(result_files['new'], newResultsPath)
        original_otl_file = gi_lib.extract_otl(result_files['original'],
                                               originalResultsPath)

    # Format the values for display with high charts
    new_values = []
    originalValues = []

    new_values = gi_lib.get_otl_values(new_otl_file, new_values)
    originalValues = gi_lib.get_otl_values(original_otl_file, originalValues)

    # Set up for high charts hydrograph
    highcharts_object = {
        'chart': {
            'type': 'spline'
        },
        'title': {
            'text': 'Comparison Hydrograph'
        },
        'subtitle': {
            'text': 'Display of the two model results'
        },
        'legend': {
            'layout': 'vertical',
            'align': 'right',
            'verticalAlign': 'middle',
            'borderWidth': 0
        },
        'xAxis': {
            'title': {
                'enabled': True,
                'text': 'Time (hours)'
            },
            'labels': {
                'formatter': 'function () { return this.value + " hr"; }'
            }
        },
        'yAxis': {
            'title': {
                'enabled': True,
                'text': 'Discharge (cfs)'
            },
            'labels': {
                'formatter': 'function () { return this.value + " cfs"; }'
            }
        },
        'tooltip': {
            'headerFormat': '{series.name}',
            'pointFormat': '{point.x} hours: {point.y} cfs'
        },
        'series': [{
            'name': job.original_name.replace("_", " "),
            'color': '#0066ff',
            'dashStyle': 'ShortDash',
            'marker': {
                'enabled': False
            },
            'data': originalValues
        }, {
            'name': job.new_name.replace("_", " "),
            'marker': {
                'enabled': False
            },
            'color': '#ff6600',
            'data': new_values
        }]
    }

    hydrograph = {
        'highcharts_object': highcharts_object,
        'width': '500px',
        'height': '500px'
    }

    google_map = {
        'height': '600px',
        'width': '100%',
        'reference_kml_action': ('/apps/gsshaindex/' + job_id +
                                 '/get-depth-map/' + view_type)
    }

    session.close()

    kml_link = ''
    title = ''

    if view_type == 'originalMax':
        title = job.original_name.replace("_", " ") + ' Maximum Depth'
        kml_link = job.originalMax
    elif view_type == 'newMax':
        title = job.new_name.replace("_", " ") + ' Maximum Depth'
        kml_link = job.newMax
    elif view_type == 'newTime':
        title = job.new_name.replace("_", " ") + ' Time Series'
        kml_link = job.newTime
    elif view_type == 'originalTime':
        title = job.original_name.replace("_", " ") + ' Time Series'
        kml_link = job.originalTime
    else:
        kml_link = ''
        title = ''

    context['map_title'] = title
    context['kml_link'] = kml_link
    context['hydrograph'] = hydrograph
    context['google_map'] = google_map
    context['map_type'] = view_type
    context['original_name'] = job.original_name.replace("_", " ")
    context['new_name'] = job.new_name.replace("_", " ")
    context['original_file'] = job.result_urls['original']
    context['new_file'] = job.result_urls['new']
    context['job_id'] = job_id

    return render(request, 'gsshaindex/results.html', context)
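Note the xAxis/yAxis 'formatter' entries above: they are JavaScript function bodies stored as Python strings. If the template serializes highcharts_object with plain json.dumps they arrive client-side as quoted strings, so the gizmo or front-end code has to unwrap them before Highcharts will treat them as functions. A quick check:

import json

print(json.dumps({'formatter': 'function () { return this.value + " hr"; }'}))
# {"formatter": "function () { return this.value + \" hr\"; }"}  -- still a string, not a function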
Example #13
def fly(request, job_id):
    context = {}

    # Get the user id
    user = str(request.user)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                     app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    resultsPath = os.path.join(userDir, 'results')
    originalFileRunPath = os.path.join(userDir, "preRun")
    writeFile = os.path.join(userDir, "writeFile")
    zipPath = os.path.join(userDir, "zipPath")

    #Create session
    gsshapy_session = gsshapy_sessionmaker()

    # Clear the results folder
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(resultsPath)
    gi_lib.clear_folder(originalFileRunPath)
    gi_lib.clear_folder(writeFile)

    # Get the jobs from the database
    session = jobs_sessionmaker()
    job = session.query(Jobs).\
                    filter(Jobs.user_id == user).\
                    filter(Jobs.original_id == job_id).one()

    # Get the urls and names for the analysis
    run_urls = job.run_urls

    arguments = {
        'new': {
            'url': run_urls['new']['url'],
            'name': run_urls['new']['name']
        },
        'original': {
            'url': run_urls['original']['url'],
            'name': run_urls['original']['name']
        }
    }

    # Set up for fly GSSHA
    job.status = "processing"
    session.commit()

    status = 'complete'

    results = []
    # results_urls = []
    results_urls = {}
    count = 0

    GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

    # Try running the web service
    # try:
    for k in arguments:
        url = str(arguments[k]['url'])

        if k == 'original' and job.original_certification == "Certified":
            results_urls['original'] = url
            count += 1
            continue
        elif k == 'original' and job.original_certification == "Missing gfl":
            # Need to download from url, add gfl, zip, send to ckan, run, and save the url
            downloaded_project = gi_lib.extract_zip_from_url(
                user, url, originalFileRunPath)
            # Create an empty Project File Object
            # Find the project file
            for root, dirs, files in os.walk(originalFileRunPath):
                for file in files:
                    if file.endswith(".prj"):
                        project_name = file
                        project_path = os.path.join(root, file)
                        read_dir = os.path.dirname(project_path)
            project = ProjectFile()
            project.readInput(directory=read_dir,
                              projectFileName=project_name,
                              session=gsshapy_session,
                              spatial=True)

            if project.getCard("FLOOD_GRID") == None:
                max_depth_card = ProjectCard(
                    "FLOOD_GRID", '"{0}.gfl"'.format(project_name[:-4]))
                project_cards = project.projectCards.append(max_depth_card)
                gsshapy_session.commit()

            # Need to format so that it will work for the file I just did
            # Get all the project files
            project.writeInput(session=gsshapy_session,
                               directory=writeFile,
                               name=project_name[:-4])

            # Make a list of the project files
            writeFile_list = os.listdir(writeFile)

            # Add each project file to the zip folder
            with zipfile.ZipFile(zipPath, "w") as gssha_zip:
                for item in writeFile_list:
                    abs_path = os.path.join(writeFile, item)
                    archive_path = os.path.join(project_name, item)
                    gssha_zip.write(abs_path, archive_path)

            GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

            description = job.original_description + " with a gfl added"
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")

            # Add the zipped GSSHA file to the public ckan
            results, success = gi_lib.add_zip_GSSHA(
                GSSHA_dataset, zipPath, CKAN_engine,
                project_name[:-4] + " with gfl", description, pretty_date,
                user)

            job.original_url = results['url']

            url = job.original_url

            resultsFile = os.path.join(
                resultsPath, arguments[k]['name'].replace(" ", "_") +
                datetime.now().strftime('%Y%d%m%H%M%S'))

            gi_lib.flyGssha(str(url), resultsFile)

            # Push file to ckan dataset
            resource_name = ' '.join(
                (arguments[k]['name'], '-Run',
                 datetime.now().strftime('%b %d %y %H:%M:%S')))
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")
            result, success = gi_lib.add_zip_GSSHA(GSSHA_dataset,
                                                   resultsFile,
                                                   CKAN_engine,
                                                   resource_name,
                                                   "",
                                                   pretty_date,
                                                   user,
                                                   certification="Certified")

            # Save the new url as the original_url and run

            job.original_certification = "Certified"

            # Publish link to table
            results_urls['original'] = result['url']
            count += 1
        else:
            resultsFile = os.path.join(
                resultsPath, arguments[k]['name'].replace(" ", "_") +
                datetime.now().strftime('%Y%d%m%H%M%S'))
            gi_lib.flyGssha(url, resultsFile)

            # Push file to ckan dataset
            resource_name = ' '.join(
                (arguments[k]['name'], '-Run',
                 datetime.now().strftime('%b %d %y %H:%M:%S')))
            pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")
            result, success = gi_lib.add_zip_GSSHA(GSSHA_dataset,
                                                   resultsFile,
                                                   CKAN_engine,
                                                   resource_name,
                                                   "",
                                                   pretty_date,
                                                   user,
                                                   certification="Certified")

            # Publish link to table
            if k == 'original':
                results_urls['original'] = result['url']
                job.original_certification = "Certified"
            else:
                results_urls['new'] = result['url']
            count += 1

    if count == 2:
        print results_urls
    else:
        status = 'failed'

    # except:
    #     status = 'failed'

    job.status = status
    job.result_urls = results_urls
    session.commit()
    session.close()
    gsshapy_session.commit()
    gsshapy_session.close()

    return redirect(reverse('gsshaindex:status'))
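gi_lib.flyGssha(url, resultsFile) submits the model zip at `url` to the run service and saves the returned results archive at resultsFile. Its transport is not shown anywhere in these examples; purely as a hypothetical sketch, assuming a simple HTTP run service (the endpoint below is invented for illustration):

try:
    from urllib.request import urlretrieve  # Python 3
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlretrieve, urlencode  # Python 2

def flyGssha(model_url, results_file,
             service='http://example.org/gssha/run'):  # hypothetical endpoint
    """Ask the run service to execute the model at `model_url`; save the result zip."""
    query = urlencode({'input': model_url})
    urlretrieve(service + '?' + query, results_file)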
Example #14
def zip_file(request, job_id):
    '''
    This zips up the GSSHA files in preparation of their being run
    '''
    context = {}

    # Get the user id and the pending job
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                     app_class=GSSHAIndex)

    project_file_id = job.new_model_id

    # Get the name and description from the submission
    params = request.POST
    not_clean_name = params['new_name']
    new_description = params['new_description']

    # Reformat the name by removing bad characters
    # bad_char = "',.<>()[]{}=+-/\"|:;\\^?!~`@#$%&* "
    bad_char = "',.<>[]{}=+-/\"|:;\\^?!~`@#$%&*"
    new_name = not_clean_name
    for char in bad_char:
        new_name = new_name.replace(char, "_")

    #Create session
    gsshapy_session = gsshapy_sessionmaker()

    # Get project from the database
    projectFileAll = gsshapy_session.query(ProjectFile).get(project_file_id)

    # Create name for files
    project_name = projectFileAll.name
    if project_name.endswith('.prj'):
        project_name = project_name[:-4]
    pretty_date = time.strftime("%A %B %d, %Y %I:%M:%S %p")

    # Set depth map
    if projectFileAll.getCard("FLOOD_GRID") is None:
        max_depth_card = ProjectCard("FLOOD_GRID",
                                     '"{0}.gfl"'.format(new_name))
        project_cards = projectFileAll.projectCards.append(max_depth_card)
        gsshapy_session.commit()
        job.original_certification = "Missing gfl"

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    newFileDir = os.path.join(userDir, 'newFile')
    writeFile = os.path.join(newFileDir, new_name)
    zipPath = os.path.join(newFileDir, new_name + "_zip")

    # Clear workspace folders
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(newFileDir)
    gi_lib.clear_folder(writeFile)

    # Get all the project files
    projectFileAll.writeInput(session=gsshapy_session,
                              directory=writeFile,
                              name=new_name)

    # Make a list of the project files
    writeFile_list = os.listdir(writeFile)

    # Add each project file to the zip folder
    with zipfile.ZipFile(zipPath, "w") as gssha_zip:
        for item in writeFile_list:
            abs_path = os.path.join(writeFile, item)
            archive_path = os.path.join(new_name, item)
            gssha_zip.write(abs_path, archive_path)
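    # The archive now holds every input file under a top-level
    # "<new_name>/" directory (see archive_path above).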

    GSSHA_dataset = gi_lib.check_dataset("gssha-models", CKAN_engine)

    # Add the zipped GSSHA file to the public ckan
    results, success = gi_lib.add_zip_GSSHA(GSSHA_dataset, zipPath,
                                            CKAN_engine, new_name,
                                            new_description, pretty_date, user)

    # If the file zipped correctly, gather its information and store it in the database
    if success:
        new_url = results['url']
        new_name = results['name']
        original_url = job.original_url
        original_name = job.original_name

        model_data = {
            'original': {
                'url': original_url,
                'name': original_name
            },
            'new': {
                'url': new_url,
                'name': new_name
            }
        }
        job.run_urls = model_data
        job.new_name = new_name
        job.status = "ready to run"
    session.commit()

    return redirect(reverse('gsshaindex:status'))
Beispiel #15
0
def extract_gssha(request, job_id):
    '''
    This takes the submitted file name and id, unzips the files, finds the index maps, and creates KMLs.
    '''
    context = {}
    user = str(request.user)
    session = jobs_sessionmaker()
    job, success = gi_lib.get_pending_job(job_id, user, session)
    CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                     app_class=GSSHAIndex)

    # Specify the workspace
    controllerDir = os.path.abspath(os.path.dirname(__file__))
    gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
    publicDir = os.path.join(gsshaindexDir, 'public')
    userDir = os.path.join(publicDir, str(user))
    indexMapDir = os.path.join(userDir, 'index_maps')

    # Clear the workspace
    gi_lib.clear_folder(userDir)
    gi_lib.clear_folder(indexMapDir)

    # Get url for the resource and extract the GSSHA file
    url = job.original_url
    extract_path, unique_dir = gi_lib.extract_zip_from_url(user, url, userDir)

    # Create GSSHAPY Session
    gsshapy_session = gsshapy_sessionmaker()

    # Find the project file
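    # (If the archive contains more than one .prj file, the last one visited wins.)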
    for root, dirs, files in os.walk(userDir):
        for file in files:
            if file.endswith(".prj"):
                project_name = file
                project_path = os.path.join(root, file)
                read_dir = os.path.dirname(project_path)

    # Create an empty Project File Object
    project = ProjectFile()

    project.readInput(directory=read_dir,
                      projectFileName=project_name,
                      session=gsshapy_session,
                      spatial=True)

    # Create empty dictionary to hold the kmls from this session
    current_kmls = {}
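    # Shape when filled: {index_map_name: {'url': ..., 'full_name': ...}}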

    # Store model information
    job.new_model_name = project.name
    job.new_model_id = project.id
    job.created = datetime.now()

    # Get index maps
    index_list = gsshapy_session.query(IndexMap).filter(
        IndexMap.mapTableFile == project.mapTableFile).all()

    # Loop through the index
    for current_index in index_list:
        # Create kml file name and path
        current_time = time.strftime("%Y%m%dT%H%M%S")
        resource_name = current_index.name + "_" + str(
            user) + "_" + current_time
        kml_ext = resource_name + '.kml'
        clusterFile = os.path.join(indexMapDir, kml_ext)

        # Generate color ramp
        current_index.getAsKmlClusters(session=gsshapy_session,
                                       path=clusterFile,
                                       colorRamp=ColorRampEnum.COLOR_RAMP_HUE,
                                       alpha=0.6)

        index_map_dataset = gi_lib.check_dataset("index-maps", CKAN_engine)

        resource, status = gi_lib.add_kml_CKAN(index_map_dataset, CKAN_engine,
                                               clusterFile, resource_name)

        # If the kml is added correctly, create an entry for the current_kmls with the name as the index name
        if status:
            current_kmls[current_index.name] = {
                'url': resource['url'],
                'full_name': resource['name']
            }

    # Add the kmls with their url to the database
    job.current_kmls = json.dumps(current_kmls)
    session.commit()
    session.close()
    gsshapy_session.close()

    context['job_id'] = job_id

    return redirect(
        reverse('gsshaindex:select_index', kwargs={'job_id': job_id}))
Beispiel #16
0
def get_mask_map(request, file_id):
    """
    This action passes the KML data to the Google map.
    It must return a JSON response with a Python dictionary that
    has the key 'kml_links'.
    """
    kml_links = []
    session = jobs_sessionmaker()
    user = str(request.user)
    job, success = gi_lib.get_new_job(file_id, user, session)

    if job.kml_url is not None:
        kml_links.append(job.kml_url)
        #TODO Need some way to check and see if the link works or if it's broken
        return JsonResponse({'kml_links': kml_links})
    else:
        # Check that there's a package to store kmls
        CKAN_engine = get_dataset_engine(name='gsshaindex_ciwweb',
                                         app_class=GSSHAIndex)
        present = gi_lib.check_package('kmls', CKAN_engine)

        # Specify the workspace
        controllerDir = os.path.abspath(os.path.dirname(__file__))
        gsshaindexDir = os.path.abspath(os.path.dirname(controllerDir))
        publicDir = os.path.join(gsshaindexDir, 'public')
        userDir = os.path.join(publicDir, str(user))

        # Clear the workspace
        gi_lib.clear_folder(userDir)

        url = job.original_url
        maskMapDir = os.path.join(userDir, 'mask_maps')
        extractPath = os.path.join(maskMapDir, file_id)
        mask_file = gi_lib.extract_mask(url, extractPath)
        if mask_file == "blank":
            job.kml_url = ''
            session.commit()
            return JsonResponse({'kml_links': ''})
        else:
            projection_file = gi_lib.extract_projection(url, extractPath)

            # Set up kml file name and save location
            name = job.original_name
            norm_name = name.replace(" ", "")
            current_time = time.strftime("%Y%m%dT%H%M%S")
            kml_name = norm_name + "_" + user + "_" + current_time
            kml_ext = kml_name + ".kml"
            kml_file = os.path.join(extractPath, kml_ext)

            colors = [(237, 9, 222), (92, 245, 61), (61, 184, 245),
                      (171, 61, 245), (250, 245, 105), (245, 151, 44),
                      (240, 37, 14), (88, 5, 232), (5, 232, 190),
                      (11, 26, 227)]
            color = [random.choice(colors)]

            # Extract mask map and create kml
            gsshapy_session = gsshapy_sessionmaker()
            if projection_file != "blank":
                srid = ProjectionFile.lookupSpatialReferenceID(
                    extractPath, projection_file)
            else:
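                # No projection file found; fall back to the default SRID (4302)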
                srid = 4302
            mask_map = RasterMapFile()
            mask_map.read(directory=extractPath,
                          filename=mask_file,
                          session=gsshapy_session,
                          spatial=True,
                          spatialReferenceID=srid)
            mask_map.getAsKmlClusters(session=gsshapy_session,
                                      path=kml_file,
                                      colorRamp={
                                          'colors': color,
                                          'interpolatedPoints': 1
                                      })

            mask_map_dataset = gi_lib.check_dataset("mask-maps", CKAN_engine)

            # Add mask kml to CKAN for viewing
            resource, success = gi_lib.add_kml_CKAN(mask_map_dataset,
                                                    CKAN_engine, kml_file,
                                                    kml_name)

            # If the resource was added successfully, save its URL to the database as "kml_url"
            if success:
                job.kml_url = resource['url']
                session.commit()
                kml_links.append(job.kml_url)
                return JsonResponse({'kml_links': kml_links})