Example No. 1
def wfs_proxy(request, *args, **kwargs):
    if request.method == 'GET':
        endpoint = request.GET['endpoint']
        typename = request.GET['typename']
        user = request.session['user']
        password = request.session['password']

        endpoint_parsed = urlparse.urlparse(endpoint)
        q_dict = {
            'version': '1.0.0',
            'typename': typename,
            'outputFormat': 'json',
            'request': 'GetFeature',
            'service': 'WFS',
            'srsName': 'EPSG:4326',
        }
        parsed_url = urlparse.ParseResult(scheme=endpoint_parsed.scheme,
                                          netloc=endpoint_parsed.netloc,
                                          path=endpoint_parsed.path,
                                          params=None,
                                          query=urllib.urlencode(q_dict),
                                          fragment=None)
        LOGGER.info('Proxy to url: %s' % parsed_url.geturl())
        tmpfile = download_file(parsed_url.geturl(),
                                user=user,
                                password=password)
        with open(tmpfile) as f:
            return HttpResponse(f.read(), content_type='application/json')
    return HttpResponseServerError()
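
These examples target the Python 2 urlparse and urllib modules. As a rough sketch (not part of the project, assuming Python 3), the same GetFeature URL could be assembled with urllib.parse:

# Rough sketch, not from the original project: the WFS GetFeature URL above
# built with Python 3's urllib.parse instead of urlparse/urllib.
from urllib.parse import urlencode, urlparse, urlunparse

def build_wfs_getfeature_url(endpoint, typename):
    parsed = urlparse(endpoint)
    query = urlencode({
        'version': '1.0.0',
        'typename': typename,
        'outputFormat': 'json',
        'request': 'GetFeature',
        'service': 'WFS',
        'srsName': 'EPSG:4326',
    })
    # components: (scheme, netloc, path, params, query, fragment)
    return urlunparse((parsed.scheme, parsed.netloc, parsed.path, '', query, ''))

# e.g. build_wfs_getfeature_url('http://example.org/geoserver/wfs',
#                               'geonode:flood_data')  # hypothetical values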
Example No. 2
    def test_download_file_url(self):
        """Test downloading file directly using url."""

        data_helper = InaSAFETestData()
        filename = data_helper.hazard('flood_data.geojson')
        hazard = file_upload(filename)

        wait_metadata(hazard)

        # download zipped layer
        hazard_layer_url = Analysis.get_layer_url(hazard)

        downloaded_file = download_file(hazard_layer_url)

        # check that file is downloaded
        self.assertTrue(os.path.exists(downloaded_file))

        basename, ext = os.path.splitext(downloaded_file)

        # check that file has .zip extension
        self.assertEqual(ext, '.zip')

        # delete zipfile
        os.remove(downloaded_file)

        # download the geosafe metadata xml file
        hazard_layer_xml = reverse('geosafe:layer-metadata',
                                   kwargs={'layer_id': hazard.id})
        hazard_layer_xml = urlparse.urljoin(settings.GEONODE_BASE_URL,
                                            hazard_layer_xml)

        downloaded_file = download_file(hazard_layer_xml)

        # check that file is downloaded
        self.assertTrue(os.path.exists(downloaded_file))

        basename, ext = os.path.splitext(downloaded_file)

        # check that file has .xml extension
        self.assertEqual(ext, '.xml')

        # delete xmlfile
        os.remove(downloaded_file)

        # delete layer
        hazard.delete()
Example No. 3
def add_wcs_layer(
        endpoint,
        version,
        coverage_id,
        metadata_string=None,
        title=None,
        bbox=None,
        user=None, password=None):
    # build url
    endpoint_parsed = urlparse.urlparse(endpoint)
    q_dict = {
        'version': version,
        'coverageid': coverage_id,
        'format': 'image/tiff',
        'request': 'GetCoverage',
        'service': 'WCS',
        'crs': 'EPSG:4326',
    }
    if bbox:
        q_dict['bbox'] = ','.join(bbox)
    parsed_url = urlparse.ParseResult(
        scheme=endpoint_parsed.scheme,
        netloc=endpoint_parsed.netloc,
        path=endpoint_parsed.path,
        params=None,
        query=urllib.urlencode(q_dict),
        fragment=None
    )
    tmpfile = download_file(parsed_url.geturl(), user=user, password=password)
    shutil.move(tmpfile, tmpfile + '.tif')
    metadata_file = '%s.xml' % tmpfile
    tmpfile += '.tif'

    # get metadata file
    if metadata_string:
        if not isinstance(metadata_string, unicode):
            metadata_string = unicode(metadata_string, 'utf-8')

        metadata_string = cleanup_metadata(metadata_string)
        with io.open(metadata_file, mode='w', encoding='utf-8') as f:
            f.write(metadata_string)

    saved_layer = None
    with open(tmpfile) as f:
        saved_layer = file_upload(tmpfile, overwrite=True)
        saved_layer.set_default_permissions()
        saved_layer.title = title or coverage_id
        saved_layer.save()
    try:
        os.remove(tmpfile)
        os.remove(metadata_file)
    except BaseException:
        pass
    return saved_layer
Example No. 4
def process_impact_result(self, impact_result, analysis_id):
    """Extract impact analysis after running it via InaSAFE-Headless celery

    :param self: Task instance
    :type self: celery.task.Task

    :param impact_result: A dictionary of the output layer keys and URIs,
        together with status and message.
    :type impact_result: dict

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: True if success
    :rtype: bool
    """
    # Track the current task_id
    analysis = Analysis.objects.get(id=analysis_id)

    success = False
    report_success = False
    impact_url = None
    impact_path = None

    if impact_result['status'] == RESULT_SUCCESS:
        impact_url = (
            impact_result['output'].get('impact_analysis') or
            impact_result['output'].get('hazard_aggregation_summary'))
        analysis_summary_url = (
            impact_result['output'].get('analysis_summary'))

        custom_template_path = prepare_custom_template(analysis, impact_url)

        layer_order = prepare_context_layer_order(analysis, impact_url)

        # generate report when the analysis has run successfully
        result = generate_report.delay(
            impact_url,
            # If it is None, the default headless template will be used
            custom_report_template_uri=custom_template_path,
            custom_layer_order=layer_order,
            locale=analysis.language_code)

        retries = 10
        for r in range(retries):
            try:
                with allow_join_result():
                    report_metadata = result.get().get('output', {})
                break
            except BaseException as e:
                if result.state == 'FAILURE':
                    raise e
                LOGGER.exception(e)
                time.sleep(5)
                result = AsyncResult(result.id)
                if r >= retries - 1:
                    # Report generation has failed. Re-raise the error so
                    # we know something went wrong.
                    raise e

        for product_key, products in report_metadata.iteritems():
            for report_key, report_url in products.iteritems():
                report_url = download_file(report_url, direct_access=True)
                report_metadata[product_key][report_key] = report_url

        # decide if we are using direct access or not
        impact_url = get_impact_path(impact_url)

        # download impact layer path
        impact_path = download_file(impact_url, direct_access=True)
        dir_name = os.path.dirname(impact_path)
        is_zipfile = os.path.splitext(impact_path)[1].lower() == '.zip'
        if is_zipfile:
            # Extract the layer first
            with ZipFile(impact_path) as zf:
                zf.extractall(path=dir_name)
                for name in zf.namelist():
                    basename, ext = os.path.splitext(name)
                    if ext in cov_exts + vec_exts:
                        # process inside the loop so this only runs once
                        # the layer file has been found
                        success = process_impact_layer(
                            analysis, dir_name, basename, name)
                        report_success = process_impact_report(
                            analysis, report_metadata)
                        break

                # cleanup
                for name in zf.namelist():
                    filepath = os.path.join(dir_name, name)
                    try:
                        os.remove(filepath)
                    except BaseException:
                        pass
        else:
            # This means we are accessing a shp or tif file directly
            analysis_summary_filename = os.path.basename(analysis_summary_url)
            impact_filename = os.path.basename(impact_path)
            impact_basename, ext = os.path.splitext(impact_filename)
            success = process_impact_layer(
                analysis, dir_name, impact_basename,
                impact_filename, analysis_summary_filename)
            report_success = process_impact_report(analysis, report_metadata)

            # cleanup
            for name in os.listdir(dir_name):
                filepath = os.path.join(dir_name, name)
                is_file = os.path.isfile(filepath)
                should_delete = name.split('.')[0] == impact_basename
                if is_file and should_delete:
                    try:
                        os.remove(filepath)
                    except BaseException:
                        pass

    # cleanup
    try:
        os.remove(impact_path)
    except BaseException:
        pass

    if not success:
        LOGGER.info('No impact layer found in {0}'.format(impact_url))

    if not report_success:
        LOGGER.info('No impact report generated.')

    send_analysis_result_email(analysis)

    return success
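
The retry loop above polls the report task until it either succeeds or is marked FAILURE. A minimal, generic sketch of that pattern (assuming any Celery result, not code from the project) looks like this:

# Minimal sketch of the polling pattern used above; `async_result` is any
# celery.result.AsyncResult. When called from inside another task, wrap the
# .get() call in allow_join_result() as the example does.
import time
from celery.result import AsyncResult

def wait_for_report(async_result, retries=10, delay=5):
    for attempt in range(retries):
        try:
            return async_result.get().get('output', {})
        except BaseException:
            if async_result.state == 'FAILURE' or attempt >= retries - 1:
                raise
            time.sleep(delay)
            # refresh the result handle before the next attempt
            async_result = AsyncResult(async_result.id)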
Example No. 5
    def test_download_file_path(self):
        """Test that download_file returns correctly.

        Will test against several file:// scheme cases.
        """

        cases = [
            {
                # Check regular file:// scheme case
                'input': 'file:///foo/bar.zip',
                'output': '/foo/bar.zip'
            },
            {
                # Check the relative file:// scheme case
                'input': 'file://foo/bar.zip',
                'output': '/bar.zip'
            },
            {
                # Check with + sign
                'input': 'file:///foo+bar/file+name+long.zip',
                'output': '/foo bar/file name long.zip'
            },
            {
                # Check urlencoded
                'input': 'file:///foo%20bar%2Flong%20file%2Cname.zip',
                'output': '/foo bar/long file,name.zip'
            },
            {
                # Check urlencoded and utf-8 encode
                'input': 'file:///movies%2F'
                '%E5%90%9B%E3%81%AE%E5%90%8D%E3%81%AF.mkv',
                'output': u'/movies/君の名は.mkv'
            },
            {
                # Check regular path
                'input': '/foo/bar.xml',
                'output': '/foo/bar.xml'
            },
            {
                # Check do not decode regular path
                'input': '/foo/file+name.shp',
                'output': '/foo/file+name.shp'
            },
            {
                # Check do not decode regular path
                'input': '/foo/file%20name.shp',
                'output': '/foo/file%20name.shp'
            },
            {
                # Check do not decode regular path
                'input': u'/movies/君の名は.mkv',
                'output': u'/movies/君の名は.mkv'
            }
        ]

        for case in cases:
            message = 'The following case has failed: {case}'.format(case=case)
            self.assertEqual(case.get('output'),
                             download_file(case.get('input'),
                                           direct_access=True),
                             msg=message)

        # Check that download_file generates a temporary file
        data_helper = InaSAFETestData()
        filename = data_helper.hazard('flood_data.geojson')
        hazard = file_upload(filename)
        """:type: geonode.layers.models.Layer"""

        wait_metadata(hazard)

        hazard_base_file, _ = hazard.get_base_file()
        hazard_path = hazard_base_file.file.path

        # Check file exists
        self.assertTrue(os.path.exists(hazard_path))

        downloaded_file = download_file(hazard_path, direct_access=False)

        # Should be a different filename
        self.assertNotEqual(hazard_path, downloaded_file)

        # Check file exists
        self.assertTrue(os.path.exists(downloaded_file))

        os.remove(downloaded_file)

        hazard.delete()
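
The cases above pin down how download_file treats file:// URLs (decode '+' and percent-escapes, honour UTF-8) versus plain paths (returned untouched). A small illustration of that expected behaviour, not the project's actual implementation, could look like:

# Illustration only (Python 2), not the project's download_file: shows the
# file:// decoding behaviour the test cases above expect.
import urllib
import urlparse

def file_url_to_path(url):
    parsed = urlparse.urlparse(url)
    if parsed.scheme == 'file':
        # '+' and percent-escapes are decoded for file:// URLs only
        return urllib.unquote_plus(parsed.path).decode('utf-8')
    # plain paths are returned untouched
    return url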
Example No. 6
def add_wfs_layer(endpoint,
                  version,
                  typename,
                  metadata_string=None,
                  title=None,
                  bbox=None,
                  user=None,
                  password=None):
    endpoint_parsed = urlparse.urlparse(endpoint)
    q_dict = {
        'version': version,
        'typename': typename,
        'outputFormat': 'shape-zip',
        'request': 'GetFeature',
        'service': 'WFS',
        'srsName': 'EPSG:4326',
    }
    if bbox:
        q_dict['bbox'] = ','.join(bbox)
    parsed_url = urlparse.ParseResult(scheme=endpoint_parsed.scheme,
                                      netloc=endpoint_parsed.netloc,
                                      path=endpoint_parsed.path,
                                      params=None,
                                      query=urllib.urlencode(q_dict),
                                      fragment=None)
    tmpfile = download_file(parsed_url.geturl(), user=user, password=password)

    # args = [
    #     'ogr2ogr',
    #     '-nlt POLYGON',
    #     '-skipfailures',
    #     '%s.shp' % tmpfile,
    #     tmpfile,
    #     'OGRGeoJSON'
    # ]
    #
    # retval = subprocess.call(args)

    # # get metadata file
    # if metadata_string:
    #     if not isinstance(metadata_string, unicode):
    #         metadata_string = unicode(metadata_string, 'utf-8')
    #     metadata_file = '%s.xml' % tmpfile
    #     with io.open(metadata_file, mode='w', encoding='utf-8') as f:
    #         f.write(metadata_string)
    #
    # saved_layer = None
    # if retval == 0:
    #     saved_layer = file_upload(
    #         '%s.shp' % tmpfile,
    #         overwrite=True)
    #     saved_layer.set_default_permissions()
    #     saved_layer.title = title or typename
    #     saved_layer.save()
    #
    # # cleanup
    # dir_name = os.path.dirname(tmpfile)
    # for root, dirs, files in os.walk(dir_name):
    #     for f in files:
    #         if tmpfile in f:
    #             try:
    #                 os.remove(os.path.join(root, f))
    #             except:
    #                 pass

    dir_name = os.path.dirname(tmpfile)
    saved_layer = None
    metadata_file = None

    with ZipFile(tmpfile) as zf:
        zf.extractall(path=dir_name)
        for name in zf.namelist():
            basename, ext = os.path.splitext(name)
            if '.shp' in ext:
                # get metadata file
                if metadata_string:
                    if not isinstance(metadata_string, unicode):
                        metadata_string = unicode(metadata_string, 'utf-8')
                    metadata_file = '%s.xml' % basename
                    metadata_file = os.path.join(dir_name, metadata_file)
                    metadata_string = cleanup_metadata(metadata_string)
                    with io.open(metadata_file, mode='w',
                                 encoding='utf-8') as f:
                        f.write(metadata_string)

                # process shapefile layer
                saved_layer = file_upload(os.path.join(dir_name, name),
                                          overwrite=True)
                saved_layer.set_default_permissions()
                saved_layer.title = title or typename
                saved_layer.save()
                break

        # cleanup
        for name in zf.namelist():
            filepath = os.path.join(dir_name, name)
            try:
                os.remove(filepath)
            except BaseException:
                pass

        if metadata_file:
            try:
                os.remove(metadata_file)
            except BaseException:
                pass

    # cleanup
    try:
        os.remove(tmpfile)
    except BaseException:
        pass
    return saved_layer
Example No. 7
def process_impact_result(self, impact_result, analysis_id):
    """Extract impact analysis after running it via InaSAFE-Headless celery

    :param self: Task instance
    :type self: celery.task.Task

    :param impact_result: A dictionary of the output layer keys and URIs,
        together with status and message.
    :type impact_result: dict

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: True if success
    :rtype: bool
    """
    # Track the current task_id
    analysis = Analysis.objects.get(id=analysis_id)

    analysis.task_id = self.request.id
    analysis.save()

    success = False
    report_success = False
    impact_url = None
    impact_path = None

    if impact_result['status'] == RESULT_SUCCESS:
        impact_url = (
            impact_result['output'].get('impact_analysis')
            or impact_result['output'].get('hazard_aggregation_summary'))
        analysis_summary_url = (
            impact_result['output'].get('analysis_summary'))

        # generate report when the analysis has run successfully
        async_result = generate_report.delay(impact_url)
        with allow_join_result():
            report_metadata = async_result.get().get('output', {})

        for product_key, products in report_metadata.iteritems():
            for report_key, report_url in products.iteritems():
                report_url = download_file(report_url, direct_access=True)
                report_metadata[product_key][report_key] = report_url

        # decide if we are using direct access or not
        impact_url = get_impact_path(impact_url)

        # download impact layer path
        impact_path = download_file(impact_url, direct_access=True)
        dir_name = os.path.dirname(impact_path)
        is_zipfile = os.path.splitext(impact_path)[1].lower() == '.zip'
        if is_zipfile:
            # Extract the layer first
            with ZipFile(impact_path) as zf:
                zf.extractall(path=dir_name)
                for name in zf.namelist():
                    basename, ext = os.path.splitext(name)
                    if ext in cov_exts + vec_exts:
                        # process inside the loop so this only runs once
                        # the layer file has been found
                        success = process_impact_layer(analysis, dir_name,
                                                       basename, name)
                        report_success = process_impact_report(
                            analysis, report_metadata)
                        break

                # cleanup
                for name in zf.namelist():
                    filepath = os.path.join(dir_name, name)
                    try:
                        os.remove(filepath)
                    except BaseException:
                        pass
        else:
            # This means we are accessing a shp or tif file directly
            analysis_summary_filename = os.path.basename(analysis_summary_url)
            impact_filename = os.path.basename(impact_path)
            impact_basename, ext = os.path.splitext(impact_filename)
            success = process_impact_layer(analysis, dir_name, impact_basename,
                                           impact_filename,
                                           analysis_summary_filename)
            report_success = process_impact_report(analysis, report_metadata)

            # cleanup
            for name in os.listdir(dir_name):
                filepath = os.path.join(dir_name, name)
                is_file = os.path.isfile(filepath)
                should_delete = name.split('.')[0] == impact_basename
                if is_file and should_delete:
                    try:
                        os.remove(filepath)
                    except BaseException:
                        pass

    # cleanup
    try:
        os.remove(impact_path)
    except BaseException:
        pass

    if not success:
        LOGGER.info('No impact layer found in {0}'.format(impact_url))

    if not report_success:
        LOGGER.info('No impact report generated.')

    return success
Example No. 8
def process_impact_result(self, impact_url, analysis_id):
    """Extract impact analysis after running it via InaSAFE-Headless celery

    :param self: Task instance
    :type self: celery.task.Task

    :param impact_url: impact url returned from analysis
    :type impact_url: str

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: True if success
    :rtype: bool
    """
    # Track the current task_id
    analysis = Analysis.objects.get(id=analysis_id)

    analysis.task_id = self.request.id
    analysis.save()

    # decide if we are using direct access or not
    impact_url = get_impact_path(impact_url)

    # download impact zip
    impact_path = download_file(impact_url)
    dir_name = os.path.dirname(impact_path)
    success = False
    with ZipFile(impact_path) as zf:
        zf.extractall(path=dir_name)
        for name in zf.namelist():
            basename, ext = os.path.splitext(name)
            if ext in ['.shp', '.tif']:
                # process inside the loop so this only runs once the
                # layer file has been found
                saved_layer = file_upload(os.path.join(dir_name, name),
                                          overwrite=True)
                saved_layer.set_default_permissions()
                if analysis.user_title:
                    layer_name = analysis.user_title
                else:
                    layer_name = analysis.get_default_impact_title()
                saved_layer.title = layer_name
                saved_layer.save()
                current_impact = None
                if analysis.impact_layer:
                    current_impact = analysis.impact_layer
                analysis.impact_layer = saved_layer

                # check map report and table
                report_map_path = os.path.join(dir_name, '%s.pdf' % basename)

                if os.path.exists(report_map_path):
                    analysis.assign_report_map(report_map_path)

                report_table_path = os.path.join(dir_name,
                                                 '%s_table.pdf' % basename)

                if os.path.exists(report_table_path):
                    analysis.assign_report_table(report_table_path)

                analysis.task_id = process_impact_result.request.id
                analysis.task_state = 'SUCCESS'
                analysis.save()

                if current_impact:
                    current_impact.delete()
                success = True
                break

        # cleanup
        for name in zf.namelist():
            filepath = os.path.join(dir_name, name)
            try:
                os.remove(filepath)
            except BaseException:
                pass

    # cleanup
    try:
        os.remove(impact_path)
    except BaseException:
        pass

    if not success:
        LOGGER.info('No impact layer found in %s' % impact_url)

    return success
Example No. 9
def process_impact_result(self, impact_url, analysis_id):
    """Extract impact analysis after running it via InaSAFE-Headless celery

    :param self: Task instance
    :type self: celery.task.Task

    :param impact_url: impact url returned from analysis
    :type impact_url: str

    :param analysis_id: analysis id of the object
    :type analysis_id: int

    :return: True if success
    :rtype: bool
    """
    # Track the current task_id
    analysis = Analysis.objects.get(id=analysis_id)

    analysis.task_id = self.request.id
    analysis.save()

    # decide if we are using direct access or not
    impact_url = get_impact_path(impact_url)

    # download impact layer path
    impact_path = download_file(impact_url, direct_access=True)
    dir_name = os.path.dirname(impact_path)
    success = False
    is_zipfile = os.path.splitext(impact_path)[1].lower() == '.zip'
    if is_zipfile:
        # Extract the layer first
        with ZipFile(impact_path) as zf:
            zf.extractall(path=dir_name)
            for name in zf.namelist():
                basename, ext = os.path.splitext(name)
                if ext in ['.shp', '.tif']:
                    # process inside the loop so this only runs once
                    # the layer file has been found
                    success = process_impact_layer(
                        analysis, basename, dir_name, name)
                    break

            # cleanup
            for name in zf.namelist():
                filepath = os.path.join(dir_name, name)
                try:
                    os.remove(filepath)
                except BaseException:
                    pass
    else:
        # This means we are accessing a shp or tif file directly
        filename = os.path.basename(impact_path)
        basename, ext = os.path.splitext(filename)
        success = process_impact_layer(analysis, basename, dir_name, filename)

        # cleanup
        for name in os.listdir(dir_name):
            filepath = os.path.join(dir_name, name)
            is_file = os.path.isfile(filepath)
            should_delete = name.split('.')[0] == basename
            if is_file and should_delete:
                try:
                    os.remove(filepath)
                except BaseException:
                    pass

    # cleanup
    try:
        os.remove(impact_path)
    except BaseException:
        pass

    if not success:
        LOGGER.info('No impact layer found in %s' % impact_url)

    return success