Example #1
0
    def execute(self, userdata):
        """Activate the drone, take off, and plan a round trip over the windmills.

        Stores the windmill list and the planned path on ``userdata`` and
        signals completion to the state machine.
        """
        drone = userdata.drone
        drone.activate()
        drone.takeoff(TAKEOFF_HEIGHT)

        # Fetch the windmill positions and bracket them with the launch site
        # so the resulting path both starts and ends at home.
        waypoints = get_windmill_positions()
        # NOTE: userdata.windmill_list aliases the same list object that is
        # mutated below, matching the original behaviour.
        userdata.windmill_list = waypoints
        launch_site = Super_point(0, 0, 0)
        waypoints.insert(0, launch_site)
        waypoints.append(launch_site)
        userdata.path = make_path.make_path(waypoints)

        return 'startup_complete'
# CSV column key for the detection-time metric in the summary files.
SDDET = 'SDDetectionTime'

# Select the dataset folder for the configured dataset/day.
dataset_folder = ('OSU_YR4_Hip_30Hz.ws120.7cls' if DATASET == 'OSU_HIP' else ('uq_30Hz_day%d' % DAY))
input_folder = '%s/%s' % (ROOT_INPUT_FOLDER, dataset_folder)
output_folder = '%s/%s' % (ROOT_OUTPUT_FOLDER, dataset_folder)

# False-positive rates to report on. A generated sweep
# ([str(i*0.0001) for i in range(1, 101)]) was previously assigned here and
# immediately overwritten (dead code); only this hand-picked list is used.
FPRS = ['0.0005', '0.001', '0.0017', '0.0019', '0.0021', '0.0024', '0.0028', '0.0033', '0.005', '0.01']

for cls_alg in CLASS_ALGS:
    for cpd_alg in CPD_ALGS:
        files = listdir('%s/%s' % (input_folder, cls_alg))
        files = [file for file in files if cpd_alg in file and 'test.summary' in file and ('0.' + file.split('.')[-4]) in FPRS]
        fprs = ['0.' + file.split('.')[-4] for file in files]
        print cls_alg, cpd_alg
        print fprs
        out_path = '%s/%s_%s_results.csv' % (output_folder, cls_alg, cpd_alg)
        make_path(out_path)
        output_fobject = open(out_path, 'w')
        output_fobject.write('FPR,%s,%s,%s,%s\n' % (ACC, SDA, DET, SDDET))
        for fpr in fprs:
            lines = open('%s/%s/%s.model.best.AllWoFFT.%s.%s.test.summary.csv' % (input_folder, cls_alg, cls_alg, cpd_alg, fpr)).readlines()
            lines = [line.strip() for line in lines]
            keys = lines[0][1:-1].split('","')
            values = [float(i) for i in lines[1].split(',')]
            file_dict = {}
            for i in range(len(keys)):
                file_dict[keys[i]] = values[i]
            output_fobject.write('%s,%f,%f,%f,%f\n' % (fpr, file_dict[ACC], file_dict[SDA], file_dict[DET], file_dict[SDDET]))
        output_fobject.close()
Example #3
0
def _write_ascii_header(fid, nc, nr, xll, yll, cellsize):
    """Write the ESRI ASCII grid header shared by both output rasters."""
    fid.write('ncols         '+str(nc)+'\n')
    fid.write('nrows         '+str(nr)+'\n')
    fid.write('xllcorner     '+str(xll)+'\n')
    fid.write('yllcorner     '+str(yll)+'\n')
    fid.write('cellsize       '+str(cellsize)+'\n')
    fid.write('NOdata_struct_value  -9999\n')


def topomult(input_dem, mh_data_dir, direction):
    """Compute the topographic multiplier for one direction of a DEM.

    Reads the DEM, traverses it line by line in the given compass direction,
    applies ``multiplier_calc`` to each line, and writes both the raw and a
    3x3-box-smoothed multiplier grid as ESRI ASCII files in ``mh_data_dir``.

    :param input_dem: path of the input DEM file.
    :param mh_data_dir: output directory (created if missing).
    :param direction: compass direction string ('n', 's', 'e', 'w' or a
        two-letter diagonal such as 'ne').
    """
    if not exists(mh_data_dir):
        try:
            os.makedirs(mh_data_dir)
        except OSError:
            # Directory may have been created concurrently; ignore.
            pass

    # read input data using ascii_read. Note: data was flattened to a
    # single array
    log.info('Reading data from {0}'.format(input_dem))

    DEM = ElevationData(input_dem)
    nr = int(DEM.nrows)
    nc = int(DEM.ncols)
    xll = DEM.xllcorner
    yll = DEM.yllcorner
    cellsize = DEM.cellsize
    data = DEM.data.flatten()

    log.info('xll = %f' % xll)
    log.info('yll = %f' % yll)
    log.info('data_spacing = %f' % cellsize)

    # Compute the starting positions along the boundaries depending on dir
    # Together, the direction and the starting position determines a line.
    # Note that the starting positions are defined
    # in terms of the 1-d index of the array.

    # Diagonal traversals advance sqrt(2) cells per sample.
    if len(direction) == 2:
        data_spacing = DEM.cellsize*math.sqrt(2)
    else:
        data_spacing = DEM.cellsize

    Mhdata = np.ones(data.shape)
    strt_idx = []
    if direction.find('n') >= 0:
        strt_idx = np.append(strt_idx, list(range(0, nr * nc, nr)))
    if direction.find('s') >= 0:
        strt_idx = np.append(strt_idx, list(range(nr - 1, nr * nc, nr)))
    if direction.find('e') >= 0:
        strt_idx = np.append(strt_idx, list(range((nc - 1) * nr, nr * nc)))
    if direction.find('w') >= 0:
        strt_idx = np.append(strt_idx, list(range(0, nr)))

    # For the diagonal directions the corner will have been counted twice
    # so get rid of the duplicates then loop over the data lines
    # (i.e. over the starting positions)
    strt_idx = np.unique(strt_idx)

    for ctr, idx in enumerate(strt_idx):
        log.debug('Processing path %3i' % ctr + ' of %3i' % len(strt_idx) +
                  ', index %5i.' % idx)

        # Get a line of the data
        # path is a 1-d vector which gives the indices of the data
        path = make_path.make_path(nr, nc, idx, direction)
        line = data[path]
        M = multiplier_calc.multiplier_calc(line, data_spacing)

        # write the line back to the data array
        M = M.conj().transpose()
        Mhdata[path] = M[0, ].flatten()

    # Reshape the result to matrix like
    Mhdata = np.reshape(Mhdata, (nc, nr))
    Mhdata = Mhdata.conj().transpose()

    # Output unsmoothed data to an ascii file. The original code leaked this
    # handle (it was rebound without being closed); 'with' guarantees
    # flush/close.
    ofn = pjoin(mh_data_dir, 'mh_' + direction + '.asc')
    log.info('outputting unsmoothed data to: %s' % ofn)
    with open(ofn, 'w') as fid:
        _write_ascii_header(fid, nc, nr, xll, yll, cellsize)
        np.savetxt(fid, Mhdata, fmt='%4.2f', delimiter=' ', newline='\n')

    # Output smoothed data (3x3 mean filter, boundary filled with 1).
    ofn = pjoin(mh_data_dir, 'mh_' + direction + '_smooth.asc')
    log.info('outputting smoothed data to: %s' % ofn)

    g = np.ones((3, 3))/9.
    mhsmooth = signal.convolve2d(Mhdata, g, mode='same', boundary='fill',
                                 fillvalue=1)

    with open(ofn, 'w') as fid:
        _write_ascii_header(fid, nc, nr, xll, yll, cellsize)
        np.savetxt(fid, mhsmooth, fmt='%4.2f', delimiter=' ', newline='\n')

    log.info('Finished direction %s' % direction)
Example #4
0
    '0.0033', '0.005', '0.01'
]

for cls_alg in CLASS_ALGS:
    for cpd_alg in CPD_ALGS:
        files = listdir('%s/%s' % (input_folder, cls_alg))
        files = [
            file for file in files
            if cpd_alg in file and 'test.summary' in file and (
                '0.' + file.split('.')[-4]) in FPRS
        ]
        fprs = ['0.' + file.split('.')[-4] for file in files]
        print cls_alg, cpd_alg
        print fprs
        out_path = '%s/%s_%s_results.csv' % (output_folder, cls_alg, cpd_alg)
        make_path(out_path)
        output_fobject = open(out_path, 'w')
        output_fobject.write('FPR,%s,%s,%s,%s\n' % (ACC, SDA, DET, SDDET))
        for fpr in fprs:
            lines = open(
                '%s/%s/%s.model.best.AllWoFFT.%s.%s.test.summary.csv' %
                (input_folder, cls_alg, cls_alg, cpd_alg, fpr)).readlines()
            lines = [line.strip() for line in lines]
            keys = lines[0][1:-1].split('","')
            values = [float(i) for i in lines[1].split(',')]
            file_dict = {}
            for i in range(len(keys)):
                file_dict[keys[i]] = values[i]
            output_fobject.write('%s,%f,%f,%f,%f\n' %
                                 (fpr, file_dict[ACC], file_dict[SDA],
                                  file_dict[DET], file_dict[SDDET]))
def topomult(input_dem, tile_extents_nobuffer):
    """
    Executes core topographic multiplier functionality

    Computes the topographic multiplier 'Mt' along all eight compass
    directions for one DEM tile, smooths each result with a 3x3 mean
    filter, clips it back to the unbuffered extent, and saves it as a
    netCDF tile.

    :param input_dem: `file` the input tile of the DEM
    :param tile_extents_nobuffer: `tuple` the input tile extent without buffer
    """

    # find output folder
    mh_folder = pjoin(os.path.dirname(input_dem), 'topographic')
    file_name = os.path.basename(input_dem)

    ds = gdal.Open(input_dem)
    nc = ds.RasterXSize
    nr = ds.RasterYSize

    geotransform = ds.GetGeoTransform()
    x_left = geotransform[0]
    # Sign flips: the geotransform's y origin and row height are negated so
    # y_upper and pixelheight are handled as positive values below.
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(tile_extents_nobuffer, pixelwidth, pixelheight)

    band = ds.GetRasterBand(1)
    elevation_array = band.ReadAsArray(0, 0, nc, nr)

    # Mask nodata cells as NaN so the mask can be re-applied after smoothing.
    nodata_value = band.GetNoDataValue()
    if nodata_value is not None:
        elevation_array[np.where(elevation_array == nodata_value)] = np.nan
    else:
        # NOTE(review): 'elevation_array is None' is a scalar identity test
        # on the whole array (always False here), so np.where(False) masks
        # nothing and this statement is effectively a no-op. It was probably
        # meant as an elementwise nodata check -- confirm intent before
        # changing.
        elevation_array[np.where(elevation_array is None)] = np.nan

    # Flatten the transposed array; the 1-d paths built below index into
    # this layout.
    elevation_array_tran = np.transpose(elevation_array)
    data = elevation_array_tran.flatten()

    # Mean cell size averaged over the x and y pixel-size grids.
    x_m_array, y_m_array = get_pixel_size_grids(ds)
    cellsize = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    # Compute the starting positions along the boundaries depending on dir
    # Together, the direction and the starting position determines a line.
    # Note that the starting positions are defined
    # in terms of the 1-d index of the array.

    directions = ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw']

    for direction in directions:
        log.info(direction)

        # Diagonal traversals advance sqrt(2) cells per sample.
        if len(direction) == 2:
            data_spacing = cellsize * math.sqrt(2)
        else:
            data_spacing = cellsize

        mhdata = np.ones(data.shape)

        strt_idx = []
        if direction.find('n') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr * nc, nr)))
        if direction.find('s') >= 0:
            strt_idx = np.append(strt_idx, list(range(nr - 1, nr * nc, nr)))
        if direction.find('e') >= 0:
            strt_idx = np.append(strt_idx, list(range((nc - 1) * nr, nr * nc)))
        if direction.find('w') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr)))

        # For the diagonal directions the corner will have been counted twice
        # so get rid of the duplicates then loop over the data lines
        # (i.e. over the starting positions)
        strt_idx = np.unique(strt_idx)

        for ctr, idx in enumerate(strt_idx):
            log.debug('Processing path %3i' % ctr + ' of %3i' % len(strt_idx) +
                      ', index %5i.' % idx)

            # Get a line of the data
            # path is a 1-d vector which gives the indices of the data
            path = make_path.make_path(nr, nc, idx, direction)
            line = data[path]
            # Replace NaN (nodata) samples with 0 before the multiplier calc.
            line[np.isnan(line)] = 0.
            m = multiplier_calc.multiplier_calc(line, data_spacing)

            # write the line back to the data array
            m = np.transpose(m)
            mhdata[path] = m[0, ].flatten()

        # Reshape the result to matrix like
        mhdata = np.reshape(mhdata, (nc, nr))
        mhdata = np.transpose(mhdata)

        # Remove the conservatism as described in the Reference
        mhdata = remove_conservatism(mhdata)

        # consider the Tasmania factor
        if x_left > 143.0 and y_upper > 40.0:
            mhdata = tasmania(mhdata, elevation_array)

        # smooth with a 3x3 mean filter, then restore the nodata mask
        g = np.ones((3, 3)) / 9.
        mhsmooth = signal.convolve(mhdata, g, mode='same')
        mhsmooth[np.isnan(elevation_array)] = np.nan
        del mhdata

        # output format as netCDF4
        # NOTE(review): [:-4] strips four extra characters from the file
        # stem -- presumably a fixed suffix in the tile names; confirm.
        tile_nc = pjoin(
            mh_folder,
            os.path.splitext(file_name)[0][:-4] + '_mt_' + direction + '.nc')

        mhsmooth_nobuffer = clip_array(mhsmooth, x_left, y_upper, pixelwidth,
                                       pixelheight, tile_extents_nobuffer)

        save_multiplier('Mt', mhsmooth_nobuffer, lat, lon, tile_nc)
        del mhsmooth

        log.info('Finished direction {0}'.format(direction))

    # Release the GDAL dataset handle.
    ds = None
Example #6
0
def topomult(input_dem):
    """
    Executes core topographic multiplier functionality

    Computes the topographic multiplier 'Mt' along all eight compass
    directions for one DEM tile, smooths each result with a 3x3 mean
    filter, and saves it as a netCDF tile.

    :param input_dem: `file` the input tile of the DEM
    """

    # find output folder
    mh_folder = pjoin(os.path.dirname(input_dem), 'topographic')
    file_name = os.path.basename(input_dem)
    nc_folder = pjoin(mh_folder, 'netcdf')

    ds = gdal.Open(input_dem)
    nc = ds.RasterXSize
    nr = ds.RasterYSize

    geotransform = ds.GetGeoTransform()
    x_left = geotransform[0]
    # Sign flips: the geotransform's y origin and row height are negated so
    # y_upper and pixelheight are handled as positive values below.
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(x_left, y_upper, pixelwidth, pixelheight, nc, nr)

    band = ds.GetRasterBand(1)
    elevation_array = band.ReadAsArray(0, 0, nc, nr)
    # Treat elevations below -0.001 as nodata (NaN) so the mask can be
    # re-applied after smoothing.
    elevation_array[np.where(elevation_array < -0.001)] = np.nan

    # Flatten the transposed array; the 1-d paths built below index into
    # this layout.
    elevation_array_tran = np.transpose(elevation_array)
    data = elevation_array_tran.flatten()

    # Mean cell size averaged over the x and y pixel-size grids.
    x_m_array, y_m_array = get_pixel_size_grids(ds)
    cellsize = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    # Compute the starting positions along the boundaries depending on dir
    # Together, the direction and the starting position determines a line.
    # Note that the starting positions are defined
    # in terms of the 1-d index of the array.

    directions = ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw']

    for direction in directions:
        log.info(direction)

        # Diagonal traversals advance sqrt(2) cells per sample.
        if len(direction) == 2:
            data_spacing = cellsize * math.sqrt(2)
        else:
            data_spacing = cellsize

        mhdata = np.ones(data.shape)

        strt_idx = []
        if direction.find('n') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr * nc, nr)))
        if direction.find('s') >= 0:
            strt_idx = np.append(strt_idx, list(range(nr - 1, nr * nc, nr)))
        if direction.find('e') >= 0:
            strt_idx = np.append(strt_idx, list(range((nc - 1) * nr, nr * nc)))
        if direction.find('w') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr)))

        # For the diagonal directions the corner will have been counted twice
        # so get rid of the duplicates then loop over the data lines
        # (i.e. over the starting positions)
        strt_idx = np.unique(strt_idx)

        for ctr, idx in enumerate(strt_idx):
            log.debug('Processing path %3i' % ctr + ' of %3i' % len(strt_idx)
                      + ', index %5i.' % idx)

            # Get a line of the data
            # path is a 1-d vector which gives the indices of the data
            path = make_path.make_path(nr, nc, idx, direction)
            line = data[path]
            # Replace NaN (nodata) samples with 0 before the multiplier calc.
            line[np.isnan(line)] = 0.
            m = multiplier_calc.multiplier_calc(line, data_spacing)

            # write the line back to the data array
            m = np.transpose(m)
            mhdata[path] = m[0, ].flatten()

        # Reshape the result to matrix like
        mhdata = np.reshape(mhdata, (nc, nr))
        mhdata = np.transpose(mhdata)

        # smooth with a 3x3 mean filter, then restore the nodata mask
        g = np.ones((3, 3)) / 9.
        mhsmooth = signal.convolve(mhdata, g, mode='same')
        mhsmooth[np.isnan(elevation_array)] = np.nan
        del mhdata

        # output format as netCDF4
        # NOTE(review): [:-4] strips four extra characters from the file
        # stem -- presumably a fixed suffix in the tile names; confirm.
        tile_nc = pjoin(nc_folder, os.path.splitext(file_name)[0][:-4] + '_mt_' +
                        direction + '.nc')
        save_multiplier('Mt', mhsmooth, lat, lon, tile_nc)
        del mhsmooth

        log.info('Finished direction {0}'.format(direction))

    # Release the GDAL dataset handle.
    ds = None