Example #1
# Imports (module paths assumed to follow the WinnForum SAS reference models layout)
from collections import namedtuple

from reference_models.common import mpool
from reference_models.geo import drive
from reference_models.dpa import dpa_mgr


def ConfigureRunningEnv(num_process, size_tile_cache):
    """Configures the running environment.

    Optimal performance is obtained by increasing the number of processes
    and using a geo cache big enough to avoid swapping. Each tile covers
    about 100km x 100km. Swapping statistics can be obtained with the
    routine `CheckTerrainTileCacheOk()`.

    Args:
      num_process: Number of processes. Special values: -1: #cpu/2 ; -2: #cpu-1
      size_tile_cache: Geo cache size in number of tiles. The memory usage is
        about `num_process * size_tile_cache * 60 MB`.
    """
    # Configure the global pool of processes
    mpool.Configure(num_process)

    # Configure the geo drivers to avoid swap
    # - for main process
    drive.ConfigureTerrainDriver(cache_size=size_tile_cache)
    drive.ConfigureNlcdDriver(cache_size=size_tile_cache)
    # - for worker processes
    mpool.RunOnEachWorkerProcess(drive.ConfigureTerrainDriver,
                                 terrain_dir=None,
                                 cache_size=size_tile_cache)
    mpool.RunOnEachWorkerProcess(drive.ConfigureNlcdDriver,
                                 nlcd_dir=None,
                                 cache_size=size_tile_cache)
    return mpool.GetNumWorkerProcesses()
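
# Example usage (an illustrative sketch; the values below are not taken from
# this snippet). Following the memory note above, num_process=4 with
# size_tile_cache=32 uses roughly 4 * 32 * 60 MB ~= 7.7 GB of geo cache.
#   num_workers = ConfigureRunningEnv(num_process=4, size_tile_cache=32)
#   print('Running with %d worker processes' % num_workers)
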
# Define a protection point, i.e., a tuple with the named fields
# 'latitude' and 'longitude'.
ProtectionPoint = namedtuple('ProtectionPoint', ['latitude', 'longitude'])


if __name__ == '__main__':
    # Number of Monte Carlo iterations
    num_iter = 2000

    # Margin check
    margin_db = 0.01  # Possible only because cache manager is used

    # Configure the global pool manager.
    # Note: must not be run in a child process, hence the __main__ guard.
    num_processes = 4
    mpool.Configure(num_processes)

    # Populate protection points
    protection_points = [ProtectionPoint(latitude=36.9400, longitude=-75.9989),
                         ProtectionPoint(latitude=37.7579, longitude=-75.4105),
                         ProtectionPoint(latitude=36.1044, longitude=-73.3147),
                         ProtectionPoint(latitude=36.1211, longitude=-75.5939)]

    channel = (3600, 3610)

    # Configure operating parameters
    dpa_mgr.Dpa.Configure(num_iteration=num_iter)
    dpa_ref = dpa_mgr.Dpa(protection_points,
                          name='test(East1)',
                          threshold=-144,
Example #3
import json
import multiprocessing
import os
import time

import iap
from reference_models.common import mpool
from reference_models.interference import interference as interf
from full_activity_dump import FullActivityDump

# Number of processes for parallel execution
NUM_OF_PROCESS = 30

if __name__ == '__main__':

    # Configure the multiprocess pool
    mpool.Configure(NUM_OF_PROCESS)

    cbsd_0 = json.load(open(os.path.join('test_data', 'cbsd_0.json')))
    cbsd_1 = json.load(open(os.path.join('test_data', 'cbsd_1.json')))
    cbsd_2 = json.load(open(os.path.join('test_data', 'cbsd_2.json')))
    cbsd_3 = json.load(open(os.path.join('test_data', 'cbsd_3.json')))
    cbsd_4 = json.load(open(os.path.join('test_data', 'cbsd_4.json')))
    cbsd_5 = json.load(open(os.path.join('test_data', 'cbsd_5.json')))
    cbsd_6 = json.load(open(os.path.join('test_data', 'cbsd_6.json')))
    cbsd_7 = json.load(open(os.path.join('test_data', 'cbsd_7.json')))
    cbsd_8 = json.load(open(os.path.join('test_data', 'cbsd_8.json')))
    cbsd_9 = json.load(open(os.path.join('test_data', 'cbsd_9.json')))
    cbsd_10 = json.load(open(os.path.join('test_data', 'cbsd_10.json')))
    cbsd_11 = json.load(open(os.path.join('test_data', 'cbsd_11.json')))
    cbsd_12 = json.load(open(os.path.join('test_data', 'cbsd_12.json')))
    cbsd_13 = json.load(open(os.path.join('test_data', 'cbsd_13.json')))
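
    # The per-file loads above could equivalently be written as a loop over
    # the same test_data files (a sketch, assuming the indices shown above):
    #   cbsds = []
    #   for i in range(14):
    #       with open(os.path.join('test_data', 'cbsd_%d.json' % i)) as fd:
    #           cbsds.append(json.load(fd))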
Example #4
def findMoveList(protection_specs,
                 protection_points,
                 registration_requests,
                 grant_requests,
                 num_iter,
                 num_processes,
                 pool=None):
    """Main routine to find CBSD indices on the move list.

  Inputs:
    protection_specs:   protection specifications, an object with attributes
                        'lowFreq' (in Hz), 'highFreq' (in Hz),
                        'antHeight' (in meters), 'beamwidth' (in degrees),
                        'threshold' (in dBm/10MHz), 'neighbor_distances' (km),
                        'min_azimuth', 'max_azimuth' (degrees)
                        where `neighbor_distances` are the neighborhood
                        distances (km) as a sequence:
                          [cata_dist, catb_dist, cata_oob_dist, catb_oob_dist]
    protection_points:  a list of protection points, each one being an object
                        providing attributes 'latitude' and 'longitude'
    registration_requests: a list of CBSD registration requests, each one being
                           a dictionary containing CBSD registration information
    grant_requests:     a list of grant requests, each one being a dictionary
                        containing grant information; there is a one-to-one
                        mapping between items in registration_requests and
                        grant_requests; a CBSD with more than one grant will
                        have corresponding duplicate items in registration_requests
    num_iter:           number of Monte Carlo iterations
    num_processes:      number of parallel processes to use
    pool:               optional |multiprocessing.Pool| to use

  Returns:
    result:             a Boolean list (same size as registration_requests/
                        grant_requests) with TRUE elements at indices having
                        grants on the move list
  """
    grants = data.getGrantsFromRequests(registration_requests, grant_requests)
    # Find the move list of each protection constraint with a pool of parallel processes.
    if pool is None:
        mpool.Configure(num_processes)
        pool = mpool.Pool()

    neighbor_distances = protection_specs.neighbor_distances
    try:
        min_azimuth = protection_specs.min_azimuth
        max_azimuth = protection_specs.max_azimuth
    except AttributeError:
        min_azimuth, max_azimuth = 0, 360

    moveListC = partial(moveListConstraint,
                        low_freq=protection_specs.lowFreq,
                        high_freq=protection_specs.highFreq,
                        grants=grants,
                        inc_ant_height=protection_specs.antHeight,
                        num_iter=num_iter,
                        threshold=protection_specs.threshold,
                        beamwidth=protection_specs.beamwidth,
                        min_azimuth=min_azimuth,
                        max_azimuth=max_azimuth,
                        neighbor_distances=neighbor_distances)
    M_c, _ = zip(*pool.map(moveListC, protection_points))

    # Find the unique CBSD indices in the M_c list of lists.
    M = set().union(*M_c)

    # Set to TRUE the elements of the output Boolean array that have grant_index in
    # the move list.
    result = np.zeros(len(grants), bool)
    for k, grant in enumerate(grants):
        if grant in M:
            result[k] = True
    output = result.tolist()
    return output
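
# Minimal usage sketch (hypothetical values; `ProtectionSpec` stands for any
# object exposing the attributes documented above and is not defined here):
#   specs = ProtectionSpec(lowFreq=3550e6, highFreq=3560e6, antHeight=50,
#                          beamwidth=3, threshold=-144,
#                          neighbor_distances=[150, 200, 0, 25])
#   on_move_list = findMoveList(specs, protection_points,
#                               registration_requests, grant_requests,
#                               num_iter=2000, num_processes=4)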
Example #5
if __name__ == '__main__':

    # Process the command line arguments.
    options = parser.parse_args()
    logging.getLogger().setLevel(_LOGGER_MAP[options.log_level.lower()])

    # Configure the multiprocessing worker pool.
    # Your options are:
    #   0: single process (default if not called)
    #  -1: use half of the cpus
    #  -2: use all cpus (minus one)
    #  a specific number of cpus
    # Or your own `pool`.
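    # For instance (illustrative calls; `my_pool` is a placeholder name):
    #   mpool.Configure(num_processes=-1)   # use half of the CPUs
    #   mpool.Configure(num_processes=4)    # use exactly 4 worker processes
    #   mpool.Configure(pool=my_pool)       # reuse an existing pool object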

    logging.info('Start Worker processes')
    mpool.Configure(num_processes=NUM_PROCESSES)
    num_workers = mpool.GetNumWorkerProcesses()
    logging.info(' ... %d workers started' % num_workers)

    # Configure geo drivers
    logging.info('Configure geo drivers')
    (num_tiles_master_ned, num_tiles_worker_ned, num_tiles_master_nlcd,
     num_tiles_worker_nlcd) = GetGeoCacheSize(num_workers)
    if num_tiles_master_ned < 16:
        logging.warning('Required geo cache size %d (for master) is low'
                        ' - too little memory or too many workers' %
                        num_tiles_master_ned)
    logging.info(' ... NED: cache size: %d for master, %d for workers' %
                 (num_tiles_master_ned, num_tiles_worker_ned))
    logging.info(' ... NLCD: cache size: %d for master, %d for workers' %
                 (num_tiles_master_nlcd, num_tiles_worker_nlcd))
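
    # GetGeoCacheSize() is not shown in this snippet; a plausible sketch,
    # assuming the ~60 MB-per-tile figure quoted for the geo drivers, is to
    # split a memory budget between the master and the workers:
    #   def GetGeoCacheSize(num_workers, mem_budget_mb=16000):
    #       tiles = int(mem_budget_mb / (60. * (num_workers + 1)))
    #       return tiles, tiles, tiles, tiles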


if __name__ == '__main__':

    #=======================================
    # Decrease the GWPZ/PPA grid resolution to reduce run time
    aggregate_interference.GWPZ_GRID_RES_ARCSEC = 10
    aggregate_interference.PPA_GRID_RES_ARCSEC = 10

    #=======================================
    # Configure the multiprocess pool with profiler enabled
    pool = profpool.PoolWithProfiler(NUM_PROCESS)
    mpool.Configure(pool=pool)
    prof0 = cProfile.Profile()
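    # Typical cProfile usage around a section to be measured (a sketch; the
    # middle line is a placeholder for the actual computation):
    #   prof0.enable()
    #   ... run the aggregate interference computation ...
    #   prof0.disable()
    #   prof0.print_stats('cumtime')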

    #=======================================
    # Data directory
    _BASE_DATA_DIR = os.path.join(os.path.dirname(__file__),
                                  '../../interference/test_data')

    # Populate a list of CBSD registration requests
    cbsd_filename = [
        'cbsd_0.json', 'cbsd_1.json', 'cbsd_2.json', 'cbsd_3.json',
        'cbsd_4.json', 'cbsd_5.json', 'cbsd_6.json', 'cbsd_7.json',
        'cbsd_8.json', 'cbsd_9.json', 'cbsd_10.json', 'cbsd_11.json',
        'cbsd_12.json', 'cbsd_13.json', 'cbsd_14.json', 'cbsd_15.json',
        'cbsd_16.json', 'cbsd_17.json', 'cbsd_18.json', 'cbsd_19.json',
        'cbsd_20.json', 'cbsd_21.json', 'cbsd_22.json', 'cbsd_23.json'
Example #7
def getTileStats():
    return drive.terrain_driver.stats.ActiveTilesCount()


def printTileStats():
    drive.terrain_driver.stats.Report()
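
# Example of checking the terrain tile cache after a run, using only the
# helpers defined above:
#   print('Active terrain tiles: %d' % getTileStats())
#   printTileStats()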


#--------------------------------------------------
# The simulation
if __name__ == '__main__':
    # reset the random seed
    np.random.seed(12345)

    # Configure the global pool of processes
    mpool.Configure(num_processes)
    num_workers = mpool.GetNumWorkerProcesses()

    (all_cbsds, reg_requests, grant_requests, protection_zone,
     (n_a_indoor, n_a_outdoor, n_b), ax) = PrepareSimulation()

    # Build the grants
    grants = data.getGrantsFromRequests(reg_requests, grant_requests)

    # Build the DPA manager
    dpa = dpa_mgr.BuildDpa(
        dpa_name, 'default (%d,%d,%d,%d,%d)' %
        (npts_front_dpa_contour, npts_back_dpa_contour, npts_within_dpa_front,
         npts_within_dpa_back, front_usborder_buffer_km))
    dpa.ResetFreqRange([(fmin, fmax)])
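
    # A typical continuation (a sketch; SetGrantsFromList and ComputeMoveLists
    # are assumed dpa_mgr.Dpa methods, not shown in this snippet):
    #   dpa.SetGrantsFromList(grants)
    #   dpa.ComputeMoveLists()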