Code Example #1
File: specCompiler.py  Project: voiceheard/LTLMoP
    def loadSimpleSpec(self,text="", regionList=[], sensors=[], actuators=[], customs=[], adj=[], outputfile=""):
        """
        Load a simple spec given by the arguments, without reading from a spec file.

        Intended for use with SLURP.

        regionList, sensors, actuators, and customs are lists of strings representing propositions;
        adj is a list of tuples [(region1, region2), ...] describing region adjacency.
        """

        if outputfile == "":
            logging.error("Need to specify output filename")
            return

        self.proj.compile_options['decompose'] = False
        self.proj.project_root = os.path.abspath(os.path.dirname(os.path.expanduser(outputfile)))
        self.proj.project_basename, ext = os.path.splitext(os.path.basename(outputfile))
        self.proj.specText = text
        # construct a list of region objects with given names
        self.proj.rfi = regions.RegionFileInterface()
        for rname in regionList:
            self.proj.rfi.regions.append(regions.Region(name=rname))

        self.proj.enabled_sensors = sensors
        self.proj.enabled_actuators = actuators
        self.proj.all_customs = customs

        # construct adjacency matrix
        self.proj.rfi.transitions = [[[] for j in range(len(self.proj.rfi.regions))]
                                     for i in range(len(self.proj.rfi.regions))]
        for tran in adj:
            idx0 = self.proj.rfi.indexOfRegionWithName(tran[0])
            idx1 = self.proj.rfi.indexOfRegionWithName(tran[1])
            self.proj.rfi.transitions[idx0][idx1] = [(0,0)] # fake trans face
            self.proj.rfi.transitions[idx1][idx0] = [(0,0)]
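
For orientation, a minimal call might look like the sketch below. This is hypothetical usage, assuming a SpecCompiler instance named compiler and a two-region workspace; the spec text and output path are invented for illustration:

compiler = SpecCompiler()
compiler.loadSimpleSpec(text="visit r2",
                        regionList=["r1", "r2"],
                        sensors=[], actuators=[], customs=[],
                        adj=[("r1", "r2")],
                        outputfile="simple_example.spec")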
Code Example #2
import Polygon  # the Polygon2/Polygon3 package (gpc-based)
import regions  # LTLMoP's region classes


def make_boundary_and_obstacles(regionsList):
    """
    Create the boundary region and obstacle (hole) regions from the union of all roads.
    """

    bound_poly = Polygon.Polygon()
    for r in regionsList:
        points = [(pt.x, pt.y) for pt in r.getPoints()]
        bound_poly += Polygon.Polygon(points)

    print '------------------------------------'
    print "Boundary Polygon:" + str(bound_poly)
    print '------------------------------------'

    # form the hole (obstacle) regions
    obstacle_regions_list = []
    for idx_contour, contour in enumerate(bound_poly):
        if bound_poly.isHole(idx_contour):
            #print contour
            obstacle_region = regions.Region(
                name="hole" + str(idx_contour))  #position=regions.Point(x, y)
            for idx_pt, pt in enumerate(contour):
                obstacle_region.addPoint(
                    regions.Point(pt[0] - obstacle_region.position.x,
                                  pt[1] - obstacle_region.position.y), idx_pt)
            obstacle_region.isObstacle = True

            obstacle_regions_list.append(obstacle_region)

        else:  #this is boundary
            bound_region = regions.Region(
                name="boundary")  #position=regions.Point(x, y)
            for idx_boundary, pt in enumerate(contour):
                bound_region.addPoint(
                    regions.Point(pt[0] - bound_region.position.x,
                                  pt[1] - bound_region.position.y),
                    idx_boundary)

    #for x in [region.getPoints() for region in obstacle_regions_list]:
    #    print list(x)
    #print [x for x in bound_region.getPoints()]

    round_floating_points(obstacle_regions_list + [bound_region])
    return obstacle_regions_list + [bound_region]
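
For context: the Polygon package used above represents a polygon as a list of contours, where `+` (and `+=`) is geometric union and `isHole(i)` reports whether contour i is an inner ring. A minimal illustration of that contour/hole model, assuming Polygon2 or Polygon3 is installed:

import Polygon

outer = Polygon.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
inner = Polygon.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])
ring = outer - inner  # a square with a square hole: two contours

for i in range(len(ring)):
    print i, ring.isHole(i)  # exactly one contour reports True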
Code Example #3
 def testFieldsDict(self):
     # 'notes' should be missing from the fields dict.
     self.assertEqual(
         {
             'keyboards': ['xkb:b::b'],
             'keyboard_mechanical_layout': 'e',
             'description': 'description',
             'locales': ['d'],
             'region_code': 'aa',
             'regulatory_domain': 'AA',
             'time_zones': ['c']
         }, (regions.Region('aa', 'xkb:b::b', 'c', 'd', 'e', 'description',
                            'notes').GetFieldsDict()))
Code Example #4
def createPoly(name, region_point_list):
    """
    create poly
    """
    x_origin, y_origin = 0, 0
    poly = regions.Region(name=name)  #position=regions.Point(x, y)
    for idx, (x, y) in enumerate(region_point_list):
        if not idx:
            x_origin, y_origin = x, y
        poly.addPoint(regions.Point((offset_x + x) * scale - poly.position[0],
                                    (offset_y + y) * scale - poly.position[1]),
                      idx)

    return poly
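
A hypothetical call, assuming the module's `offset_x`, `offset_y`, and `scale` globals have been set (the values below are invented):

offset_x, offset_y, scale = 0.0, 0.0, 100.0  # assumed globals from the original module
unit_square = createPoly("r1", [(0, 0), (1, 0), (1, 1), (0, 1)])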
Code Example #5
  def testConsolidateRegionsDups(self):
    """Test duplicate handling.  Two identical Regions are OK."""
    # Make two copies of the same region.
    region_list = [regions.Region('aa', 'xkb:b::b', 'c', 'd', 'e')
                   for _ in range(2)]
    # It's OK.
    self.assertEquals(
        {'aa': region_list[0]}, regions.ConsolidateRegions(region_list))

    # Modify the second copy.
    region_list[1].keyboards = ['f']
    # Not OK anymore!
    self.assertRaisesRegexp(
        regions.RegionException, "Conflicting definitions for region 'aa':",
        regions.ConsolidateRegions, region_list)
Code Example #6
File: model.py  Project: xvalier/genericDialInspect
 def __init__(self, image, regions_path, fixture_path, template_path):
     self.image = image
     self.graphics = image
     self.paths['regions'] = regions_path
     self.paths['fixtures'] = fixture_path
     self.paths['templates'] = template_path
     for i in range(0,self.rows):
         region_row = []
         for j in range(0,self.cols):
             name = '{0}{1}'.format(i,j)
             # Default colors and dimensions for search regions are the same;
             # columns 0 and 1 map to the first dimension preset.
             k = 0 if j < 2 else j - 1
             region = regions.Region(name, self.type_dims[k], self.available_colors[i])
             region_row.append(region)
         self.regions.append(region_row)
     self.load()
     self.draw()
Code Example #7
def load_dataset_from_hdf(fname):
    """Load dataset from HDF5 file and instantiate a `VoigtFit.Dataset' class."""
    with h5py.File(fname, 'r') as hdf:
        z_sys = hdf.attrs['redshift']
        ds = dataset.DataSet(z_sys)
        ds.velspan = hdf.attrs['velspan']
        ds.verbose = hdf.attrs['verbose']
        if 'name' in hdf.attrs.keys():
            ds.set_name(hdf.attrs['name'])
        else:
            ds.set_name('')

        # Load .data:
        data = hdf['data']
        for chunk in data.values():
            res = chunk.attrs['res']
            norm = chunk.attrs['norm']
            ds.add_data(chunk['wl'].value, chunk['flux'].value, res,
                        err=chunk['error'].value, normalized=norm)

        # Load .regions:
        # --- this will be deprecated in later versions
        hdf_regions = hdf['regions']
        for reg in hdf_regions.values():
            region_lines = list()
            for line_tag, line_group in reg['lines'].items():
                act = line_group.attrs['active']
                # Add check for backward compatibility:
                if line_tag in dataset.lineList['trans']:
                    line_instance = dataset.Line(line_tag, active=act)
                    region_lines.append(line_instance)
                    ds.all_lines.append(line_tag)
                    ds.lines[line_tag] = line_instance
                else:
                    print(" [WARNING] - Anomaly detected for line:")
                    print("             %s" % line_tag)
                    print(" I suspect that the atomic linelist has changed...")
                    print("")

            # Check the empty case first; indexing region_lines[0] below
            # would otherwise raise IndexError before the explicit error:
            if len(region_lines) == 0:
                err_msg = "Something went wrong in this region: %s. No lines are defined!" % str(reg.name)
                raise ValueError(err_msg)

            # Instantiate the Region Class with the first Line:
            line_init = region_lines[0]
            v = reg.attrs['velspan']
            specID = reg.attrs['specID']
            Region = regions.Region(v, specID, line_init)

            # Load the rest of the lines (the first is already loaded):
            for line in region_lines[1:]:
                Region.lines.append(line)

            # Set region data and attributes:
            Region.res = reg.attrs['res']
            Region.normalized = reg.attrs['normalized']
            Region.cont_err = reg.attrs['cont_err']
            Region.new_mask = reg.attrs['new_mask']
            Region.kernel_fwhm = reg.attrs['kernel_fwhm']
            try:
                Region.label = reg.attrs['label']
            except KeyError:
                Region.label = ''

            Region.kernel = reg['kernel'].value
            Region.wl = reg['wl'].value
            Region.flux = reg['flux'].value
            Region.mask = reg['mask'].value
            Region.err = reg['error'].value

            ds.regions.append(Region)

        # Load .molecules:
        molecules = hdf['molecules']
        if len(molecules) > 0:
            for molecule, band_data in molecules.items():
                bands = [[b, J] for b, J in band_data]
                ds.molecules[molecule] = bands
                # No need to call ds.add_molecule
                # lines are added above when defining the regions.

        # Load .components:
        components = hdf['components']
        if 'best_fit' in hdf:
            # --- Prepare fit parameters  [class: lmfit.Parameters]
            ds.best_fit = Parameters()

        for ion, comps in components.items():
            ds.components[ion] = list()
            if len(comps) > 0:
                for n, comp in enumerate(comps.values()):
                    if 'best_fit' in hdf:
                        # If 'best_fit' exists, use the best-fit values.
                        # The naming for 'best_fit' and 'components' is parallel
                        # so one variable in components can easily be identified
                        # in the best_fit data group by replacing the path:
                        pointer = comp.name
                        fit_pointer = pointer.replace('components', 'best_fit')
                        z = hdf[fit_pointer+'/z'].value
                        z_err = hdf[fit_pointer+'/z'].attrs['error']
                        b = hdf[fit_pointer+'/b'].value
                        b_err = hdf[fit_pointer+'/b'].attrs['error']
                        logN = hdf[fit_pointer+'/logN'].value
                        logN_err = hdf[fit_pointer+'/logN'].attrs['error']

                    else:
                        z = comp['z'].value
                        z_err = None
                        b = comp['b'].value
                        b_err = None
                        logN = comp['logN'].value
                        logN_err = None

                    # Extract component options:
                    opts = dict()
                    for varname in ['z', 'b', 'N']:
                        if varname == 'N':
                            hdf_name = 'logN'
                        else:
                            hdf_name = varname

                        tie = comp[hdf_name].attrs['tie_%s' % varname]
                        tie = None if tie == 'None' else tie
                        vary = comp[hdf_name].attrs['var_%s' % varname]
                        opts['tie_%s' % varname] = tie
                        opts['var_%s' % varname] = vary

                    # Add component to DataSet class:
                    ds.add_component(ion, z, b, logN, **opts)

                    if 'best_fit' in hdf:
                        # Add Parameters to DataSet.best_fit:
                        z_name = 'z%i_%s' % (n, ion)
                        b_name = 'b%i_%s' % (n, ion)
                        N_name = 'logN%i_%s' % (n, ion)
                        ds.best_fit.add(z_name, value=z, vary=opts['var_z'])
                        ds.best_fit[z_name].stderr = z_err
                        ds.best_fit.add(b_name, value=b, vary=opts['var_b'],
                                        min=0., max=500.)
                        ds.best_fit[b_name].stderr = b_err
                        ds.best_fit.add(N_name, value=logN, vary=opts['var_N'],
                                        min=0., max=40.)
                        ds.best_fit[N_name].stderr = logN_err

        if 'best_fit' in hdf:
            # Now the components have been defined in ds, so I can use them for the loop
            # to set the parameter ties:
            for ion, comps in ds.components.items():
                for n, comp in enumerate(comps):
                    z, b, logN, opts = comp
                    z_name = 'z%i_%s' % (n, ion)
                    b_name = 'b%i_%s' % (n, ion)
                    N_name = 'logN%i_%s' % (n, ion)

                    if opts['tie_z']:
                        ds.best_fit[z_name].expr = opts['tie_z']
                    if opts['tie_b']:
                        ds.best_fit[b_name].expr = opts['tie_b']
                    if opts['tie_N']:
                        ds.best_fit[N_name].expr = opts['tie_N']

        return ds
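
One portability note on the reader above: the `Dataset.value` property (e.g. `chunk['wl'].value`, `reg['kernel'].value`) is long deprecated and was removed in h5py 3.0. On current h5py, reading a whole dataset is spelled with an empty-tuple index; a minimal sketch (the file name and group path are placeholders):

import h5py

with h5py.File('example.hdf5', 'r') as hdf:
    wl = hdf['data/spec0/wl'][()]  # equivalent of the old hdf['data/spec0/wl'].value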
Code Example #8
def ParseRegions(e, smod):

    nregions = int(e[0])
    print " - reading %d regions..." % nregions
    at = 1

    regs = {}
    all_regions = {}  # Includes groups.

    import regions
    reload(regions)

    for i in range(nregions):

        try:
            nvoxels = int(e[at])
        except (IndexError, ValueError):
            print " - reached end of file before reading all regions"
            break

        at += 1
        rvs = e[at:at + (nvoxels * 3)]
        at += nvoxels * 3

        print "Region %d - %d voxels" % (i, nvoxels)

        rpoints = numpy.reshape(rvs, (nvoxels, 3)).astype(numpy.int32)

        #print rpoints

        nparents = int(e[at])
        at += 1
        parents = e[at:at + nparents].astype(numpy.int)
        at += nparents

        rid = i + 1
        reg = regions.Region(smod, rid, rpoints[0])

        smod.mask[rpoints[:, 2], rpoints[:, 1],
                  rpoints[:, 0]] = rid  # set mask at points

        all_regions[reg.rid] = reg
        regs[reg.rid] = reg

        last_reg = reg
        reg.preg = None

        for pi in parents:

            if pi in all_regions:
                preg = all_regions[pi]
            else:
                preg = regions.Region(smod, pi)
                preg.max_point = rpoints[0]
                all_regions[pi] = preg

            last_reg.preg = preg

            if preg.cregs.count(last_reg) == 0:
                preg.cregs.append(last_reg)

            last_reg = preg

    # Regions table only includes top level groups.
    groups = [reg for reg in all_regions.values() if reg.preg is None]

    return all_regions, groups, at
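
A small compatibility note: the `numpy.int` alias used when casting the parents array was deprecated in NumPy 1.20 and removed in 1.24. On current NumPy the same cast needs an explicit dtype (or the builtin int); a sketch of the equivalent line:

parents = e[at:at + nparents].astype(numpy.int64)  # numpy.int64 replaces the removed numpy.int alias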
Code Example #9
 def processInitialGrid(self):
     for source in self.regionSources:
         self.regions.append(regions.Region(source))
Code Example #10
# stdlib modules used below
import sys
import os
import json

# load in the LTLMoP library
import regions

# load in topo file
print "Loading '{}'...".format(sys.argv[1])
with open(sys.argv[1]) as f:
    data = f.read()

data = json.loads(data)

# create LTLMoP regions
rfi = regions.RegionFileInterface()

for rn in data["region_names"]:
    newRegion = regions.Region(name=rn)
    rfi.regions.append(newRegion)

# force topology (we avoid LTLMoP's automatic coincident edge detection)
rfi.transitions = [[[] for j in range(len(rfi.regions))]
                   for i in range(len(rfi.regions))]
for path in data["adjacencies"]:
    r1 = rfi.indexOfRegionWithName(path[0])
    r2 = rfi.indexOfRegionWithName(path[1])
    fake_face = [frozenset((regions.Point(1, 2), regions.Point(3, 4)))]
    rfi.transitions[r1][r2] = fake_face
    rfi.transitions[r2][r1] = fake_face

# save file
output_filename = os.path.splitext(sys.argv[1])[0] + ".converted.regions"
rfi.writeFile(output_filename)
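
The script takes the topology JSON path as its first argument. A hypothetical invocation and input shape (the file name and contents are invented; the keys match what the script reads):

python convert_topo.py office.json

# office.json:
# {"region_names": ["r1", "r2"], "adjacencies": [["r1", "r2"]]}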
Code Example #11
    if args.doRegions:
        eta_edges_all = [[-3, 3.], [-3, 0., 3.], [-3, 0., 3.],
                         [-3, -1.0, 1.0, 3],
                         [-3, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3]]
        phi_ndivisions = [2, 2, 3, 3, 10]

        jetCollections = {
            "df%i" % i: pd.DataFrame()
            for i in range(len(phi_ndivisions))
        }
        legends = []
        for i, (eta_edges, phi) in enumerate(zip(eta_edges_all,
                                                 phi_ndivisions)):
            phi_edges = np.linspace(-np.pi, np.pi, phi)
            PFRegions = np.array([[
                regions.Region(eta_edges[i], eta_edges[i + 1], phi_edges[j],
                               phi_edges[j + 1])
                for j in range(len(phi_edges) - 1)
            ] for i in range(len(eta_edges) - 1)])
            nRegions = (len(PFRegions) * len(PFRegions[0]))
            legends.append(r'Seed, %i regions' % nRegions)
            print("Using %i regions for jet reconstruction:" % nRegions)
            print("Eta bins: ")
            print(eta_edges)
            print("Phi bins: ")
            print(phi_edges)

            for index, event in events.groupby(level=0):
                if event.empty: continue
                particles_regionized = regions.regionize(event, PFRegions)
                for eta_bins in particles_regionized:
                    for phi_bins in eta_bins: