Example #1
    def independent_2(self):
        """	Returns a maximal independent set
		(maximal means that no other independent set cantains it, 
		doesn't mean that it has the most vertices)"""

        if self.ver_len() == 0:
            return None
        else:
            #Let 'v' be the vertex closest to (0, 0)
            #'v' will be added to the independent set
            v = self[0]
            dist_v = dist(v, (0, 0))
            for w in self.vertices:
                if dist(w, (0, 0)) < dist_v:
                    dist_v = dist(w, (0, 0))
                    v = w

            #Remove 'v' and its neighbors from the graph
            new_graph = self - (self.get_neighbors(v) + [v])

            #Repeat the action for the vertices that remained
            ind = new_graph.independent_2()

            if ind is not None:
                return [v] + ind
            return [v]
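All of these snippets lean on a small dist helper from each project's geometry module. As a reference, here is a minimal sketch of the 2D Euclidean version this example assumes (signatures vary across projects; some later examples use 3D points or a metric keyword):

import math

def dist(p, q):
    """Euclidean distance between two 2D points given as (x, y) pairs."""
    return math.hypot(p[0] - q[0], p[1] - q[1])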
Example #2
File: getTracks.py Project: hsp/GPS_HSP
    def getSubTrackLength(self, subTrack):
        # Sums consecutive point-to-point distances along the sub-track and
        # also returns the straight-line distance from the first point to the
        # last one. (Assumes pointList is non-empty.)
        pointList = self.pointLists[subTrack]
        totalLength = 0
        p0 = point(0, 0)
        for iP in range(len(pointList)):
            p1 = pointList[iP].obj.GetGeometryRef().GetPoint(0)
            p1 = point(p1[0], p1[1])
            if iP > 0:
                totalLength += dist(p0, p1)
            else:
                pFirst = p1
            p0 = p1
        return totalLength, dist(pFirst, p1)
Example #3
    def snap_point(self, x: float, y: float) -> Point:
        p = Point(x, y)

        # snap to active polygon
        if self.points:
            for p2 in self.points[:-1]:
                if dist(p, p2) < SNAP_RADIUS:
                    return p2

        # snap to polygons
        for p2 in self.collection.all_points:
            if dist(p, p2) < SNAP_RADIUS:
                return p2
        return p
Example #4
    def __init__(self, name, index=0):

        PointElectrode.__init__(self, name, index)
        settings = self.settings

        try:
            self.xyz = settings['point']
        except KeyError:
            self.xyz = settings['points'][self.index]
        self.x, self.y, self.z = self.xyz

        # Find the cable this corresponds to
        dists = geo.dist((self.x, self.y), (ws.nvt.x, ws.nvt.y))
        these = np.where(dists == dists.min())[0]
        # print('Points:', these)
        # Choose only one cable
        cable_i = these[0]

        # print('Cable:', ws.nvt.cables[cable_i])

        # Locate the section and segment (z value) of the cable
        j_sec, z_on_j = anatomy.locate(ws.seclens[cable_i], self.z)

        # print('Section and segment:', j_sec, z_on_j)

        # Cable and point of the cable to work on
        self.point = {'cable': cable_i, 'section': j_sec, 'z': z_on_j}

        self.set_stim_info()
Example #5
    def hasCrashed(self):

        for i in range(1, len(self.tail) - self.hasEaten):
            if geometry.dist(self.pos, self.tail[i].getCenter()) < self.snakeWidth:
                return True

        return False
Example #6
    def multi_dist(self) -> (pd.DataFrame, pd.DataFrame):
        '''
        Calculates the distance between each source and each antenna, and
        also returns the source-to-antenna vector for every source-antenna
        pair.

        :returns:
            - dist_arr: the distances from the sources to the antennas
            - sa_vec: the vectors from source to antenna in the Cartesian
              coordinate system (x, y, z); each element is a list of vector
              components
        :rtype: pandas.DataFrame
        '''

        dist_arr = pd.DataFrame(np.zeros((self.radar_n, self.source_n)),
                                columns=self.s_columns)
        sa_vec = pd.DataFrame(np.zeros((self.radar_n, self.source_n)),
                              columns=self.s_columns)

        for i in range(self.radar_n):
            for j in range(self.source_n):
                dist_arr.iloc[i, j] = geometry.dist(self.source_location[j, :],
                                                    self.antenna_location[i, :])
                sa_vec.iloc[[i], j] = pd.Series(
                    [self.antenna_location[i, :] - self.source_location[j, :]],
                    index=[i])
        return dist_arr, sa_vec
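The nested loops above fill the two frames cell by cell; with source_location and antenna_location as NumPy arrays of shapes (source_n, 3) and (radar_n, 3), the same quantities can be computed in a vectorized sketch (names as in the snippet, placeholder data; not the project's actual code):

import numpy as np
from scipy.spatial.distance import cdist

antenna_location = np.random.rand(4, 3)   # (radar_n, 3), placeholder data
source_location = np.random.rand(2, 3)    # (source_n, 3), placeholder data

# dist_arr[i, j] = Euclidean distance between antenna i and source j
dist_arr = cdist(antenna_location, source_location)
# sa_vec[i, j, :] = vector from source j to antenna i
sa_vec = antenna_location[:, None, :] - source_location[None, :, :]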
Example #7
    def preprocess_world_state(self, world_state):
        """
        Uses historical information about the world to build a more accurate map of where all balls and obstacles are.
        Places results back into the world_state dict without adding any new fields.
        """
        curr_pos = world_state['pose'][0]  # Our current (x,y)

        #
        # Build obstacle grid
        #

        # Add all obstacles into a temporary obstacle grid
        self.temp_obstacle_grid.occupancy.fill(0)
        for static_obstacle in self.field_columns:
            self.temp_obstacle_grid.insert_convex_polygon(static_obstacle, 1.0)
        for static_obstacle in self.field_trenches:
            self.temp_obstacle_grid.insert_rectangular_obstacle(static_obstacle.bounding_box, 1.0)
        for dynamic_obstacle in world_state['obstacles']:
            self.temp_obstacle_grid.insert_rectangular_obstacle(dynamic_obstacle,
                                                                self.obstacle_probability_growth_factor)

        # Inflate all obstacles in the temporary obstacle grid
        self.temp_obstacle_grid.inflate_obstacles(kernel_size=self.occupancy_grid_dilation_kernel_size)

        # Decay the probability of all cells in the master obstacle grid
        self.obstacle_grid.decay_probabilities(self.obstacle_probability_decay_factor)

        # Update the master obstacle grid with our temporary one
        self.obstacle_grid.occupancy += self.temp_obstacle_grid.occupancy
        self.obstacle_grid.occupancy = np.clip(self.obstacle_grid.occupancy, a_min=0, a_max=1)

        #
        # Update ball positions
        #

        # Recover any balls that are now within the deadzone
        if self.prev_obstacles is not None:
            for ball in self.prev_obstacles:
                if 0.5 < geom.dist(curr_pos, ball) <= self.deadzone_radius and ball not in world_state['balls']:
                    world_state['balls'].append(ball)
        self.prev_obstacles = world_state['balls']

        # Decay the probability of all balls
        self.ball_grid.decay_probabilities(self.ball_probability_decay_factor)

        # Grow the probability of all ball grid cells currently containing balls
        for ball_pos in world_state['balls']:
            self.ball_grid.grow_probability(ball_pos, self.ball_probability_growth_factor)

        # Get final ball positions out of ball grid
        world_state['balls'] = []
        for i in range(self.ball_grid.occupancy.shape[0]):
            for j in range(self.ball_grid.occupancy.shape[1]):
                cell_position = self.ball_grid.grid[i][j].position
                p_ball = self.ball_grid.occupancy[i][j]
                p_occupied = self.obstacle_grid.occupancy[i][j]
                if p_ball >= self.ball_probability_threshold and p_occupied < self.obstacle_probability_threshold:
                    world_state['balls'].append(cell_position)
Example #8
    def project(self, source_point):
        """Finds the closest point on the destination mesh for the given
        source_point, and returns the projected point as well as the distance
        to that point."""
        projected_point_index = self.destination_nearest_neighbors.kneighbors(
                                    source_point
                                )[1][0][0]
        projected_point = self.destination_mesh.vs[projected_point_index]

        return projected_point, dist(projected_point, source_point)
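destination_nearest_neighbors here is presumably a fitted sklearn.neighbors.NearestNeighbors instance: kneighbors returns a (distances, indices) pair of arrays, which is why the snippet indexes [1][0][0] to get the index of the single nearest vertex. A minimal, self-contained sketch of that setup, with placeholder data:

import numpy as np
from sklearn.neighbors import NearestNeighbors

destination_vertices = np.random.rand(100, 3)      # placeholder mesh vertices
nn = NearestNeighbors(n_neighbors=1).fit(destination_vertices)

query = np.random.rand(1, 3)                       # kneighbors expects a 2D array
distances, indices = nn.kneighbors(query)
projected = destination_vertices[indices[0][0]]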
Example #9
    def find_element(self, x: float, y: float, radius: float = 20.0):
        """ Returns element from collection that is in radius. """
        p = Point(x, y)

        for p2 in self.collection.points:
            if dist(p, p2) < radius:
                return p2

        for seg in self.collection.segments:
            for p2 in seg:
                if dist(p, p2) < radius:
                    return seg

        for poly in self.collection.polygons:
            for p2 in poly.points:
                if dist(p, p2) < radius:
                    return poly

        return None
Example #10
def testBugEnvironment():
  print("#________________TESTING_____________________#")
  #Define start and goal positions plus a couple of polygonal obstacles
  start = (.3,.3)
  #goal = (5.4,4.2)
  goal = (3.5,3.5)

  Poly1 = pol.Polygon()
  Poly1.initiate_list([-0.6,-0.4,0.7,0.6,0.2,-0.296057],[0.3,-0.4,-0.3,0.4,0.3,0.596997])


  Poly2 = pol.Polygon()
  Poly2.initiate_list([-0.8,-0.1,0.9,0.3,0.102922,-0.3],[-0.4,-0.1,-0.4,0.2,0.598169,0.4])

  #Shift polygons into positions along path to create obstacles
  Poly1.set_shift(1.5, 1.5)
  Poly2.set_shift(2.0, 3.5)

  #Need to create a list of these obstacles to define the environment
  PolyList = [Poly1,Poly2]

  path = []

  #Bug algorithm invocation goes here
  tic = time.time()
  path = bug.computeBug1Path(start, goal, PolyList)
  toc = time.time()
  print("Time to find path in ms: ", (toc - tic) * 1000)

  totalPathLength = 0
  for i in range(0, len(path) - 1):
      totalPathLength += dist(path[i], path[i + 1])
  print("Total path length: ", totalPathLength)

  #Plot path of Bug
  drawPolygonAndPoint(PolyList,start,goal,path)

  #Plot distance to goal
  distanceToGoal = []
  for i in range(len(path)):
    distanceToGoal.append(dist((path[i][0], path[i][1]), goal))
  drawDistanceToGoal(distanceToGoal)
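The path-length accumulation above is the usual fold over consecutive point pairs; with the same dist helper it can be written in one line:

totalPathLength = sum(dist(a, b) for a, b in zip(path, path[1:]))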
Example #11
File: getTracks.py Project: hsp/GPS_HSP
 def getSpeedSlope(self, shapeFile, subTrack, slopeSpeedOutFileHdl, paramDict, speedSlopeOutFileHdl):
     # one record per point
     # get distance
     # get duration
     # get speed
     # get altitudes
     # get slope
     #write speed and slope
     pointList = self.pointLists[subTrack]
     if slopeSpeedOutFileHdl:
         slopeSpeedOutFileHdl.write("ShapeFile" + delim + "SubTrack" + delim + "Distance" + delim + "FromID" + delim + 
                                    "ToID" + delim + "Speed" + delim + "Slope" + delim + "\n")
     pp0 = point(0,0)
     slopeList = []
     speedList = []
     for iP in range(len(pointList)):
         p1 = pointList[iP].obj.GetGeometryRef().GetPoint(0)
         pp1 = point(p1[0], p1[1])
         distance = 0.0
         offset = 1
         if iP > 0:
             while distance < slopeSpeedResolution and iP+offset < len(pointList):
                 ##print iP+offset, len(pointList)
                 p2 = pointList[iP+offset].obj.GetGeometryRef().GetPoint(0)
                 pp2 = point(p2[0], p2[1])
                 p3 = pointList[iP+offset-1].obj.GetGeometryRef().GetPoint(0)
                 pp3 = point(p3[0], p3[1])
                 ## Collect slopes on the way (as a list). Enable average slopes by the end.
                 distance += dist(pp3, pp2)
                 startId   = pointList[iP].obj.GetField(pointIdAttributeName)
                 endId     = pointList[iP+offset].obj.GetField(pointIdAttributeName)
                 startTime = pointList[iP].dateTime
                 endTime   =  pointList[iP+offset].dateTime
                 offset += 1
             deltaTimeHours = (endTime - startTime).seconds / 3600.00
             rasterVal1 = paramDict["Altitude"].getCellValueAtGeolocation(pp2.x, pp2.y)
             rasterVal0 = paramDict["Altitude"].getCellValueAtGeolocation(pp0.x, pp0.y)
             deltaAltitude = rasterVal1 - rasterVal0
             if distance != 0:
                 ## Calc slopePct as the average of all segment slopes
                 slopePct = (deltaAltitude * 100.00) / distance
             else:
                 slopePct = 0
             speed = (distance / 1000) / deltaTimeHours
             if slopePct > minSlope and slopePct < maxSlope and speed > minSpeed and speed < maxSpeed:
                 slopeSpeedOutFileHdl.write(shapeFile + delim + str(subTrack) + delim + str(distance) + delim + str(startId) + delim + 
                                            str(endId) + delim + str(speed) + delim + str(slopePct) + "\n")
                 slopeList.append(slopePct)
                 speedList.append(speed)
         p0 = p1
         pp0 = point(p0[0], p0[1])
     ##print len(slopeList), len(speedList), len([slopeList, speedList])
     return [slopeList, speedList]
Example #12
    def compare(a: Union[Point, Segment], b: Union[Point, Segment]):
        # get current ray
        ray = ray_getter()

        if isinstance(a, Point) and isinstance(b, Point):
            return dist(ray.p1, a) - dist(ray.p2, b)

        if isinstance(a, Point) and isinstance(b, Segment):
            pb = intersection(*ray,
                              *b,
                              restriction_1='ray',
                              restriction_2='segment')
            dist_a = dist(ray.p1, a)
            dist_b = dist(ray.p1, pb)
            if dist_a == dist_b:
                return -1
            return dist_a - dist_b

        if isinstance(a, Segment) and isinstance(b, Point):
            pa = intersection(*ray,
                              *a,
                              restriction_1='ray',
                              restriction_2='segment')
            dist_a = dist(ray.p1, pa)
            dist_b = dist(ray.p1, b)
            if dist_a == dist_b:
                return 1
            return dist_a - dist_b

        if isinstance(a, Segment) and isinstance(b, Segment):
            pa = intersection(*ray,
                              *a,
                              restriction_1='ray',
                              restriction_2='segment')
            pb = intersection(*ray,
                              *b,
                              restriction_1='ray',
                              restriction_2='segment')
            dist_a = dist(ray.p1, pa)
            dist_b = dist(ray.p1, pb)
            if dist_a == dist_b:
                pa2 = a.p1 if a.p2 == pa else a.p2
                pb2 = b.p1 if b.p2 == pb else b.p2
                return angle_between_points(ray.p1, ray.p2,
                                            pa2) - angle_between_points(
                                                ray.p1, ray.p2, pb2)
            return dist_a - dist_b

        raise ValueError(
            f'Comparator got unexpected types: {type(a)}, {type(b)}')
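Because compare returns a signed number rather than a boolean, it can be plugged in wherever a key function is expected via functools.cmp_to_key (this is how a comparator like this typically feeds a sorted call or the SortedList status structure of Example #24). A toy, self-contained illustration of the pattern:

from functools import cmp_to_key

def compare(a, b):
    # Order 2D points by squared distance from the origin
    return (a[0] ** 2 + a[1] ** 2) - (b[0] ** 2 + b[1] ** 2)

points = [(3, 4), (1, 1), (0, 2)]
print(sorted(points, key=cmp_to_key(compare)))  # [(1, 1), (0, 2), (3, 4)]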
Example #13
    def pm6_error(params):
        # Run Gaussian jobs with new parameters, collecting them so that
        # every job (not just the last one) can be waited on below
        running_jobs = []
        for i, example in enumerate(examples):
            running_jobs.append(job('%s-%d-%d' % (name, counter[0], i),
                                    'PM6=(Input,Print) Opt=Loose',
                                    example.atoms,
                                    extra_section=param_string % tuple(params),
                                    queue=queue,
                                    force=True))
        # Wait for all jobs to finish
        for j in running_jobs:
            j.wait()

        # Get forces and energies resulting from new parameters
        geom_error = 0.0
        for i, example in enumerate(examples):
            try:
                new_energy, new_atoms = parse_atoms(
                    '%s-%d-%d' % (name, counter[0], i),
                    check_convergence=False)
            except Exception:
                print('%s-%d-%d' % (name, counter[0], i), 'has no data')
                exit()
            if parse_atoms('%s-%d-%d' % (name, counter[0], i)) is None:
                print('%s-%d-%d' % (name, counter[0], i),
                      'did not converge fully')

            # Compare results
            for b in example.bonds:
                d1 = geometry.dist(example.atoms[b[0]], example.atoms[b[1]])
                d2 = geometry.dist(new_atoms[b[0]], new_atoms[b[1]])
                geom_error += (d1 - d2)**2

        error = geom_error / n_bonds

        print(error**0.5, params)

        counter[0] += 1

        return error
Example #14
File: getTracks.py Project: hsp/GPS_HSP
    def getSamples(self, subIndex, angleWidth, length, numPoints, targetFileHdl, rasterDataDict={}, header=False):
        ##print length
        pointList    = self.pointLists[subIndex]    
        lastPoint    = pointList[-1].obj
        ##import pdb;pdb.set_trace()

        p3 = lastPoint.GetGeometryRef().GetPoint(0)
        paramListStr = ""
        orgLength = length
        for key in rasterDataDict.keys():
            paramListStr = paramListStr + delim + key
        if header:
            targetFileHdl.write("FileName" + delim + "SubTrack" + delim + "Id" + delim + "Choice" + delim + "Distance" + delim + "X" + delim + "Y" + delim + "AngPrev" + delim + "AngLast" + paramListStr + "\n")
        for i in range(len(pointList)):
            length = orgLength
            if i == 0:
                presentPoint = pointList[0].obj
            if i == 1:
                previousPoint = presentPoint
                presentPoint = pointList[1].obj
            if i > 1:
                for ii in range(len(pointList)):
                    if ii >= i:
                        pp1 = presentPoint.GetGeometryRef().GetPoint(0)
                        pp1 = point(pp1[0], pp1[1])
                        pp2 = pointList[ii].obj.GetGeometryRef().GetPoint(0)
                        pp2 = point(pp2[0], pp2[1])
                        akkuLength = dist(pp1, pp2) 
                        ##import pdb;pdb.set_trace()
                        ##print "Length", length, "akkuLength", akkuLength
                        if akkuLength >= length:
                            samplePoint = pointList[ii].obj
                            ##pp2 = nextPoint.GetGeometryRef().GetPoint(0)
                            break
                        else:
                            ##nextPoint = pointList[ii - 1].obj
                            pass
                length = akkuLength
                nextPoint = pointList[i].obj
                id = presentPoint.GetField(pointIdAttributeName)
                p0 = previousPoint.GetGeometryRef().GetPoint(0)
                p1 = presentPoint.GetGeometryRef().GetPoint(0)
                p2 = samplePoint.GetGeometryRef().GetPoint(0)
                samples = samplePoints(point(p1[0], p1[1]), point(p2[0], p2[1]), angleWidth, length, numPoints, point(p0[0], p0[1]), point(p3[0], p3[1]))
                for p in samples:
                    ## Here raster values are collected.
                    paramValueStr = ""
                    for key in rasterDataDict.keys():
                        paramValueStr = paramValueStr + delim + str(rasterDataDict[key].getCellValueAtGeolocation(p.x, p.y))
                    targetFileHdl.write(self.fileName + delim + str(subIndex) + delim + str(id) + delim + str(p.selected) + delim + str("%.2f" % p.length) + delim + str(p.x) + delim + str(p.y) + delim + str("%.4f" % p.angleToLast) + delim + str("%.4f" % p.angleFromPrevious) + paramValueStr + "\n")
                previousPoint = presentPoint
                presentPoint = nextPoint
Example #15
 def get_tangent_vector(self, q):
   p1, p2, distance = self.get_closest_segment(q)
   closest_point = None
   if geo.dist(q, p1) == distance:
     closest_point = p1
   elif geo.dist(q, p2) == distance:
     closest_point = p2
   print(closest_point)
   if closest_point is not None:
     # Rotate the vector by -90 degrees (-pi/2 rad) around the query point q
     computed_point = (closest_point[0] - q[0], closest_point[1] - q[1]) # Computed line
     #lot = (cp[0]*4.0,cp[1]*4.0)
     normal_point = (q[0] + computed_point[0], q[1] + computed_point[1])
     theta = -math.pi/2
     rp = geo.rotate_point_around_fixed_point(normal_point, q, theta)
   else:
     # Segment closest
     x = p2[0] - p1[0]
     y = p2[1] - p1[1]
     rp = (x, y)
   return rp
Example #16
  def get_tangent_vector(self, point):
    min_vertex_dist = float("inf")
    min_vertex = None
    min_edge_dist = float("inf")
    min_edge = None
    # first find the closest vertex
    for vertex in self.points:
      dist_to_vertex = dist(point, vertex)
      if dist_to_vertex < min_vertex_dist:
        min_vertex_dist = dist_to_vertex
        min_vertex = vertex

    # next, find the closest edge
    for i, vertex1 in enumerate(self.points):
      if i < len(self.points) - 1:
        vertex2 = self.points[i + 1]
      else:
        vertex2 = self.points[0]
        if vertex1[0] == vertex2[0] and vertex1[1] == vertex2[1]:
          break

      dist_to_edge = get_distance_to_segment(point, vertex1, vertex2)
      if dist_to_edge < min_edge_dist:
        min_edge_dist = dist_to_edge
        min_edge = [vertex1, vertex2]

    # compare closest edge and closest vertex
    if min_vertex_dist <= min_edge_dist + 10e-5:
      # then closest point on polygon is min_vertex
      # compute unit vector from point to min_vertex
      x_component = (min_vertex[0] - point[0]) / dist(min_vertex, point)
      y_component = (min_vertex[1] - point[1]) / dist(min_vertex, point)
      # now rotate this vector by -pi/2 to get counterclockwise tangent
      return (y_component, -x_component)

    else:  # the closest point on the polygon is min_edge
      x_component = (min_edge[1][0] - min_edge[0][0]) / dist(min_edge[0], min_edge[1])
      y_component = (min_edge[1][1] - min_edge[0][1]) / dist(min_edge[0], min_edge[1])
      return (x_component, y_component)
Example #17
    def cond(self, edge):
        """This is a condition that an edge must satisfy to be included
        when creating a graph: the distance between the vertices it connects
        must be between 1 and 2"""

        if Graph.cond(self, edge):
            v = list(edge)[0]
            w = list(edge)[1]
            return between(1, dist(v, w), 2)

        else:
            return False
Example #18
def nearbyPoints(pointXyz, pdbD):
  '''finds the nearest atom to each surface point, makes list'''
  outputList = []
  for pointCoord in pointXyz:
    pointNum = pointCoord[0]
    xyz = pointCoord[1:4]
    closestIndex, closestDist = 0, 1000000000000.
    for index, atom in enumerate(pdbD.coords):
      thisDist = geometry.dist(xyz, atom, metric="L2SQUARED")  # l2^2 monotonic
      if thisDist < closestDist:
        closestDist = thisDist
        closestIndex = index
    outputList.append([pointNum, closestIndex+1])
  return outputList
Example #19
def compareColumns(
    tmNode1, tmNode2, columnList, columnsToMean, columnsToStddev):
  '''Compares 2 nodes; reports the distance between their vectors of z-scores.
  '''
  zScores1, zScores2 = [], []
  for col in columnList:
    colMean = columnsToMean[col]
    colStddev = columnsToStddev[col]
    zScore1 = (tmNode1.attributes[col] - colMean) / colStddev
    zScore2 = (tmNode2.attributes[col] - colMean) / colStddev
    zScores1.append(zScore1)
    zScores2.append(zScore2)
  #distZ = geometry.distL2(zScores1, zScores2)
  distZ = geometry.dist(zScores1, zScores2, metric='L1')   # L1 seems to be best
  return distZ
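Examples #18 and #19 pass a metric keyword to geometry.dist: L1 for comparing z-score vectors, and squared L2 for the nearest-atom search (it skips the square root and is monotonic in L2, so the argmin is unchanged). A plausible sketch of such a helper for equal-length coordinate sequences (not the project's actual implementation):

import math

def dist(a, b, metric="L2"):
    """Distance between equal-length coordinate sequences a and b."""
    if metric == "L1":
        return sum(abs(x - y) for x, y in zip(a, b))
    squared = sum((x - y) ** 2 for x, y in zip(a, b))
    if metric == "L2SQUARED":
        return squared
    return math.sqrt(squared)  # default: Euclidean (L2)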
Example #20
    def check_colors(self):
        """Returns True if a graph is colored properly, that is no two neighbors 
		have the same color, False otherwise"""

        for v in self.vertices:
            i = self.vertices.index(v)
            for w in self.vertices:
                j = self.vertices.index(w)
                if (self.are_neighbors(v, w)
                        and self.colors[i] == self.colors[j]):
                    print('Bad colors :(')
                    print('vertices:', v, w)
                    print('dist:', dist(v, w))
                    print('colors:', self.colors[i], self.colors[j])
                    return False

        print('Good colors :)')
        return True
Example #21
    def findFragments(self, dist_cutoff=5.0):
        """
        Take lines of pdb in self.atom_lines and find breaks in chain.
        """

        # For every residue in the pdb file, grab the CA and N lines.  This
        # assumes that there are no duplicate residues and that all residues
        # have CA and N.  If there is a duplicate residue or missing atom, an
        # error is raised.

        residue_list = []
        for line in self.atom_lines:
            if line[21:26] not in residue_list:
                residue_list.append(line[21:26])

        ca_list = []
        n_list = []
        for resid in residue_list:
            resid_atoms = [l for l in self.atom_lines if l[21:26] == resid]

            ca = [l for l in resid_atoms if l[13:16] == "CA "]
            n = [l for l in resid_atoms if l[13:16] == "N  "]
            if len(ca) > 1 or len(n) > 1:
                err = "Residue \"%s\" is duplicated!" % resid
                raise PdbContainerError(err)
            elif len(ca) == 0 or len(n) == 0:
                err = "Residue \"%s\" has missing CA or N atoms!" % resid
                raise PdbContainerError(err)

            ca_list.append(ca[0])
            n_list.append(n[0])

        # Grab coordinates of CA atoms and index of residues indexes of
        # amide nitrogens.
        ca_coord = [[float(l[30 + 8 * i:38 + 8 * i]) for i in range(3)]
                    for l in ca_list]
        res_index = [self.atom_lines.index(n) for n in n_list]

        # Check the distance between every ca carbon and the next.  If these
        # are further apart than dist_cutoff, place in different fragments.
        self.frag_index = []
        for i in range(1, len(ca_list)):
            if dist(ca_coord[i - 1], ca_coord[i]) > dist_cutoff:
                self.frag_index.append(res_index[i])
Example #23
def crop_head_using_skeleton(image, skeleton):
    """ Given an image taken from the kinect RGB in PIL format, and a set
        of skeleton coordinates, crop the head and return the cropped image.
    """

    # We will cut off a rectangle with the center in the head
    # and the dimension 2 * head-neck distance.
    neck_length = dist(skeleton['head']['X'], skeleton['head']['Y'],
                       skeleton['neck']['X'], skeleton['neck']['Y'])
    head_rect = (skeleton['head']['X'] - neck_length,
                 skeleton['head']['Y'] - neck_length,
                 skeleton['head']['X'] + neck_length,
                 skeleton['head']['Y'] + neck_length)

    # Intersect our rectangle with the actual image, and crop it
    image_rect = (0, 0, image.size[0], image.size[1])
    cropping_rect = rectangle_intersection(head_rect, image_rect)
    cropping_rect = (int(cropping_rect[0]), int(cropping_rect[1]),
                     int(cropping_rect[2]), int(cropping_rect[3]))

    return image.crop(cropping_rect)
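rectangle_intersection is assumed to clip one (left, top, right, bottom) box against another; a minimal sketch of such a helper (hypothetical, not the project's implementation):

def rectangle_intersection(a, b):
    """Intersection of two (left, top, right, bottom) rectangles.
    Assumes they overlap, as the head box and the image rectangle do here."""
    return (max(a[0], b[0]), max(a[1], b[1]),
            min(a[2], b[2]), min(a[3], b[3]))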
Example #24
def visible_vertices(point: Point, points: Iterable[Point],
                     segments: Dict[Point, List[Segment]]) -> Iterator[Point]:
    """ Yields points from given points that can be seen from given start point. """

    # remove point from points
    points = filter(lambda x: x != point, points)

    # sort points first by angle and then by distance from point
    points = sorted(points,
                    key=lambda x: (angle_to_xaxis(point, x), dist(point, x)))

    # create sorted list from segments that cross starting ray
    # list is sorted using ray that has to be updated
    ray = Segment(point, Point(point.x + 1, point.y))
    status = SortedList(
        iterable=(seg for seg in set(chain(*segments.values()))
                  if intersection(
                      *ray, *seg, restriction_1='ray', restriction_2='segment')
                  and point not in seg),
        key=status_key(lambda: ray))

    # for each point (they are sorted by angle)
    for p in points:

        # update ray
        ray = Segment(point, p)

        # if p is visible yield it
        if status.bisect_left(p) == 0:
            yield p

        # remove segments from this point
        for seg in segments[p]:
            if orient(point, p, seg.p1 if seg.p2 == p else seg.p2) < 0:
                status.remove(seg)

        # add segments to this point
        for seg in segments[p]:
            if orient(point, p, seg.p1 if seg.p2 == p else seg.p2) > 0:
                status.add(seg)
Example #25
    def set_stimulation(self):
        """ Connect grounds """

        # Go straight to connecting the wanted points to ground
        for (x, y, z) in ws.electrodes_settings[self.name]["points"]:

            # Find the cable this corresponds to
            dists = geo.dist((x, y), (ws.nvt.x, ws.nvt.y))
            these = np.where(dists == dists.min())[0]
            # print('Points:', these)
            # Choose only one cable
            cable_i = these[0]

            # print('Cable:', ws.nvt.cables[cable_i])

            # Locate the section and segment (z value) of the cable
            j_sec, z_on_j = anatomy.locate(ws.seclens[cable_i], z)

            # Cable and point of the cable to work on
            point = {'cable': cable_i, 'section': j_sec, 'z': z_on_j}

            # Ground it
            ground_segment(point)
Example #26
    def update(self, fruit):
        prev = self.tail[0].getCenter()
        self.pos = geometry.add(self.pos, self.vel)
        self.tail[0].move(self.vel.getX(), self.vel.getY())

        for i in range(1, len(self.tail) - self.hasEaten):
            temp = self.tail[i].getCenter()
            vector = geometry.sub(prev, temp)
            self.tail[i].move(vector.getX(), vector.getY())
            prev = temp

        self.hasEaten = 0

        if geometry.dist(self.pos, fruit.getCenter()) < self.snakeWidth:
            self.score += 100
            self.hasEaten = 1
            newSegment = self.tail[len(self.tail) - 1].clone()
            # newSegment.setFill(color_rgb(175, 175, 175))
            newSegment.setFill("white")
            self.tail.append(newSegment)
            self.tail[len(self.tail) - 1].draw(self.window)
            return True

        return False
Example #27
File: curve.py Project: getsalmon/kgg2
 def get_distance_from_unproof_focuses(self, p, foc):
     return abs(
         abs(geom.dist(p, foc[0]) - geom.dist(p, foc[1])) - self.delta)
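This residual measures how far p is from the hyperbola's defining property: for any point on the curve, the absolute difference of distances to the two foci is a constant delta (2a). A self-contained check, assuming geom.dist is the Euclidean distance:

import math

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

# Hyperbola x^2 - y^2/3 = 1: a = 1, b = sqrt(3), foci at (+-2, 0), delta = 2a = 2
foci = ((-2.0, 0.0), (2.0, 0.0))
delta = 2.0
t = 1.0
p = (math.cosh(t), math.sqrt(3.0) * math.sinh(t))  # a point on the right branch

residual = abs(abs(dist(p, foci[0]) - dist(p, foci[1])) - delta)
print(residual)  # ~0.0, up to floating-point error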
Example #28
    def turbo_independent(self):
        """Returns a maximal independent set
        (maximal means that no other independent set contains it,
        not that it has the most vertices)"""

        #Return 'None' if there are no vertices
        if self.ver_len() == 0:
            return None

        #Let 'v' be the vertex closest to (0, 0)
        v = self[0]
        dist_v = dist(v, (0, 0))
        for w in self.vertices:
            d = dist(w, (0, 0))
            if d < dist_v:
                v = w
                dist_v = d
        """'close_circle' is now a circle with a center in 'v' and radius of 
		length 1, this means that no vertice contained in 'close_circle' is 
		connected with 'v'"""
        close_circle = Circle(v, 1)
        """Let 'close_vertices' be a list containing vertices that lay in 
		'close_circle'"""
        close_vertices = []

        for w in self.vertices:
            if w in close_circle:
                close_vertices.append(w)
        """'the_circle' will be a closed circle that will contain vertices 
		chosen as elements of returned set, it has to have diameter of length 1 
		so that no two vertices that lay in it are connected and it has to 
		contain 'v'
		
		'max_card' will be the number of those vertices"""
        max_card = 0
        the_circle = ClosedCircle(v, 0.5)

        #Count vertices in 'the_circle'
        for vertex in close_vertices:
            if vertex in the_circle:
                max_card += 1

        #We want 'the_circle' to contain the most vertices possible
        for w in close_vertices:
            if w == v:
                continue
            """'circ_1' and 'circ_1' are closed circles with diameter of length 
			1, such that 'v' and 'w' lay on edges of 'circ_1' and 'circ_1'"""
            circles = find_circles(v, w, 0.5, type='closed')
            circ_1 = circles[0]
            circ_2 = circles[1]

            #'card_1' will be the number of vertices that lay in 'circ_1'
            card_1 = 0

            #Count vertices in 'circ_1'
            for vertex in close_vertices:
                if vertex in circ_1:
                    card_1 += 1
            """If 'circ_1' contains more vertices than 'the_ circle' swap them 
			with one another"""
            if card_1 > max_card:
                the_circle = circ_1
                max_card = card_1

            #'card_2' will be the number of vertices that lay in 'circ_2'
            card_2 = 0

            #Count vertices in 'circ_2'
            for vertex in close_vertices:
                if vertex in circ_2:
                    card_2 += 1
            """If 'circ_2' contains more vertices than 'the_ circle' swap them 
			with one another"""
            if card_2 > max_card:
                the_circle = circ_2
                max_card = card_2

        #'chosen_vertices' will be a list of all vertices from 'the_circle'
        chosen_vertices = []
        for w in close_vertices:
            if w in the_circle:
                chosen_vertices.append(w)
        """'big_circle' is a circle that contains vertices that can be connected
		with vertices from 'the_circle'"""
        big_circle = Circle(the_circle.center, 2.5)

        #'rejected' will be a list of vertices from 'big_circle'
        rejected = []
        for w in self.vertices:
            if w in big_circle:
                rejected.append(w)
        """remove vertices from 'rejected' because it contains alredy chosen 
		vertices as well as their potential neighbors"""
        new_graph = self - rejected

        #Repeat the action for what remained
        ind = new_graph.turbo_independent()

        if ind is None:
            return chosen_vertices
        else:
            return ind + chosen_vertices
Example #29
    def behavior_planning(self, world_state):
        """
        Identifies a goal state and places it into world_state['goal']. Also identifies whether to run the intake or
        outtake and places it into world_state['tube_mode'] as one of 'INTAKE', 'OUTTAKE', 'NONE'. Also identifies which
        direction to drive in, one of '1', '-1', or '0'
        """
        curr_pos = world_state['pose'][0]  # Our current (x,y)

        tube_mode = 'NONE'
        direction = 0
        field_outtake = False
        flail = False
        goal = None

        if geom.dist(curr_pos, self.scoring_zone) <= 0.15 and world_state['numIngestedBalls'] > 0:
            # If we're in the scoring zone with some balls then run the outtake
            tube_mode = 'OUTTAKE'
            direction = 0
            field_outtake = False
            flail = False
            goal = self.scoring_zone

        elif geom.dist(curr_pos, self.chute_pos) <= 0.15 and world_state['numIngestedBalls'] <= 5:
            # If we're at the human player station with five or fewer balls, request another ball from the field
            tube_mode = 'INTAKE'
            direction = -1
            field_outtake = True
            flail = False
            goal = self.scoring_zone

        elif self.obstacle_grid.get_occupancy(curr_pos):
            # If we're currently inside an obstacle, flail!
            tube_mode = 'INTAKE'
            direction = 1
            field_outtake = False
            flail = True
            goal = None

        elif world_state['numIngestedBalls'] >= 5:
            # If we have >=5 balls then drive backwards towards the goal
            tube_mode = 'INTAKE'
            direction = -1
            field_outtake = False
            flail = False
            goal = self.scoring_zone

        elif len(world_state['balls']) == 0:
            # If we can't see any more balls to go towards, then go towards the human player station
            tube_mode = 'INTAKE'
            direction = 1
            field_outtake = False
            flail = False
            goal = self.chute_pos

        else:
            # The rest of the time, just run the intake and go towards the closest unobstructed ball
            tube_mode = 'INTAKE'
            direction = 1
            field_outtake = False
            flail = False

            # Find the closest ball
            min_ball_dist = np.inf
            for ball_pos in world_state['balls']:
                ball_dist = geom.dist(curr_pos, ball_pos)
                if ball_dist < min_ball_dist and not self.obstacle_grid.get_occupancy(ball_pos):
                    min_ball_dist = ball_dist
                    goal = ball_pos

        world_state['tube_mode'] = tube_mode
        world_state['direction'] = direction
        world_state['field_outtake'] = field_outtake
        world_state['flail'] = flail
        world_state['goal'] = goal
Example #30
    def calcTrochoidalMilling(self):
        new_paths = []
        lastPoint = None
        radius = self.trochoidalDiameter.getValue() / 2.0
        distPerRev = self.trochoidalStepover.getValue()
        rampdown = self.rampdown.getValue()
        steps_per_rev = 50
        stock_poly = None
        if self.source is not None:
            stock_poly = self.source.getStockPolygon()
        #for path_index,  path in enumerate(self.path.path):
            
        newpath=[]
        angle = 0
        for p_index,  p in enumerate(self.path.path):
            # when plunging, check if we already cut this part before
            cutting = True
            plunging = False
            for cp in self.path.path[0:p_index]:
                if cp.position is None or p.position is None:
                    continue

                if lastPoint is not None and lastPoint.position[2]>p.position[2] \
                        and geometry.dist(p.position, cp.position) < min(i for i in [radius, cp.dist_from_model] if i is not None ):
                    cutting = False

            if p.rapid or p.order > self.trochoidalOrder.getValue() or p.dist_from_model < self.trochoidalOuterDist.getValue() or not cutting:
                newpath.append(GPoint(position=p.position, rapid=p.rapid, inside_model=p.inside_model, in_contact=p.in_contact))
            else:
                if p.order%self.trochoidalSkip.getValue()==0: #skip paths
                    if lastPoint is not None:
                        if lastPoint.position[2] > p.position[2]:
                            plunging = True
                        else:
                            plunging = False
                        dist=sqrt((p.position[0]-lastPoint.position[0])**2 + (p.position[1]-lastPoint.position[1])**2 + (p.position[2]-lastPoint.position[2])**2)
                        distPerRev = self.trochoidalStepover.getValue()
                        if plunging:
                            dradius = radius
                            if  p.dist_from_model is not None:
                                dradius = min(min(radius, p.dist_from_model), self.tool.diameter.getValue()/2.0)
                            if rampdown>0.0:
                                distPerRev = rampdown*(dradius*2.0*pi)

                        steps =  int(float(steps_per_rev)*dist/distPerRev)+1
                        dradius = 0.0
                        for i in range(0,  steps):
                            angle -= (dist/float(distPerRev) / float(steps)) * 2.0*PI
                            dradius = radius
                            bore_expansion = False
                            if  p.dist_from_model is not None and lastPoint.dist_from_model is not None:
                                dradius = min(radius, lastPoint.dist_from_model*(1.0-(float(i)/steps)) + p.dist_from_model*(float(i)/steps))
                            if  p.dist_from_model is not None and lastPoint.dist_from_model is None:
                                dradius = min(radius, p.dist_from_model)
                            # if plunging and radius is larger than tool diameter, bore at smaller radius and expand out
                            if plunging:
                                if dradius>self.tool.diameter.getValue():
                                    dradius = self.tool.diameter.getValue()/2.0
                                    bore_expansion = True

                            x = lastPoint.position[0]*(1.0-(float(i)/steps)) + p.position[0]*(float(i)/steps) + dradius * sin(angle)
                            y = lastPoint.position[1]*(1.0-(float(i)/steps)) + p.position[1]*(float(i)/steps) + dradius * cos(angle)
                            z = lastPoint.position[2]*(1.0-(float(i)/steps)) + p.position[2]*(float(i)/steps)

                            cutting = True
                            if stock_poly is not None and not stock_poly.pointInside((x, y, z)):
                                cutting = False
                            for cp in self.path.path[0:p_index]:
                                if cp.dist_from_model is not None and geometry.dist((x, y, z), cp.position) < min(radius, cp.dist_from_model) - 0.5*self.trochoidalStepover.getValue():
                                    cutting = False
                            if cutting:
                                feedrate=None
                                if plunging:
                                    feedrate=self.plunge_feedrate.getValue()
                                newpath.append(GPoint(position=(x, y, z), rapid=p.rapid, inside_model=p.inside_model, in_contact=p.in_contact, feedrate=feedrate))

                        if bore_expansion:
                            distPerRev = self.trochoidalStepover.getValue()
                            dist = min(radius, p.dist_from_model) - dradius + distPerRev
                            steps = int(float(steps_per_rev) * (dist / distPerRev) )
                            for i in range(0, steps):
                                angle -= (dist / float(distPerRev) / float(steps)) * 2.0 * PI
                                dradius += dist/steps
                                if dradius>p.dist_from_model:
                                    dradius=p.dist_from_model
                                x = p.position[0] + dradius * sin(angle)
                                y = p.position[1] + dradius * cos(angle)
                                z = p.position[2]
                                cutting = True
                                if stock_poly is not None and not stock_poly.pointInside((x, y, z)):
                                    cutting = False
                                if cutting:
                                    newpath.append(GPoint(position=(x, y, z), rapid=p.rapid, inside_model=p.inside_model, in_contact=p.in_contact))

            lastPoint = p

        #remove non-cutting points
#        cleanpath=[]
#        for p in newpath:
#            cutting = True
#            for cp in cleanpath:
#                if geometry.dist(p.position, cp.position) < min(radius, cp.dist_from_model):
#                    cutting = False
#            if cutting:
#                cleanpath.append(p)
        new_paths.append(GCode(newpath))
        self.outpaths=new_paths
        self.updateView()
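At its core, the loop above sweeps a circle along each linear move: the circle center is interpolated from lastPoint to p while the angle advances by one revolution per distPerRev of travel. A stripped-down 2D sketch of that parametrization (fixed radius, project-specific helpers removed):

from math import pi, sin, cos, hypot

def trochoid_points(a, b, radius, dist_per_rev, steps_per_rev=50):
    """Yield the points of a trochoidal pass from 2D point a to 2D point b."""
    d = hypot(b[0] - a[0], b[1] - a[1])
    steps = int(steps_per_rev * d / dist_per_rev) + 1
    angle = 0.0
    for i in range(steps):
        angle -= (d / dist_per_rev / steps) * 2.0 * pi
        t = i / steps
        cx = a[0] * (1.0 - t) + b[0] * t  # interpolated circle center
        cy = a[1] * (1.0 - t) + b[1] * t
        yield (cx + radius * sin(angle), cy + radius * cos(angle))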
Example #31
 def are_neighbors(self, v, w):
     """Returns True if v and w are neighbors, False otherwise"""
     return between(1, dist(v, w), 2)
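between (also used by cond in Example #17) is presumably the inclusive range predicate, which makes this an annulus graph: two vertices are adjacent exactly when their distance lies in [1, 2]. A one-line sketch of the assumed helper:

def between(lo, x, hi):
    """True if x lies in the inclusive range [lo, hi]."""
    return lo <= x <= hi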
Example #32
def pickle_training_set(run_name,
                        training_sets_folder="training_set",
                        pickle_file_name="training_set",
                        high_energy_cutoff=500.0,
                        system_x_offset=1000.0,
                        verbose=False,
                        extra_parameters={}):
    """
    A function to pickle together the training set in a manner that is
    readable for MCSMRFF.  This is a single LAMMPs data file with each
    training set offset alongst the x-axis by system_x_offset.  The pickle
    file, when read in later, holds a list of two objects.  The first is
    the entire system as described above.  The second is a dictionary of all
    molecules in the system, organized by composition.

    **Parameters**

        run_name: *str*
            Name of final training set.
        training_sets_folder: *str, optional*
            Path to the folder where all the training set data is.
        pickle_file_name: *str, optional*
            A name for the pickle file and training set system.
        high_energy_cutoff: *float, optional*
            A cutoff for systems that are too large in energy, as MD is likely
            never to sample them.
        system_x_offset: *float, optional*
            The x offset for the systems to be added by.
        verbose: *bool, optional*
            Whether to have additional stdout or not.
        extra_parameters: *dict, optional*
            A dictionary of additional parameters that do not exist
            in the default OPLSAA parameter file.

    **Returns**

        system: *System*
            The entire training set system.
        systems_by_composition: *dict, list, Molecule*
            Each molecule organized in this hash table.
    """
    # Take care of pickle file I/O
    if training_sets_folder.endswith("/"):
        training_sets_folder = training_sets_folder[:-1]
    if pickle_file_name is not None and pickle_file_name.endswith(".pickle"):
        pickle_file_name = pickle_file_name.split(".pickle")[0]
    pfile = training_sets_folder + "/" + pickle_file_name + ".pickle"
    sys_name = pickle_file_name
    if os.path.isfile(pfile):
        raise Exception("Pickled training set already exists!")

    # Generate empty system for your training set
    system = None
    system = structures.System(box_size=[1e3, 100.0, 100.0], name=sys_name)
    systems_by_composition = {}

    # For each folder in the training_sets folder let's get the cml file we
    # want and write the energies and forces for that file
    for name in os.listdir(training_sets_folder):
        # We'll read in any training subset that succeeded and print a warning
        # on those that failed
        try:
            result = orca.read("%s/%s/%s.out"
                               % (training_sets_folder, name, name))
        except IOError:
            print("Warning - Training Subset %s not included as \
out file not found..." % name)
            continue

        # Check for convergence
        if not result.converged:
            print("Warning - Results for %s have not converged." % name)
            continue

        # Parse the force output and change units. In the case of no force
        # found, do not use this set of data
        try:
            forces = orca.engrad_read("%s/%s/%s.orca.engrad"
                                      % (training_sets_folder, name, name),
                                      pos="Ang")[0]

            # Convert force from Ha/Bohr to kcal/mol-Ang
            def convert(x):
                return units.convert_dist("Ang", "Bohr",
                                          units.convert_energy("Ha",
                                                               "kcal",
                                                               x)
                                          )

            for a, b in zip(result.atoms, forces):
                a.fx, a.fy, a.fz = convert(b.fx), convert(b.fy), convert(b.fz)
        except (IndexError, IOError):
            print("Warning - Training Subset %s not included as \
results not found..." % name)
            continue

        # Get the bonding information
        with_bonds = structures.Molecule("%s/%s/%s.cml"
                                         % (training_sets_folder, name, name),
                                         extra_parameters=extra_parameters,
                                         allow_errors=True,
                                         test_charges=False)

        # Copy over the forces read in into the system that has the bonding
        # information
        for a, b in zip(with_bonds.atoms, result.atoms):
            a.fx, a.fy, a.fz = b.fx, b.fy, b.fz
            # sanity check on atom positions
            if geometry.dist(a, b) > 1e-4:
                raise Exception('Atoms are different:', (a.x, a.y, a.z),
                                                        (b.x, b.y, b.z)
                                )

        # Rename and save energy
        with_bonds.energy = result.energy
        with_bonds.name = name

        # Now, we read in all the potential three-body interactions that our
        # training set takes into account.  This will be in a 1D array
        composition = ' '.join(sorted([a.element for a in result.atoms]))
        if composition not in systems_by_composition:
            systems_by_composition[composition] = []
        systems_by_composition[composition].append(with_bonds)

    # Generate:
    #  (1) xyz file of various systems as different time steps
    #  (2) system to simulate
    xyz_atoms = []
    to_delete = []
    for i, composition in enumerate(systems_by_composition):
        # Sort so that the lowest energy training subset is first
        # in the system
        systems_by_composition[composition].sort(key=lambda s: s.energy)
        baseline_energy = systems_by_composition[composition][0].energy
        # Offset the energies by the lowest energy, and convert energy units
        for j, s in enumerate(systems_by_composition[composition]):
            s.energy -= baseline_energy
            s.energy = units.convert_energy("Ha", "kcal/mol", s.energy)
            # Don't use high-energy systems, because these will not likely
            # be sampled in MD
            if s.energy > high_energy_cutoff:
                to_delete.append([composition, j])
                continue
            # For testing purposes, output
            if verbose:
                print "Using:", s.name, s.energy
            xyz_atoms.append(s.atoms)
            system.add(s, len(system.molecules) * system_x_offset)

    # Delete the system_names that we aren't actually using due to energy
    # being too high
    to_delete = sorted(to_delete, key=lambda x: x[1])[::-1]
    for d1, d2 in to_delete:
        if verbose:
            print "Warning - Training Subset %s not included as energy \
is too high..." % systems_by_composition[d1][d2].name
        del systems_by_composition[d1][d2]

    # Make the box just a little bigger (100) so that we can fit all our
    # systems
    system.xhi = len(system.molecules) * system_x_offset + 100.0

    # Write all of the states we are using to training_sets.xyz
    files.write_xyz(xyz_atoms, training_sets_folder + '/' + pickle_file_name)
    # Generate our pickle file
    print("Saving pickle file %s..." % pfile)
    fptr = open(pfile, "wb")
    pickle.dump([system, systems_by_composition], fptr)
    fptr.close()

    # Now we have the data, save it to files for this simulation of
    # "run_name" and return parameters
    if not os.path.isdir(run_name):
        os.mkdir(run_name)
    os.chdir(run_name)
    mcsmrff_files.write_system_and_training_data(run_name,
                                                 system,
                                                 systems_by_composition
                                                 )
    os.chdir("../")
    shutil.copyfile(pfile, "%s/%s.pickle" % (run_name, run_name))

    return system, systems_by_composition
Example #33
    def build_from_json(self):
        """
		Build the nerve using the parameters stored in json files
		"""

        # Build contours
        self.c_reduction = ws.anatomy_settings["cross-section"][
            "contours point reduction"]
        self.build_contours()

        contour = self.contour
        contour_hd = self.contour_hd
        contour_pslg = self.contour_pslg
        contour_pslg_nerve = self.contour_pslg_nerve

        # Build internal elements
        x = []
        y = []
        r = []
        cables = OrderedDict()
        free_areas = []
        start_positions = []
        cables_tissues = OrderedDict()
        segments = {}
        len_seg = {}
        len_con = {}
        numberof = {'Axon': 0, 'NAELC': 0}
        models = {}

        itpath = ws.anatomy_settings["cross-section"]["internal topology file"]
        topology = read_from_json(itpath, object_pairs_hook=OrderedDict)

        # Read the dictionary and create the necessary variables from it

        for i, c in topology['cables'].items():
            i = int(i)
            cables[i] = c['type']
            x.append(c['x'])
            y.append(c['y'])
            r.append(c['r'])
            free_areas.append(c['free extracellular area'])
            start_positions.append(c['start position'])
            cables_tissues[i] = OrderedDict()
            cables_tissues[i]['endoneurium'] = c['endoneurium']
            cables_tissues[i]['epineurium'] = c['epineurium']
            numberof[c['type']] += 1
            # Axon model
            try:
                models[i] = c['model']
            except KeyError:
                # It's not an axon
                models[i] = cables[i]

        # Turn the cables dictionary into a sorted list
        # And also sort everything else
        sortorder = np.argsort(np.array(list(cables.keys())))
        cables = np.array(list(cables.values()))[sortorder]
        x = np.array(x)[sortorder]
        y = np.array(y)[sortorder]
        r = np.array(r)[sortorder]
        free_areas = np.array(free_areas)[sortorder]
        start_positions = np.array(start_positions)[sortorder]

        # Now pairs...
        for p in topology['pairs'].values():
            i, j = p['pair']
            pair = (i, j)
            s = p['separator segment']
            a = s['a']
            b = s['b']
            seg = geo.Segment(((a['x'], a['y']), (b['x'], b['y'])))
            segments[pair] = seg
            len_seg[pair] = seg.length
            len_con[pair] = geo.dist((x[i], y[i]), (x[j], y[j]))

        # Save things as attributes
        # self.x = np.array(x)
        # self.y = np.array(y)
        # self.r = np.array(r)
        # self.free_areas = np.array(free_areas)
        # self.start_positions = np.array(start_positions)
        self.x = x
        self.y = y
        self.r = r
        self.free_areas = free_areas
        self.start_positions = start_positions
        self.cables_tissues = cables_tissues
        self.segments = segments
        self.len_seg = len_seg
        self.len_con = len_con
        self.pairs = len_con.keys()
        self.cables = cables
        self.models = models
        # Unique list of models
        self.models_set = set(self.models.values())
        # print('cables:', cables)
        # print('r:', r)
        # for c_, r_ in zip(cables, r):
        # 	print(c_, r_)
        self.nc = len(cables)
        self.naxons_total = numberof['Axon']
        self.nNAELC = numberof['NAELC']
        # Build power diagram
        pd = tess.PowerDiagram(self.x, self.y, self.r, contour_pslg_nerve)
        # for p, s in zip(self.pairs, self.segments.values()):
        # 	print(p, str(s))
        pd.build_preexisting(self.pairs, self.segments)
        self.trios = pd.trios
        self.pd = pd
        self.circ_areas = pd.circ_areas
        # Total endoneurial cross-sectional free area
        self.endo_free_cs_area = self.fas_total_area - self.circ_areas.sum()
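For reference, the internal topology file read above (via read_from_json) is assumed to parse into a structure shaped roughly like the sketch below. The key names come from the reader loop; every value, including the "MRG" model name, is purely illustrative.

topology = {
    "cables": {
        "0": {"type": "Axon", "x": 12.5, "y": -3.1, "r": 1.8,
              "free extracellular area": 4.2, "start position": 0.0,
              "endoneurium": True, "epineurium": False, "model": "MRG"},
        "1": {"type": "NAELC", "x": 40.0, "y": 7.7, "r": 0.0,
              "free extracellular area": 9.9, "start position": 0.0,
              "endoneurium": False, "epineurium": True},
    },
    "pairs": {
        "0": {"pair": [0, 1],
              "separator segment": {"a": {"x": 20.0, "y": 0.0},
                                    "b": {"x": 25.0, "y": 5.0}}},
    },
}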
Example #34
def get_training_set(run_name, use_pickle=True, pickle_file_name=None):
    # Take care of pickle file I/O
    # Get file name
    if pickle_file_name is None:
        pfile = "training_sets/training_set.pickle"
    else:
        pfile = pickle_file_name

    system = None
    # If the pickle file does not exist, then make it
    # If use_pickle is False, then read the data in from the
    # training_sets folder
    if not os.path.isfile(pfile) or not use_pickle:
        if pickle_file_name is not None:
            raise Exception("Requested file %s, but unable to read it in."
                            % pickle_file_name)

        # Generate the pickle itself if it doesn't exist
        # Create the size of the box to be 1000 x 100 x 100 to hold your
        # training sets
        system = structures.System(box_size=[1e3, 100.0, 100.0],
                                   name="training_set")
        systems_by_composition = {}

        # For each folder in the training_sets folder lets get the cml file
        # we want and write the energies and forces for that file
        for name in os.listdir("training_sets"):
            # We'll read in any training subset that succeeded and print
            # a warning on those that failed
            try:
                result = orca.read("training_sets/%s/%s.out" % (name, name))
            except IOError:
                print("Warning - Training Subset %s not included as results \
not found..." % name)
                continue

            # Parse the force output and change units. In the case of no force
            # found, do not use this set of data
            try:
                forces = orca.engrad_read("training_sets/%s/%s.orca.engrad"
                                          % (name, name), pos="Ang")[0]

                # Convert force from Ha/Bohr to kcal/mol-Ang
                def convert(x):
                    return units.convert_dist(
                        "Ang", "Bohr", units.convert_energy("Ha", "kcal", x))
                for a, b in zip(result.atoms, forces):
                    a.fx = convert(b.fx)
                    a.fy = convert(b.fy)
                    a.fz = convert(b.fz)
            except (IndexError, IOError):
                print("Warning - Training Subset %s not included as results \
not found..." % name)
                continue

            # Get the bonding information
            with_bonds = structures.Molecule(
                "training_sets/%s/system.cml" % name,
                extra_parameters=extra_Pb,
                test_charges=False)

            # Copy over the forces read in into the system that has the
            # bonding information
            for a, b in zip(with_bonds.atoms, result.atoms):
                a.fx, a.fy, a.fz = b.fx, b.fy, b.fz
                if geometry.dist(a, b) > 1e-4:
                    # sanity check on atom positions
                    raise Exception('Atoms are different:',
                                    (a.x, a.y, a.z),
                                    (b.x, b.y, b.z))

            # Rename some things
            with_bonds.energy = result.energy
            with_bonds.name = name

            # Now, we read in all the potential three-body interactions that
            # our training set takes into account
            # This will be in a 1D array
            composition = ' '.join(sorted([a.element for a in result.atoms]))
            if composition not in systems_by_composition:
                systems_by_composition[composition] = []
            systems_by_composition[composition].append(with_bonds)

        # Generate (1) xyz file of various systems as different time steps and
        # (2) system to simulate
        xyz_atoms = []
        to_delete = []
        for i, composition in enumerate(systems_by_composition):
            # Sort so that the lowest energy training subset is first in
            # the system
            systems_by_composition[composition].sort(key=lambda s: s.energy)
            baseline_energy = systems_by_composition[composition][0].energy
            # Offset the energies by the lowest energy, convert units of
            # the energy
            for j, s in enumerate(systems_by_composition[composition]):
                s.energy -= baseline_energy
                s.energy = units.convert_energy("Ha", "kcal/mol", s.energy)
                # Don't use high-energy systems, because these will not likely
                # be sampled in MD
                if s.energy > 500.0:
                    to_delete.append([composition, j])
                    continue
                # For testing purposes, output
                print("DEBUG:", s.name, s.energy)
                xyz_atoms.append(s.atoms)
                system.add(s, len(system.molecules) * 1000.0)

        # Delete the system_names that we aren't actually using due to
        # energy being too high
        to_delete = sorted(to_delete, key=lambda x: x[1])[::-1]
        for d1, d2 in to_delete:
            print("Warning - Training Subset %s not included as energy is too \
high..." % systems_by_composition[d1][d2].name)
            del systems_by_composition[d1][d2]

        # Make the box just a little bigger (100) so that we can fit all
        # our systems
        system.xhi = len(system.molecules) * 1000.0 + 100.0

        # Write all of the states we are using to training_sets.xyz
        if not os.path.isdir("training_sets"):
            os.mkdir("training_sets")
        os.chdir("training_sets")
        files.write_xyz(xyz_atoms, 'training_sets')
        os.chdir("../")
        # Generate our pickle file if desired
        if use_pickle:
            print("Saving pickle file %s..." % pfile)
            fptr = open(pfile, "wb")
            pickle.dump([system, systems_by_composition], fptr)
            fptr.close()

    # If use_pickle is true AND the pickle file exists, then we can just
    # read it in
    if system is None and use_pickle:
        print("Reading pickle file %s..." % pfile)
        fptr = open(pfile, "rb")
        system, systems_by_composition = pickle.load(fptr)
        system.name = run_name
        fptr.close()
    elif system is None:
        raise Exception("Requested file %s, but unable to read it in." % pfile)

    # Now we have the data, save it to files for this simulation of "run_name"
    # and return parameters
    if not os.path.isdir("lammps"):
        os.mkdir("lammps")
    if not os.path.isdir("lammps/%s" % run_name):
        os.mkdir("lammps/%s" % run_name)
    os.chdir("lammps/%s" % run_name)
    mcsmrff_files.write_system_and_training_data(run_name,
                                                 system,
                                                 systems_by_composition)
    os.chdir("../../")

    return system, systems_by_composition
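A minimal usage sketch for the function above, assuming the project's training_sets/ layout (one subfolder per subset holding <name>.out and <name>.orca.engrad) and its orca/structures/units helper modules; the run name is hypothetical.

# Regenerate the training set (or load the cached pickle) for one run
system, systems_by_composition = get_training_set("test_run",
                                                  use_pickle=True)
print(len(system.molecules), "molecules in the training system")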
Example #35
    def run(self, plan_state):
        """
        Calculates controls given the current plan state
        :param plan_state: Dict containing robot's current pose and some goal state
        :return: Dict containing motor speeds for each motor on the robot
        """
        curr_time = time.time()
        vehicle_commands = {
            'leftDriveMotorSpeed': 0,  # Left drive motor speed (-512 to 512)
            'rightDriveMotorSpeed': 0,  # Right drive motor speed (-512 to 512)
            'intakeCenterMotorSpeed': 0,  # Intake center motor speed (-512 to 512)
            'intakeLeftMotorSpeed': 0,  # Intake left motor speed (-512 to 512)
            'intakeRightMotorSpeed': 0,  # Intake right motor speed (-512 to 512)
            'tubeMotorSpeed': 0,  # Tube motor speed (-512 to 512)
            'timerStartStop': 0,  # Timer start/stop (0 or 1)
            'reset': 0,  # Reset (0 or 1)
            'outtake': 0,  # Outtake (0 or 1)
            'draw': []  # List of shapes to draw
        }

        # Determine left and right drive motor speeds
        direction = plan_state['direction']
        tube_mode = plan_state['tube_mode']
        if plan_state['flail']:
            left_drive_speed = int(self.max_forward_speed *
                                   math.sin(2 * curr_time))
            right_drive_speed = int(self.max_forward_speed *
                                    math.sin(2.1 * curr_time))

        elif plan_state['trajectory'] is None or direction == 0:
            # Nothing to do
            left_drive_speed = 0
            right_drive_speed = 0

        else:
            pose = plan_state['pose']
            start = np.array(plan_state['trajectory'][0])
            goal = np.array(plan_state['trajectory'][1])

            curr_heading = pose[1] % (2 * np.pi)
            vec_start_to_goal = goal - start
            desired_heading = np.arctan2(vec_start_to_goal[1],
                                         vec_start_to_goal[0]) % (2 * np.pi)

            if direction == -1:
                desired_heading += np.pi
                desired_heading = desired_heading % (2 * np.pi)

            # If we're not facing the right way, turn in place. Else move straight.
            heading_norm_vector = np.array(
                [np.cos(curr_heading),
                 np.sin(curr_heading)])
            goal_norm_vector = np.array(
                [np.cos(desired_heading),
                 np.sin(desired_heading)])
            x1 = heading_norm_vector[0]
            y1 = heading_norm_vector[1]
            x2 = goal_norm_vector[0]
            y2 = goal_norm_vector[1]
            # Signed angle from the current heading to the desired heading:
            # atan2 of the cross and dot products of the two unit vectors
            heading_error = math.atan2(x1 * y2 - y1 * x2, x1 * x2 + y1 * y2)
            if abs(heading_error) >= self.heading_error_threshold:
                left_drive_speed = -int(
                    self.left_drive_pid.run(0, heading_error, curr_time))
                right_drive_speed = int(
                    self.right_drive_pid.run(0, heading_error, curr_time))
            else:
                distance_to_goal = geom.dist(start, goal)
                speed = int(
                    abs(self.straight_pid.run(distance_to_goal, 0, curr_time)))
                speed = min(speed, self.max_forward_speed)
                left_drive_speed = speed * direction
                right_drive_speed = speed * direction

        # Run the intake/outtake motors
        intake_speed = self.max_intake_speed if tube_mode == 'INTAKE' else 0
        outtake_speed = self.max_outtake_speed if tube_mode == 'OUTTAKE' else 0

        # Request the field to outtake a ball
        if plan_state['field_outtake']:
            self.field_outtake = not self.field_outtake

        vehicle_commands['leftDriveMotorSpeed'] = left_drive_speed
        vehicle_commands['rightDriveMotorSpeed'] = right_drive_speed
        vehicle_commands['intakeCenterMotorSpeed'] = intake_speed
        vehicle_commands['intakeLeftMotorSpeed'] = intake_speed
        vehicle_commands['intakeRightMotorSpeed'] = intake_speed
        vehicle_commands['tubeMotorSpeed'] = outtake_speed
        vehicle_commands['outtake'] = int(self.field_outtake)

        return vehicle_commands
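A hedged usage sketch: the plan_state keys mirror those read inside run(), controller stands for an already constructed instance of this class, and the (position, heading) pose layout is an assumption based on how pose[1] is used above.

plan_state = {
    'pose': ((0.0, 0.0), 0.0),  # (position, heading in radians)
    'trajectory': [(0.0, 0.0), (2.0, 0.0)],  # start and goal points
    'direction': 1,  # 1 forward, -1 reverse, 0 idle
    'tube_mode': 'INTAKE',
    'flail': False,
    'field_outtake': False,
}
commands = controller.run(plan_state)
print(commands['leftDriveMotorSpeed'], commands['rightDriveMotorSpeed'])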
Example #36
def callback_strip_solvents(fpl_obj):
    xyz = fpl_obj.data[-1]
    system = fpl_obj.system
    ## Store the end of the last LAMMPS simulation in the system.atoms variable
    for a, b in zip(system.atoms, xyz[-1]):
        a.x, a.y, a.z = b.x, b.y, b.z
        if any([np.isnan(x) for x in (a.x, a.y, a.z)]):
            return None

    ## Grab only molecules we're interested in.  Here we find relative distances to the solute in question
    if fpl_obj.solute:
        molecules_in_cluster = []

        # Generate a list of all ion atoms
        ion_list = [
            a for m in system.molecules for a in m.atoms
            if a.element == fpl_obj.ion
        ]
        all_solutes = [
            m for m in system.molecules
            if fpl_obj.ion in [a.element for a in m.atoms]
        ]
        if len(ion_list) == 0:
            raise Exception("Could not find %s!" % fpl_obj.ion)

        # Get distance between every ion and oxygen per molecule, saving molecule and min r
        for m in system.molecules:
            # Collect the oxygen atoms of this molecule
            oxypos = [a for a in m.atoms if a.type.element in [8, "O"]]
            if not oxypos:
                continue

            # Get a list of the closest oxygen of molecule m to the ions
            r_min = min(
                [geometry.dist(i, o) for i in ion_list for o in oxypos])

            # If close enough, save
            if r_min < fpl_obj.R_cutoff:
                molecules_in_cluster.append((m, r_min))
    else:
        all_solutes = []
        origin = structures.Atom('X', 0.0, 0.0, 0.0)
        molecules_in_cluster = [(m, geometry.dist(origin, m.atoms[0]))
                                for m in system.molecules]

    # Now, if we want only N solvents, grab closest N
    molecules_in_cluster = sorted(molecules_in_cluster, key=lambda x: x[1])
    if fpl_obj.num_solvents is not None:
        molecules_in_cluster = all_solutes + [
            m[0] for m in molecules_in_cluster[:fpl_obj.num_solvents]
        ]
    else:
        molecules_in_cluster = all_solutes + [
            m[0] for m in molecules_in_cluster
        ]

    # Set indices
    for j, m in enumerate(molecules_in_cluster):
        for i, a in enumerate(m.atoms):
            a.index = i + 1

    ## Generate the new system
    system = structures.System(box_size=(100, 100, 100), name=fpl_obj.run_name)
    for m in molecules_in_cluster:
        system.add(m)

    fpl_obj.system = system
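The selection above boils down to ranking molecules by their minimum ion-oxygen distance and keeping the closest N. A self-contained sketch of that pattern, with plain coordinate tuples standing in for the project's Atom/Molecule classes and illustrative values throughout:

import math

ion = (0.0, 0.0, 0.0)
molecules = {  # molecule name -> oxygen positions
    'water1': [(1.0, 0.0, 0.0), (1.5, 0.8, 0.0)],
    'water2': [(4.0, 0.0, 0.0)],
    'water3': [(2.0, 2.0, 0.0)],
}

# Minimum atom-to-ion distance per molecule, then keep the closest two
r_min = {name: min(math.dist(ion, o) for o in oxygens)
         for name, oxygens in molecules.items()}
closest = sorted(r_min, key=r_min.get)[:2]
print(closest)  # ['water1', 'water3']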
Example #37
def pickle_training_set(run_name,
                        training_sets_folder="training_set",
                        pickle_file_name="training_set",
                        high_energy_cutoff=500.0,
                        system_x_offset=1000.0,
                        verbose=False,
                        extra_parameters={}):
    """
    A function to pickle together the training set in a manner that is
    readable for MCSMRFF.  This is a single LAMMPs data file with each
    training set offset along the x-axis by system_x_offset.  The pickle
    file, when read in later, holds a list of two objects.  The first is
    the entire system as described above.  The second is a dictionary of all
    molecules in the system, organized by composition.

    **Parameters**

        run_name: *str*
            Name of final training set.
        training_sets_folder: *str, optional*
            Path to the folder where all the training set data is.
        pickle_file_name: *str, optional*
            A name for the pickle file and training set system.
        high_energy_cutoff: *float, optional*
            A cutoff for systems that are too large in energy, as MD is likely
            never to sample them.
        system_x_offset: *float, optional*
            The x offset for the systems to be added by.
        verbose: *bool, optional*
            Whether to have additional stdout or not.
        extra_parameters: *dict, optional*
            A dictionary of additional parameters that do not exist
            in the default OPLSAA parameter file.

    **Returns**

        system: *System*
            The entire training set system.
        systems_by_composition: *dict, list, Molecule*
            Each molecule organized in this hash table.
    """
    # Take care of pickle file I/O
    if training_sets_folder.endswith("/"):
        training_sets_folder = training_sets_folder[:-1]
    if pickle_file_name is not None and pickle_file_name.endswith(".pickle"):
        pickle_file_name = pickle_file_name.split(".pickle")[0]
    pfile = training_sets_folder + "/" + pickle_file_name + ".pickle"
    sys_name = pickle_file_name
    if os.path.isfile(pfile):
        raise Exception("Pickled training set already exists!")

    # Generate empty system for your training set
    system = structures.System(box_size=[1e3, 100.0, 100.0], name=sys_name)
    systems_by_composition = {}

    # For each folder in the training_sets folder lets get the cml file we
    # want and write the energies and forces for that file
    for name in os.listdir(training_sets_folder):
        # We'll read in any training subset that succeeded and print a warning
        # on those that failed
        try:
            result = orca.read("%s/%s/%s.out" %
                               (training_sets_folder, name, name))
        except IOError:
            print(
                "Warning - Training Subset %s not included as \
out file not found..." % name)
            continue

        # Check for convergence
        if not result.converged:
            print("Warning - Results for %s have not converged." % name)
            continue

        # Parse the force output and change units. In the case of no force
        # found, do not use this set of data
        try:
            forces = orca.engrad_read("%s/%s/%s.orca.engrad" %
                                      (training_sets_folder, name, name),
                                      pos="Ang")[0]

            # Convert force from Ha/Bohr to kcal/mol-Ang
            def convert(x):
                return units.convert_dist(
                    "Ang", "Bohr", units.convert_energy("Ha", "kcal", x))

            for a, b in zip(result.atoms, forces):
                a.fx, a.fy, a.fz = convert(b.fx), convert(b.fy), convert(b.fz)
        except (IndexError, IOError):
            print(
                "Warning - Training Subset %s not included as \
results not found..." % name)
            continue

        # Get the bonding information
        with_bonds = structures.Molecule("%s/%s/%s.cml" %
                                         (training_sets_folder, name, name),
                                         extra_parameters=extra_parameters,
                                         allow_errors=True,
                                         test_charges=False)

        # Copy over the forces read in into the system that has the bonding
        # information
        for a, b in zip(with_bonds.atoms, result.atoms):
            a.fx, a.fy, a.fz = b.fx, b.fy, b.fz
            # sanity check on atom positions
            if geometry.dist(a, b) > 1e-4:
                raise Exception('Atoms are different:', (a.x, a.y, a.z),
                                (b.x, b.y, b.z))

        # Rename and save energy
        with_bonds.energy = result.energy
        with_bonds.name = name

        # Now, we read in all the potential three-body interactions that our
        # training set takes into account.  This will be in a 1D array
        composition = ' '.join(sorted([a.element for a in result.atoms]))
        if composition not in systems_by_composition:
            systems_by_composition[composition] = []
        systems_by_composition[composition].append(with_bonds)

    # Generate:
    #  (1) xyz file of various systems as different time steps
    #  (2) system to simulate
    xyz_atoms = []
    to_delete = []
    for i, composition in enumerate(systems_by_composition):
        # Sort so that the lowest energy training subset is first
        # in the system
        systems_by_composition[composition].sort(key=lambda s: s.energy)
        baseline_energy = systems_by_composition[composition][0].energy
        # Offset the energies by the lowest energy, and convert energy units
        for j, s in enumerate(systems_by_composition[composition]):
            s.energy -= baseline_energy
            s.energy = units.convert_energy("Ha", "kcal/mol", s.energy)
            # Don't use high-energy systems, because these will not likely
            # be sampled in MD
            if s.energy > high_energy_cutoff:
                to_delete.append([composition, j])
                continue
            # For testing purposes, output
            if verbose:
                print("Using:", s.name, s.energy)
            xyz_atoms.append(s.atoms)
            system.add(s, len(system.molecules) * system_x_offset)

    # Delete the system_names that we aren't actually using due to energy
    # being too high
    to_delete = sorted(to_delete, key=lambda x: x[1])[::-1]
    for d1, d2 in to_delete:
        if verbose:
            print("Warning - Training Subset %s not included as energy \
is too high..." % systems_by_composition[d1][d2].name)
        del systems_by_composition[d1][d2]

    # Make the box just a little bigger (100) so that we can fit all our
    # systems
    system.xhi = len(system.molecules) * system_x_offset + 100.0

    # Write all of the states we are using to training_sets.xyz
    files.write_xyz(xyz_atoms, training_sets_folder + '/' + pickle_file_name)
    # Generate our pickle file
    print("Saving pickle file %s..." % pfile)
    fptr = open(pfile, "wb")
    pickle.dump([system, systems_by_composition], fptr)
    fptr.close()

    # Now we have the data, save it to files for this simulation of
    # "run_name" and return parameters
    if not os.path.isdir(run_name):
        os.mkdir(run_name)
    os.chdir(run_name)
    mcsmrff_files.write_system_and_training_data(run_name, system,
                                                 systems_by_composition)
    os.chdir("../")
    shutil.copyfile(pfile, "%s/%s.pickle" % (run_name, run_name))

    return system, systems_by_composition
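A usage sketch following the docstring above; the run name and folder are hypothetical, and note that the function raises if the pickle file already exists.

system, systems_by_composition = pickle_training_set(
    "mcsmrff_01",
    training_sets_folder="training_set",
    pickle_file_name="training_set",
    high_energy_cutoff=500.0,
    verbose=True)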
Example #38
    def snap_point(self, x: float, y: float) -> Point:
        p = Point(x, y)
        for p2 in self.collection.all_points:
            if dist(p, p2) < SNAP_RADIUS:
                return p2
        return p
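A self-contained check of the snapping behavior; Point, dist and SNAP_RADIUS are stand-ins for the real definitions, which this snippet does not show.

import math

SNAP_RADIUS = 10.0

def dist(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

existing = [(0.0, 0.0), (100.0, 100.0)]
p = (3.0, 4.0)  # a click within SNAP_RADIUS of (0, 0)
snapped = next((q for q in existing if dist(p, q) < SNAP_RADIUS), p)
print(snapped)  # (0.0, 0.0)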
Example #39
File: curve.py Project: getsalmon/kgg2
    def get_distance_from_focuses(self, p):
        return abs(
            abs(geom.dist(p, self.focuses[0]) -
                geom.dist(p, self.focuses[1])) - self.delta)
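The value returned above is the residual of the hyperbola equation: a point p lies on the curve defined by the two focuses exactly when |d(p, f1) - d(p, f2)| equals delta, so the method returns zero on the curve and grows away from it. A quick standalone check with illustrative focuses:

import math

f1, f2 = (-5.0, 0.0), (5.0, 0.0)
delta = 6.0  # |d1 - d2| on the curve

def residual(p):
    return abs(abs(math.dist(p, f1) - math.dist(p, f2)) - delta)

print(residual((3.0, 0.0)))  # 0.0: the vertex (d1=8, d2=2) is on the curve
print(residual((0.0, 0.0)))  # 6.0: at the center d1 == d2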
Example #41
def calcEdgeGridDist(pt1, pt2, mins, maxs, gridSize, metric="L1"):
    return geometry.dist(getIndices(mins, gridSize, pt1),
                         getIndices(mins, gridSize, pt2),
                         metric=metric)
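getIndices is not shown in this snippet; it is assumed to map a point to its integer cell indices on a regular grid with origin mins and spacing gridSize, so the returned distance is measured in grid cells (L1 by default). A plain sketch under that assumption:

import math

def get_indices(mins, grid_size, pt):
    # Assumed behavior: floor((coordinate - origin) / spacing) per axis
    return [int(math.floor((c - m) / grid_size)) for c, m in zip(pt, mins)]

def grid_dist(i, j, metric="L1"):
    if metric == "L1":
        return sum(abs(a - b) for a, b in zip(i, j))
    return math.dist(i, j)

mins = (0.0, 0.0)
print(grid_dist(get_indices(mins, 1.0, (0.2, 0.7)),
                get_indices(mins, 1.0, (3.9, 2.1))))  # 5 cells: 3 + 2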
Example #42
    def build(self, params):
        """Build the tessellation for the nerve."""

        # Get parameters
        # self.get_params(params)
        self.__dict__.update(params)

        mnd = self.mnd
        min_sep = self.min_sep
        rmin = self.rmin
        rmax = self.rmax
        circp_tol = self.circp_tol
        max_axs_pf = self.max_axs_pf
        numberofaxons = self.numberofaxons
        locations = self.locations
        radii = self.radii
        # models = self.models

        # Build contours
        self.build_contours()

        contour = self.contour
        contour_hd = self.contour_hd
        contour_pslg = self.contour_pslg
        contour_pslg_nerve = self.contour_pslg_nerve

        ##################################################################
        # Triangulate

        # Max. area for the triangles,
        # inverse to the minimum NAELC density
        maxarea = 1. / mnd
        # Triangulate the full PSLG; an earlier variant triangulated
        # contour_pslg_nerve alone, without the fascicles' contours
        tri = triangle.triangulate(contour_pslg, 'a%f' % maxarea)
        # Vertices
        tv = tri['vertices']

        self.original_points = tv.T

        ##################################################################
        # Fill fascicles

        # If the axons have fixed locations, get their locations and
        # radii first, and then they will be added to the different
        # fascicles when needed
        if self.packing_type == "fixed locations":
            try:
                xx_, yy_ = np.array(locations).T
            except ValueError:
                # Something went wrong or simply there are no axons
                xx_ = yy_ = rr_ = np.array([])
            else:
                rr_ = np.array(radii)

            # Axon models
            self.models = self.models['fixed']

        # Remove points inside the fascicles
        remove_these = []
        axons = {}
        naxons = {}
        naxons_total = 0
        print('about to fill the contours')
        for k, v in contour.items():
            varr = np.array(v)
            # Remove points outside the nerve
            if 'Nerve' in k:
                plpol = pl.Polygon(v)
                for i, p in enumerate(tv):
                    # Remove any points in tv falling outside the nerve
                    # and outside its boundaries
                    # Note: that means that I don't remove the points ON the
                    # boundaries
                    if not (plpol.contains_point(p) or np.isin(p, varr).all()):
                        remove_these.append(i)
            # Remove points from the fascicles
            if 'Fascicle' in k:
                print(k)
                plpol = pl.Polygon(v)
                for i, p in enumerate(tv):
                    # Remove any points in tv that fall inside a fascicle
                    # or lie on its contour
                    if plpol.contains_point(p) or np.isin(p, varr).all():
                        remove_these.append(i)
                # Fill the fascicle
                # Dictionary of axons
                axons[k] = {}
                # Create the circles
                # Different packing strategies yield different results
                if self.packing_type == "uniform":
                    xx, yy, rr = cp.fill(contour_hd[k],
                                         rmin,
                                         rmax,
                                         min_sep,
                                         nmaxpf=max_axs_pf,
                                         tolerance=circp_tol)
                elif self.packing_type == "gamma":
                    distr_params = {
                        "mean": self.avg_r,
                        "shape": self.gamma_shape
                    }
                    xx, yy, rr = cp.fill(contour_hd[k],
                                         rmin,
                                         rmax,
                                         min_sep,
                                         nmaxpf=max_axs_pf,
                                         tolerance=circp_tol,
                                         distribution="gamma",
                                         distr_params=distr_params)
                    print('Filled %s' % k)
                elif self.packing_type == "fixed locations":
                    # Iterate over axons and get those inside
                    # the fascicle
                    xx, yy, rr = [], [], []
                    for x_, y_, r_ in zip(xx_, yy_, rr_):
                        if plpol.contains_point((x_, y_)):
                            xx.append(x_)
                            yy.append(y_)
                            rr.append(r_)
                    xx = np.array(xx)
                    yy = np.array(yy)
                    rr = np.array(rr)

                # Store information in a clean way
                axons[k]['x'] = xx
                axons[k]['y'] = yy
                axons[k]['r'] = rr
                # axons[k]['models'] = models[:]
                naxons[k] = len(xx)
                naxons_total += naxons[k]

        rps = tv[remove_these]

        self.removed_points = rps

        keep_these = np.array(
            list(set(range(tv.shape[0])) - set(remove_these)))
        tv = tv[keep_these]

        # List x, y and r once some points have been removed
        tv = tv.T
        x, y = tv
        nc = x.size
        r = np.zeros_like(x)

        # Dictionaries for:
        # cables (their type)

        cables = OrderedDict()
        models = {}

        # Points corresponding to epineurium
        for ic in range(nc):
            cables[ic] = 'NAELC'
            # NAELC model indexing
            models[ic] = 'NAELC'
            print(ic, models, self.models)

        # Add axons to the existing points for the nerve
        for k in axons:
            x = np.array(x.tolist() + axons[k]['x'].tolist())
            y = np.array(y.tolist() + axons[k]['y'].tolist())
            r = np.array(r.tolist() + axons[k]['r'].tolist())

        nc = x.size
        nNAELC = nc - naxons_total

        # Axon models
        if self.packing_type != 'fixed locations':
            # Now that the axons have been placed, determine their models according to the proportions
            # ninst: 'number of instances' (of each model)
            ninst = {}
            proportions = self.models['proportions']
            keys = list(proportions.keys())
            for m, p in proportions.items():
                ninst[m] = int(p * naxons_total)
            # Remainder
            rem = naxons_total - sum(list(ninst.values()))

            # Just add the remaining in an arbitrary (not random) way
            for i in range(rem):
                ninst[keys[i]] += 1

            # Now select the indices of the axons for each model
            axon_indices = (np.arange(naxons_total) + nNAELC).tolist()
            remaining_axon_indices = axon_indices[:]
            # Dictionary for the axon indices for each model
            inds = {}
            # Dictionary for the model names for each index
            model_by_index = {}
            for m, p in proportions.items():
                sample = random.sample(remaining_axon_indices, ninst[m])
                remaining_axon_indices = list(
                    set(remaining_axon_indices) - set(sample))
                inds[m] = sample[:]
                for i in sample:
                    model_by_index[i] = m

        # Points corresponding to axons
        for ik, i in enumerate(np.arange(nNAELC, nc, 1)):
            cables[i] = 'Axon'
            # Axon model indexing
            if self.packing_type == 'fixed locations':
                models[i] = self.models[ik]
            else:
                models[i] = model_by_index[i]

        ##################################################################
        # Power diagram
        # Zero-valued radii: Voronoi diagram

        # Build power diagram
        pd = tess.PowerDiagram(x, y, r, contour_pslg_nerve)
        pd.build()

        # Lengths of the segments and the connections
        pdpairs = pd.pairs
        pdsegments = pd.segments
        pairs = []
        segments = {}
        len_seg = {}
        len_con = {}
        for pair in pdpairs:
            if pdsegments[pair] is not None:
                seg = pdsegments[pair]
                pairs.append(pair)
                segments[pair] = seg
                a, b = seg.a, seg.b
                len_seg[pair] = geo.dist(a, b).mean()
                i, j = pair
                len_con[pair] = geo.dist((x[i], y[i]), (x[j], y[j]))

        # Store relevant stuff in the attributes
        self.pd = pd
        self.x = x
        self.y = y
        self.r = r
        self.nc = nc
        self.axons_dict = axons
        self.pairs = pairs
        self.cables = cables
        self.models = models
        # Unique list of models
        self.models_set = set(self.models.values())
        self.segments = segments
        self.trios = pd.trios
        self.len_con = len_con
        self.len_seg = len_seg
        self.free_areas = pd.free_areas
        self.circ_areas = pd.circ_areas
        # Total endoneurial cross-sectional free area
        self.endo_free_cs_area = self.fas_total_area - self.circ_areas.sum()
        self.naxons = naxons
        self.naxons_total = naxons_total
        self.nNAELC = nNAELC
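The triangulation step above uses the Python triangle wrapper around Shewchuk's Triangle, where the option string 'a%f' caps the triangle area. A standalone sketch on a unit square (the 'p' flag, added here, makes the PSLG segments constraining):

import numpy as np
import triangle

# Unit square as a PSLG: four vertices plus the four boundary segments
pslg = {
    'vertices': np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]),
    'segments': np.array([[0, 1], [1, 2], [2, 3], [3, 0]]),
}
tri = triangle.triangulate(pslg, 'pa0.05')  # max triangle area 0.05
print(tri['vertices'].shape, tri['triangles'].shape)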