Code Example #1
def get_lines(img, birds_eye, thresholds):
    """
    Finds the lane lines in the given image
    :param img: the image to find the lane lines on
    :param birds_eye: the bird's eye transformation object
    :param thresholds: thresholds used for vision filters
    :return: the coefficients of the detected left and right lane lines as 2nd degree polynomials
    """
    wb = get_wb(img, birds_eye, thresholds)
    curves = Curves(number_of_windows=9,
                    margin=100,
                    minimum_pixels=10,
                    ym_per_pix=30 / 720,
                    xm_per_pix=3.7 / 700)
    result = curves.fit(wb)
    plt.imshow(result['image'])
    plt.show()
    visual = birds_eye.project(img, wb, result['pixel_left_best_fit_curve'],
                               result['pixel_right_best_fit_curve'])
    plt.imshow(visual)
    plt.show()

    return (result['pixel_left_best_fit_curve'],
            result['pixel_right_best_fit_curve'])
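
The constructor arguments above (number_of_windows, margin, minimum_pixels) suggest that Curves.fit runs a sliding-window lane search followed by a 2nd-degree polynomial fit in pixel space. As a rough, self-contained sketch of just the fitting step, with hypothetical pixel arrays rather than the project's own code:

import numpy as np

def fit_lane_polynomials(left_ys, left_xs, right_ys, right_xs):
    """Illustrative sketch: fit x = A*y**2 + B*y + C for each lane line,
    the kind of 2nd-degree pixel-space fit that get_lines returns."""
    left_fit = np.polyfit(left_ys, left_xs, 2)
    right_fit = np.polyfit(right_ys, right_xs, 2)
    return left_fit, right_fit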
Code Example #2
File: tracing.py (project: Yennda/ray-tracing)
    def __init__(self, source, crystal: Crystal, detector: Detector):
        for s in source:
            s.reset()
        self.source = source
        self.crystal = crystal
        self.detector = detector
        self.curveSi = Curves()

        self.t = True

        for s in self.source:
            s.direction = la.normalize(la.minus(crystal.loc, s.loc))

            alpha = la.cos([
                0, self.crystal.loc[1] - s.loc[1],
                self.crystal.loc[2] - s.loc[2]
            ], [0, 0, 1])
            beta = la.cos([
                self.crystal.loc[0] - s.loc[0], 0,
                self.crystal.loc[2] - s.loc[2]
            ], [0, 0, 1])

            s.solid_angle = (m.pi * self.crystal.D**2 / 4 * alpha *
                             beta) / (la.norm(la.minus(crystal.loc, s.loc))**2)
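
The final expression is a small-solid-angle approximation: the crystal's circular aperture area (pi * D**2 / 4), foreshortened by the direction cosines alpha and beta computed above, divided by the squared source-to-crystal distance. A standalone restatement of the same formula (the function name is illustrative, not from the project):

import math as m

def approximate_solid_angle(D, alpha, beta, distance):
    """Projected aperture area over squared distance (small-angle approximation)."""
    return (m.pi * D ** 2 / 4 * alpha * beta) / distance ** 2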
Code Example #3
from curves import Curves
from plot import PlotGraph
import math

curve = Curves()
plot = PlotGraph()

x, y = curve.Spiral(a=1, n=10)
plot.Plot2DGraph(x, y, 'a=1, n=10')

x, y = curve.Spiral(a=2, n=10)
plot.Plot2DGraph(x, y, 'a=2, n=10')
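
The Curves.Spiral implementation is not shown here; a plausible, self-contained sketch of an Archimedean spiral generator with the same signature (treat this as an assumption about what Spiral(a, n) computes, not the project's code):

import numpy as np

def spiral(a=1.0, n=10, samples=2000):
    """Archimedean spiral r = a * theta traced over n turns."""
    theta = np.linspace(0.0, 2.0 * np.pi * n, samples)
    return a * theta * np.cos(theta), a * theta * np.sin(theta)

x, y = spiral(a=1, n=10)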
Code Example #4
File: scene.py (project: jkotur/Torrusador)
	def __init__( self , fov , ratio , near ) :
		self.fov = fov
		self.near = near 
		self.ratio = ratio
		self.drawmode = Scene.DRAW2D
		self.mousemode = Scene.NONE
		self.cursormode = Scene.PNTBZADD

		self.pdist = 0.025
		self.pdist2= self.pdist*self.pdist

		self.root   = Node()
		self.root3d = Node()

		self.camera = Camera()
		self.proj   = Projection()

		self.curves= Curves()

		#
		# create planes
		#

		self.load_from_file(u'../data/młotek.gpt')
#        self.load_from_file(u'../data/głowica.gpt')
#        self.load_from_file(u'../data/cut_test_10.gpt')

		#
		# Create torus
		#
		self.torus = Torus()

		self.node = Node()

		tn = Node( self.torus )
		tn.rotate( 3.1415926/2.0 , 1, 0 , 0 )

		self.cursor = Cursor( Cross( self.pdist ) )

		self.node.add_child( tn )
		self.node.add_child( self.cursor )
		self.node.add_child( self.curves )

		#
		# Create normal scene
		#
		self.proj  .perspective( self.fov , self.ratio, self.near , 10000 )
		self.camera.lookat( *STARTLOOK )
		col = Node( color = (1,1,1) )


		self.root  .add_child( self.proj   )
		self.proj  .add_child( self.camera )
		self.camera.add_child(      col    )
		col        .add_child( self.node   )

		#
		# Create 3d scene
		#
		self.cam_left  = Camera()
		self.cam_right = Camera()
		self.p_left    = Projection()
		self.p_right   = Projection()
		self.t_left    = Node()
		self.t_right   = Node()

		self.root3d.add_child( self.t_left  )
		self.root3d.add_child( self.t_right )

		self.cam_left .lookat( *STARTLOOK )
		self.cam_right.lookat( *STARTLOOK )

		self.p_left .perspective( self.fov , self.ratio, self.near , 10000 )
		self.p_right.perspective( self.fov , self.ratio, self.near , 10000 )

		self.color_left  = Node( color = (1,0,0) )
		self.color_right = Node( color = (0,1,0) )

		self.t_left .add_child( self.p_left  )
		self.t_right.add_child( self.p_right )

		self.p_left .add_child( self.cam_left  )
		self.p_right.add_child( self.cam_right )

		self.cam_left .add_child( self.color_left  )
		self.cam_right.add_child( self.color_right )

		self.color_left .add_child( self.node )
		self.color_right.add_child( self.node )

		self.node.translate(0,0,-2)
Code Example #5
File: scene.py (project: jkotur/Torrusador)
class Scene :
	DRAW2D , DRAW3D = range(2)
	NONE , CURSOR , TRANSLATE , SCALE , ISOSCALE , ROTATE , CAMERA = range(7)
	PNTBZADD , PNTBSADD , PNTDEL , PNTEDIT = range(4)
	C0 , C1 , C2 = Curves.C0 , Curves.C1 , Curves.C2

	def __init__( self , fov , ratio , near ) :
		self.fov = fov
		self.near = near 
		self.ratio = ratio
		self.drawmode = Scene.DRAW2D
		self.mousemode = Scene.NONE
		self.cursormode = Scene.PNTBZADD

		self.pdist = 0.025
		self.pdist2= self.pdist*self.pdist

		self.root   = Node()
		self.root3d = Node()

		self.camera = Camera()
		self.proj   = Projection()

		self.curves= Curves()

		#
		# create planes
		#

		self.load_from_file(u'../data/młotek.gpt')
#        self.load_from_file(u'../data/głowica.gpt')
#        self.load_from_file(u'../data/cut_test_10.gpt')

		#
		# Create torus
		#
		self.torus = Torus()

		self.node = Node()

		tn = Node( self.torus )
		tn.rotate( 3.1415926/2.0 , 1, 0 , 0 )

		self.cursor = Cursor( Cross( self.pdist ) )

		self.node.add_child( tn )
		self.node.add_child( self.cursor )
		self.node.add_child( self.curves )

		#
		# Create normal scene
		#
		self.proj  .perspective( self.fov , self.ratio, self.near , 10000 )
		self.camera.lookat( *STARTLOOK )
		col = Node( color = (1,1,1) )


		self.root  .add_child( self.proj   )
		self.proj  .add_child( self.camera )
		self.camera.add_child(      col    )
		col        .add_child( self.node   )

		#
		# Create 3d scene
		#
		self.cam_left  = Camera()
		self.cam_right = Camera()
		self.p_left    = Projection()
		self.p_right   = Projection()
		self.t_left    = Node()
		self.t_right   = Node()

		self.root3d.add_child( self.t_left  )
		self.root3d.add_child( self.t_right )

		self.cam_left .lookat( *STARTLOOK )
		self.cam_right.lookat( *STARTLOOK )

		self.p_left .perspective( self.fov , self.ratio, self.near , 10000 )
		self.p_right.perspective( self.fov , self.ratio, self.near , 10000 )

		self.color_left  = Node( color = (1,0,0) )
		self.color_right = Node( color = (0,1,0) )

		self.t_left .add_child( self.p_left  )
		self.t_right.add_child( self.p_right )

		self.p_left .add_child( self.cam_left  )
		self.p_right.add_child( self.cam_right )

		self.cam_left .add_child( self.color_left  )
		self.cam_right.add_child( self.color_right )

		self.color_left .add_child( self.node )
		self.color_right.add_child( self.node )

		self.node.translate(0,0,-2)

	def clear( self ) :
		self.curves.clear()

	def gfx_init( self ) :
		glPointSize(3)

	def draw( self ) :
		root = None

		if self.drawmode == Scene.DRAW2D :
			root = self.root

			glDisable(GL_BLEND)

		elif self.drawmode == Scene.DRAW3D :
			root = self.root3d

			glEnable(GL_BLEND)
			glBlendFunc(GL_ONE,GL_ONE)

		self._draw( root )

		glDisable(GL_BLEND)

	def _draw( self , node ) :
		if not node :
			return

		node.multmatrix()
		node.draw()

		m = glGetFloatv(GL_MODELVIEW_MATRIX)

		for c in node :
			glLoadMatrixf(m)
			self._draw( c )

	def set_left_color( self , color ) :
		self.color_left.set_color( color )

	def set_right_color( self , color ) :
		self.color_right.set_color( color )

	def set_eyes_split( self , split ) :
#        self.cam_left .lookat( (-split,0,0) , (-split,0,-1) , (0,1,0) )
#        self.cam_right.lookat( ( split,0,0) , ( split,0,-1) , (0,1,0) )
		self.cam_left .move( -split , 0 , 0 )
		self.cam_right.move(  split , 0 , 0 )
		self.p_left .loadIdentity()
		self.p_right.loadIdentity()
		self.p_left .translate( -split , 0 , 0 )
		self.p_right.translate(  split , 0 , 0 )

	def _update_proj( self ) :
		self.proj   .perspective( self.fov , self.ratio , self.near , 10000 )
		self.p_left .perspective( self.fov , self.ratio , self.near , 10000 )
		self.p_right.perspective( self.fov , self.ratio , self.near , 10000 )

	def set_fov( self , fov ) :
		self.fov = fov
		self._update_proj()

	def set_near( self , near ) :
		self.near = near
		self._update_proj()

	def set_ratio( self , ratio ) :
		self.ratio = ratio
		self._update_proj()

	def set_screen_size( self , w , h ) :
		self.width  = w 
		self.height = h
		self.curves.set_screen_size( w , h )

	def set_drawmode( self , mode ) :
		self.drawmode = mode

	def set_mousemode( self , mode ) :
		self.mousemode = mode

	def set_cursormode( self , mode ) :
		self.cursormode = mode

	def set_editmode( self , mode ) :
		self.curves.set_editmode( mode )

	def set_lookat( self , pos , look ) :
		pos = np.array(pos)
		look = np.array(look)
		up = np.cross(look,(1,0,0))
		if up[0]==0 and up[1]==0 and up[2]==0 :
			up = np.cross(look,(0,0,1))
		if up[0]==0 and up[1]==0 and up[2]==0 :
			up = np.cross(look,(0,1,0))
		up = up / np.linalg.norm(up)
		self.camera.lookat( pos , pos+look , up )
		self.cam_left .lookat( pos , pos+look , up )
		self.cam_right.lookat( pos , pos+look , up )


	def get_cursor_pos( self ) :
		return self.cursor.get_pos()
	
	def get_cursor_screen_pos( self ) :
		cp = self.cursor.get_clipping_pos()
		return ( (cp[0]+1.0)/2.0 * self.width , (cp[1]+1.0)/2.0 * self.height )

	def mouse_move( self , rdf , df , a1 , a2 ) :

		if self.mousemode == Scene.CURSOR :
			v = self.cursor.move_vec( df )
			self.curves.point_move( v )

		elif self.mousemode == Scene.TRANSLATE :
			self.node.translate( *map(lambda x:x*.01,df) )
		elif self.mousemode == Scene.SCALE :
			self.node.scale( *map(lambda x:1+x*.01,df) )
		elif self.mousemode == Scene.ISOSCALE :
			self.node.scale( *([1+.01*reduce( op.add , df ) ] * 3 ) )
		elif self.mousemode == Scene.ROTATE :
			df.remove(0)
			self.node.rotate( df[0]*.001 , *a1 )
			self.node.rotate( df[1]*.001 , *a2 )
		elif self.mousemode == Scene.CAMERA :
			rdf = [ x*.1 for x in rdf ]
			self.camera.rot( rdf[0] ,-rdf[1] )
			self.cam_left.rot( rdf[0] ,-rdf[1] )
			self.cam_right.rot( rdf[0] ,-rdf[1] )
	
	def key_pressed( self , df ) :
		if self.mousemode == Scene.CAMERA :
			self.camera.move( *map(lambda x : x*.05 , df ) )
			self.cam_left.move( *map(lambda x : x*.05 , df ) )
			self.cam_right.move( *map(lambda x : x*.05 , df ) )

	def activate_cursor( self ) :
		if self.cursormode == Scene.PNTBZADD :
			self.curves.point_new( Curve.BEZIER  , self.cursor.get_pos() )
		elif self.cursormode == Scene.PNTBSADD :
			self.curves.point_new( Curve.BSPLINE , self.cursor.get_pos() )
		elif self.cursormode == Scene.PNTDEL :
			self.curves.point_delete( self.cursor.get_clipping_pos() , self.pdist2 )
		elif self.cursormode == Scene.PNTEDIT :
			self.curves.point_select( self.cursor.get_clipping_pos() , self.pdist2 )

	def new_curve_c0( self ) :
		self.curves.new( self.cursor.get_pos() , Curves.BEZIER_C0 , post_data=Curve.BEZIER ) 

	def new_curve_c2( self ) :
		if self.cursormode == Scene.PNTBZADD :
			self.curves.new( self.cursor.get_pos() , Curves.BEZIER_C2 , post_data=Curve.BEZIER  )
		elif self.cursormode == Scene.PNTBSADD :                      
			self.curves.new( self.cursor.get_pos() , Curves.BEZIER_C2 , post_data=Curve.BSPLINE )

	def new_curve_interpolation( self ) :
		self.curves.new( self.cursor.get_pos() , Curves.INTERPOLATION )

	def new_surface_c0( self , size ) :
		self.curves.new( self.cursor.get_pos() , Curves.SURFACE_C0 , pre_data = size )

	def new_surface_c2( self , size ) :
		self.curves.new( self.cursor.get_pos() , Curves.SURFACE_C2 , pre_data = size )

	def new_pipe( self , size ) :
		self.curves.new( self.cursor.get_pos() , Curves.SURFACE_PIPE , pre_data = size )

	def new_gregory( self , size ) :
		self.curves.new( self.cursor.get_pos() , Curves.SURFACE_GREGORY , pre_data = size )

	def delete_curve( self ) :
		self.curves.delete( self.cursor.get_clipping_pos() , self.pdist2 )

	def select_curve( self ) :
		self.curves.select( self.cursor.get_clipping_pos() , self.pdist2 )

	def toggle_curve( self , which , what ) :
		self.curves.toggle( which , what )

	def fill_gap( self , c ) :
		self.curves.fill_gap( c )

	def cut_current( self , pos , delta ) :
#        return self.curves.cut( self.cursor.get_pos() , delta )
		return self.curves.cut( pos , delta )

	def select_to_cut( self ) :
		self.curves.select_to_cut( self.cursor.get_clipping_pos() , self.pdist2 )

	def clear_cut( self ) :
		self.curves.clear_cut()

	def cut_select( self , i , k ) :
		self.curves.cut_select( i , k )

	def set_surf_density( self , dens ) :
		self.curves.set_surf_density( dens )

	def load_from_file( self , path ) :
		self.curves.load( path )

	def dump_to_file( self , path ) :
		self.curves.dump( path )

	def gen_paths( self ) :
		self.clear()

		self.load_from_file(u'../data/młotek.gpt')
		for c in self.curves : self.curves.cutter.add( c )
		print 'cut'
		self.curves.cut( (0,0,0,0) , 0.01 )
		print 'mil'
		self.miller = milling_paths.Miller( self.curves )
		self.node.add_child( self.miller )

	def dump_sign( self ) :
		curv = self.curves.selected
		if curv == None : return
		path = []
		for u in np.linspace(0,len(curv)-1,len(curv)*16) :
			path.append( curv.get_ptn( u ) )
		trans = np.resize( milling_paths.TRANS , 4 )
		trans[3] = 0
		scale = milling_paths.SCALE
		proc = lambda p : (p + trans)*scale
		saver.save(-1,'05_sign.k4',path, pre = proc )
Code Example #6
model_1 = apex.currentModel()

# functions

# Lists of points for all curves
ptList_p = []

n_of_curve = 2

for i in range(0, n_of_curve):
    ptList_p.append([])

# To be reworked:
Curves(ap(C.range_a, C.slow_step, odstep_slupa),
       b_p(C.range_b, C.slow_step, odstep_slupa), C.v, C.pr_height - 2.1,
       ptList_p[0])
# //
# Curves(a(C.range_a, C.slow_step, C.plinth),
#        b(C.range_b, C.slow_step, C.plinth), C.v, C.ibeam_height, ptList_p[1])
# Curves(a(C.range_a, C.slow_step, C.spacing + C.plinth),
#        b(C.range_b, C.slow_step, C.spacing + C.plinth), C.v, C.ibeam_height,
#        ptList_p[2])
# Curves(a(C.range_a, C.slow_step, 2.0 * C.spacing + C.plinth),
#        b(C.range_b, C.slow_step, 2.0 * C.spacing + C.plinth), C.v,
#        C.ibeam_height, ptList_p[3])
# Curves(a(C.range_a, C.slow_step, 3.0 * C.spacing + C.plinth),
#        b(C.range_b, C.slow_step, 3.0 * C.spacing + C.plinth), C.v,
#        C.ibeam_height, ptList_p[4])
# Curves(a(C.range_a, C.slow_step, C.beam_distance + C.plinth),
#        b(C.range_b, C.slow_step, C.beam_distance + C.plinth), C.v,
Code Example #7
def main(argv, other_stuff=0):
    print "==================================================================================="
    print "Orchestration Main Started"
    print("################# COLLECTING HAZARD INFORMATION ##################")
    print("Gathering data from USGS")
    # Take USGS data sets (3 curves) for a location and get back........................................................
    basedir = "C:\\Users\\Karen\\Desktop\\USGS_Resilience-master\\USGS_Resilience-master\\nshmp-haz-master\\curve-making-code\\curves_east_donotmodify"
    #models = ["PGA"] #changed this from line below since we are only asking for one intensity measure (imt)
    models = ["PGA","SA0P2","SA1P0"] #different types of hazard models used
    #read all files
    cityhazardfunctions = ReadHazardData(models,basedir) # nested dict. {city: {model: (X,Y) }}
    #print(json.dumps(cityhazardfunctions,indent=4)) #print to debug if something goes horrendously wrong
    curv = Curves()
    city_splines = curv.querycurves(cityhazardfunctions,savefigs=True)
    # To get the y values for a given list of x's, set these values
    # So, you will set the location from the curve sets we have already generated
    # (for the locations we have, either see the folders here or see the USGS files and sitesE.geojson and sitesW.geojson)
    # The models we have access to set (so the middle term here) without further editing the USGS tool are on line 51 above
    #demo_x = [0.42] #[0.1, 0.2, 0.3, 0.4, 0.5] # So this would be some set of x values you want the corresponding y values for

    #DEFINE HAZARDS.....................................................................................................
    #Here is where we are going to pull data from the USGS Hazard Curves for PGA, SA1, SA02
    #Essentially, we will be passing vectors into MATLAB for the three curves
    #The "x" subscript refers to spectral acceleration being on the x-axis of the hazard curves
    #The "y" subscript refers to the values for the annual rate of exceedance for the specified spectral accelerations
    #Since we will be interpolating between three hazard curves within MATLAB, we ask for all data from USGS (i.e. given the initial USGS data points, we are performing a linear interpolation per curve (these are the values we are getting back in this call to city_splines), then we will interpolate over the three hazard curves in MATLAB)


    #We will begin with PGA for the specified location: Chicago IL
    values=city_splines["Chicago IL"]["PGA"]
    PGAx=matlab.double(list(values[0])) #Spectral acceleration values
    PGAy=matlab.double(list(values[1])) #Annual Rate of Exceedance values (Note: these values need to be converted in MATLAB)
    #print(type(PGAx),type(PGAy)) #if uncommented, this will verify that the conversion to a matlab.mlarray.double class was successful

    #Now we repeat the above procedure for SA1, our spectral acceleration for 1.0 second period:
    values = city_splines["Chicago IL"]["SA1P0"]
    SA1x = matlab.double(list(values[0]))  # Spectral acceleration values
    SA1y = matlab.double(list(values[1]))  # Annual Rate of Exceedance values (Note: these values need to be converted in MATLAB)

    #One more time for SA02, our spectral acceleration for 0.2 second period:
    values = city_splines["Chicago IL"]["SA0P2"]
    SA02x = matlab.double(list(values[0]))  # Spectral acceleration values
    SA02y = matlab.double(list(values[1]))  # Annual Rate of Exceedance values (Note: these values need to be converted in MATLAB)

    num_int=float(8) #here we are defining how many intervals (levels of intensity)

    #Last thing: we are going to specify our Soil_Site_class for this site:
    Soil_Site_class='B'

    #Now that we have all of the data we need from our Hazard Curves, we will continue so that we can construct the Semantic Graph and query elevation information
    #...................................................................................................................

    # Construct Semantic Graph..........................................................................................
    print("Constructing Semantic Graph")
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    # Note, in script, will need to reset the location of the stored file to be findable by these next lines
    #inputfileIFCXML = Call API script using IronPython
    inputfileIFCXML = 'C:/Users/Karen/Desktop/Resilience_Orchestration-master/Resilience_Orchestration-master/TempXMLs/bRC_FRAME_Concrete_allComponents.ifcxml'
    outputpath='output.csv'
    material_flag = 0
    level_flag = 0
    structure_flag = 0
    puncture_flag = 0
    test_query_sequence_flag = 0
    SemanticGraph_InitialRun = 0
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    #geo_link = Geo_Link()
    #geo_link.inputfile = os.path.abspath(inputfileIFCXML)
    #geo_link.material_flag = material_flag
    #geo_link.level_flag = level_flag
    #geo_link.structure_flag = structure_flag
    #geo_link.puncture_flag = puncture_flag
    #geo_link.test_query_sequence_flag = test_query_sequence_flag
    #geo_link.run()
    # Alternatively, a method like this may work, but will need some tweaking as this is done separately at this point
    mylist_of_parameters = [str(inputfileIFCXML) + " " + str(outputpath) + " " + str(material_flag) + " " + str(level_flag) + " " + str(structure_flag) + " " + str(puncture_flag) + " " + str(test_query_sequence_flag)]



    # CFVII!!! We should be using this as a module!!!!
    # THIS CREATES A NEW GRAPH OUTPUT
    subprocess.call(["python", "C:/Users/Karen/Desktop/GeoLinked_HollyFerguson-master/GeoLinked_HollyFerguson-master/GeoLmain.pyc", str(inputfileIFCXML), str(outputpath), str(material_flag), str(level_flag), str(structure_flag), str(puncture_flag), str(test_query_sequence_flag) ])
    #subprocess.call(["python", "C:/Users/hfergus2/Desktop/GeoLinked/GeoLmain.py", "--args", str(inputfileIFCXML), str(outputpath), str(material_flag), str(level_flag), str(structure_flag), str(puncture_flag), str(test_query_sequence_flag) ])
    #USO_new = USOmain(inputfileIFCXML, outputpath, material_flag, level_flag, structure_flag, puncture_flag, test_query_sequence_flag)
    print "Storing Graph"
    #store it somewhere...currently we are saving it and accessing it from here: "C:/Users/holly/Desktop/GeoLinked/FinalGraph/MyGraph.ttl"
    #note: make sure to run the specific ifcxml in Geolinked so that the graph is available in the .ttl file specified above before running the orchestration code

    # Query Semantic Graph..............................................................................
    # Now we want to get data from my graph
    # NOTE: more queries will probably have to be written.
    # If you go to this path where the graph serialization was stored, currently left in the single room model at the time of this code
    # Then you can see the triples that were able to be pulled out of the GeoLinked project:
    #           "C:/Users/holly/Desktop/GeoLinked/FinalGraph/MyGraph.ttl"
    # If you run other models, they will replace this file above, but if you need multiple runs,
    # then a versioning system will have to be added to the processing, probably back in the GeoLinked Project or running GeoLinked from here
    # For now, this is the process of pulling levels and spaces from the models with SPARQL queries:
    #NOTE: FOR THIS OUTPUT FILE WE NEED TO RUN GEOLINKED WITH THE CORRECT MODEL TO BEGIN WITH
    outputfile = 'C:/Users/Karen/Desktop/GeoLinked_HollyFerguson-master/GeoLinked_HollyFerguson-master/FinalGraph/MyGraph.ttl'  # From the top folder and in FinalGraph
    SGA_Based_Graph = Graph()
    SGA_Based_Graph = SGA_Based_Graph.parse(outputfile, format="turtle")
    #SGA_Based_Graph.serialize(destination=outputfile, format='turtle')

    # CFVII THIS IS WHERE THE SPARQL QUERIES ARE LOCATED
    graph_data = GraphData()
    # I have added a few examples of how you might collect a certain type of data from the graph
    # You will need to add more queries that retrieve and format the information as you see fit per the project needs

    # If uncommented, will print all data in graph so you can learn the structure and what you can and cannot ask it for
    #print "Running All Data Example Query"
    #graph_data.get_all_data(SGA_Based_Graph)

    # If uncommented, will return levels in the building and their heights as a dict: [spaceBoundary: (list of data)]
    # Note: this was modified so that the variable "a" will give us all level information...to see this, uncomment print a in the for loop below
    #print "Running Levels Example Query"
    print("Gathering elevations from graph")
    levels = graph_data.get_levels(SGA_Based_Graph)  # Just copying MyGraph.ttl from other project for now
    a=dict() #this is just here to make sure that we are storing values so that we can filter through our data for when we are querying elevations
    elevations=list()
    for i in levels:
        a=i, len(levels[i]), levels[i] #if we print a, this will give us the full graph for level data
        #print (a)
        value_list=a[2]
        for j in range(len(value_list)): #
            #The idea here is to filter out elevation (z) coordinates by recognizing that these values can be converted into float() type numbers:
            try:
                elevations.append(float(value_list[j])*12) #making sure that we are in inches
            except ValueError:
                pass

    elev=matlab.double(sorted(set(elevations))) #Here we pull unique values from our list and then put them in ascending order
    print("Here are the elevations",elev) #this is here to make sure that we got the correct data

    # If uncommented, will return spaces in their respective building if multi-building: [space_collection: (list of spaces)]
    #print "Running Spaces Example Query"
    #spaces1 = graph_data.get_spaces(SGA_Based_Graph)  # Just copying MyGraph.ttl from other project for now
    #for i in spaces1:
        #print i, len(spaces1[i]), spaces1[i]

    #This calls the queries which give us back the spatial information from the ifcxml for beams and columns in our model:
    Column_info=graph_data.get_dim_columns(SGA_Based_Graph)
    Beam_info = graph_data.get_dim_beams(SGA_Based_Graph)


    #Embodied energy of structural components:
    #First we need to filter through our dictionaries to find the spatial info we need:


    # Call Green Scale..................................................................................................
    # Running the GS Tool (it has been updated to 2016 Revit) will need to be added as this project progresses
    GreenScale_InitialRun = 0  # Change flag once first run is complete
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    # Note, in script, will need to reset the location of the stored file to be findable by these next lines
    # inputfileGBXML = Call API script using IronPython
    # inputfileGBXML = 'C:/Users/Karen/Desktop/Resilience_Orchestration-master/Resilience_Orchestration-master/TempXMLs/bRC_FRAME_Concrete_allComponents.ifcxml'
    # Call GS Code (will run Thermal and EE), will want to store results plus return a dictionary of EE values

    # Call Green Scale without Revit API:
    print "==================================================================================="
    print('################ INITIAL SUSTAINABILITY ASSESSMENT #################')
    print('Running GreenScale')
    inputfile = 'D:/Users/Karen/Documents/Revit 2016/GreenScale Trials/RC_FRAME.xml'
    outputpath = 'C:/Users/Karen/Desktop/GreenScale Project/GreenScale Project/Installer/GS/Output/'
    model_flag = '3'
    dev_flag = "1"
    shadowflag = "0"
    locationfile = 'C:/Users/Karen/Desktop/GreenScale Project/GreenScale Project/Installer/GS/Locations/USA_IL_Chicago-OHare.Intl.AP.725300_TMY31.epw'
   
   # CFVII CALLING OLD GREENSCALE CODE
    subprocess.call(["python", "C:/Users/Karen/Desktop/GreenScale Project/GreenScale Project/Installer/GS/main.py", str(inputfile),str(outputpath), str(model_flag), str(dev_flag), str(shadowflag), str(locationfile)])
    print "==================================================================================="
    print "==================================================================================="

    # Karen's code after here!!!
    # Query for pre-analysis Matlab Module..............................................................................
    print('################ MODAL ANALYSIS #################')
    print("Beginning MATLAB-SAP API: Modal Analysis")
    # Call Matlab Modules as needed:
    #We call one function, InitHazardModule, in order to conduct the following:
    #(1) Pre-analysis: Modal analysis in SAP --> gives us modal analysis information for ELFM as well as connectivity information. Sets up boundary conditions.
    #(2) Values for spectral accelerations in the x and y for num_int number of intensities as per FEMA Simplified Analysis Procedures
    #(3) Calculation of Equivalent Lateral Forces for Response Module

    eng=matlab.engine.start_matlab() #start MATLAB engine for Python
    eng.cd(r'D:\Users\Karen\Documents\MATLAB\RSB\GreenResilienceMATLAB_2') #Here you specify path to folder where m-file is located
    #Define input variables for the MATLAB function:
    FilePath='D:\Users\Karen\Documents\Revit 2017\RC_FRAME' #this is the file path to the full RC Model, needed for pre-analysis function
    units=3 #Define units:
    #These are all of the possible unit combinations:
    #lb,in,F=1  lb,ft,F=2   kip,in,F=3  kip,ft,F=4
    #kN,mm,C=5  kN,m,C=6    kgf,mm,C=7  kgf,m,C=8
    #N,mm,C=9   N,m,C=10    Ton,mm,C=11 Ton,m,C=12
    #kN,cm,C=13 kgf,cm,C=14 N,cm,C=15   Ton,cm,C=16

    #User queries to consider wall properties for a frame system:
    frame_wall_flag=1 #Ask the user if they need to import wall information for frame systems: 0==false, 1==true

    #User queries if the structural system is a wall system:
    struct_wall_flag=1 #Ask the user if they need to consider structural walls: 0==false, 1==true
    wall_type='Masonry' #This is a query to ask what kind of wall system is being used (leaving as a user-defined option so that we can create a library of options in the future)
    #Here is the material information we would need from Revit in order to do this:
    E=0.4*3372.13 #The modulus of elasticity in ksi
    u=0.17 #Poisson's ratio
    a=0.00001 #The thermal coefficient
    rho=150.28 #material density in lb/ft^3

    #Changes here: We are changing the calculation of ELFs so that we only perform one calculation and scale it based on our base shear value
    
    ### CFV!!!! MATLAB EXECUTION HAPPENS HERE!!!!
    FrameObjNames,JointCoords, FrameJointConn, FloorConn, WallConn, T1,hj, mass_floor, weight,Sw,FilePathResponse,lfm,Dl,Sax,Say,Fj,PGA,Sa_1=eng.InitHazardModule(FilePath,units,elev,PGAx,PGAy,SA1x,SA1y,SA02x,SA02y,num_int,frame_wall_flag,struct_wall_flag,wall_type,E,u,a,nargout=18) #here, the format is as follows: output1, output2, etc=eng.NameOfMFile(Input1,Input2,etc), nargout refers to number of outputs
    print("Results: Hazard Module")
    print("Connectivity Data From SAP:")
    print("Joint Names and Coordinates:",JointCoords)
    print("Frame and Joint Connectivity:",FrameJointConn)
    print("Floor and Joint Connectivity:",FloorConn)
    print("Wall and Joint Connectivity:",WallConn)
    print("Information from Modal Analysis:")
    print("Period in the x and y:",T1)
    print("mass per floor:",mass_floor)
    print("total weight of structure:",weight)
    print("seismic weight:",Sw)
    print("Equivalent Lateral Forces:")
    print(Fj)
    print("PGA:",PGA)
    print("Sa_1:",Sa_1)
    print("END OF HAZARD MODULE")
    print "==================================================================================="

    #This is the end of the Hazard Module: We now have our Equivalent Static Forces for num_int intensities to conduct our response analysis

    ####################################################################################################################
    #################################BEGINNING OF RESPONSE MODULE#######################################################
    print('################ BEGINNING RESPONSE AND DAMAGE MODULES #################')
    print("Running ELFM")
    #We are now going to implement our equivalent lateral forces from the Hazard Module onto our structure to obtain the response:
    g = float(386)  # here we are defining gravity for in/s^2
    Frame_type='Moment' #here we are defining the type of frame we are analyzing

    eng2=matlab.engine.start_matlab() #start MATLAB engine for Python
    eng2.cd(r'D:\Users\Karen\Documents\MATLAB\RSB\GreenResilienceMATLAB_2') #Here you specify path to folder where m-file is located
    x_disp, y_disp, m_drift_ratios, m_vel_ratios,m_accel, b_SD, b_FA, b_FV, b_RD,Cost = eng2.InitResponseDamageModule(FrameObjNames,units,FilePathResponse,elev,Fj,num_int,T1,hj,g,PGA,Sa_1,Sax,Say,lfm,Frame_type,Soil_Site_class,Sw,weight,nargout=10)
    print("Displacements for All Intensities from SAP")
    print("Displacements in the x:",x_disp)
    print("Displacements in the y:",y_disp)
    print("Actual Displacements and Accelerations (Corrected)")
    print("drifts:",m_drift_ratios)
    print("velocities:",m_vel_ratios)
    print("accelerations:",m_accel)
    print('Dispersions')
    print("B_SD:",b_SD)
    print("B_FA:", b_FA)
    print("B_FV:",b_FV)
    print("B_RD:",b_RD)
    print("Cost:",Cost)
    print("END OF RESPONSE AND DAMAGE MODULES")

    ####################################################################################################################
    #################################BEGINNING OF DAMAGE MODULE#######################################################
    #print("BEGINNING DAMAGE MODULE")
    #If you need to define a string object, simply type in as follows (without the # at the beginning):
    #variable='StringObject'
    #If you need to pass through a scalar:
    #variable=float(scalarnumber)
    #If you need to pass an array in:
    #variable=matlab.double([indice1, indice2, etc])


    #Make our third call to MATLAB from Python:
    #eng3= matlab.engine.start_matlab()  # start MATLAB engine for Python
    #eng3.cd(r'D:\Users\Karen\Documents\MATLAB\RSB')  # Here you specify path to folder where m-file is located
    #Basic setup here is output variables = nameoffunction(input variables, output number)
    #So if you need to add more output variables, update nargout value
    #If you need to add more input variables, just add them
    #the only big thing is to make sure that your input/output matches that in your MATLAB file and vice versa
   # Cost = eng3.InitDamageModule(mean_drift_ratios,mean_accel,B_SD,B_FA,B_FV,B_RD,num_int,nargout=1) #here nargout is simply the amount of outputs you are asking for
    #if you need to print anything just use the print() function. You can also leave variables uncommented in MATLAB and they will show up below
    #print(Cost)





    #print("Working on figuring out how to query semantic graph")
    #levels = graph_data.get_levels(SGA_Based_Graph)  # Just copying MyGraph.ttl from other project for now
    #a = dict()  # this is just here to make sure that we are storing values so that we can filter through our data for when we are querying elevations
    #elevations = list()
    #for i in levels:
        #print  i, len(levels[i]), levels[i]  # if we print a, this will give us the full graph for level data


    print "Main Finished"
Code Example #8
from curves import Curves
from plot import PlotGraph
import math

curve = Curves()
plot = PlotGraph()

x, y = curve.Parabola(angle=0, shift_x=0, shift_y=0)
plot.Plot2DGraph(x, y, 'angle=0, shift_x=0, shift_y=0')

x, y = curve.Parabola(angle=45, shift_x=0, shift_y=0)
plot.Plot2DGraph(x, y, 'angle=45, shift_x=0, shift_y=0')

x, y = curve.Parabola(angle=90, shift_x=0, shift_y=0)
plot.Plot2DGraph(x, y, 'angle=90, shift_x=0, shift_y=0')
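
Curves.Parabola itself is not shown; a minimal sketch of one way such a rotated and shifted parabola could be generated (the base curve y = x**2 and the assumption that angle is in degrees are mine, not the project's):

import numpy as np

def parabola(angle=0.0, shift_x=0.0, shift_y=0.0, samples=1000):
    """Rotate the base parabola y = x**2 by `angle` degrees, then translate."""
    t = np.linspace(-3.0, 3.0, samples)
    x0, y0 = t, t ** 2
    phi = np.deg2rad(angle)
    x = x0 * np.cos(phi) - y0 * np.sin(phi) + shift_x
    y = x0 * np.sin(phi) + y0 * np.cos(phi) + shift_y
    return x, y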
Code Example #9
File: whitelinedetecton.py (project: adamvlang/AD17)
                      (480, 0)]  # for 640x360

p = {
    'sat_thresh': 120,
    'light_thresh': 40,
    'light_thresh_agr': 205,
    'grad_thresh': (0.7, 1.4),
    'mag_thresh': 40,
    'x_thresh': 20
}

birdsEye = BirdsEye(source_points, destination_points, matrix, distortion_coef)
laneFilter = LaneFilter(p)
curves = Curves(number_of_windows=1,
                margin=100,
                minimum_pixels=50,
                ym_per_pix=30.0 / 720,
                xm_per_pix=3.7 / 700)

bridge = CvBridge()

# ROS Publisher
pub_image = rospy.Publisher('/lane_image', Image, queue_size=1)
pub_values = rospy.Publisher('/lane_values', String, queue_size=1)
pub_sky_view = rospy.Publisher('/sky_view', Image, queue_size=1)

import time


def timeit(method):
    def timed(*args, **kw):
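
The snippet is cut off inside the decorator; a conventional completion of such a timing decorator looks roughly like this (a sketch, not necessarily the project's version):

import time
from functools import wraps

def timeit(method):
    @wraps(method)
    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        # Report elapsed wall-clock time in milliseconds.
        print('%s took %.1f ms' % (method.__name__, (time.time() - start) * 1000.0))
        return result
    return timed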
Code Example #10
from curves import Curves
from plot import PlotGraph
import math

curve = Curves()
plot = PlotGraph()

x, y = curve.Lissajous(a=3, b=3, c=0, n=1)  # n=1, c=0: straight line
plot.Plot2DGraph(x, y, 'a=3,b=3,c=0,n=1')

x, y = curve.Lissajous(a=3, b=3, c=math.pi / 2, n=1)  # n=1, c=pi/2: ellipse
plot.Plot2DGraph(x, y, 'a=3,b=3,c=math.pi/2,n=1')

x, y = curve.Lissajous(a=3, b=3, c=math.pi / 2, n=2)  # n=2, c=pi/2: parabola
plot.Plot2DGraph(x, y, 'a=3,b=3,c=math.pi/2,n=2')

x, y = curve.Lissajous(
    a=3, b=2, c=3,
    n=3)  # b affect the boundary of y, e.g. b = 2, y = range(-2,2)
plot.Plot2DGraph(x, y, 'a=3,b=2,c=3,n=3')

x, y = curve.Lissajous(a=3, b=3, c=5, n=3)  # c affect number of ellipse
plot.Plot2DGraph(x, y, 'a=3,b=3,c=5,n=3')
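
The behaviour described in the comments (n=1, c=0 gives a line; n=1, c=pi/2 an ellipse; n=2, c=pi/2 a parabola) matches the standard parametrisation x = a*sin(n*t + c), y = b*sin(t). A self-contained sketch under that assumption, since the real Curves.Lissajous is not shown here:

import numpy as np

def lissajous(a=3.0, b=3.0, c=0.0, n=1.0, samples=2000):
    """Lissajous figure: x = a*sin(n*t + c), y = b*sin(t) for t in [0, 2*pi]."""
    t = np.linspace(0.0, 2.0 * np.pi, samples)
    return a * np.sin(n * t + c), b * np.sin(t)

x, y = lissajous(a=3, b=3, c=np.pi / 2, n=1)  # ellipse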
Code Example #11
def main(argv, other_stuff=0):

    print "Orchestration Main Started"

    # Take USGS data sets (3 curves) for a location and get back........................................................
    basedir = "C:\\Users\\holly\\Desktop\\USGS\\nshmp-haz-master\\curves_east_donotmodify"
    models = ["PGA","SA0P2","SA1P0"] #different types of hazard models used
    #read all files
    cityhazardfunctions = ReadHazardData(models,basedir) # nested dict. {city: {model: (X,Y) }}
    #print(json.dumps(cityhazardfunctions,indent=4)) #print to debug if something goes horrendously wrong
    curv = Curves()
    city_splines = curv.querycurves(cityhazardfunctions,savefigs=True)
    # To get the y values for a given list of x's, set these values
    # So, you will set the location from the curve sets we have already generated
    # (for the locations we have, either see the folders here or see the USGS files and sitesE.geojson and sitesW.geojson)
    # The models we have access to set (so the middle term here) without further editing the USGS tool are on line 51 above
    demo_x = [0.42] #[0.1, 0.2, 0.3, 0.4, 0.5] # So this would be some set of x values you want the corresponding y values for
    print(city_splines["Chicago IL"]["PGA"](demo_x))


    # Construct Semantic Graph..........................................................................................
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    # Note, in script, will need to reset the location of the stored file to be findable by these next lines
    #inputfileIFCXML = Call API script using IronPython
    inputfileIFCXML = 'C:/Users/hfergus2/Desktop/Orchestration/TempXMLs/RC_FRAME.ifcxml'
    outputpath='output.csv'
    material_flag = 0
    level_flag = 0
    structure_flag = 0
    puncture_flag = 0
    test_query_sequence_flag = 0
    SemanticGraph_InitialRun = 0
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    #geo_link = Geo_Link()
    #geo_link.inputfile = os.path.abspath(inputfileIFCXML)
    #geo_link.material_flag = material_flag
    #geo_link.level_flag = level_flag
    #geo_link.structure_flag = structure_flag
    #geo_link.puncture_flag = puncture_flag
    #geo_link.test_query_sequence_flag = test_query_sequence_flag
    #geo_link.run()
    # Alternatively, a method like this may work, but will need some tweaking as this is done separately at this point
    mylist_of_parameters = [str(inputfileIFCXML) + " " + str(outputpath) + " " + str(material_flag) + " " + str(level_flag) + " " + str(structure_flag) + " " + str(puncture_flag) + " " + str(test_query_sequence_flag)]
    subprocess.call(["python", "C:/Users/hfergus2/Desktop/GeoLinked/GeoLmain.py", str(inputfileIFCXML), str(outputpath), str(material_flag), str(level_flag), str(structure_flag), str(puncture_flag), str(test_query_sequence_flag) ])
    #subprocess.call(["python", "C:/Users/hfergus2/Desktop/GeoLinked/GeoLmain.py", "--args", str(inputfileIFCXML), str(outputpath), str(material_flag), str(level_flag), str(structure_flag), str(puncture_flag), str(test_query_sequence_flag) ])
    #USO_new = USOmain(inputfileIFCXML, outputpath, material_flag, level_flag, structure_flag, puncture_flag, test_query_sequence_flag)
    print "Storing Graph"
    #store it somewhere...currently we are saving it and accessing it from here: "C:/Users/holly/Desktop/GeoLinked/FinalGraph/MyGraph.ttl"


    # Query Semantic Graph..............................................................................
    # Now we want to get data from my graph
    # NOTE: more queries will probably have to be written.
    # If you go to this path where the graph serialization was stored, currently left in the single room model at the time of this code
    # Then you can see the triples that were able to be pulled out of the GeoLinked project:
    #           "C:/Users/holly/Desktop/GeoLinked/FinalGraph/MyGraph.ttl"
    # If you run other models, they will replace this file above, but if you need multiple runs,
    # then a versioning system will have to be added to the processing, probably back in the GeoLinked Project or running GeoLinked from here
    # For now, this is the process of pulling levels and spaces from the models with SPARQL queries:
    outputfile = 'C:/Users/holly/Desktop/GeoLinked/FinalGraph/MyGraph.ttl'  # From the top folder and in FinalGraph
    SGA_Based_Graph = Graph()
    SGA_Based_Graph = SGA_Based_Graph.parse(outputfile, format="turtle")
    #SGA_Based_Graph.serialize(destination=outputfile, format='turtle')
    graph_data = GraphData()
    # I have added a few examples of how you might collect a certain type of data from the graph
    # You will need to add more queries that retrieve and format the information as you see fit per the project needs

    # If uncommented, will print all data in graph so you can learn the structure and what you can and cannot ask it for
    print "Running All Data Example Query"
    graph_data.get_all_data(SGA_Based_Graph)

    # If uncommented, will return levels in the building and their heights as a dict: [spaceBoundary: (list of data)]
    print "Running Levels Example Query"
    levels = graph_data.get_levels(SGA_Based_Graph)  # Just copying MyGraph.ttl from other project for now
    for i in levels:
        print i, len(levels[i]), levels[i]

    # If uncommented, will return spaces in their respective building if multi-building: [space_collection: (list of spaces)]
    print "Running Spaces Example Query"
    spaces1 = graph_data.get_spaces(SGA_Based_Graph)  # Just copying MyGraph.ttl from other project for now
    for i in spaces1:
        print i, len(spaces1[i]), spaces1[i]

    # Call Green Scale..................................................................................................
    # Running the GS Tool (it has been updated to 2016 Revit) will need to be added as this project progresses
    GreenScale_InitialRun = 0 # Change flag once first run is complete
    # Currently using locally stored files, will need to add this API automation from my scripts from Drive
    # Note, in script, will need to reset the location of the stored file to be findable by these next lines
    #inputfileGBXML = Call API script using IronPython
    inputfileGBXML = 'C:/Users/hfergus2/Desktop/Orchestration/TempXMLs/RC_FRAME_2016.xml'
    #Call GS Code (will run Thermal and EE), will want to store results plus return a dictionary of EE values


    # Query for pre-analysis Matlab Module..............................................................................
    # Call Matlab Modules as needed, example call works like this using the import pymatlab library:
    # Example is from http://compgroups.net/comp.lang.python/calling-a-matlab-gui-from-python-using-pymat/1687289
    #mlabsession = MatlabSession()
    #mlabsession.run('cd ~/path_to_project')
    #mlabsession.run('addpath(genpath(pwd))')
    #mlabsession.run('run path-to-.m-script')

    print "Main Finished"
Code Example #12
from curves import Curves
from plot import PlotGraph
import math

curve = Curves()
plot = PlotGraph()

x, y = curve.Epitrochoid(a=10, b=2, c=5)
plot.Plot2DGraph(x, y, 'a=10,b=2,c=5')
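
For reference, a standard epitrochoid parametrisation with parameters named like the call above (whether Curves.Epitrochoid maps a, b, c to exactly these roles is an assumption):

import numpy as np

def epitrochoid(a=10.0, b=2.0, c=5.0, samples=4000):
    """x = (a+b)*cos(t) - c*cos((a+b)/b*t), y = (a+b)*sin(t) - c*sin((a+b)/b*t)."""
    t = np.linspace(0.0, 2.0 * np.pi, samples)
    k = (a + b) / b
    return ((a + b) * np.cos(t) - c * np.cos(k * t),
            (a + b) * np.sin(t) - c * np.sin(k * t))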
Code Example #13
from curves import Curves
from plot import PlotGraph
import math

curve = Curves()
plot = PlotGraph()
x, y = curve.Cardioid(a=-0.7)
plot.Plot2DGraph(x, y, 'a=-0.7')
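
And a reference parametrisation for a cardioid scaled by a (a negative a simply flips the curve); again an illustrative sketch, not the actual Curves.Cardioid code:

import numpy as np

def cardioid(a=-0.7, samples=2000):
    """Polar cardioid r = a*(1 + cos(t)), returned as Cartesian x, y."""
    t = np.linspace(0.0, 2.0 * np.pi, samples)
    r = a * (1.0 + np.cos(t))
    return r * np.cos(t), r * np.sin(t)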
Code Example #14
class Scene:
    DRAW2D, DRAW3D = range(2)
    NONE, CURSOR, TRANSLATE, SCALE, ISOSCALE, ROTATE, CAMERA = range(7)
    PNTBZADD, PNTBSADD, PNTDEL, PNTEDIT = range(4)
    C0, C1, C2 = Curves.C0, Curves.C1, Curves.C2

    def __init__(self, fov, ratio, near):
        self.fov = fov
        self.near = near
        self.ratio = ratio
        self.drawmode = Scene.DRAW2D
        self.mousemode = Scene.NONE
        self.cursormode = Scene.PNTBZADD

        self.pdist = 0.025
        self.pdist2 = self.pdist * self.pdist

        self.root = Node()
        self.root3d = Node()

        self.camera = Camera()
        self.proj = Projection()

        self.curves = Curves()

        #
        # create planes
        #

        self.load_from_file(u'../data/młotek.gpt')
        #        self.load_from_file(u'../data/głowica.gpt')
        #        self.load_from_file(u'../data/cut_test_10.gpt')

        #
        # Create torus
        #
        self.torus = Torus()

        self.node = Node()

        tn = Node(self.torus)
        tn.rotate(3.1415926 / 2.0, 1, 0, 0)

        self.cursor = Cursor(Cross(self.pdist))

        self.node.add_child(tn)
        self.node.add_child(self.cursor)
        self.node.add_child(self.curves)

        #
        # Create normal scene
        #
        self.proj.perspective(self.fov, self.ratio, self.near, 10000)
        self.camera.lookat(*STARTLOOK)
        col = Node(color=(1, 1, 1))

        self.root.add_child(self.proj)
        self.proj.add_child(self.camera)
        self.camera.add_child(col)
        col.add_child(self.node)

        #
        # Create 3d scene
        #
        self.cam_left = Camera()
        self.cam_right = Camera()
        self.p_left = Projection()
        self.p_right = Projection()
        self.t_left = Node()
        self.t_right = Node()

        self.root3d.add_child(self.t_left)
        self.root3d.add_child(self.t_right)

        self.cam_left.lookat(*STARTLOOK)
        self.cam_right.lookat(*STARTLOOK)

        self.p_left.perspective(self.fov, self.ratio, self.near, 10000)
        self.p_right.perspective(self.fov, self.ratio, self.near, 10000)

        self.color_left = Node(color=(1, 0, 0))
        self.color_right = Node(color=(0, 1, 0))

        self.t_left.add_child(self.p_left)
        self.t_right.add_child(self.p_right)

        self.p_left.add_child(self.cam_left)
        self.p_right.add_child(self.cam_right)

        self.cam_left.add_child(self.color_left)
        self.cam_right.add_child(self.color_right)

        self.color_left.add_child(self.node)
        self.color_right.add_child(self.node)

        self.node.translate(0, 0, -2)

    def clear(self):
        self.curves.clear()

    def gfx_init(self):
        glPointSize(3)

    def draw(self):
        root = None

        if self.drawmode == Scene.DRAW2D:
            root = self.root

            glDisable(GL_BLEND)

        elif self.drawmode == Scene.DRAW3D:
            root = self.root3d

            glEnable(GL_BLEND)
            glBlendFunc(GL_ONE, GL_ONE)

        self._draw(root)

        glDisable(GL_BLEND)

    def _draw(self, node):
        if not node:
            return

        node.multmatrix()
        node.draw()

        m = glGetFloatv(GL_MODELVIEW_MATRIX)

        for c in node:
            glLoadMatrixf(m)
            self._draw(c)

    def set_left_color(self, color):
        self.color_left.set_color(color)

    def set_right_color(self, color):
        self.color_right.set_color(color)

    def set_eyes_split(self, split):
        #        self.cam_left .lookat( (-split,0,0) , (-split,0,-1) , (0,1,0) )
        #        self.cam_right.lookat( ( split,0,0) , ( split,0,-1) , (0,1,0) )
        self.cam_left.move(-split, 0, 0)
        self.cam_right.move(split, 0, 0)
        self.p_left.loadIdentity()
        self.p_right.loadIdentity()
        self.p_left.translate(-split, 0, 0)
        self.p_right.translate(split, 0, 0)

    def _update_proj(self):
        self.proj.perspective(self.fov, self.ratio, self.near, 10000)
        self.p_left.perspective(self.fov, self.ratio, self.near, 10000)
        self.p_right.perspective(self.fov, self.ratio, self.near, 10000)

    def set_fov(self, fov):
        self.fov = fov
        self._update_proj()

    def set_near(self, near):
        self.near = near
        self._update_proj()

    def set_ratio(self, ratio):
        self.ratio = ratio
        self._update_proj()

    def set_screen_size(self, w, h):
        self.width = w
        self.height = h
        self.curves.set_screen_size(w, h)

    def set_drawmode(self, mode):
        self.drawmode = mode

    def set_mousemode(self, mode):
        self.mousemode = mode

    def set_cursormode(self, mode):
        self.cursormode = mode

    def set_editmode(self, mode):
        self.curves.set_editmode(mode)

    def set_lookat(self, pos, look):
        pos = np.array(pos)
        look = np.array(look)
        up = np.cross(look, (1, 0, 0))
        if up[0] == 0 and up[1] == 0 and up[2] == 0:
            up = np.cross(look, (0, 0, 1))
        if up[0] == 0 and up[1] == 0 and up[2] == 0:
            up = np.cross(look, (0, 1, 0))
        up = up / np.linalg.norm(up)
        self.camera.lookat(pos, pos + look, up)
        self.cam_left.lookat(pos, pos + look, up)
        self.cam_right.lookat(pos, pos + look, up)

    def get_cursor_pos(self):
        return self.cursor.get_pos()

    def get_cursor_screen_pos(self):
        cp = self.cursor.get_clipping_pos()
        return ((cp[0] + 1.0) / 2.0 * self.width,
                (cp[1] + 1.0) / 2.0 * self.height)

    def mouse_move(self, rdf, df, a1, a2):

        if self.mousemode == Scene.CURSOR:
            v = self.cursor.move_vec(df)
            self.curves.point_move(v)

        elif self.mousemode == Scene.TRANSLATE:
            self.node.translate(*map(lambda x: x * .01, df))
        elif self.mousemode == Scene.SCALE:
            self.node.scale(*map(lambda x: 1 + x * .01, df))
        elif self.mousemode == Scene.ISOSCALE:
            self.node.scale(*([1 + .01 * reduce(op.add, df)] * 3))
        elif self.mousemode == Scene.ROTATE:
            df.remove(0)
            self.node.rotate(df[0] * .001, *a1)
            self.node.rotate(df[1] * .001, *a2)
        elif self.mousemode == Scene.CAMERA:
            rdf = [x * .1 for x in rdf]
            self.camera.rot(rdf[0], -rdf[1])
            self.cam_left.rot(rdf[0], -rdf[1])
            self.cam_right.rot(rdf[0], -rdf[1])

    def key_pressed(self, df):
        if self.mousemode == Scene.CAMERA:
            self.camera.move(*map(lambda x: x * .05, df))
            self.cam_left.move(*map(lambda x: x * .05, df))
            self.cam_right.move(*map(lambda x: x * .05, df))

    def activate_cursor(self):
        if self.cursormode == Scene.PNTBZADD:
            self.curves.point_new(Curve.BEZIER, self.cursor.get_pos())
        elif self.cursormode == Scene.PNTBSADD:
            self.curves.point_new(Curve.BSPLINE, self.cursor.get_pos())
        elif self.cursormode == Scene.PNTDEL:
            self.curves.point_delete(self.cursor.get_clipping_pos(),
                                     self.pdist2)
        elif self.cursormode == Scene.PNTEDIT:
            self.curves.point_select(self.cursor.get_clipping_pos(),
                                     self.pdist2)

    def new_curve_c0(self):
        self.curves.new(self.cursor.get_pos(),
                        Curves.BEZIER_C0,
                        post_data=Curve.BEZIER)

    def new_curve_c2(self):
        if self.cursormode == Scene.PNTBZADD:
            self.curves.new(self.cursor.get_pos(),
                            Curves.BEZIER_C2,
                            post_data=Curve.BEZIER)
        elif self.cursormode == Scene.PNTBSADD:
            self.curves.new(self.cursor.get_pos(),
                            Curves.BEZIER_C2,
                            post_data=Curve.BSPLINE)

    def new_curve_interpolation(self):
        self.curves.new(self.cursor.get_pos(), Curves.INTERPOLATION)

    def new_surface_c0(self, size):
        self.curves.new(self.cursor.get_pos(),
                        Curves.SURFACE_C0,
                        pre_data=size)

    def new_surface_c2(self, size):
        self.curves.new(self.cursor.get_pos(),
                        Curves.SURFACE_C2,
                        pre_data=size)

    def new_pipe(self, size):
        self.curves.new(self.cursor.get_pos(),
                        Curves.SURFACE_PIPE,
                        pre_data=size)

    def new_gregory(self, size):
        self.curves.new(self.cursor.get_pos(),
                        Curves.SURFACE_GREGORY,
                        pre_data=size)

    def delete_curve(self):
        self.curves.delete(self.cursor.get_clipping_pos(), self.pdist2)

    def select_curve(self):
        self.curves.select(self.cursor.get_clipping_pos(), self.pdist2)

    def toggle_curve(self, which, what):
        self.curves.toggle(which, what)

    def fill_gap(self, c):
        self.curves.fill_gap(c)

    def cut_current(self, pos, delta):
        #        return self.curves.cut( self.cursor.get_pos() , delta )
        return self.curves.cut(pos, delta)

    def select_to_cut(self):
        self.curves.select_to_cut(self.cursor.get_clipping_pos(), self.pdist2)

    def clear_cut(self):
        self.curves.clear_cut()

    def cut_select(self, i, k):
        self.curves.cut_select(i, k)

    def set_surf_density(self, dens):
        self.curves.set_surf_density(dens)

    def load_from_file(self, path):
        self.curves.load(path)

    def dump_to_file(self, path):
        self.curves.dump(path)

    def gen_paths(self):
        self.clear()

        self.load_from_file(u'../data/młotek.gpt')
        for c in self.curves:
            self.curves.cutter.add(c)
        print 'cut'
        self.curves.cut((0, 0, 0, 0), 0.01)
        print 'mil'
        self.miller = milling_paths.Miller(self.curves)
        self.node.add_child(self.miller)

    def dump_sign(self):
        curv = self.curves.selected
        if curv == None: return
        path = []
        for u in np.linspace(0, len(curv) - 1, len(curv) * 16):
            path.append(curv.get_ptn(u))
        trans = np.resize(milling_paths.TRANS, 4)
        trans[3] = 0
        scale = milling_paths.SCALE
        proc = lambda p: (p + trans) * scale
        saver.save(-1, '05_sign.k4', path, pre=proc)
Code Example #15
model_1 = apex.currentModel()

# functions

# Lists of points for all curves
ptList_p = []

n_of_curve = 21

for i in range(0, n_of_curve):
    ptList_p.append([])

# To be reworked:
Curves(ap(C.range_a, C.slow_step, odstep_slupa),
       b_p(C.range_b, C.slow_step, odstep_slupa), C.v, C.pr_height - 2.1,
       ptList_p[0])
# //
Curves(a(C.range_a, C.slow_step, C.plinth),
       b(C.range_b, C.slow_step, C.plinth), C.v, C.ibeam_height, ptList_p[1])
Curves(a(C.range_a, C.slow_step, C.spacing + C.plinth),
       b(C.range_b, C.slow_step, C.spacing + C.plinth), C.v, C.ibeam_height,
       ptList_p[2])
Curves(a(C.range_a, C.slow_step, 2.0 * C.spacing + C.plinth),
       b(C.range_b, C.slow_step, 2.0 * C.spacing + C.plinth), C.v,
       C.ibeam_height, ptList_p[3])
Curves(a(C.range_a, C.slow_step, 3.0 * C.spacing + C.plinth),
       b(C.range_b, C.slow_step, 3.0 * C.spacing + C.plinth), C.v,
       C.ibeam_height, ptList_p[4])
Curves(a(C.range_a, C.slow_step, C.beam_distance + C.plinth),
       b(C.range_b, C.slow_step, C.beam_distance + C.plinth), C.v,
Code Example #16
    def __init__(self, fov, ratio, near):
        self.fov = fov
        self.near = near
        self.ratio = ratio
        self.drawmode = Scene.DRAW2D
        self.mousemode = Scene.NONE
        self.cursormode = Scene.PNTBZADD

        self.pdist = 0.025
        self.pdist2 = self.pdist * self.pdist

        self.root = Node()
        self.root3d = Node()

        self.camera = Camera()
        self.proj = Projection()

        self.curves = Curves()

        #
        # create planes
        #

        self.load_from_file(u'../data/młotek.gpt')
        #        self.load_from_file(u'../data/głowica.gpt')
        #        self.load_from_file(u'../data/cut_test_10.gpt')

        #
        # Create torus
        #
        self.torus = Torus()

        self.node = Node()

        tn = Node(self.torus)
        tn.rotate(3.1415926 / 2.0, 1, 0, 0)

        self.cursor = Cursor(Cross(self.pdist))

        self.node.add_child(tn)
        self.node.add_child(self.cursor)
        self.node.add_child(self.curves)

        #
        # Create normal scene
        #
        self.proj.perspective(self.fov, self.ratio, self.near, 10000)
        self.camera.lookat(*STARTLOOK)
        col = Node(color=(1, 1, 1))

        self.root.add_child(self.proj)
        self.proj.add_child(self.camera)
        self.camera.add_child(col)
        col.add_child(self.node)

        #
        # Create 3d scene
        #
        self.cam_left = Camera()
        self.cam_right = Camera()
        self.p_left = Projection()
        self.p_right = Projection()
        self.t_left = Node()
        self.t_right = Node()

        self.root3d.add_child(self.t_left)
        self.root3d.add_child(self.t_right)

        self.cam_left.lookat(*STARTLOOK)
        self.cam_right.lookat(*STARTLOOK)

        self.p_left.perspective(self.fov, self.ratio, self.near, 10000)
        self.p_right.perspective(self.fov, self.ratio, self.near, 10000)

        self.color_left = Node(color=(1, 0, 0))
        self.color_right = Node(color=(0, 1, 0))

        self.t_left.add_child(self.p_left)
        self.t_right.add_child(self.p_right)

        self.p_left.add_child(self.cam_left)
        self.p_right.add_child(self.cam_right)

        self.cam_left.add_child(self.color_left)
        self.cam_right.add_child(self.color_right)

        self.color_left.add_child(self.node)
        self.color_right.add_child(self.node)

        self.node.translate(0, 0, -2)
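
The constructor above assembles everything as a scene graph: a Projection node owns a Camera, the camera owns a colour node, and the same self.node subtree is reused under both the normal scene and the left/right stereo cameras. A minimal sketch of that shared-subtree pattern, using a hypothetical SketchNode class rather than the project's Node:

class SketchNode:
    def __init__(self, payload=None):
        self.payload = payload
        self.children = []

    def add_child(self, node):
        self.children.append(node)

    def draw(self, depth=0):
        # Depth-first walk of the graph; a real renderer would push/pop transforms here.
        if self.payload is not None:
            print('  ' * depth + str(self.payload))
        for child in self.children:
            child.draw(depth + 1)

root = SketchNode('root')
shared = SketchNode('shared geometry')
left, right = SketchNode('left eye'), SketchNode('right eye')
root.add_child(left)
root.add_child(right)
left.add_child(shared)   # the same subtree hangs under both cameras,
right.add_child(shared)  # just as self.node does in the constructor above
root.draw()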
コード例 #17
0
ファイル: tracing.py プロジェクト: Yennda/ray-tracing
class SetUp:
    def __init__(self, source, crystal: Crystal, detector: Detector):
        for s in source:
            s.reset()
        self.source = source
        self.crystal = crystal
        self.detector = detector
        self.curveSi = Curves()

        self.t = True

        for s in self.source:
            s.direction = la.normalize(la.minus(crystal.loc, s.loc))

            alpha = la.cos([
                0, self.crystal.loc[1] - s.loc[1],
                self.crystal.loc[2] - s.loc[2]
            ], [0, 0, 1])
            beta = la.cos([
                self.crystal.loc[0] - s.loc[0], 0,
                self.crystal.loc[2] - s.loc[2]
            ], [0, 0, 1])

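            # Approximate solid angle of the crystal disc as seen from the source:
            # its projected area (pi * D^2 / 4, foreshortened by the two direction
            # cosines alpha and beta) divided by the squared source-crystal distance.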
            s.solid_angle = (m.pi * self.crystal.D**2 / 4 * alpha *
                             beta) / (la.norm(la.minus(crystal.loc, s.loc))**2)

    def shine_random(self, source: Source):
        for i in range(source.number):
            done = False

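            # Redraw random directions toward the crystal until one both hits the
            # crystal face and is reflected with nonzero rocking-curve intensity.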
            while not done:
                s = la.plus(la.minus(self.crystal.loc, source.loc), [
                    self.crystal.D * (0.5 - random.random()) for j in range(2)
                ] + [0])
                done = self.reflection_crystal(la.normalize(s), source)

        source.intensity_per_photon = source.photon_count * (
            source.solid_angle / (4 * m.pi)) / source.photons_total
        # print('ipp {}'.format(source.intensity_per_photon))

    def reflection_crystal(self, s, source):
        cp_loc = la.x(
            s,
            tl.qroot(
                a=1,
                b=-2 * la.dot(s, la.minus(self.crystal.centre, source.loc)),
                c=la.norm(self.crystal.centre)**2 + la.norm(source.loc)**2 -
                2 * la.dot(self.crystal.centre, source.loc) -
                self.crystal.r**2))
        cp_loc = la.plus(cp_loc, source.loc)

        if la.norm(la.minus(cp_loc,
                            self.crystal.loc)[:2]) < self.crystal.D / 2:
            normal = la.normalize(la.minus(self.crystal.centre, cp_loc))
            source.photons_total += 1

            if not self.reflection_point(cp_loc, normal, s):
                return False

            return True
        return False

    def reflection_point(self, loc, n, ray: list):
        out_intensity = self.curveSi.curve(
            self.crystal.rc, m.pi / 2 - m.acos(la.cos(ray, la.i(n))))

        if out_intensity != 0:
            self.crystal.points.append(loc)
            ray = la.x(ray, 1 / la.dot(ray, n))
            o = [+ray[i] + 2 * (n[i] - ray[i]) for i in range(3)]
            r = np.array(la.minus(self.detector.loc, loc))
            # if self.t:
            #     print(la.norm(n))
            #     tl.vec_show([ray, n, o])
            #     print(la.cos(ray,n))
            #     print(la.cos(o,n))
            #     self.t = False
            coeff = np.linalg.solve(
                np.array([la.normalize(o), self.detector.ux,
                          self.detector.uy]).T, r)

            i = int(coeff[1] // self.detector.res + self.detector.nx / 2)
            j = int(coeff[2] // self.detector.res + self.detector.ny / 2)

            if 0 <= i < self.detector.nx and 0 <= j < self.detector.ny:
                self.detector.detected_intensity.append(out_intensity)
                self.detector.detected_position.append([i, j])
            return True
        return False

    def pre_shine(self, source: Source):
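        # Cheap rejection test: estimate the grazing angles at four opposite edge
        # points of the crystal and shine this source only if the Bragg angle of
        # the chosen rocking curve lies between one of the opposing pairs.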
        angles = [
            la.cos(
                la.plus([self.crystal.D / 2, 0, self.crystal.r],
                        la.minus(self.crystal.centre, source.loc)),
                [self.crystal.D / 2, 0, self.crystal.r]),
            la.cos(
                la.plus([-self.crystal.D / 2, 0, self.crystal.r],
                        la.minus(self.crystal.centre, source.loc)),
                [-self.crystal.D / 2, 0, self.crystal.r]),
            la.cos(
                la.plus([0, self.crystal.D / 2, self.crystal.r],
                        la.minus(self.crystal.centre, source.loc)),
                [0, self.crystal.D / 2, self.crystal.r]),
            la.cos(
                la.plus([0, -self.crystal.D / 2, self.crystal.r],
                        la.minus(self.crystal.centre, source.loc)),
                [0, -self.crystal.D / 2, self.crystal.r])
        ]

        angles = [m.pi / 2 - m.acos(a) for a in angles]
        # print([tl.deg_from_rad(a) for a in angles])
        if angles[0] < self.curveSi.bragg[
                self.crystal.rc] < angles[1] or angles[0] > self.curveSi.bragg[
                    self.crystal.rc] > angles[1]:
            return True
        if angles[2] < self.curveSi.bragg[
                self.crystal.rc] < angles[3] or angles[2] > self.curveSi.bragg[
                    self.crystal.rc] > angles[3]:
            return True
        return False

    def shine_matrix(self, source: Source):
        # angles = list()
        # for i in range(100):
        #     radius = tl.rotate([self.crystal.D / 2, 0, 0], [0, 0, 2 * m.pi * i / 100])
        #
        #     ray = la.plus(la.minus(self.crystal.loc, source.loc), radius)
        #     normal = la.plus([0, 0, self.crystal.r], radius)
        #     # print(la.cos(ray, normal))
        #     angles.append([i, m.pi / 2 - m.acos(la.cos(ray, normal))])
        # angles_bragg = [a[0] for a in angles if self.curveSi.bragg_lim[0] < a[1] < self.curveSi.bragg_lim[1]]
        # limits = []
        # for i in range(len(angles_bragg) - 1):
        #     if (angles_bragg[i + 1] - angles_bragg[i]) > 1:
        #         limits.append(angles_bragg[i])
        # if len(limits) == 1:
        #     lim_x = [m.cos(angles_bragg[0] * m.pi / 100), m.cos(limits[0] * m.pi / 100)]
        #     lim_y= [m.cos(angles_bragg[0] * m.pi / 100), m.cos(limits[0] * m.pi / 100)]

        # print(self.curveSi.bragg)
        # print(angles_bragg)

        num = source.number_list
        for i in range(num[0]):
            for j in range(num[1]):
                vec = [
                    self.crystal.D * (0.5 - i / num[0]),
                    self.crystal.D * (0.5 - j / num[1]), 0
                ]
                s = la.plus(self.crystal.loc, vec)

                if la.cos(source.direction, s) < m.cos(source.max_angle):
                    continue
                self.reflection_crystal(la.normalize(s), source)

        source.intensity_per_photon = source.photon_count * source.solid_angle * source.number / (
            4 * m.pi * source.photons_total * source.number)

    def mesh_to_image(self, source: Source):
        pos = self.detector.detected_position
        inte = self.detector.detected_intensity
        for i in range(len(inte)):
            self.detector.image[
                pos[i][0], pos[i][1]] += inte[i] * source.intensity_per_photon
        pos.clear()
        inte.clear()

    def graph(self):
        f = open('name.txt', 'r+')
        number = int(f.read())
        f.close()

        pilimage = Image.fromarray(self.detector.image.T)
        pilimage.save('images/{}.tiff'.format(number))
        # misc.imsave('images/{}.png'.format(number), self.detector.image.T)

        text_info = '''
###################################
{}.txt
---------------------------------
c = Crystal(d={}, D={}, r={}, loc={})
d = Detector(dim={}, loc={}, res={})


        '''.format(
            number,
            self.crystal.d,
            self.crystal.D,
            self.crystal.r,
            self.crystal.loc,
            self.detector.dim,
            self.detector.loc,
            self.detector.res * 1e4,
        )
        text_info += '''
Source(loc={}, wavelength={}, intensity={}, number={})
        
        '''.format(self.source[0].loc, self.source[0].wl,
                   self.source[0].photon_count, self.source[0].number)
        text_info += '''
---------------------------------
Detector intensity: {}
Photon fraction on detector: {}
Photons on crystal: {}
Photons reflected: {}
        '''.format(
            self.detector.integral,
            self.detector.integral / sum([s.photon_count
                                          for s in self.source]),
            sum([s.photons_reached for s in self.source]),
            self.crystal.count_reflected)

        info = open('info.txt', 'a')
        info.write(text_info)
        info.close()

        f = open('name.txt', 'w')
        f.write(str(number + 1))
        f.close()

    def statistics(self):
        print('--------------------\nStatistics')
        integral = self.detector.integral
        print('Intensity on detector: {}'.format(integral))
        print('Photon fraction on detector: {}'.format(
            integral / sum([s.photon_count for s in self.source])))

    def work(self):
        t = time.time()

        for s in self.source:
            if self.source.index(s) != 0:
                print('{}/{}, {}s'.format(
                    self.source.index(s), len(self.source),
                    int((time.time() - t) *
                        (len(self.source) / self.source.index(s) - 1))))
            if not self.pre_shine(s):
                print('no')
                continue
            self.shine_random(s)
            self.mesh_to_image(s)

        print('Elapsed time: {}s'.format(time.time() - t))

        self.graph()
        self.statistics()
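
In reflection_point above, the landing pixel is found by decomposing the vector from the reflection point to the detector centre into a component along the outgoing ray and components along the detector's in-plane axes, then binning those in-plane components by the pixel size. A self-contained sketch of that step with made-up numbers; o, ux, uy, r, res, nx and ny are hypothetical stand-ins for the Detector fields used above:

import numpy as np

o = np.array([0.0, 0.0, 1.0])    # normalized outgoing ray direction
ux = np.array([1.0, 0.0, 0.0])   # detector in-plane unit axes
uy = np.array([0.0, 1.0, 0.0])
r = np.array([3.25, -1.0, 5.0])  # detector centre minus reflection point

res, nx, ny = 0.5, 100, 100      # pixel size and detector dimensions in pixels

# Solve r = c0*o + c1*ux + c2*uy; c1 and c2 are the in-plane displacements.
coeff = np.linalg.solve(np.array([o, ux, uy]).T, r)
i = int(coeff[1] // res + nx / 2)
j = int(coeff[2] // res + ny / 2)
if 0 <= i < nx and 0 <= j < ny:
    print('hit pixel', i, j)     # prints: hit pixel 56 48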