示例#1
0
    def toolbar_draw_callback(self):
        """Redraw the loaded model onto the canvas.

        Reloads the model file from disk, rebuilds the window-to-viewport
        transform for the current canvas size, and hands the model to the
        world object for rendering with the clip/perspective toggles.
        """
        canvas = self.master.ob_canvas_frame.canvas
        canvas_w = int(canvas.cget("width"))
        canvas_h = int(canvas.cget("height"))

        # Bail out immediately when no mesh has been loaded yet.
        if self.master.m_DataSet is None:
            self.master.statusBar_frame.set('No Model Loaded.')
            return

        # Re-read the model file so on-disk changes are picked up.
        self.master.m_DataSet = DataPopulation.ModelData(
            self.master.m_file_name)
        self.master.statusBar_frame.set('Drawing...done')

        # Window/viewport specification comes from the model itself.
        window = self.master.m_DataSet.getWindow()
        viewport = self.master.m_DataSet.getViewport()

        # Build the window-to-viewport transform and apply it,
        # together with the viewing distance, to the model.
        ax, ay, sx, sy = DataPopulation.constructTransform(
            window, viewport, canvas_w, canvas_h)
        self.master.m_DataSet.specifyTransform(ax, ay, sx, sy,
                                               self.master.m_Distance)

        # Render, honouring the clip and perspective option toggles.
        self.master.ob_world.create_graphic_objects(
            canvas, self.master.m_DataSet,
            self.master.m_clip.get() == 1,
            self.master.m_perspective.get() == 1)
示例#2
0
    def toolbar_draw_callback(self):
        """Draw the globally loaded model onto the canvas.

        Fixes a NameError in the original: ``width``/``height`` were
        never defined in this method, so any draw after a successful
        load would crash.  They are now read from the canvas, matching
        the sibling draw callbacks in this project.
        """
        global DataSet
        # The loader leaves DataSet == 0 until a mesh has been read.
        if DataSet == 0:
            return  #Return at once if a mesh hasn't been loaded.

        # Canvas dimensions drive the window-to-viewport transform.
        width = int(self.master.ob_canvas_frame.canvas.cget("width"))
        height = int(self.master.ob_canvas_frame.canvas.cget("height"))

        self.master.statusBar_frame.set('Drawing...done')
        #Get the Window and Viewport info from the ModelData instance.
        w = DataSet.getWindow()
        v = DataSet.getViewport()
        #Construct the transform using constructTransform()
        (ax, ay, sx,
         sy) = DataPopulation.constructTransform(w, v, width, height)
        #Specify the transform to the ModelData instance.
        DataSet.specifyTransform(ax, ay, sx, sy)

        #Invoke the create_graphic_objects() method, passing the canvas and the ModelData instance.
        self.master.ob_world.create_graphic_objects(
            self.master.ob_canvas_frame.canvas, DataSet)
示例#3
0
 def load_callback( self ) :
   """Ask the user for a model file and, if one was chosen, load it into self.mdl."""
   # askopenfilename returns an empty string when the dialog is dismissed.
   chosen = tk.filedialog.askopenfilename( filetypes = [ ( "allfiles", "*" ) ] )
   if chosen:
     self.mdl = ModelData.ModelData(str(chosen))
     self.master.statusBar_frame.set( 'Load callback' )
   else:
     self.master.statusBar_frame.set( 'This is the canceled/loaded' )
示例#4
0
 def toolbar_load_callback(self):
     """Show a file-open dialog and load the chosen file into the model."""
     self.master.statusBar_frame.set('Load callback')
     # Fresh model instance; populated below only if a file is chosen.
     self.master.model = ModelData.ModelData()
     path = filedialog.askopenfilename(initialdir="/",
                                       title="Select file",
                                       filetypes=(("all files", "*"),
                                                  ("text files", "*.txt")))
     # An empty path means the dialog was cancelled.
     if path:
         self.master.model.loadFile(path)
         self.master.statusBar_frame.set('File Loaded')
     else:
         self.master.statusBar_frame.set('no file selected')
示例#5
0
	def plotResiduals(self, x, **kwargs):
		"""Plot model residuals against predictor *x*.

		Only samples that define every atomic term and the response
		variable contributed to the fit and therefore have a residual;
		other samples are skipped so residual indices stay aligned.
		Extra keyword arguments are forwarded to ModelPlotter.
		"""
		residuals = self.model.getResiduals()
		# Hoist the loop-invariant list of required terms out of the loop.
		required = list(self.model.atomicTerms) + [self.model.y]
		samples = []
		counter = 0
		for sample in self.model.data.samples:
			# A sample contributes only when every fitted term is present.
			if all(sample.get(term) is not None for term in required):
				samples.append({x: sample[x], 'residual': residuals[counter]})
				counter += 1

		data = ModelData(samples)
		plotter = ModelPlotter(data=data, x=x, y='residual', marker='o', connect=False, **kwargs)
		plotter.plot()
		pylab.ylabel('Residual')
		# Suppress the legend for this residual scatter.
		pylab.gca().legend_ = None
		pylab.draw()
示例#6
0
  def toolbar_load_callback( self ) :
    """Ask the user for a mesh file and load it as the current model."""
    # fName is the full path returned by the dialog ('' on cancel).
    fName = tk.filedialog.askopenfilename(filetypes=[("allfiles", "*")])
    if not fName:
      # User dismissed the dialog without choosing a file.
      self.master.statusBar_frame.set("%s","[Load was cancelled]")
    else:
      # Keep only the basename (such as file.txt) for display and loading.
      # NOTE(review): ModelData receives the basename, not the full path,
      # so loading presumably relies on the current directory — confirm.
      self.master.m_file_name = os.path.basename(fName)
      self.master.statusBar_frame.set("%s" + " is Loaded ",self.master.m_file_name )
      # Load the mesh by making (and saving) a ModelData instance.
      self.master.m_DataSet = DataPopulation.ModelData(self.master.m_file_name)
示例#7
0
  def toolbar_draw_callback( self ) :
    """Draw the globally loaded mesh onto the canvas."""
    global DataSet;
    #self.master.statusBar_frame.set( 'Draw callback' )

    # DataSet is a module-level model handle; it stays 0 until a mesh
    # has been loaded.
    if DataSet == 0:
      return #Return at once if a mesh hasn't been loaded.
    else:
      self.master.statusBar_frame.set('Drawing...')
      #Get the Window and Viewport info from the ModelData instance.
      w  = DataSet.getWindow(); v =  DataSet.getViewport()
      #Construct the transform using constructTransform()
      # NOTE(review): width/height are not defined in this method —
      # presumably module-level globals set elsewhere; confirm, or read
      # them from the canvas as the sibling draw callbacks do.
      (ax, ay, sx, sy) = DataPopulation.constructTransform(w, v, width, height)
      #Specify the transform to the ModelData instance.
      DataSet.specifyTransform(ax, ay, sx, sy)
      #print(f' width {width} height {height}')
      #print(f' ax {ax: .3f} ay {ay: .3f} sx {sx: .3f} sy {sy: .3f}')
      #Invoke the create_graphic_objects() method, passing the canvas and the ModelData instance.
      self.master.ob_world.create_graphic_objects(self.master.ob_canvas_frame.canvas, DataSet)
示例#8
0
    def __init__(self, ob_root_window, ob_world=None):
        """Build the main application frame.

        Fixes the mutable-default-argument pitfall of the original
        (``ob_world=[]`` is shared across all calls); ``None`` now
        stands in for "no world supplied" and each instance gets its
        own fresh list, which is backward-compatible.

        Parameters:
            ob_root_window: the Tk root window hosting this frame.
            ob_world: world/render object; callers normally supply one.
        """
        self.ob_root_window = ob_root_window
        # Fresh list per instance instead of a shared mutable default.
        self.ob_world = [] if ob_world is None else ob_world
        # Rendering option flags as Tk variables so widgets can bind them.
        self.m_clip = tk.BooleanVar()
        self.m_clip.set(True)
        self.persp = tk.BooleanVar()
        self.persp.set(False)
        self.euler = tk.BooleanVar()
        self.euler.set(False)

        self.menu = cl_menu(self)
        self.model = ModelData.ModelData()

        self.toolbar = cl_toolbar(self)

        # Status bar pinned to the bottom of the root window.
        self.statusBar_frame = cl_statusBar_frame(self.ob_root_window)
        self.statusBar_frame.pack(side=tk.BOTTOM, fill=tk.X)
        self.statusBar_frame.set('This is the status bar')

        # Canvas frame; register its canvas with the world object.
        # NOTE(review): with the default ob_world (a plain list) the
        # add_canvas call below would fail — callers are expected to
        # pass a real world object; confirm.
        self.ob_canvas_frame = cl_canvas_frame(self)
        self.ob_world.add_canvas(self.ob_canvas_frame.canvas)
  def draw_callback( self ) :
    """Load the selected model (if any) and draw it on the canvas.

    Fixes a NameError in the original: when no file had been chosen,
    ``Tvertex`` and ``faces`` were never bound yet were still passed
    to create_graphic_objects().  Drawing is now skipped in that case
    while the status bar is still updated.
    """
    width = int(self.master.ob_canvas_frame.canvas.cget('width'))
    height = int(self.master.ob_canvas_frame.canvas.cget('height'))
    print(self.master.var.get())

    if len(self.fName) == 0:
      # Nothing has been loaded yet, so there is nothing to draw.
      self.master.statusBar_frame.set( 'Draw callback' )
      return

    model = ModelData.ModelData(self.fName)
    w = model.getWindow()
    v = model.getViewport()
    vNumber = model.getVertices()
    faces = model.getFaces()

    # Window-to-viewport transform for the current canvas size.
    ax, ay, sx, sy = constructTransform.constructTransform( w, v, width, height )
    model.specifyTransform(ax, ay, sx, sy)
    Tvertex = model.getTransformedVertex(vNumber)

    self.master.ob_world.create_graphic_objects( self.master.ob_canvas_frame.canvas,
                                                 Tvertex, faces )
    self.master.statusBar_frame.set( 'Draw callback' )
示例#10
0
 def toolbar_load_callback(self):
     """Open a file dialog and, if a file was chosen, load it as the model."""
     self.master.statusBar_frame.set('Load callback')
     chosen = tk.filedialog.askopenfilename(
         filetypes=[("text files", "*.txt"), ("python files", "*.py")])
     # askopenfilename returns '' when the dialog is cancelled.
     if chosen:
         self.model = modeldata.ModelData(chosen)
示例#11
0
    def toolbar_draw_callback(self):
        """Reload the model, rebuild its transforms (including Euler
        rotation) for the current canvas size, print diagnostics, and
        render it with the clip/perspective/euler option toggles.
        """
        #self.master.statusBar_frame.set( 'Draw callback' )
        # Canvas dimensions drive the window-to-viewport transform.
        width = int(self.master.ob_canvas_frame.canvas.cget("width"))
        height = int(self.master.ob_canvas_frame.canvas.cget("height"))

        if self.master.m_DataSet is None:
            self.master.statusBar_frame.set('No Model Loaded.')
            return  #Return at once if a mesh hasn't been loaded.

        else:
            #reload the file again (picks up on-disk changes)
            self.master.m_DataSet = DataPopulation.ModelData(
                self.master.m_file_name)
            self.master.statusBar_frame.set('Drawing...done')
            #Get the Window and Viewport info from the ModelData instance.
            w = self.master.m_DataSet.getWindow()
            v = self.master.m_DataSet.getViewport()

            #Construct the transform using constructTransform()
            (ax, ay, sx,
             sy) = DataPopulation.constructTransform(w, v, width, height)

            #Specify the transform (plus viewing distance) to the ModelData instance.
            self.master.m_DataSet.specifyTransform(ax, ay, sx, sy,
                                                   self.master.m_Distance)

            # Diagnostic dump of the model and transform state.
            face_count = self.master.m_DataSet.getFaces()
            Vertice_count = self.master.m_DataSet.getVertices()

            print("Num vertices: ", len(Vertice_count))
            print("Num faces: ", len(face_count))

            print("Window line: ", self.master.m_DataSet.getWindow())
            print("Viewport line:", self.master.m_DataSet.getViewport())
            print('Bounding box:', self.master.m_DataSet.getBoundingBox())

            print("---Draw")
            print(f' Canvas size: ({width} , {height})')
            print(
                f' Transform: ax {ax: .3f} ay {ay: .3f} sx {sx: .3f} sy {sy: .3f}'
            )
            print("View distance:", self.master.m_Distance)
            print("Euler angles:", self.master.m_roll_angle,
                  self.master.m_pitch_angle, self.master.m_yaw_angle)

            #specify the Euler angles (roll/pitch/yaw) to the model,
            #along with the same transform parameters as above
            self.master.m_DataSet.specifyEulerAngles(
                self.master.m_roll_angle,
                self.master.m_pitch_angle,
                self.master.m_yaw_angle,
                ax,
                ay,
                sx,
                sy,
            )

            #Invoke the create_graphic_objects() method, passing the canvas and the ModelData instance, clipping, perspective,
            # and euler rotation options.
            self.master.ob_world.create_graphic_objects(
                self.master.ob_canvas_frame.canvas, self.master.m_DataSet,
                self.master.m_clip.get() == 1,
                self.master.m_perspective.get() == 1,
                self.master.m_euler_rotation.get() == 1)
示例#12
0
# start of the program
#---------------------------------
if __name__ == '__main__':
    # The Synthea CSV export directory must exist before anything runs.
    if not os.path.exists(BASE_SYNTHEA_INPUT_DIRECTORY):
        print("Synthea input directory should exist")
        exit(1)

    print('BASE_SYNTHEA_INPUT_DIRECTORY     =' + BASE_SYNTHEA_INPUT_DIRECTORY)
    print('ADDRESS_FILE                     =' + ADDRESS_FILE)

    # load utils
    util = Utils.Utils()

    # load the data models
    model_synthea = ModelSyntheaPandas.ModelSyntheaPandas()
    model_data = ModelData.ModelData()

    # read the synthea patient file: prefer the plain CSV, fall back to
    # the gzipped variant.
    datatype = 'patients'
    if os.path.exists(
            os.path.join(BASE_SYNTHEA_INPUT_DIRECTORY, datatype + '.csv')):
        inputfile = datatype + '.csv'
        compression = None
    elif os.path.exists(
            os.path.join(BASE_SYNTHEA_INPUT_DIRECTORY, datatype + '.csv.gz')):
        inputfile = datatype + '.csv.gz'
        compression = 'gzip'
    else:
        print("Error:  Could not find synthea file for " + datatype)
        exit(1)
    # Use os.path.join here too, consistent with the existence checks
    # above (the original concatenated with '/' by hand).
    inputdata = os.path.join(BASE_SYNTHEA_INPUT_DIRECTORY, inputfile)
示例#13
0
    pygame.event.pump()

    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if (event.key == pygame.K_x):
                sys.exit()
            elif (event.key == pygame.K_SPACE):
                data.pause = not (data.pause)
                data.appState = 2 if (data.appState == 1) else 1
            elif (event.key == pygame.K_s):
                if (not (data.start)):
                    data.start = True
                    data.appState = 1
                    gr.createLaneSprites(height)
                    data.createLanes(road)
                    __showSelectedData()
            pygame.display.set_caption(appStates[data.appState])
        if event.type == pygame.MOUSEBUTTONDOWN:
            if (not (data.start)):
                mpos = pygame.mouse.get_pos()  # Get mouse position
                if single_rect.collidepoint([
                        mpos[0] - (width * 35) / 100,
                        mpos[1] - (height * 2) / 100
                ]):
                    screen.fill(Colors.white)
                    __renderButtonTable()
                    __renderModeComponent('mode')
                elif bp_minusBtn_rect.collidepoint([
                        mpos[0] - (width * 35) / 100,
                        mpos[1] - (height * 2) / 100
def beginConnection(ip, port):
    """Run a simple (toy) banking server loop.

    Listens on (ip, port), accepts one client at a time and services
    CONSULTA / DEPOSITAR / RETIRAR / EXIT commands until the client
    sends EXIT, then waits for the next client.  Each message is
    expected to decrypt to "user password command [amount]".

    Fixes over the original: the client socket is now closed after each
    session (previously it leaked, and the trailing ``sc.close()`` was
    unreachable), and the undefined-name ``s.close()`` (a latent
    NameError) was removed.

    SECURITY NOTE(review): credentials travel with every message and
    are compared in plain text against an in-memory table — demo code,
    not production.
    """
    # Build my socket
    mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Add our ip and port
    mySocket.bind((ip, port))
    # Allow up to 5 pending connections in the accept queue
    mySocket.listen(5)
    print("Welcome")

    while True:
        print("Wating conection")
        # Accept the conection, The sc is Client's socket
        sc, addr = mySocket.accept()
        print("Client connected from: ", addr)
        try:
            while True:
                received = sc.recv(2048)
                receivedDecrypt = SecurityModule.decryptMessage(received)
                print(receivedDecrypt)
                # Message layout: [user, password, command, amount?]
                receivedDecryptArray = receivedDecrypt.split(" ")
                print(receivedDecryptArray)
                if ModelData.loggin(listUsers, receivedDecryptArray[0],
                                    receivedDecryptArray[1]):
                    if (receivedDecryptArray[2] == "CONSULTA"):
                        # Balance query: report the matching account's total.
                        for llave, valor in listUsers.items():
                            if valor.user == receivedDecryptArray[
                                    0] and valor.password == receivedDecryptArray[
                                        1]:
                                stringSend = "Your total account is:  " + str(
                                    valor.dineroTotal)
                                sc.send(stringSend.encode("utf-8"))
                    elif receivedDecryptArray[2] == "DEPOSITAR":
                        # Deposit: requires an amount as the 4th field.
                        print(len(receivedDecryptArray))
                        if len(receivedDecryptArray) > 3:
                            for llave, valor in listUsers.items():
                                if valor.user == receivedDecryptArray[
                                        0] and valor.password == receivedDecryptArray[
                                            1]:
                                    valor.dineroTotal += int(
                                        receivedDecryptArray[3])
                                    stringSend = "Your deposit was successful  "
                                    sc.send(stringSend.encode("utf-8"))
                        else:
                            stringSend = "Your need put the next format: DEPOSITAR [CANTIDAD A DEPOSITAR]"
                            sc.send(stringSend.encode("utf-8"))
                    elif receivedDecryptArray[2] == "RETIRAR":
                        # Withdrawal: requires an amount and enough funds.
                        print(len(receivedDecryptArray))
                        if len(receivedDecryptArray) > 3:
                            for llave, valor in listUsers.items():
                                if valor.user == receivedDecryptArray[
                                        0] and valor.password == receivedDecryptArray[
                                            1]:
                                    if valor.dineroTotal >= int(
                                            receivedDecryptArray[3]):
                                        valor.dineroTotal -= int(
                                            receivedDecryptArray[3])
                                        stringSend = "Your total account is:  " + str(
                                            valor.dineroTotal)
                                        sc.send(stringSend.encode("utf-8"))
                                    else:
                                        stringSend = "Your dont have so much  money"
                                        sc.send(stringSend.encode("utf-8"))
                        else:
                            stringSend = "Your nedd put the next format: RETIRAR [CANTIDAD A RETIRAR]"
                            sc.send(stringSend.encode("utf-8"))
                    elif receivedDecryptArray[2] == "EXIT":
                        # End of session: acknowledge and stop serving
                        # this client.
                        sc.send("EXIT".encode("utf-8"))
                        break
                    else:
                        stringSend = "This option is not correct "
                        sc.send(stringSend.encode("utf-8"))
                else:
                    sc.send(
                        "Wrong, user and password are incorrect".encode("utf-8"))
        finally:
            # Always release the client socket before accepting the next one.
            sc.close()
示例#15
0
    def __init__(self):
        """Drive a complete WRF auto-run pipeline.

        Steps: (1) load settings, create run directories and detect the
        WRF node decomposition; (2) download model input data;
        (3) generate namelists and job files from templates; (4) run
        geogrid / pre-processing / wrf.exe; (5) optional
        post-processing; (6) cleanup.  Any fatal step terminates the
        process via sys.exit.
        """
        # NOTE(review): curDir appears unused in this method — confirm
        # before removing.
        curDir = os.path.dirname(os.path.abspath(__file__))
        logger = Tools.loggedPrint.instance()

        logger.write("Initializing WRF Auto-Run Program")
        #Step 1: Load program settings
        logger.write(
            " 1. Loading program settings, Performing pre-run directory creations and loading ANL modules"
        )
        settings = ApplicationSettings.AppSettings()
        modelParms = ModelData.ModelDataParameters(settings.fetch("modeldata"))
        if not modelParms.validModel():
            sys.exit("Program failed at step 1, model data source: " +
                     settings.fetch("modeldata") +
                     ", is not defined in the program.")
        logger.write(" - Settings loaded, model data source " +
                     settings.fetch("modeldata") + " applied to the program.")
        # Clear leftovers from a previous run before creating anything new.
        prc = Cleanup.PostRunCleanup(settings)
        prc.performClean(cleanAll=False,
                         cleanOutFiles=True,
                         cleanErrorFiles=True,
                         cleanInFiles=True,
                         cleanBdyFiles=False,
                         cleanWRFOut=False,
                         cleanModelData=False)
        mParms = modelParms.fetch()
        # Run directories are keyed by the first 8 chars of starttime
        # (the YYYYMMDD portion).
        if (settings.fetch("run_prerunsteps") == '1'):
            Tools.popen(
                settings, "mkdir " + settings.fetch("wrfdir") + '/' +
                settings.fetch("starttime")[0:8])
            Tools.popen(
                settings, "mkdir " + settings.fetch("wrfdir") + '/' +
                settings.fetch("starttime")[0:8] + "/output")
            Tools.popen(
                settings, "mkdir " + settings.fetch("wrfdir") + '/' +
                settings.fetch("starttime")[0:8] + "/wrfout")
            Tools.popen(
                settings, "mkdir " + settings.fetch("wrfdir") + '/' +
                settings.fetch("starttime")[0:8] + "/postprd")
        else:
            logger.write(
                " 1. run_prerunsteps is turned off, directories have not been created"
            )
        logger.write("  - Checking if WRF Node decomposition is required")
        if (settings.fetch("wrf_detect_proc_count") == '1'):
            logger.write("   - Yes.")
            # Search for an nproc_x/nproc_y split compatible with the
            # domain size and the MPI/quilting settings.
            det = Tools.detect_ideal_processors(
                int(settings.fetch("e_we")), int(settings.fetch("e_sn")),
                int(settings.fetch("num_wrf_nodes")),
                int(settings.fetch("wrf_mpi_ranks_per_node")),
                int(settings.fetch("wrf_nio_groups")),
                int(settings.fetch("wrf_nio_tasks_per_group")))
            if (det is None):
                logger.write(
                    " 1. Failed to find a decomposition given the input settings in control.txt, please adjust your settings"
                )
                sys.exit("")
            nproc_x = det[0]
            nproc_y = det[1]
            logger.write("   - Found a viable decomposition, X: " +
                         str(nproc_x) + ", Y: " + str(nproc_y) + ".")
            settings.add_replacementKey("[nproc_x]", str(nproc_x))
            settings.add_replacementKey("[nproc_y]", str(nproc_y))
        else:
            # -1 tells WRF to pick its own decomposition.
            settings.add_replacementKey("[nproc_x]", str("-1"))
            settings.add_replacementKey("[nproc_y]", str("-1"))
            logger.write("   - No.")
        logger.write(" 1. Done.")
        #Step 2: Download Data Files
        logger.write(" 2. Downloading Model Data Files")
        modelData = ModelData.ModelData(settings, modelParms)
        if (settings.fetch("run_prerunsteps") == '1'):
            modelData.fetchFiles()
        else:
            logger.write(
                " 2. run_prerunsteps is turned off, model data has not been downloaded"
            )
        logger.write(" 2. Done")
        #Step 3: Generate run files
        logger.write(" 3. Generating job files and creating templated files")
        # HourDelta is in hours; the namelist key wants seconds.
        settings.add_replacementKey("[interval_seconds]",
                                    mParms["HourDelta"] * 60 * 60)
        settings.add_replacementKey(
            "[constants_name]",
            settings.fetch("constantsdir") + '/' + mParms["ConstantsFile"])
        tWrite = Template.Template_Writer(settings)
        if (settings.fetch("run_prerunsteps") == '1'):
            # One namelist.wps per ungrib file extension; the first one
            # doubles as the geogrid namelist.
            i = 0
            for ext in mParms["FileExtentions"]:
                tWrite.generateTemplatedFile(settings.fetch("headdir") +
                                             "templates/namelist.wps.template",
                                             "namelist.wps." + ext,
                                             extraKeys={
                                                 "[ungrib_prefix]": ext,
                                                 "[fg_name]": mParms["FGExt"]
                                             })
                if (i == 0):
                    Tools.popen(
                        settings,
                        "cp namelist.wps." + ext + " namelist.wps.geogrid")
                i += 1
            tWrite.generateTemplatedFile(
                settings.fetch("headdir") +
                "templates/namelist.input.template", "namelist.input")
        else:
            logger.write(
                " 3. run_prerunsteps is turned off, template files have not been created"
            )
        if (self.write_job_files(settings, mParms) == False):
            logger.write(" 3. Failed to generate job files... abort")
            sys.exit("")
        logger.write(" 3. Done")
        #Step 4: Run the WRF steps
        logger.write(" 4. Run WRF Steps")
        jobs = Jobs.JobSteps(settings, modelParms)
        logger.write("  4.a. Checking for geogrid flag...")
        # HoldUntilOpen gates each stage; breakTime is a 24h timeout.
        Tools.Process.instance().HoldUntilOpen(breakTime=86400)
        if (settings.fetch("run_geogrid") == '1'):
            logger.write("  4.a. Geogrid flag is set, preparing geogrid job.")
            jobs.run_geogrid()
            logger.write("  4.a. Geogrid job Done")
        else:
            logger.write("  4.a. Geogrid flag is not set, skipping step")
        logger.write("  4.a. Done")
        logger.write("  4.b. Running pre-processing executables")
        Tools.Process.instance().HoldUntilOpen(breakTime=86400)
        if (settings.fetch("run_preprocessing_jobs") == '1'):
            if (jobs.run_preprocessing() == False):
                logger.write("   4.b. Error in pre-processing jobs")
                logger.close()
                sys.exit(
                    "   4.b. ERROR: Pre-processing jobs failed, check error logs"
                )
        else:
            logger.write(
                "  4.b. run_preprocessing_jobs is turned off, skiping this step"
            )
        Tools.Process.instance().HoldUntilOpen(breakTime=86400)
        logger.write("  4.b. Done")
        logger.write("  4.c. Running WRF Model")
        if (settings.fetch("run_wrf") == '1'):
            if (jobs.run_wrf() == False):
                logger.write("   4.c. Error at WRF.exe")
                logger.close()
                sys.exit(
                    "   4.c. ERROR: wrf.exe process failed to complete, check error file."
                )
        else:
            logger.write(
                "  4.c. run_wrf is turned off, skiping wrf.exe process")
        logger.write("  4.c. Done")
        logger.write(" 4. Done")
        #Step 5: Run postprocessing steps
        if (settings.fetch("run_postprocessing") == '1'):
            logger.write(" 5. Running post-processing")
            post = Jobs.Postprocessing_Steps(settings, modelParms)
            Tools.Process.instance().HoldUntilOpen(breakTime=86400)
            if (post.prepare_postprocessing() == False):
                logger.write("   5. Error initializing post-processing")
                logger.close()
                sys.exit(
                    "   5. ERROR: post-processing process failed to initialize, check error file."
                )
            Tools.Process.instance().HoldUntilOpen(breakTime=86400)
            if (post.run_postprocessing() == False):
                logger.write("   5. Error running post-processing")
                logger.close()
                sys.exit(
                    "   5. ERROR: post-processing process failed to complete, check error file."
                )
            logger.write(" 5. Done")
        else:
            logger.write(" 5. Post-processing flag disabled, skipping step")
        #Step 6: Cleanup
        logger.write(" 6. Cleaning Temporary Files")
        prc.performClean(cleanAll=False,
                         cleanOutFiles=True,
                         cleanErrorFiles=True,
                         cleanInFiles=True,
                         cleanBdyFiles=True,
                         cleanWRFOut=False,
                         cleanModelData=True)
        logger.write(" 6. Done")
        #Done.
        logger.write("All Steps Completed.")
        logger.write("Program execution complete.")
        logger.close()
示例#16
0
	def __init__(self):
		curDir = os.path.dirname(os.path.abspath(__file__)) 
		logger = Tools.loggedPrint.instance()
	
		logger.write("Initializing WRF Auto-Run Program")
		#Step 1: Load program settings
		logger.write(" 1. Loading program settings, setting up directories")
		settings = ApplicationSettings.AppSettings()
		modelParms = ModelData.ModelDataParameters(settings.fetch("modeldata"))
		scheduleParms = Scheduler.Scheduler_Settings(settings.fetch("jobscheduler"))
		if not scheduleParms.validScheduler():
			sys.exit("Program failed at step 1, job scheduler: " + settings.fetch("jobscheduler") + ", is not defined in the program.")	
		if not modelParms.validModel():
			sys.exit("Program failed at step 1, model data source: " + settings.fetch("modeldata") + ", is not defined in the program.")
		logger.write(" - Settings loaded, model data source " + settings.fetch("modeldata") + " applied to the program.")
		prc = Cleanup.PostRunCleanup(settings)
		prc.performClean(cleanAll = False, cleanOutFiles = True, cleanErrorFiles = True, cleanInFiles = True, cleanBdyFiles = False, cleanWRFOut = False, cleanModelData = False)
		mParms = modelParms.fetch()
		if(settings.fetch("run_prerunsteps") == '1'):
			Tools.popen(settings, "mkdir " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8])
			Tools.popen(settings, "mkdir " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output")		
			Tools.popen(settings, "mkdir " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/wrfout")
			Tools.popen(settings, "mkdir " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/postprd")	
		else:
			logger.write(" 1. run_prerunsteps is turned off, directories have not been created")
		logger.write("  - Checking if WRF Node decomposition is required")
		save_nproc_x = -1
		save_nproc_y = -1
		if(settings.fetch("wrf_detect_proc_count") == '1'):
			logger.write("   - Yes.")
			det = Tools.detect_ideal_processors(int(settings.fetch("e_we")), 
												int(settings.fetch("e_sn")), 
												int(settings.fetch("num_wrf_nodes")), 
												int(settings.fetch("wrf_mpi_ranks_per_node")), 
												int(settings.fetch("wrf_nio_groups")), 
												int(settings.fetch("wrf_nio_tasks_per_group")))
			if(det is None):
				logger.write(" 1. Failed to find a decomposition given the input settings in control.txt, please adjust your settings")
				sys.exit("")
			save_nproc_x = det[0]
			save_nproc_y = det[1]
			logger.write("   - Found a viable decomposition, X: " + str(save_nproc_x) + ", Y: " + str(save_nproc_y) + ".")
		else:
			logger.write("   - No.")
		logger.write(" 1. Done.")
		#Step 2: Download Data Files
		logger.write(" 2. Downloading Model Data Files")
		modelData = ModelData.ModelData(settings, modelParms)
		if(settings.fetch("run_prerunsteps") == '1'):
			modelData.fetchFiles()
		else:
			logger.write(" 2. run_prerunsteps is turned off, model data has not been downloaded")
		logger.write(" 2. Done")
		#Step 3: Generate run files
		logger.write(" 3. Generating job files and creating templated files")
		if(settings.fetch("use_io_vars") == '1'):
			Tools.popen(settings, "cp " + settings.fetch("headdir") + "io_vars/IO_VARS.txt " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/IO_VARS.txt")			
		# Check if we are using LFS / quilting
		if(int(settings.fetch("wrf_nio_groups")) * int(settings.fetch("wrf_nio_tasks_per_group")) == 0):
			settings.add_replacementKey("[io_form_geogrid]", 2)
			settings.add_replacementKey("[io_form_metgrid]", 2)
		else:
			settings.add_replacementKey("[io_form_geogrid]", 102)
			settings.add_replacementKey("[io_form_metgrid]", 102)			
		settings.add_replacementKey("[interval_seconds]", mParms["HourDelta"] * 60 * 60)
		settings.add_replacementKey("[constants_name]", settings.fetch("constantsdir") + '/' + mParms["ConstantsFile"])
		settings.add_replacementKey("[num_metgrid_levels]", mParms["MetgridLevels"])
		tWrite = Template.Template_Writer(settings)
		# RF: Additional namelist settings based on IO selections
		if(int(settings.fetch("wrf_nio_groups")) * int(settings.fetch("wrf_nio_tasks_per_group")) == 0):
			# We use parallel netCDF for everything
			settings.add_replacementKey("[io_form_history]", str("11"))
			settings.add_replacementKey("[io_form_restart]", str("2"))
			settings.add_replacementKey("[io_form_auxinput1]", str("2"))
			settings.add_replacementKey("[io_form_auxhist2]", str("11"))
			settings.add_replacementKey("[io_form_auxhist5]", str("11"))
			settings.add_replacementKey("[io_form_auxhist23]", str("11"))		
		else:
			settings.add_replacementKey("[io_form_history]", str("102"))
			settings.add_replacementKey("[io_form_restart]", str("2"))
			settings.add_replacementKey("[io_form_auxinput1]", str("2"))
			settings.add_replacementKey("[io_form_auxhist2]", str("11"))
			settings.add_replacementKey("[io_form_auxhist5]", str("11"))
			settings.add_replacementKey("[io_form_auxhist23]", str("11"))			
		# end
		if(settings.fetch("run_prerunsteps") == '1'):
			i = 0
			for ext in mParms["FileExtentions"]:
				tWrite.generateTemplatedFile(settings.fetch("headdir") + "templates/namelist.wps.template", "namelist.wps." + ext, extraKeys = {"[ungrib_prefix]": ext, "[fg_name]": mParms["FGExt"]})
				if(i == 0):
					Tools.popen(settings, "cp namelist.wps." + ext + " namelist.wps.geogrid")
				i += 1
			# RF 10/19: real.exe requires nproc_x/nproc_y to be -1, update the settings
			settings.add_replacementKey("[nproc_x]", str("-1"))
			settings.add_replacementKey("[nproc_y]", str("-1"))
			# RF 2/12: For real.exe, these need to be 2, WRF can use 11
			settings.add_replacementKey("[io_form_input]", str("2"))
			settings.add_replacementKey("[io_form_boundary]", str("2"))			
			tWrite.generateTemplatedFile(settings.fetch("headdir") + "templates/namelist.input.template", "namelist.input")
		else:
			logger.write(" 3. run_prerunsteps is turned off, template files have not been created")
		if(self.write_job_files(settings, mParms, scheduleParms) == False):
			logger.write(" 3. Failed to generate job files... abort")
			sys.exit("")
		if(self.write_helper_scripts(settings, mParms, scheduleParms) == False):
			logger.write(" 3. Failed to generate helper scripts... abort")
			sys.exit("")
		logger.write(" 3. Copying WPS/WRF run files to output directory")
		# Copy important files to the directory
		Tools.popen(settings, "cp " + settings.fetch("headdir") + "run_files/* " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output")
		# Copy required WRF files
		Tools.popen(settings, "cp " + settings.fetch("wrfrunfiles") + "* " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output")
		# Note, we need to remove the .exe files and sample namelist and then recopy from the head dir.
		Tools.popen(settings, "rm " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/namelist.input")
		Tools.popen(settings, "rm " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/wrf.exe")
		Tools.popen(settings, "rm " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/real.exe")
		Tools.popen(settings, "rm " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/ndown.exe")
		Tools.popen(settings, "rm " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/tc.exe")
		# Copy the .exe files if needed
		if(settings.fetch("need_copy_exe") == '1'):
			Tools.popen(settings, "cp " + settings.fetch("wrfexecutables") + "*.exe " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output")
			# Grab our WPS executables
			# Tools.popen(settings, "cp " + settings.fetch("wpsdirectory") + "link_grib.csh " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8]) # Now written by this script
			Tools.popen(settings, "cp " + settings.fetch("wpsdirectory") + "geogrid.exe " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8])
			Tools.popen(settings, "cp " + settings.fetch("wpsdirectory") + "ungrib.exe " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8])
			Tools.popen(settings, "cp " + settings.fetch("wpsdirectory") + "metgrid.exe " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8])
		# Finally, move the generated files to the run directory		
		Tools.popen(settings, "mv namelist.input " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output")		
		logger.write(" 3. Done")
		#Step 4: Run the WRF steps
		logger.write(" 4. Run WRF Steps")
		jobs = Jobs.JobSteps(settings, modelParms, scheduleParms)
		logger.write("  4.a. Checking for geogrid flag...")
		Tools.Process.instance().HoldUntilOpen(breakTime = 86400)
		if(settings.fetch("run_geogrid") == '1'):
			logger.write("  4.a. Geogrid flag is set, preparing geogrid job.")
			jobs.run_geogrid()
			logger.write("  4.a. Geogrid job Done")
		else:
			logger.write("  4.a. Geogrid flag is not set, skipping step")
		logger.write("  4.a. Done")
		logger.write("  4.b. Running pre-processing executables")	
		Tools.Process.instance().HoldUntilOpen(breakTime = 86400)
		if(settings.fetch("run_preprocessing_jobs") == '1'):
			if(jobs.run_preprocessing() == False):
				logger.write("   4.b. Error in pre-processing jobs")
				logger.close()		
				sys.exit("   4.b. ERROR: Pre-processing jobs failed, check error logs")
		else:
			logger.write("  4.b. run_preprocessing_jobs is turned off, skiping this step")
		Tools.Process.instance().HoldUntilOpen(breakTime = 86400)
		logger.write("  4.b. Done")
		logger.write("  4.c. Running WRF Model")
		logger.write("   4.c. > Updating settings for nproc_x/nproc_y")
		# RF 10/19: Now nuke the real.exe namelist file and load in the wrf settings, then run.	
		settings.add_replacementKey("[nproc_x]", str(save_nproc_x))
		settings.add_replacementKey("[nproc_y]", str(save_nproc_y))
		settings.add_replacementKey("[io_form_input]", str("2"))
		settings.add_replacementKey("[io_form_boundary]", str("2"))
		tWrite.generateTemplatedFile(settings.fetch("headdir") + "templates/namelist.input.template", "namelist.input")	
		Tools.popen(settings, "mv namelist.input " + settings.fetch("wrfdir") + '/' + settings.fetch("starttime")[0:8] + "/output/namelist.input")
		logger.write("   4.c. > Starting wrf.exe job process")
		if(settings.fetch("run_wrf") == '1'):
			if(jobs.run_wrf() == False):
				logger.write("   4.c. Error at WRF.exe")
				logger.close()		
				sys.exit("   4.c. ERROR: wrf.exe process failed to complete, check error file.")	
		else:
			logger.write("  4.c. run_wrf is turned off, skiping wrf.exe process")				
		logger.write("  4.c. Done")
		logger.write(" 4. Done")
		#Step 5: Run postprocessing steps
		if(settings.fetch("run_postprocessing") == '1'):
			logger.write(" 5. Running post-processing")
			post = Jobs.Postprocessing_Steps(settings, modelParms)
			Tools.Process.instance().HoldUntilOpen(breakTime = 86400)
			if(post.prepare_postprocessing() == False):
				logger.write("   5. Error initializing post-processing")
				logger.close()			
				sys.exit("   5. ERROR: post-processing process failed to initialize, check error file.")
			Tools.Process.instance().HoldUntilOpen(breakTime = 86400)
			if(post.run_postprocessing() == False):
				logger.write("   5. Error running post-processing")
				logger.close()				
				sys.exit("   5. ERROR: post-processing process failed to complete, check error file.")			
			logger.write(" 5. Done")
		else:
			logger.write(" 5. Post-processing flag disabled, skipping step")
		#Step 6: Cleanup
		logger.write(" 6. Cleaning Temporary Files")
		prc.performClean(cleanAll = False, cleanOutFiles = True, cleanErrorFiles = True, cleanInFiles = True, cleanBdyFiles = True, cleanWRFOut = False, cleanModelData = True)
		logger.write(" 6. Done")		
		#Done.
		logger.write("All Steps Completed.")
		logger.write("Program execution complete.")
		logger.close()
示例#17
0
class ModelPlotter(object):
	"""Plots individual timecourses and a group mean (model-based, LOESS,
	or binned) for grouped longitudinal data, optionally backed by a
	fitted model."""

	def __init__(self,data=None,model=None,x=None,y=None,weights=None,groups=None,specialGroups=None,silentGroups=None,
					individuals=None,mean=True,plotIndividuals=True,span=0.6,ialpha=None,malpha=1.0,cialpha=None,meanStyle=None,errorBars='95',
					estimateData=None,binFunction=None,iLinewidth=1,mLinewidth=1,linestyle='-',
					colors=None,styles=None,derivedValues=None,groupValues=None,fixedValues=None,xUnitConversion=None,
					yUnitConversion=None, drawLegend=True,groupOrdering=None,includeN=True,**args):
		"""Configure the plotter; most arguments are stored verbatim as
		attributes of the same name.  ``x``/``y`` name the sample keys to
		plot, ``groups`` partitions samples, ``meanStyle`` is one of
		'model', 'loess' or 'binned' (inferred from ``model`` when not
		given)."""
		# Bug fix: these parameters previously used shared mutable default
		# arguments ([] / {}), so every plotter constructed with the
		# defaults aliased the same list/dict objects.
		groups = [] if groups is None else groups
		specialGroups = [] if specialGroups is None else specialGroups
		silentGroups = [] if silentGroups is None else silentGroups
		individuals = [] if individuals is None else individuals
		groupValues = {} if groupValues is None else groupValues
		fixedValues = {} if fixedValues is None else fixedValues
		self.model = model
		if self.model:
			self.data = self.model.data
			self.estimateData = estimateData
			self.meanStyle = 'model'
		else:
			self.data = data
			self.meanStyle = 'loess'
			self.estimateData = None
		# An explicit data argument overrides the model's data.
		if data:
			self.data = data
		if meanStyle:
			self.meanStyle = meanStyle
		self.errorBars = errorBars
		self.x = x
		self.y = y
		self.weights = weights
		self.groups = groups
		self.specialGroups = specialGroups
		self.silentGroups = silentGroups
		self.individuals = individuals
		self.plotIndividuals = plotIndividuals
		self.mean = mean
		self.span = span
		self.ialpha = ialpha
		self.malpha = malpha
		# CI shading defaults to a tenth of the mean line's alpha.
		self.cialpha = cialpha if cialpha is not None else self.malpha/10.
		
		# Connect individual traces only when individuals are being tracked;
		# unconnected data falls back to circle markers.
		self.connect = args.get('connect',True if individuals and len(individuals) > 0 else False)
		self.marker = args.get('marker',None if self.connect else 'o')
		self.markerSize = args.get('markerSize',5)
		self.markerAlpha = args.get('markerAlpha',0.5)
		
		self.label = args.get('label',None)
		self.includeN = includeN
		
		# Caches mapping group values / descriptors to assigned colours and
		# line styles, filled lazily by getColorAndStyle().
		self.groupColorKey = {}
		self.groupStyleKey = {}
		self.groupFormatKey = {}
		self.groupedData = {}
		self.colorCounter = -1
		self.styleCounter = -1
		if colors:
			self.colors = colors
		else:
			# Prefer the seaborn palette when seaborn imported successfully.
			if sns is not None:
				self.colors = sns.color_palette()
			else:
				self.colors = ['g','b','c','m','y','k','r']
		if styles:
			self.styles = styles
		else:
			self.styles = ['-','--',':','-.']
		self.estimatedTimecourses = None
		self.binFunction = binFunction
		self.iLinewidth = iLinewidth
		self.mLinewidth = mLinewidth
		self.linestyle = linestyle
		# Bug fix: the derivedValues argument was previously discarded
		# (hard-coded to None), so plot() never applied derived values.
		self.derivedValues = derivedValues
		self.groupValues = groupValues
		self.fixedValues = fixedValues
		self.xUnitConversion = xUnitConversion
		self.yUnitConversion = yUnitConversion
		self.drawLegend = drawLegend
		self.groupOrdering = groupOrdering
		
	def setData(self,data):
		self.data = data
		self.estimatedTimecourses = None
		
	def setModel(self,model):
		self.model = model
		self.estimatedTimecourses = None
		
	def setGroups(self,groups):
		self.groups = groups
		

	def getNestedVal(self,data,keys,create=True):
		if create and not keys[0] in data: data[keys[0]] = {}
		if len(keys) == 1:
			return data[keys[0]]
		else:
			return self.getNestedVal(data[keys[0]],keys[1:])

	def getGroupChain(self,groups,sample):
		groupChain = []
		for group in groups:
			groupChain.append(sample[group])
		return groupChain


	def nestedPlot(self,data,groups=None,chain='',color=0):
		"""Recursively plot a nested dict of grouped series.

		``data`` is nested one level per entry in ``groups``; the leaves are
		dicts with 'x' and 'y' lists.  ``chain`` accumulates the group-value
		path ('val1-val2-') used as the series label, and ``color`` indexes
		into self.colors (one increment per subtree).
		"""
		for k,key in enumerate(data.keys()):
			if len(groups) > 1:
				# Descend one grouping level; each subtree gets its own colour.
				color += 1
				try:
					self.nestedPlot(data[key],groups=groups[1:],chain=chain+'%s-'%key,color=color)
				except:
					# Best-effort: report and continue with the other subtrees.
					print()
					traceback.print_exc()
					print()
			else:
				# Leaf level: sort points by x before drawing.
				xD,yD = zip(*sorted(zip(data[key]['x'],data[key]['y'])))
				# Only the first leaf of a subtree carries the legend label
				# (chain minus its trailing '-').
				if k == 0: 
					label = chain[0:-1]
				else: label = ''
				if (self.connect):
					pylab.plot(xD,yD,alpha=self.ialpha,color=self.colors[color%len(self.colors)],label=label)
				else:
					pylab.scatter(xD,yD,alpha=self.ialpha,color=self.colors[color%len(self.colors)],label=label)
		

	def getTimecourses(self,data,**kwargs):
		x = kwargs.get('x',self.x); y = kwargs.get('y',self.y)
		x = x if x in data.samples[0] else x.replace('.','-')
		y = y if y in data.samples[0] else y.replace('.','-')
		groups = kwargs.get('groups',self.groups); silentGroups = kwargs.get('silentGroups',self.silentGroups)
		individuals = kwargs.get('individuals',self.individuals)
		overKey=[x]; accKey=[x,y];
		if '_CI' in data.samples[0]:
			accKey.append('_CI')
		constant=groups+silentGroups+individuals  
#		timecourses, sampleIndex = data.accumulate(overKey=overKey,accKey=accKey+['dataTimepoint'],constant=constant,returnSampleIndex=True)
		timecourses, sampleIndex = data.accumulate(overKey=overKey,accKey=accKey,constant=constant,returnSampleIndex=True)
		return timecourses, sampleIndex
		
		
	def getGroupedSample(self,sample):
		newSample = {}
		for g, group in enumerate(self.groups):
			samplevalue = sample[group]
			if group in self.groupValues.keys():
				try:
					if not samplevalue in self.groupValues[group]:
						possibilities = numpy.array(self.groupValues[group])
						diffs = abs(possibilities - samplevalue)
						samplevalue = possibilities[numpy.argmin(diffs)]
				except:
					traceback.print_exc()
			
			newSample[group] = samplevalue
		return newSample
		
		
	def makeGroupDescriptor(self,sample):
		newSample = self.getGroupedSample(sample)
		try:
			group = '-'.join([str(newSample[k]).replace("'",'') for k in self.groups])
		except:
			print("Could not make group descriptor out of: %s" % ([str(newSample[k]).replace("'",'') for k in self.groups]))
			return None
		return group
		
		
	def getColorAndStyle(self,sample):
		"""Return (color, linestyle) for the sample's group, assigning and
		caching new ones on first sight.

		The first group key selects the colour, the second selects the line
		style.  Assignments are remembered in groupColorKey/groupStyleKey and
		the combined result in groupFormatKey so a group is always drawn the
		same way across calls.  Entries in self.colors/self.styles may be
		dicts carrying both a 'color' and a 'style'.
		"""
		style = self.linestyle
		color = self.colors[0]

		newSample = self.getGroupedSample(sample)
		groupDescription = self.makeGroupDescriptor(newSample)
		# Fast path: this exact group combination was already formatted.
		if groupDescription in self.groupFormatKey:
			color = self.groupFormatKey[groupDescription]['color']
			style = self.groupFormatKey[groupDescription]['style']
		else:
			# Only the first two group keys influence formatting.
			for g,group in enumerate(self.groups[0:2]):			
				samplevalue = newSample[group]
				if g == 0:			# first group chooses colour
					color = self.groupColorKey.get(samplevalue,None)
					style = self.groupStyleKey.get(samplevalue,self.linestyle)
					if not color:
						# Unseen value: take the next palette entry (wrapping).
						self.colorCounter = (self.colorCounter + 1) % len(self.colors)
						color = self.colors[self.colorCounter]
						if color.__class__ == dict:
							# Palette entry specifies both style and colour.
							self.groupStyleKey[samplevalue] = color.get('style',self.linestyle); style = self.groupStyleKey[samplevalue]
							self.groupColorKey[samplevalue] = color['color']; color = color['color']
						else:
							self.groupColorKey[samplevalue] = self.colors[self.colorCounter]
							color = self.groupColorKey[samplevalue]
				if g == 1:
					style = self.groupStyleKey.get(samplevalue,None)
					if style is None:		# Try composite group
						try:
							style = self.groupStyleKey.get('-'.join(self.groups[1:])).get('-'.join(groupDescription.split('-')[1:]))						
						except:
							pass
					if style is None:		# give up and make a new one
						self.styleCounter = (self.styleCounter + 1) % len(self.styles)
						style = self.styles[self.styleCounter]
						if style.__class__ == dict:
							# NOTE(review): the bare 'self.groupColorKey[samplevalue]'
							# after the assignment below is a no-op -- it looks like
							# 'color = ...' was intended, so a dict-style entry's
							# colour is stored but never applied here; confirm.
							self.groupColorKey[samplevalue] = style.get('color',self.colors[0]); self.groupColorKey[samplevalue]
							self.groupStyleKey[samplevalue] = style['style']; style = style['style']
						else:
							self.groupStyleKey[samplevalue] = self.styles[self.styleCounter]
							style = self.groupStyleKey.get(samplevalue,None)
			
		# Remember the combined assignment for the fast path above.
		if not groupDescription in self.groupFormatKey:
			self.groupFormatKey[groupDescription] = {'color':color,'style':style}
		return color,style
		
		
	def substituteValues(self,alternateModel,estimateData=None):
		""" Replaces values for matching x's in this plotter with ones from the alternate model"""
		# Prepare both plotters without drawing, then overwrite this
		# plotter's estimated y (and CI) at every x value that also appears
		# in the alternate model's estimated timecourses.
		estimateData = estimateData if estimateData is not None else self.estimateData
		aplotter = ModelPlotter(model=alternateModel,x=self.x,y=self.y,groups=self.groups,individuals=self.individuals,silentGroups=self.silentGroups,fixedValues=self.fixedValues,groupValues=self.groupValues,mean=True,estimateData=estimateData)
		aplotter.plot(prepareOnly=True)
		self.plot(prepareOnly=True)
		et1 = aplotter.estimatedTimecourses
		et2 = self.estimatedTimecourses
		# Timecourses are assumed to line up pairwise (same groups, same order).
		for t,tc in enumerate(et2):
			for i,x in enumerate(tc[self.x]):
				if x in et1[t][aplotter.x]:
					# NOTE(review): membership is tested against
					# et1[t][aplotter.x] but the index is looked up in
					# et1[t]['elapsed'] -- these only agree when the x key is
					# 'elapsed'; confirm against callers.
					idx = et1[t]['elapsed'].index(x)
					for k in ['_CI',self.y]:
						tc[k][i] = et1[t][k][idx]
		
		
	def sorter(self,item):
		value = 0
		for k,key in enumerate(self.groupOrdering[::-1]):
			group = key[0]; order = key[1]
			try:
				value += k*100 + order.index(item[group])
			except:
				pass
			return value
	
	
	def plotEstimatedTimecourses(self,estimatedTimecourses,**kwargs):
		"""Draw one mean line (plus CI band when available) per estimated
		timecourse.

		x/y keys default to the plotter's settings; explicit ``colors`` and
		``styles`` lists (indexed by timecourse position) override the
		group-based assignment from getColorAndStyle().
		"""
		x = kwargs.get('x',self.x); y = kwargs.get('y',self.y)
		colors = kwargs.get('colors',None); styles = kwargs.get('styles',None)
		
		# Honour an explicit group ordering for legend/draw order.
		if self.groupOrdering is not None:
			estimatedTimecourses.sort(key=self.sorter)				
		
		for n,i in enumerate(estimatedTimecourses):
			group = self.makeGroupDescriptor(i)
			if colors and styles:
				color = colors[n]
				style = styles[n]
			else:
				color,style = self.getColorAndStyle(i)
			try:
				# Append the group's sample count to the label when requested;
				# fall back to the bare group name if grouped data is absent.
				groupLabel = group
				if self.includeN:
					groupLabel += ' N=%d' % self.groupedData[group]['N']
				label = self.label if self.label is not None else groupLabel
			except:
				label = group
			xs = i[x]; ys = i[y]
			# Unit conversions are stored as expression suffixes, e.g. '* 60'.
			if self.xUnitConversion:
				xs = eval('x ' + self.xUnitConversion,{'x':numpy.array(xs)},globals())
			if self.yUnitConversion:
				ys = eval('y ' + self.yUnitConversion,{'y':numpy.array(ys)},globals())						
			# Shade the confidence band when the timecourse carries CIs.
			if not i['_CI'][0] == None:
				pred = numpy.array(ys); ci = i['_CI']
				if self.yUnitConversion:
					ci = eval('x ' + self.yUnitConversion,{'x':numpy.array(ci)},globals())
				pylab.fill_between(xs,pred-ci,pred+ci,color=color,alpha=self.cialpha)
			pylab.plot(xs,ys,color=color,alpha=self.malpha,label=label,linewidth=self.mLinewidth,linestyle=style)
			
			
	def plotEstimates(self,estimates,**kwargs):
		estimatedTimecourses,_ = self.getTimecourses(estimates,**kwargs)
		self.plotEstimatedTimecourses(estimatedTimecourses,**kwargs)
		
		
	def plotData(self,prepareOnly=False,correctFixed={},correctRandom={},addResiduals=True,**kwargs):
		"""Group the raw data and (unless ``prepareOnly``) draw the
		individual traces.

		Populates ``self.timecourses``, ``self.sampleIndex`` and
		``self.groupedData`` ({group: {'x','y','weights','N'}}).  When a
		model is attached and no explicit 'data' kwarg is given, the model's
		individual estimates are used instead of the raw data.
		NOTE: correctFixed/correctRandom use mutable defaults; they are only
		read here (passed through to the model), never mutated.
		"""
		if self.model is not None and not 'data' in kwargs:			# If we were supplied data, use that
			self.data = self.model.getIndividualEstimates(fixedTerms=correctFixed, excludeRandomEffects=correctRandom,addResiduals=addResiduals, allData=True)
		
		s,self.sampleIndex = self.getTimecourses(self.data,**kwargs)
		self.timecourses = s
		self.groupedData = {}
		# Auto-fade individual traces when many timecourses are drawn.
		if self.ialpha == None:
			ialpha = min(1.,1. / len(s) * 4.5)
		else:
			ialpha = self.ialpha
		for i in s:
			group = self.makeGroupDescriptor(i)
			if kwargs.get('individualColor',None):
				color = kwargs['individualColor']
				style = kwargs.get('individualStyle','-')
			else:
				color,style = self.getColorAndStyle(i)

			if not group in self.groupedData:
				self.groupedData[group] = {'x':[],'y':[],'weights':[],'N':0}
			# Individual traces carry no legend label (the original computed
			# a label here and then unconditionally overwrote it with None).
			label = None
			if self.plotIndividuals and not prepareOnly:
				try:
					xs = i[self.x]; ys = i[self.y]
					if self.xUnitConversion:
						xs = eval('x ' + self.xUnitConversion,{'x':numpy.array(xs)},globals())
					if self.yUnitConversion:
						ys = eval('x ' + self.yUnitConversion,{'x':numpy.array(ys)},globals())
					if not self.connect:
						style = ''
					pylab.plot(xs,ys,color=color,label=label,alpha=ialpha,marker=self.marker,markersize=self.markerSize,linewidth=self.iLinewidth,linestyle=style)
				except:
					traceback.print_exc()
			# Mask out samples where either coordinate is missing.
			ix = ma.array(i.get(self.x,[None])); ix.mask = numpy.equal(ix,None)
			# Bug fix: iy was previously built from self.x and masked against
			# ix (copy/paste error), so missing y values were never masked.
			iy = ma.array(i.get(self.y,[None])); iy.mask = numpy.equal(iy,None)
			combinedMask = numpy.logical_or(ix.mask,iy.mask)
			ix.mask = combinedMask; iy.mask = combinedMask
			if (~combinedMask).any():
				self.groupedData[group]['x'] += i[self.x]
				self.groupedData[group]['y'] += i[self.y]
				# Mixed models: N counts individuals; otherwise N is samples.
				if isinstance(self.model,MixedModel):
					self.groupedData[group]['N'] += 1
				else:
					self.groupedData[group]['N'] = len(i[self.y])
				if self.weights:
					self.groupedData[group]['weights'] += i[self.weights]
		self.individualLines = pylab.gca().lines
		
		
	def plotResiduals(self,x,**kwargs):
		"""Scatter the model's residuals against the sample key ``x``.

		Only samples with all model terms (and the outcome) present are
		included, matching the rows for which the model produced residuals.
		Extra kwargs are forwarded to the temporary ModelPlotter.
		"""
		residuals = self.model.getResiduals()		
		samples = []; counter = 0
		for s,sample in enumerate(self.model.data.samples):
			# Residuals exist only for complete cases, so advance the
			# residual counter in lockstep with those samples.
			if all([sample.get(term,None) is not None for term in list(self.model.atomicTerms) + [self.model.y]]):
				samples.append({x:sample[x],'residual':residuals[counter]})
				counter += 1
		
		data = ModelData(samples)
		plotter = ModelPlotter(data=data,x=x,y='residual',marker='o',connect=False,**kwargs)
		plotter.plot()
		pylab.ylabel('Residual');
		# Residual scatter needs no legend.
		pylab.gca().legend_ = None
		pylab.draw()

		
		
			
	def plot(self,**kwargs):
		"""Draw the individual timecourses and the group mean.

		Keyword arguments matching plotter attributes (e.g. ``groups``,
		``meanStyle``) override those attributes for this call and are
		consumed; the remainder are forwarded to the plotting helpers.
		With ``prepareOnly=True``, grouped data and model estimates are
		computed but nothing is drawn.
		"""
		prepareOnly = kwargs.get('prepareOnly',False)
		# Any kwarg naming an existing attribute overrides it for this call.
		for k in list(kwargs.keys()):
			if hasattr(self,k):
				setattr(self,k,kwargs.pop(k))
		if 'figure' in kwargs:
			# Bug fix: this previously called the undefined name
			# `figure(figure)` (NameError); select the requested figure.
			pylab.figure(kwargs['figure'])
		
		# Plot the individuals
		self.plotData(**kwargs)

		# Plot the mean
		if self.mean:
			if self.meanStyle == 'model' or (self.meanStyle == None and self.model and self.estimateData):
				# Model mean
				if not self.estimatedTimecourses:
					if self.estimateData == None:
						knots = kwargs.get('knots',None)
						if knots is None:
							# Default knots: 10 evenly spaced points across
							# the observed x range of the grouped data.
							mn = numpy.ma.min([numpy.ma.min(self.groupedData[group]['x']) for group in self.groupedData.keys()])
							mx = numpy.ma.max([numpy.ma.max(self.groupedData[group]['x']) for group in self.groupedData.keys()])
							knots = numpy.arange(mn,mx,(mx-mn)/10.)
						self.estimateData = ModelData()
						individual = (self.individuals[0],'subject1') if not self.individuals is None and len(self.individuals) > 0 else None
						self.estimateData.makeEstimate(self.y,self.x,knots,individual=individual,extraKeys=self.data.samples[0].keys())

					if self.derivedValues:
						self.estimateData.calculateDerivedValues(self.derivedValues)

					self.estimates = self.model.getEstimates(self.estimateData,self.groups,self.y,self.groupValues,self.fixedValues)
					y = [sample[self.y] for sample in self.estimates.samples]
					if numpy.any(numpy.isnan(y)):
						print("There are nans in plotter's estimate data. Probably you forgot to specify a group or group value")
					if not self.estimates is None:
						self.estimatedTimecourses,_ = self.getTimecourses(self.estimates,**kwargs)
					else:
						print('Plotter could not create estimates. Did you forget to specify something?')
						return
				if prepareOnly:
					return
				
				self.plotEstimatedTimecourses(self.estimatedTimecourses,**kwargs)
				
			# Bug fix: guard .lower() so an explicit meanStyle=None no longer
			# raises AttributeError before its intended fallback is evaluated.
			if ((self.meanStyle is not None and self.meanStyle.lower() == 'loess') or (self.meanStyle == None and self.model == None)):
				# LOESS mean
				self.loess = {}
				for group in self.groupedData.keys():
					try:
						loess = Loess(None,None,span=self.span)
						loess.setData(x=self.groupedData[group]['x'],y=self.groupedData[group]['y'],weights=self.groupedData[group]['weights'])
						mn = min(self.groupedData[group]['x']); mx = max(self.groupedData[group]['x'])
						xs = numpy.arange(mn,mx,(mx-mn)/1000.)
						pred,ci = loess.predict(xs)

						# Unit conversions are expression suffixes, e.g. '* 60'.
						if self.xUnitConversion:
							xs = eval('x ' + self.xUnitConversion,{'x':numpy.array(xs)},globals())
						if self.yUnitConversion:
							pred = eval('x ' + self.yUnitConversion,{'x':numpy.array(pred)},globals())						
							ci = eval('x ' + self.yUnitConversion,{'x':numpy.array(ci)},globals())						
						
						label = group;
						if self.includeN:
							label += ' N=%d' % self.groupedData[group]['N']
						pylab.plot(xs,pred,color=self.groupFormatKey[group]['color'],alpha=self.malpha,label=label,linewidth=self.mLinewidth,linestyle=self.groupFormatKey[group]['style'])
						pylab.fill_between(xs,pred-ci,pred+ci,color=self.groupFormatKey[group]['color'],alpha=self.cialpha)
						self.loess[group] = loess
					except:
						# Best-effort: skip groups whose LOESS fit fails.
						print("Error building loess curve for %s" % group)
						traceback.print_exc()
						print()

			if self.meanStyle == 'binned':
				# Binned mean
				self.binnedMean = {}
				if not self.binFunction:
					# Default binning: ~100 bins across the narrowest group's range.
					delta = 1000000000000000
					for group in self.groupedData.keys():
						x = numpy.array(self.groupedData[group]['x'])
						delta = min([1. / ((max(x)-min(x)) / 100.),delta])
					self.binFunction = "numpy.around(x*%f).astype(numpy.int16) / %f" % (delta,delta)
				for group in self.groupedData.keys():
					x = numpy.array(self.groupedData[group]['x'])
					y = numpy.array(self.groupedData[group]['y'])
					binnedX = eval(self.binFunction,{'x':x},globals())
					newY = []
					newX = []
					ci = []
					N = []
					for i in numpy.unique(binnedX):
						# Average within each bin, ignoring NaN/inf values.
						vals = numpy.ma.masked_invalid(y[numpy.where(binnedX==i)[0]])
						newX.append(i)
						newY.append(numpy.ma.average(vals))
						if self.errorBars == '95':
							ci.append(numpy.ma.std(vals)/numpy.ma.sqrt(numpy.ma.count(vals))*1.96)
						elif self.errorBars == 'sd':
							ci.append(numpy.ma.std(vals))
						N.append(numpy.ma.count(vals))
					newY = numpy.array(newY); ci = numpy.array(ci)
					label = group 
					if self.includeN:
						label += ' N=%d' % self.groupedData[group]['N']

					if self.xUnitConversion:
						newX = eval('x ' + self.xUnitConversion,{'x':numpy.array(newX)},globals())
					if self.yUnitConversion:
						newY = eval('x ' + self.yUnitConversion,{'x':numpy.array(newY)},globals())						
						ci = eval('x ' + self.yUnitConversion,{'x':numpy.array(ci)},globals())						

					pylab.plot(newX,newY,color=self.groupFormatKey[group]['color'],alpha=self.malpha,label=label,linewidth=self.mLinewidth,linestyle=self.groupFormatKey[group]['style'])
					pylab.fill_between(newX,newY-ci,newY+ci,color=self.groupFormatKey[group]['color'],alpha=self.cialpha)
					self.binnedMean[group] = dict(x=newX,y=newY,ci=ci)
			
		pylab.xlabel(self.x)
		pylab.ylabel(self.y)
		if self.drawLegend:
			pylab.legend()
示例#18
0
	def plot(self,**kwargs):
		"""Draw the individual timecourses and the group mean.

		Keyword arguments matching plotter attributes (e.g. ``groups``,
		``meanStyle``) override those attributes for this call and are
		consumed; the remainder are forwarded to the plotting helpers.
		With ``prepareOnly=True``, grouped data and model estimates are
		computed but nothing is drawn.
		"""
		prepareOnly = kwargs.get('prepareOnly',False)
		# Any kwarg naming an existing attribute overrides it for this call.
		for k in list(kwargs.keys()):
			if hasattr(self,k):
				setattr(self,k,kwargs.pop(k))
		if 'figure' in kwargs:
			# Bug fix: this previously called the undefined name
			# `figure(figure)` (NameError); select the requested figure.
			pylab.figure(kwargs['figure'])
		
		# Plot the individuals
		self.plotData(**kwargs)

		# Plot the mean
		if self.mean:
			if self.meanStyle == 'model' or (self.meanStyle == None and self.model and self.estimateData):
				# Model mean
				if not self.estimatedTimecourses:
					if self.estimateData == None:
						knots = kwargs.get('knots',None)
						if knots is None:
							# Default knots: 10 evenly spaced points across
							# the observed x range of the grouped data.
							mn = numpy.ma.min([numpy.ma.min(self.groupedData[group]['x']) for group in self.groupedData.keys()])
							mx = numpy.ma.max([numpy.ma.max(self.groupedData[group]['x']) for group in self.groupedData.keys()])
							knots = numpy.arange(mn,mx,(mx-mn)/10.)
						self.estimateData = ModelData()
						individual = (self.individuals[0],'subject1') if not self.individuals is None and len(self.individuals) > 0 else None
						self.estimateData.makeEstimate(self.y,self.x,knots,individual=individual,extraKeys=self.data.samples[0].keys())

					if self.derivedValues:
						self.estimateData.calculateDerivedValues(self.derivedValues)

					self.estimates = self.model.getEstimates(self.estimateData,self.groups,self.y,self.groupValues,self.fixedValues)
					y = [sample[self.y] for sample in self.estimates.samples]
					if numpy.any(numpy.isnan(y)):
						print("There are nans in plotter's estimate data. Probably you forgot to specify a group or group value")
					if not self.estimates is None:
						self.estimatedTimecourses,_ = self.getTimecourses(self.estimates,**kwargs)
					else:
						print('Plotter could not create estimates. Did you forget to specify something?')
						return
				if prepareOnly:
					return
				
				self.plotEstimatedTimecourses(self.estimatedTimecourses,**kwargs)
				
			# Bug fix: guard .lower() so an explicit meanStyle=None no longer
			# raises AttributeError before its intended fallback is evaluated.
			if ((self.meanStyle is not None and self.meanStyle.lower() == 'loess') or (self.meanStyle == None and self.model == None)):
				# LOESS mean
				self.loess = {}
				for group in self.groupedData.keys():
					try:
						loess = Loess(None,None,span=self.span)
						loess.setData(x=self.groupedData[group]['x'],y=self.groupedData[group]['y'],weights=self.groupedData[group]['weights'])
						mn = min(self.groupedData[group]['x']); mx = max(self.groupedData[group]['x'])
						xs = numpy.arange(mn,mx,(mx-mn)/1000.)
						pred,ci = loess.predict(xs)

						# Unit conversions are expression suffixes, e.g. '* 60'.
						if self.xUnitConversion:
							xs = eval('x ' + self.xUnitConversion,{'x':numpy.array(xs)},globals())
						if self.yUnitConversion:
							pred = eval('x ' + self.yUnitConversion,{'x':numpy.array(pred)},globals())						
							ci = eval('x ' + self.yUnitConversion,{'x':numpy.array(ci)},globals())						
						
						label = group;
						if self.includeN:
							label += ' N=%d' % self.groupedData[group]['N']
						pylab.plot(xs,pred,color=self.groupFormatKey[group]['color'],alpha=self.malpha,label=label,linewidth=self.mLinewidth,linestyle=self.groupFormatKey[group]['style'])
						pylab.fill_between(xs,pred-ci,pred+ci,color=self.groupFormatKey[group]['color'],alpha=self.cialpha)
						self.loess[group] = loess
					except:
						# Best-effort: skip groups whose LOESS fit fails.
						print("Error building loess curve for %s" % group)
						traceback.print_exc()
						print()

			if self.meanStyle == 'binned':
				# Binned mean
				self.binnedMean = {}
				if not self.binFunction:
					# Default binning: ~100 bins across the narrowest group's range.
					delta = 1000000000000000
					for group in self.groupedData.keys():
						x = numpy.array(self.groupedData[group]['x'])
						delta = min([1. / ((max(x)-min(x)) / 100.),delta])
					self.binFunction = "numpy.around(x*%f).astype(numpy.int16) / %f" % (delta,delta)
				for group in self.groupedData.keys():
					x = numpy.array(self.groupedData[group]['x'])
					y = numpy.array(self.groupedData[group]['y'])
					binnedX = eval(self.binFunction,{'x':x},globals())
					newY = []
					newX = []
					ci = []
					N = []
					for i in numpy.unique(binnedX):
						# Average within each bin, ignoring NaN/inf values.
						vals = numpy.ma.masked_invalid(y[numpy.where(binnedX==i)[0]])
						newX.append(i)
						newY.append(numpy.ma.average(vals))
						if self.errorBars == '95':
							ci.append(numpy.ma.std(vals)/numpy.ma.sqrt(numpy.ma.count(vals))*1.96)
						elif self.errorBars == 'sd':
							ci.append(numpy.ma.std(vals))
						N.append(numpy.ma.count(vals))
					newY = numpy.array(newY); ci = numpy.array(ci)
					label = group 
					if self.includeN:
						label += ' N=%d' % self.groupedData[group]['N']

					if self.xUnitConversion:
						newX = eval('x ' + self.xUnitConversion,{'x':numpy.array(newX)},globals())
					if self.yUnitConversion:
						newY = eval('x ' + self.yUnitConversion,{'x':numpy.array(newY)},globals())						
						ci = eval('x ' + self.yUnitConversion,{'x':numpy.array(ci)},globals())						

					pylab.plot(newX,newY,color=self.groupFormatKey[group]['color'],alpha=self.malpha,label=label,linewidth=self.mLinewidth,linestyle=self.groupFormatKey[group]['style'])
					pylab.fill_between(newX,newY-ci,newY+ci,color=self.groupFormatKey[group]['color'],alpha=self.cialpha)
					self.binnedMean[group] = dict(x=newX,y=newY,ci=ci)
			
		pylab.xlabel(self.x)
		pylab.ylabel(self.y)
		if self.drawLegend:
			pylab.legend()