Example no. 1
0
 def __init__(self, local_project_results_dir, logfile, local=False, debugging=False):
     self.debug_mode = debugging
     
     if local:
         mig.local_mode_on()
     
     self.main_results_dir = local_project_results_dir 
     self.logfile = logfile
     self.jobs = []
Example no. 2
0
def update_solver_data(name, status="", state=""):
    """
    Refresh the job information for the current time step of solver *name*.

    Loads the pickled solver data for the named job, queries MiG for
    up-to-date info on every job in the latest time step, and merges that
    info (with lower-cased keys) into each job dict in place.

    NOTE(review): despite the original comment, nothing is written back to
    the status file here -- persistence presumably happens elsewhere; the
    *status* and *state* parameters are accepted for interface compatibility
    but are currently unused. Confirm against callers.
    """
    job_data_dir = os.path.join(config.jobdata_directory, name)
    solver_data_path = os.path.join(job_data_dir, config.solver_data_file)

    data_file = open(solver_data_path)
    try:
        # Hold an exclusive lock while reading so we never see a half-written
        # pickle from a concurrent writer; close() releases the lock.
        fcntl.flock(data_file, fcntl.LOCK_EX)
        solver_data = cPickle.load(data_file)
    finally:
        data_file.close()

    if "grid_enabled" in solver_data and not solver_data["grid_enabled"]:
        mig.local_mode_on()

    # There seem to be incidents where MiG does not recognize a job id even
    # though it should; let such errors pass unless they occur 3 times.
    retries = 3
    for job in solver_data["timesteps"][-1]["jobs"]:  # jobs of the current time step (indexed last: -1)
        if not retries:
            break

        try:
            job_info = mig.job_info(job["job_id"])
        except migerror.MigUnknownJobIdError as e:
            log(str(e))
            retries -= 1
            continue

        # Normalize MiG's keys to lower case by building a fresh dict rather
        # than popping/re-inserting while iterating over the same dict.
        job_info = dict((key.lower(), value) for (key, value) in job_info.items())

        job.update(job_info)
Example no. 3
0
     
    mig.test_connection() # Fail fast if we cannot reach the MiG server
    mpi_file = "example.c" # MPI program source file shipped with the job
    
    # The shell commands to execute on the grid resource using 4 processes.
    # The source must be compiled on the resource first.
    cmds = ["mpicc -O2 example.c -o example", "$MPI_WRAP mpirun -np 4 ./example Hello"]

    # Specify that we require MPI as a runtime env and use the DIKU vgrid cluster
    specifications = {"RUNTIMEENVIRONMENT":"MPI-WRAP-2.0", "VGRID":"DIKU"}
    # Create and submit the grid job
    job_id = mig.create_job(cmds, input_files=mpi_file, resource_specifications=specifications)
    print "\nJob (ID : %s) submitted. \n\n" % job_id

    # Poll the job status until it finishes, printing progress each round.
    polling_frequency = 10 # seconds
    while not mig.job_finished(job_id):
        job_info = mig.job_info(job_id) # get an info dictionary
        print 'Grid job : %(ID)s \t %(STATUS)s ' % job_info
        time.sleep(polling_frequency) # wait a while before polling again

    # Fetch and print the job's output now that it has completed.
    print mig.job_output(job_id)


if __name__ == "__main__":
    # Command-line flags: -l runs against the local mig emulation instead of
    # the real grid; -d turns on mig's debug output.
    if "-l" in sys.argv:
        mig.local_mode_on()
    if "-d" in sys.argv:
        mig.debug_mode_on()
        
    main()
Example no. 4
0
# Default run configuration; overridden by the command-line flags below.
proc_name = None
grid_enabled = True
# NOTE(review): the timesteps start out as strings here, but the -t flag
# below replaces them with ints -- confirm downstream code accepts both.
start_timestep = str(config.INIT_TIMESTEP)
end_timestep = str(config.FINAL_TIMESTEP)

# Positional arguments: process name, MATLAB launcher script, MATLAB binary.
if len(sys.argv) > 1:
    proc_name = sys.argv[1]
    matlab_exec_sh = sys.argv[2]
    matlab_exec_bin = sys.argv[3]
    
    # -n <num>: number of jobs to create.
    if "-n" in sys.argv:
        pos = sys.argv.index("-n")
        num_jobs = int(sys.argv[pos+1])

    # -l: run locally instead of on the grid; ensure the MATLAB MCR runtime
    # location is exported so local execution can find it.
    if "-l" in sys.argv:
        mig.local_mode_on()
        if not os.getenv("MATLAB_MCR"):
            os.putenv("MATLAB_MCR", config.MCR_path)
            grid_enabled = False
        print "Local mode!"
        
    # -t <start> <end>: restrict the run to a range of time steps.
    if "-t" in sys.argv:
        pos = sys.argv.index("-t")
        start_timestep = int(sys.argv[pos+1])
        end_timestep = int(sys.argv[pos+2])
    
    # -i <files...>: every argument after -i is treated as an input file.
    # NOTE(review): input_files is presumably defined earlier in the file.
    if "-i" in sys.argv:
        pos = sys.argv.index("-i")
        input_files.extend(sys.argv[pos+1:])
    
else: