# NOTE(review): fragment — this chunk starts mid-module; the stray `"""` below
# looks like the closing delimiter of a docstring whose opening is outside this
# view, so the statements above/inside it may not be live code. Python 2
# (print statement, xrange).
if r == rank: print "I am manager {} with {}".format(rank, w)
"""
# Shared solution table: one row per candidate IFS solution. Column 0 appears
# to hold the objective ("Local CMAX" below reads soln_space[:,0]) and the
# remaining w.n columns the per-unit assignment — TODO confirm layout.
solution_lock = mp.Lock()
csoln_space = mp.Array(ctypes.c_float, numifs * (w.n + 1), lock=solution_lock)
# Zero-copy float32 ndarray view over the shared ctypes buffer, reshaped to
# (numifs, w.n + 1).
soln_space = np.frombuffer(csoln_space.get_obj(), dtype=np.float32)
soln_space[:] = 0.0
soln_space.shape = (-1, w.n + 1)
initshared_soln(csoln_space)
#Create a put/get memory window on each machine
# NOTE(review): Win.Create's second positional argument is the displacement
# unit; passing soln_space.size (element count) rather than the element byte
# size looks suspicious — verify against mpi4py's Win.Create signature.
window = MPI.Win.Create(soln_space, soln_space.size, info, comm)
# Fan out one IFS worker process per local core; workers presumably fill rows
# of the shared soln_space under solution_lock — confirm in the IFS class.
jobs = []
for i in xrange(nlocalcores):
    p = IFS(attribute, w, lock=solution_lock, pid=i, floor=7)
    jobs.append(p)
    p.start()
for j in jobs:
    j.join()
#Local CMAX
localmax = np.max(soln_space[:,0])
#This is a Python type gather, I should move to a np array gather, maybe?
globalmax = comm.allgather(localmax)
max_globalmax = nregions = max(globalmax)
#Synchronize after IFS are generate without enclaves assignment
comm.Barrier()
group = window.Get_group()
def __init__(self, states, egraph, params, rval):
    # Initialize via the IFS base class, then stash the parameter set.
    #
    # NOTE(review): `routines` is not defined in this scope — it is neither a
    # parameter nor visible in this chunk — so as written this raises
    # NameError. Possibly `egraph` (or a module-level `routines`) was
    # intended; confirm against IFS.__init__'s signature before fixing.
    # NOTE(review): `egraph` and `rval` are accepted but never used here.
    IFS.__init__(self, states, routines)
    self.params = params
i = 0 while i != 3: #Choosing the search print "-------------------------------------------------------------" print("\nChoose your search options:") print("1. Keyword Search \n2. IFS \n3. Exit\n") i = input() if i == 1: print "You choose Keyword Search" from key import keyword keyword() elif i == 2: print "Your choice is IFS" from ifs import IFS IFS() elif i == 3: print "Exiting...!!" else: print "Invalid Choice..... TRY AGAIN....!!"
# NOTE(review): fragment — this chunk opens mid-way through a
# parser.add_argument(...) call; the call's opening line (and the parser's
# construction) are outside this view.
    dest='progress',
    help='don\'t display files as they are processed')
parser.add_argument(
    '-r', '--norecurse', action='store_false', dest='recurse',
    help='if file contains another IFS, don\'t extract its contents')
args = parser.parse_args()

# Process each input path: extract when it is a packed IFS file, otherwise
# repack the directory back into an IFS.
for f in args.files:
    if args.progress:
        print(f)
    try:
        i = IFS(f)
    except IOError as e:
        # human friendly
        print('{}: {}'.format(os.path.basename(f), str(e)))
        exit(1)
    # Destination derived from the archive's own default output name.
    path = os.path.join(args.out_dir, i.default_out)
    # Refuse to clobber existing output unless --overwrite was given or the
    # user confirms interactively via get_choice().
    if os.path.exists(path) and not args.overwrite:
        if not get_choice('{} exists. Overwrite?'.format(path)):
            continue
    if i.is_file:
        i.extract(args.progress, args.use_cache, args.recurse, args.tex_only,
                  path)
    else:
        i.repack(args.progress, args.use_cache, path)
""" solution_lock = mp.Lock() csoln_space = mp.Array(ctypes.c_float, numifs * (w.n + 1), lock=solution_lock) soln_space = np.frombuffer(csoln_space.get_obj(), dtype=np.float32) soln_space[:] = 0.0 soln_space.shape = (-1, w.n + 1) initshared_soln(csoln_space) #Create a put/get memory window on each machine window = MPI.Win.Create(soln_space, soln_space.size, info, comm) jobs = [] for i in xrange(nlocalcores): p = IFS(attribute, w, lock=solution_lock, pid=i, floor=7) jobs.append(p) p.start() for j in jobs: j.join() #Local CMAX localmax = np.max(soln_space[:, 0]) #This is a Python type gather, I should move to a np array gather, maybe? globalmax = comm.allgather(localmax) max_globalmax = nregions = max(globalmax) #Synchronize after IFS are generate without enclaves assignment comm.Barrier() group = window.Get_group()