def setup_vars(self, ofs, wrts):
    matrix = self.sparsity
    isplit = self.isplit
    osplit = self.osplit

    isizes, _ = evenly_distrib_idxs(isplit, matrix.shape[1])
    for i, sz in enumerate(isizes):
        self.add_input('x%d' % i, np.zeros(sz))

    osizes, _ = evenly_distrib_idxs(osplit, matrix.shape[0])
    for i, sz in enumerate(osizes):
        self.add_output('y%d' % i, np.zeros(sz))

    self.declare_partials(of=ofs, wrt=wrts, method=self.method)
def setup(self):
    arr_size = self.options['arr_size']
    comm = self.comm
    rank = comm.rank

    sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
    start = offsets[rank]
    io_size = sizes[rank]
    self.offset = offsets[rank]
    end = start + io_size

    self.add_input('x', val=np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_input('y', val=np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_input('offset', val=-3.0 * np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))

    self.add_output('f_xy', val=np.ones(io_size))

    row_col = np.arange(io_size)
    self.declare_partials('f_xy', ['x', 'y', 'offset'], rows=row_col, cols=row_col)
def verify(inputs, outputs, in_vals=1., out_vals=1., pathnames=False, comm=None,
           final=True, rank=None):
    global_shape = (size,) if final else 'Unavailable'

    inputs = sorted(inputs)
    outputs = sorted(outputs)

    with multi_proc_exception_check(comm):
        if comm is not None:
            sizes, offsets = evenly_distrib_idxs(comm.size, size)
            local_size = sizes[comm.rank]
        else:
            local_size = size

        if rank is None or comm is None or rank == comm.rank:
            test.assertEqual(len(inputs), 1)
            name, meta = inputs[0]
            test.assertEqual(name, 'C2.invec' if pathnames else 'invec')
            test.assertEqual(meta['shape'], (local_size,))
            test.assertEqual(meta['global_shape'], global_shape)
            test.assertTrue(all(meta['val'] == in_vals * np.ones(size)))

            test.assertEqual(len(outputs), 1)
            name, meta = outputs[0]
            test.assertEqual(name, 'C2.outvec' if pathnames else 'outvec')
            test.assertEqual(meta['shape'], (local_size,))
            test.assertEqual(meta['global_shape'], global_shape)
            test.assertTrue(all(meta['val'] == out_vals * np.ones(size)))
def verify(inputs, outputs, in_vals=1., out_vals=1., pathnames=False, comm=None,
           final=True):
    global_shape = (size,) if final else 'Unavailable'

    if comm is not None:
        sizes, offsets = evenly_distrib_idxs(comm.size, size)
        local_size = sizes[comm.rank]
    else:
        local_size = size

    test.assertEqual(len(inputs), 1)
    name, meta = inputs[0]
    test.assertEqual(name, 'C2.invec' if pathnames else 'invec')
    test.assertEqual(meta['shape'], (local_size,))
    test.assertEqual(meta['global_shape'], global_shape)
    test.assertTrue(all(meta['value'] == in_vals * np.ones(local_size)))

    test.assertEqual(len(outputs), 1)
    name, meta = outputs[0]
    test.assertEqual(name, 'C2.outvec' if pathnames else 'outvec')
    test.assertEqual(meta['shape'], (local_size,))
    test.assertEqual(meta['global_shape'], global_shape)
    test.assertTrue(all(meta['value'] == out_vals * np.ones(local_size)))
def setup(self):
    comm = self.comm
    rank = comm.rank

    # this results in 8 entries for proc 0 and 7 entries for proc 1
    # when using 2 processes.
    sizes, offsets = evenly_distrib_idxs(comm.size, self.size)
    start = offsets[rank]
    end = start + sizes[rank]

    self.add_input('invec', np.ones(sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(sizes[rank], float))

    self.input_file = 'distrib_comp_input.dat'
    self.output_file = 'distrib_comp_output.dat'

    self.options['external_input_files'] = [self.input_file]
    self.options['external_output_files'] = [self.output_file]
    self.options['command'] = [
        'python', os.path.join(DIRECTORY, 'extcode_distrib_comp.py'),
        self.input_file, self.output_file
    ]

    # at setup time, set a unique folder to run in
    subdir_name = 'distrib_{}'.format(rank)
    self.run_directory = os.path.join(self.options['toplevel_run_directory'], subdir_name)
    try:
        os.mkdir(self.run_directory)
    except OSError:
        # the directory may already exist from a previous run
        pass
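# A minimal standalone sketch (not part of the snippets above or below) showing
# what evenly_distrib_idxs returns; the import path is assumed to be
# openmdao.utils.array_utils. It splits `size` entries across `comm.size`
# ranks as evenly as possible, handing any leftovers to the lowest ranks,
# which matches the 8/7 split described in the comments above.
from openmdao.utils.array_utils import evenly_distrib_idxs

sizes, offsets = evenly_distrib_idxs(2, 15)
print(sizes)    # [8 7]  -> 8 entries for proc 0, 7 entries for proc 1
print(offsets)  # [0 8]  -> proc 0 starts at index 0, proc 1 at index 8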
def compute(self, inputs, outputs):
    rank = self.comm.rank
    sizes, offsets = evenly_distrib_idxs(self.comm.size, N)

    outputs['y'] = inputs['x'] * np.ones((sizes[rank],))
    if rank == 0:
        outputs['y'][0] = 2.
def setup(self):
    arr_size = self.options['arr_size']
    deriv_type = self.options['deriv_type']
    comm = self.comm
    rank = comm.rank

    sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
    start = offsets[rank]
    io_size = sizes[rank]
    self.offset = offsets[rank]
    end = start + io_size

    self.add_input('x', val=np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_input('y', val=np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_input('a', val=-3.0 * np.ones(io_size),
                   src_indices=np.arange(start, end, dtype=int))

    self.add_output('f_xy', val=np.ones(io_size))

    if deriv_type == 'dense':
        self.declare_partials('f_xy', ['x', 'y', 'a'])
    elif deriv_type == 'sparse':
        row_col = np.arange(io_size)
        self.declare_partials('f_xy', ['x', 'y', 'a'], rows=row_col, cols=row_col)
    else:
        self.declare_partials('f_xy', ['x', 'y', 'a'], method=deriv_type)
def setup(self):
    N = self.options['size']
    rank = self.comm.rank

    self.add_input('x', shape=1, src_indices=rank)

    sizes, offsets = evenly_distrib_idxs(self.comm.size, N)
    self.add_output('y', shape=sizes[rank])

    # automatically infer dimensions without specifying rows, cols
    self.declare_partials('y', 'x')
def setup(self):
    comm = self.comm
    rank = comm.rank

    self.sizes, self.offsets = evenly_distrib_idxs(comm.size, self.arr_size)
    start = self.offsets[rank]
    end = start + self.sizes[rank]

    self.add_input('invec', np.ones(self.sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(self.arr_size, float),
                    shape=np.int32(self.arr_size))
def setup_indeps(isplit, ninputs, indeps_name, comp_name):
    isizes, _ = evenly_distrib_idxs(isplit, ninputs)
    indeps = IndepVarComp()
    conns = []
    for i, sz in enumerate(isizes):
        indep_var = 'x%d' % i
        indeps.add_output(indep_var, np.random.random(sz))
        conns.append((indeps_name + '.' + indep_var, comp_name + '.' + indep_var))

    return indeps, conns
def setup(self):
    comm = self.comm
    rank = comm.rank

    # results in 8 entries for proc 0 and 7 entries for proc 1
    # when using 2 processes.
    sizes, offsets = evenly_distrib_idxs(comm.size, self.size)
    start = offsets[rank]
    end = start + sizes[rank]

    self.add_input('invec', np.ones(sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(sizes[rank], float))
def setup(self):
    # rank was previously undefined here; it must come from the component's comm
    rank = self.comm.rank

    # this results in 8 entries for proc 0 and 7 entries for proc 1
    # when using 2 processes.
    sizes, offsets = evenly_distrib_idxs(self.comm.size, self.size)
    start = offsets[rank]
    end = start + sizes[rank]

    # NOTE: you must specify src_indices here for the input. Otherwise,
    # you'll connect the input to [0:local_input_size] of the
    # full distributed output!
    self.add_input('invec', np.ones(sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('out', 0.0)
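# Standalone illustration (hedged, not from the original source) of the NOTE
# above: without src_indices, every rank would receive the first local_size
# entries of the full distributed output instead of its own slice.
import numpy as np

full_out = np.arange(15.0)                  # the full distributed output
sizes, offsets = [8, 7], [0, 8]             # 2-process split of 15 entries
for rank in range(2):
    start, end = offsets[rank], offsets[rank] + sizes[rank]
    default_block = full_out[:sizes[rank]]  # what you'd get with no src_indices
    own_block = full_out[start:end]         # what src_indices delivers
    print(rank, default_block[0], own_block[0])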
def setup(self):
    comm = self.comm
    rank = comm.rank

    arr_size = self.options['arr_size']
    sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
    start = offsets[rank]
    end = start + sizes[rank]

    self.add_input('invec', np.ones(sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(sizes[rank], float))
def setup(self):
    comm = self.comm
    rank = comm.rank

    self.sizes, self.offsets = evenly_distrib_idxs(comm.size, self.arr_size)
    start = self.offsets[rank]
    end = start + self.sizes[rank]

    # need to initialize the variable to have the correct local size
    self.add_input('invec', np.ones(self.sizes[rank], float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(self.arr_size, float))
def setup(self):
    comm = self.comm
    rank = comm.rank

    size = self.options['size']

    # if comm.size is 2 and size is 15, this results in
    # 8 entries for proc 0 and 7 entries for proc 1
    sizes, _ = evenly_distrib_idxs(comm.size, size)
    self.mysize = mysize = sizes[rank]

    # don't set src_indices on the input, just use default behavior
    self.add_input('invec', np.ones(mysize, float))
    self.add_output('outvec', np.ones(mysize, float))
def setup(self):
    self.add_input('a', val=10., units='m')

    rank = self.comm.rank
    GLOBAL_SIZE = 5
    sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

    self.add_output('states', shape=int(sizes[rank]))
    self.add_output('out_var', shape=1)

    self.local_size = sizes[rank]

    self.linear_solver = PETScKrylov()
def setup(self):
    self.add_input('a', val=10., units='m')

    rank = self.comm.rank
    GLOBAL_SIZE = 15
    sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

    self.add_output('states', shape=int(sizes[rank]))
    self.add_output('out_var', shape=1)

    self.local_size = sizes[rank]

    self.linear_solver = om.PETScKrylov()
    self.linear_solver.precon = om.LinearUserDefined(solve_function=self.mysolve)
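# Hedged sketch of the `mysolve` hooked up above. The body is a placeholder,
# and the (d_outputs, d_residuals, mode) signature is assumed from OpenMDAO's
# LinearUserDefined convention; consult the installed version's docs before
# relying on it.
def mysolve(self, d_outputs, d_residuals, mode):
    if mode == 'fwd':
        # solve for d_outputs given d_residuals (identity placeholder)
        d_outputs['states'] = d_residuals['states']
    else:  # 'rev'
        d_residuals['states'] = d_outputs['states']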
def setup(self):
    arr_size = self.options['arr_size']

    self.add_input('x', val=1., distributed=False, shape=arr_size)
    self.add_input('y', val=1., distributed=False, shape=arr_size)

    sizes, offsets = evenly_distrib_idxs(self.comm.size, arr_size)
    self.start = offsets[self.comm.rank]
    self.end = self.start + sizes[self.comm.rank]

    self.a = -3.0 + 0.6 * np.arange(self.start, self.end)

    self.add_output('f_xy', shape=len(self.a), distributed=True)
    self.add_output('f_sum', shape=1, distributed=False)

    self.declare_coloring(wrt='*', method='fd')
def setup(self):
    self.add_input('a', val=10., units='m')

    rank = self.comm.rank
    GLOBAL_SIZE = 15
    sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

    self.add_output('states', shape=int(sizes[rank]))
    self.add_output('out_var', shape=1)

    self.local_size = sizes[rank]

    self.linear_solver = PETScKrylov()
    self.linear_solver.precon = LinearUserDefined(self.mysolve)
def setup(self):
    nn = self.options['num_nodes']
    comm = self.comm
    rank = comm.rank

    sizes, offsets = evenly_distrib_idxs(comm.size, nn)
    start = offsets[rank]
    end = start + sizes[rank]

    self.add_input('x1', val=np.ones(sizes[rank]),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('x0dot', val=np.ones(sizes[rank]))

    r = c = np.arange(sizes[rank])
    self.declare_partials(of='x0dot', wrt='x1', rows=r, cols=c)
def test_distrib_record_driver(self):
    # create distributed variables of different sizes to catch mismatched collective calls
    sizes = [7, 10, 12, 25, 33, 42]

    prob = om.Problem()
    ivc = prob.model.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
    for n, size in enumerate(sizes):
        local_sizes, _ = evenly_distrib_idxs(prob.comm.size, size)
        local_size = local_sizes[prob.comm.rank]
        ivc.add_output(f'in{n}', np.ones(local_size), distributed=True)
        prob.model.add_design_var(f'in{n}')

    prob.model.add_subsystem('adder', DistributedAdder(sizes), promotes=['*'])
    prob.model.add_subsystem('summer', Summer(sizes), promotes_outputs=['sum'])
    for n, size in enumerate(sizes):
        prob.model.promotes('summer', inputs=[f'summand{n}'],
                            src_indices=om.slicer[:], src_shape=size)

    prob.model.add_objective('sum')

    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['record_objectives'] = True
    prob.driver.recording_options['record_constraints'] = True
    prob.driver.recording_options['includes'] = [f'out{n}' for n in range(len(sizes))]

    prob.driver.add_recorder(self.recorder)

    prob.setup()
    t0, t1 = run_driver(prob)
    prob.cleanup()

    coordinate = [0, 'Driver', (0,)]

    expected_desvars = {}
    for n in range(len(sizes)):
        expected_desvars[f'ivc.in{n}'] = prob.get_val(f'ivc.in{n}', get_remote=True)

    expected_objectives = {"summer.sum": prob['summer.sum']}

    expected_outputs = expected_desvars.copy()
    for n in range(len(sizes)):
        expected_outputs[f'adder.out{n}'] = prob.get_val(f'adder.out{n}', get_remote=True)

    if prob.comm.rank == 0:
        expected_outputs.update(expected_objectives)

        expected_data = ((coordinate, (t0, t1), expected_outputs, None, None),)
        assertDriverIterDataRecorded(self, expected_data, self.eps)
def setup(self):
    comm = self.comm
    rank = comm.rank

    size = self.options['size']

    # if comm.size is 2 and size is 15, this results in
    # 8 entries for proc 0 and 7 entries for proc 1
    sizes, _ = evenly_distrib_idxs(comm.size, size)
    mysize = sizes[rank]

    self.add_input('invec', np.ones(mysize, float), distributed=True)
    self.add_output('outvec', np.ones(mysize, float), distributed=True)
def setup(self):
    self.options['distributed'] = True

    self.add_input('a', val=10., units='m', src_indices=[0])

    rank = self.comm.rank
    GLOBAL_SIZE = 5
    sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

    self.add_output('states', shape=int(sizes[rank]))
    self.add_output('out_var', shape=1)

    self.local_size = sizes[rank]

    self.linear_solver = om.PETScKrylov()
def setup(self):
    self.options['distributed'] = True

    self.add_input('a', val=10., units='m', src_indices=[0])

    rank = self.comm.rank
    GLOBAL_SIZE = 5
    sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

    self.add_output('states', shape=int(sizes[rank]))
    self.add_output('out_var', shape=1)

    self.local_size = sizes[rank]

    self.linear_solver = PETScKrylov()
def setup(self): """ specify the local sizes of the variables and which specific indices this specific distributed component will handle. Indices do NOT need to be sequential or contiguous! """ comm = self.comm rank = comm.rank for n, size in enumerate(self.sizes): # NOTE: evenly_distrib_idxs is a helper function to split the array # up as evenly as possible local_sizes, _ = evenly_distrib_idxs(comm.size, size) local_size = local_sizes[rank] self.add_input(f'in{n}', val=np.zeros(local_size, float), distributed=True) self.add_output(f'out{n}', val=np.zeros(local_size, float), distributed=True)
def setup(self):
    comm = self.comm
    rank = comm.rank

    arr_size = self.options['arr_size']
    sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
    self.sizes = sizes
    self.offsets = offsets

    start = offsets[rank]
    end = start + sizes[rank]

    # don't set src_indices on the input and just use default behavior
    self.add_input('invec', np.ones(sizes[rank], float), distributed=True)
    self.add_output('outvec', np.ones(sizes[rank], float), distributed=True)
def setup(self): outs = set() allvars = set() exprs = self._exprs kwargs = self._kwargs comm = self.comm rank = comm.rank if len(self._exprs) > comm.size: raise RuntimeError( "DistribExecComp only supports up to 1 expression per MPI process." ) if len(self._exprs) < comm.size: # repeat the last expression for any leftover procs self._exprs.extend([self._exprs[-1]] * (comm.size - len(self._exprs))) self._exprs = [self._exprs[rank]] # find all of the variables and which ones are outputs for expr in exprs: lhs, _ = expr.split('=', 1) outs.update(self._parse_for_out_vars(lhs)) allvars.update(self._parse_for_vars(expr)) sizes, offsets = evenly_distrib_idxs(comm.size, self.arr_size) start = offsets[rank] end = start + sizes[rank] for name in outs: if name not in kwargs or not isinstance(kwargs[name], dict): kwargs[name] = {} kwargs[name]['value'] = numpy.ones(sizes[rank], float) for name in allvars: if name not in outs: if name not in kwargs or not isinstance(kwargs[name], dict): kwargs[name] = {} meta = kwargs[name] meta['value'] = numpy.ones(sizes[rank], float) meta['src_indices'] = numpy.arange(start, end, dtype=int) super(DistribExecComp, self).setup()
def setup(self):
    comm = self.comm
    rank = comm.rank

    size = self.options['size']

    # if comm.size is 2 and size is 15, this results in
    # 8 entries for proc 0 and 7 entries for proc 1
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
    mysize = sizes[rank]
    start = offsets[rank]
    end = start + mysize

    self.add_input('invec', np.ones(mysize, float),
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('outvec', np.ones(mysize, float))
def setup(self):
    comm = self.comm
    rank = comm.rank

    size_total = self.options['size']

    # Distribute the x and y vectors across each processor as evenly as possible
    sizes, offsets = evenly_distrib_idxs(comm.size, size_total)
    start = offsets[rank]
    end = start + sizes[rank]
    self.size_local = size_local = sizes[rank]

    # Get the local slice of A that this processor will be working with
    self.A_local = A[start:end, :]

    self.add_input('x', np.ones(size_local, float), distributed=True,
                   src_indices=np.arange(start, end, dtype=int))
    self.add_output('y', np.ones(size_local, float), distributed=True)
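# Standalone numpy sketch (hedged) of the row-block layout used above: each
# rank owns rows [start:end) of A and produces its own block of y = A @ x,
# assuming the full x is made available in compute() (e.g. via an Allgatherv).
import numpy as np

size_total = 15
A = np.arange(float(size_total * size_total)).reshape(size_total, size_total)
x = np.ones(size_total)
sizes, offsets = [8, 7], [0, 8]

y_blocks = []
for rank in range(2):
    start = offsets[rank]
    end = start + sizes[rank]
    y_blocks.append(A[start:end, :].dot(x))  # this rank's local block of y

assert np.allclose(np.concatenate(y_blocks), A.dot(x))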
def setup(self):
    comm = self.comm
    rank = comm.rank

    arr_size = self.options['arr_size']
    sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
    self.sizes = sizes
    self.offsets = offsets

    start = offsets[rank]
    end = start + sizes[rank]

    # don't set src_indices on the input and just use default behavior
    self.add_input('invec', np.ones(sizes[rank], float))
    self.add_output('outvec', np.ones(sizes[rank], float))
    self.declare_partials('outvec', 'invec',
                          rows=np.arange(0, sizes[rank]),
                          cols=np.arange(0, sizes[rank]))
def setup(self): outs = set() allvars = set() exprs = self._exprs kwargs = self._kwargs comm = self.comm rank = comm.rank if len(self._exprs) > comm.size: raise RuntimeError("DistribExecComp only supports up to 1 expression per MPI process.") if len(self._exprs) < comm.size: # repeat the last expression for any leftover procs self._exprs.extend([self._exprs[-1]] * (comm.size - len(self._exprs))) self._exprs = [self._exprs[rank]] # find all of the variables and which ones are outputs for expr in exprs: lhs, _ = expr.split('=', 1) outs.update(self._parse_for_out_vars(lhs)) allvars.update(self._parse_for_vars(expr)) sizes, offsets = evenly_distrib_idxs(comm.size, self.arr_size) start = offsets[rank] end = start + sizes[rank] for name in outs: if name not in kwargs or not isinstance(kwargs[name], dict): kwargs[name] = {} kwargs[name]['value'] = numpy.ones(sizes[rank], float) for name in allvars: if name not in outs: if name not in kwargs or not isinstance(kwargs[name], dict): kwargs[name] = {} meta = kwargs[name] meta['value'] = numpy.ones(sizes[rank], float) meta['src_indices'] = numpy.arange(start, end, dtype=int) super(DistribExecComp, self).setup()
def setup(self): """ specify the local sizes of the variables and which specific indices this specific distributed component will handle. Indices do NOT need to be sequential or contiguous! """ comm = self.comm rank = comm.rank # NOTE: evenly_distrib_idxs is a helper function to split the array # up as evenly as possible sizes, offsets = evenly_distrib_idxs(comm.size,self.options['size']) local_size, local_offset = sizes[rank], offsets[rank] start = local_offset end = local_offset + local_size self.add_input('x', val=np.zeros(local_size, float), distributed=True, src_indices=np.arange(start, end, dtype=int)) self.add_output('y', val=np.zeros(local_size, float), distributed=True)
def setup(self):
    comm = self.comm
    rank = comm.rank

    size = self.options['size']

    # if comm.size is 2 and size is 15, this results in
    # 8 entries for proc 0 and 7 entries for proc 1
    sizes, _ = evenly_distrib_idxs(comm.size, size)
    self.mysize = mysize = sizes[rank]

    # don't set src_indices on the input, just use default behavior
    self.add_input('invec', np.ones(mysize, float))
    self.add_output('outvec', np.ones(mysize, float))

    # declare partial derivatives (diagonal of mysize)
    self.declare_partials('outvec', 'invec',
                          rows=np.arange(0, mysize),
                          cols=np.arange(0, mysize))
def setup(self): """ specify the local sizes of the variables and which specific indices this specific distributed component will handle. Indices do NOT need to be sequential or contiguous! """ comm = self.comm rank = comm.rank # NOTE: evenly_distrib_idxs is a helper function to split the array # up as evenly as possible sizes, offsets = evenly_distrib_idxs(comm.size,self.options['size']) local_size, local_offset = sizes[rank], offsets[rank] start = local_offset end = local_offset + local_size self.add_input('x', val=np.zeros(local_size, float), src_indices=np.arange(start, end, dtype=int)) self.add_output('y', val=np.zeros(local_size, float))