def add_individuals(target_context, children): target_entity = target_context['__entity__'] id_to_rownum = target_entity.id_to_rownum array = target_entity.array num_rows = len(array) num_birth = len(children) print "%d new %s(s) (%d -> %d)" % (num_birth, target_entity.name, num_rows, num_rows + num_birth), target_entity.array = np.concatenate((array, children)) temp_variables = target_entity.temp_variables for name, temp_value in temp_variables.iteritems(): #FIXME: OUCH, this is getting ugly, I'll need a better way to # differentiate nd-arrays from "entity" variables # I guess having the context contain all entities and a separate # globals namespace should fix this problem if (isinstance(temp_value, np.ndarray) and temp_value.shape == (num_rows,)): extra = get_missing_vector(num_birth, temp_value.dtype) temp_variables[name] = np.concatenate((temp_value, extra)) extra_variables = target_context.extra for name, temp_value in extra_variables.iteritems(): if name == '__globals__': continue if isinstance(temp_value, np.ndarray) and temp_value.shape: extra = get_missing_vector(num_birth, temp_value.dtype) extra_variables[name] = np.concatenate((temp_value, extra)) id_to_rownum_tail = np.arange(num_rows, num_rows + num_birth) target_entity.id_to_rownum = np.concatenate((id_to_rownum, id_to_rownum_tail))
def add_individuals(target_context, children): target_entity = target_context['__entity__'] id_to_rownum = target_entity.id_to_rownum array = target_entity.array num_rows = len(array) num_birth = len(children) print "%d new %s(s) (%d -> %d)" % (num_birth, target_entity.name, num_rows, num_rows + num_birth), target_entity.array = np.concatenate((array, children)) temp_variables = target_entity.temp_variables for name, temp_value in temp_variables.iteritems(): if isinstance(temp_value, np.ndarray) and temp_value.shape: extra = get_missing_vector(num_birth, temp_value.dtype) temp_variables[name] = np.concatenate((temp_value, extra)) extra_variables = target_context.extra for name, temp_value in extra_variables.iteritems(): if name == '__globals__': continue if isinstance(temp_value, np.ndarray) and temp_value.shape: extra = get_missing_vector(num_birth, temp_value.dtype) extra_variables[name] = np.concatenate((temp_value, extra)) id_to_rownum_tail = np.arange(num_rows, num_rows + num_birth) target_entity.id_to_rownum = np.concatenate((id_to_rownum, id_to_rownum_tail))
def add_individuals(target_context, children):
    """Grow the context's entity in place by appending `children`.

    Appends the new rows to the entity's array, pads per-row temporary
    and extra context variables with "missing" values so they stay
    aligned with the array, and extends id_to_rownum so the new ids map
    to the appended row positions.
    """
    target_entity = target_context.entity
    id_to_rownum = target_entity.id_to_rownum
    array = target_entity.array
    num_rows = len(array)
    num_birth = len(children)
    if config.log_level == "processes":
        # end=' ' keeps the cursor on the same output line
        print("%d new %s(s) (%d -> %d)" % (num_birth, target_entity.name,
                                           num_rows, num_rows + num_birth),
              end=' ')
    target_entity.array.append(children)
    temp_variables = target_entity.temp_variables
    # FIXME: this is ugly -- we need a better way to tell per-row
    # nd-arrays apart from "entity" variables. Moving all entities plus a
    # separate globals namespace into the context would only move the
    # problem to the "store" part of Assignment processes, which would
    # then need to distinguish an "entity temp" from a global temp. That
    # could be done by inspecting the generating expressions: no
    # non-aggregated entity var => global temp. Further distinguishing
    # aggregated entity vars from other global temporaries (to store them
    # in the entity somewhere) would be nice but may not be possible.
    # BUG FIX: dict.iteritems() does not exist in Python 3 (this function
    # already uses the Python 3 print function); use items() instead.
    # Only values of existing keys are replaced during iteration, which
    # is safe with a live items() view.
    for name, temp_value in temp_variables.items():
        # only pad 1-d temporaries with one element per existing row
        if (isinstance(temp_value, np.ndarray) and
                temp_value.shape == (num_rows,)):
            extra = get_missing_vector(num_birth, temp_value.dtype)
            temp_variables[name] = np.concatenate((temp_value, extra))
    extra_variables = target_context.entity_data.extra
    for name, temp_value in extra_variables.items():
        if name == '__globals__':
            continue
        if isinstance(temp_value, np.ndarray) and temp_value.shape:
            extra = get_missing_vector(num_birth, temp_value.dtype)
            extra_variables[name] = np.concatenate((temp_value, extra))
    # the rows just appended sit at the end of the array, in order
    id_to_rownum_tail = np.arange(num_rows, num_rows + num_birth)
    target_entity.id_to_rownum = np.concatenate(
        (id_to_rownum, id_to_rownum_tail))
def add_and_drop_fields(self, output_fields):
    """Modify in place so this array's fields match `output_fields`.

    Fields listed in `output_fields` but absent here are created and
    filled with "missing" values; fields present here but not listed are
    deleted.
    """
    wanted_dtype = np.dtype(output_fields)
    wanted = set(wanted_dtype.names)
    current = set(self.dtype.names)
    length = len(self)
    # create the fields we lack, initialised to "missing"
    for name in wanted - current:
        self[name] = get_missing_vector(length, wanted_dtype[name])
    # drop the fields the target layout does not include
    for name in current - wanted:
        del self[name]
def add_and_drop_fields(self, output_fields):
    """Modify in place so this array's fields match `output_fields`.

    Fields listed in `output_fields` but absent here are created: filled
    with their declared default value when one exists in self.dval, with
    "missing" values otherwise. Fields present here but not listed are
    deleted.
    """
    wanted_dtype = np.dtype(output_fields)
    wanted = set(wanted_dtype.names)
    current = set(self.dtype.names)
    defaults = self.dval
    length = len(self)
    # create the fields we lack
    for name in wanted - current:
        if name in defaults:
            # field has a declared default value: use it
            filled = np.empty(length, dtype=wanted_dtype[name])
            filled.fill(defaults[name])
            self[name] = filled
        else:
            self[name] = get_missing_vector(length, wanted_dtype[name])
    # drop the fields the target layout does not include
    for name in current - wanted:
        del self[name]