    def testMatrixSymmetries(self):
        """
        X and P should have the same pattern as the distance matrix defined by
        the lattice.
        """
        precision = 20
        polygon = self.calc.square_lattice(5)
        X, P = self.calc.correlations(polygon, self.maple_link, precision)

        # Cast X and P down to machine floats so entries can be compared for
        # (in)equality.
        X = sympy.matrix2numpy(X)
        P = sympy.matrix2numpy(P)
        X = X.astype('float')
        P = P.astype('float')

        # Compute the distance matrix of the lattice.
        D = spatial.distance.cdist(polygon, polygon)

        # Get the equality pattern of the distance matrix.
        D_pat = sp.zeros(D.shape)
        getSignatureMatrix(D_pat, sp.nditer(D), D.shape)

        # Get the pattern of X and P.
        X_pat = sp.zeros(X.shape)
        P_pat = sp.zeros(P.shape)
        getSignatureMatrix(X_pat, sp.nditer(X), X.shape)
        getSignatureMatrix(P_pat, sp.nditer(P), P.shape)

        # Check that the patterns match: the signature matrices should be
        # identical, i.e. their difference should have no nonzero entries.
        eq_(False, (D_pat - X_pat).any())
        eq_(False, (D_pat - P_pat).any())
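# `getSignatureMatrix` is used by the test above but is not shown in this
# collection. A minimal sketch of what the test assumes is below: it fills
# `out` so that two entries receive the same integer label iff the
# corresponding input values are (approximately) equal, recording only the
# equality pattern of the matrix. The body and the `decimals` rounding are
# assumptions, not the original implementation.
import numpy as np

def getSignatureMatrix(out, values_iter, shape, decimals=12):
    labels = {}  # maps each rounded value to a small integer label
    flat = np.empty(int(np.prod(shape)))
    for i, v in enumerate(values_iter):
        key = round(float(v), decimals)
        labels.setdefault(key, len(labels))
        flat[i] = labels[key]
    out[...] = flat.reshape(shape)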
def calculate_signal_discrimination_weber_law(data_flag,
					nonzero_bounds=[0.5, 1.5],
					zero_bound=1./10.,
					threshold_pct_nonzero=75.0,
					threshold_pct_zero=75.0):

	# Unpack the specs file; only the iterated variables are needed here.
	list_dict = read_specs_file(data_flag)
	iter_vars = list_dict['iter_vars']

	iter_vars_dims = []
	for iter_var in iter_vars:
		iter_vars_dims.append(len(iter_vars[iter_var]))

	print('Loading object list...')
	CS_object_array = load_aggregated_object_list(iter_vars_dims, data_flag)
	print('...loaded.')

	# Code not written for Kk_split = 0
	assert CS_object_array[0, 0].Kk_split != 0, "Need nonzero Kk_split"
	
	# Data structures 
	errors_zero = sp.zeros(iter_vars_dims)
	errors_nonzero_2 = sp.zeros(iter_vars_dims)
	errors_nonzero = sp.zeros(iter_vars_dims)
	successes_2 = sp.zeros(iter_vars_dims)
	successes = sp.zeros(iter_vars_dims)
	
	# Calculate binary errors
	it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])	
	while not it.finished:
		errors = binary_errors_dual_odor(CS_object_array[it.multi_index], 
								nonzero_bounds=nonzero_bounds,
								zero_bound=zero_bound)
		
		errors_nonzero[it.multi_index] = errors['errors_nonzero']
		errors_nonzero_2[it.multi_index] = errors['errors_nonzero_2']
		errors_zero[it.multi_index] = errors['errors_zero']
		it.iternext()
	
	# Calculate success ratios from binary errors
	it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])
	while not it.finished:
		successes_2[it.multi_index] = binary_success(
					errors_nonzero_2[it.multi_index], 
					errors_zero[it.multi_index], 
					threshold_pct_nonzero=threshold_pct_nonzero,
					threshold_pct_zero=threshold_pct_zero)
		successes[it.multi_index] = binary_success(
					errors_nonzero[it.multi_index], 
					errors_zero[it.multi_index], 
					threshold_pct_nonzero=threshold_pct_nonzero,
					threshold_pct_zero=threshold_pct_zero)
		it.iternext()
		
	save_signal_discrimination_weber_law(successes, successes_2, data_flag)
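# `binary_success` is defined elsewhere; a plausible sketch consistent with
# the calls above is given here. That each argument is a percentage of
# correctly decoded components is an assumption.
def binary_success(pct_nonzero, pct_zero,
                   threshold_pct_nonzero=75.0, threshold_pct_zero=75.0):
    # A trial succeeds only if both the nonzero and the zero signal
    # components clear their respective accuracy thresholds.
    return int(pct_nonzero >= threshold_pct_nonzero and
               pct_zero >= threshold_pct_zero)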
def calculate_signal_decoding_weber_law(data_flag,
                                        nonzero_bounds=[0.5, 1.5],
                                        zero_bound=1. / 10.,
                                        threshold_pct_nonzero=85.0,
                                        threshold_pct_zero=85.0):

    # Unpack the specs file; `iter_vars` and `params` are used below.
    list_dict = read_specs_file(data_flag)
    iter_vars = list_dict['iter_vars']
    params = list_dict['params']

    iter_vars_dims = []
    for iter_var in iter_vars:
        iter_vars_dims.append(len(iter_vars[iter_var]))

    print('Loading object list...')
    CS_object_array = load_aggregated_object_list(iter_vars_dims, data_flag)
    print('...loaded.')

    # Data structures
    errors_nonzero = sp.zeros(iter_vars_dims)
    errors_zero = sp.zeros(iter_vars_dims)
    epsilons = sp.zeros((iter_vars_dims[0], iter_vars_dims[1], params['Mm']))
    gains = sp.zeros(
        (iter_vars_dims[0], iter_vars_dims[1], params['Mm'], params['Nn']))
    successes = sp.zeros(iter_vars_dims)

    # Calculate binary errors
    it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])
    while not it.finished:
        errors = binary_errors(CS_object_array[it.multi_index],
                               nonzero_bounds=nonzero_bounds,
                               zero_bound=zero_bound)

        errors_nonzero[it.multi_index] = errors['errors_nonzero']
        errors_zero[it.multi_index] = errors['errors_zero']
        it.iternext()

    # Calculate success ratios from binary errors
    it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])
    while not it.finished:
        successes[it.multi_index] = binary_success(
            errors_nonzero[it.multi_index],
            errors_zero[it.multi_index],
            threshold_pct_nonzero=threshold_pct_nonzero,
            threshold_pct_zero=threshold_pct_zero)
        epsilons[it.multi_index] = CS_object_array[it.multi_index].eps
        gains[it.multi_index] = CS_object_array[it.multi_index].Rr
        it.iternext()

    save_signal_decoding_weber_law(successes, gains, epsilons, data_flag)
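# The `sp.nditer(sp.zeros(dims), flags=['multi_index'])` loop used throughout
# these examples is just a way to visit every index tuple of an N-D grid. A
# self-contained demonstration with numpy (modern SciPy no longer re-exports
# `zeros` and `nditer`):
import numpy as np

dims = (2, 3)
it = np.nditer(np.zeros(dims), flags=['multi_index'])
visited = []
while not it.finished:
    visited.append(it.multi_index)
    it.iternext()

# np.ndindex walks the same index tuples in the same (C) order.
assert visited == list(np.ndindex(*dims))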
    def write_iop_to_file(self, wavelengths, iop, file_name):
        lg.info('Writing :: ' + file_name)
        with open(file_name, 'w') as f:
            # First line: the wavelengths, comma-separated.
            for i, wave in enumerate(scipy.nditer(wavelengths)):
                if i < self.wavelengths.shape[1] - 1:
                    f.write(str(wave) + ',')
                else:
                    f.write(str(wave))
            f.write('\n')

            # Second line: the IOP values, comma-separated.
            for i, _iop in enumerate(scipy.nditer(iop)):
                if i < iop.shape[1] - 1:
                    f.write(str(_iop) + ',')
                else:
                    f.write(str(_iop))
	def init_nn_frontend(self):
		"""
		Initialize the signal array and free energies of receptor complexes
		"""
		
		signal_array = sp.zeros((self.Nn, self.num_signals, 
								len(self.mu_dSs_array)))
		eps_array = sp.zeros((self.Mm, self.num_signals, 
								len(self.mu_dSs_array)))
	
		# Iterate to get signal and energy in [# odor IDs, # odor concs]
		it = sp.nditer(signal_array[0,...], flags=['multi_index'])
		while not it.finished:
			
			self.seed_dSs = it.multi_index[0]
			self.mu_dSs = self.mu_dSs_array[it.multi_index[1]]
			self.mu_dSs_2 = self.mu_dSs_array[it.multi_index[1]]
			
			# Set signal and random energy (and Kk matrices if not set)
			self.set_sparse_signals()
			if (self.Kk1 is None):
				self.set_power_Kk()
			self.set_normal_free_energy()
			# Full index: all of axis 0, then the iterator's
			# multi-index for the remaining axes.
			full_idx = (slice(None),) + it.multi_index
			signal_array[full_idx] = self.Ss
			eps_array[full_idx] = self.eps
			
			it.iternext()
		
		# Flatten with the inner loop over concentration and the outer
		# loop over odor ID. Full shape is (Nn or Mm, (# IDs)*(# concs)).
		self.Ss = sp.reshape(signal_array, (self.Nn, -1))
		self.eps = sp.reshape(eps_array, (self.Mm, -1))
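# The reshape above relies on C-order flattening: the last axis
# (concentration) varies fastest, so the flattened columns come out grouped
# by odor ID with concentration as the inner loop. A toy check with
# hypothetical labels:
import numpy as np

a = np.array([[['id0_c0', 'id0_c1', 'id0_c2'],
               ['id1_c0', 'id1_c1', 'id1_c2']]])  # shape (1, #IDs, #concs)
flat = a.reshape(1, -1)
assert list(flat[0]) == ['id0_c0', 'id0_c1', 'id0_c2',
                         'id1_c0', 'id1_c1', 'id1_c2']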
def merged_bounds(data, t_min, window, start_frac, end_frac):
    start_pos = -1
    end_pos = -1
    # Indices where the slope's sign increases, i.e. local minima of `data`.
    a = (np.diff(np.sign(np.diff(data))) > 0).nonzero()[0] + 1
    b = argrelmin(data)
    peak_v = np.max(data[t_min:t_min + window])
    print("b:", b)
    second = []
    print("a[0]:", a[0])
    for i in scipy.nditer(b):
        # Alternative (disabled): take as start the first sample in the
        # window above max(data[a[0]] * start_frac, 4.5 / chA_spe_size).
        # Here the start is the first local minimum whose height exceeds 1.
        if data[i] > 1:
            start_pos = i
            print("i:", i)
            break

    for j in a:
        if data[j] > 0.2:
            second.append(j)

    for k in scipy.nditer(b):
        if max(a) > k:
            for z in second[1:]:
                # end_frac: the pulse ends at this fraction of the peak
                # height above baseline (chA_spe_size is assumed to be a
                # module-level constant).
                for i_start in range(t_min + window, t_min, -1):
                    if data[i_start] > max(data[z] * end_frac,
                                           4.5 / chA_spe_size):
                        end_pos = i_start
                        break
        else:
            end_pos = b[0]

    return (start_pos, end_pos)
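# The expression that builds `a` above marks the indices where the slope's
# sign increases, i.e. the local minima of the trace. A small self-contained
# check:
import numpy as np

data = np.array([3., 1., 2., 0., 4., 5.])
a = (np.diff(np.sign(np.diff(data))) > 0).nonzero()[0] + 1
assert list(a) == [1, 3]  # data[1] and data[3] are the local minima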
def calculate_tuning_curves(data_flag):

    # Unpack the specs file contents used below.
    list_dict = read_specs_file(data_flag)
    iter_vars = list_dict['iter_vars']
    rel_vars = list_dict['rel_vars']
    fixed_vars = list_dict['fixed_vars']
    params = list_dict['params']
    run_specs = list_dict['run_specs']

    # Get the iterated variable dimensions
    iter_vars_dims = []
    for iter_var in iter_vars:
        iter_vars_dims.append(len(iter_vars[iter_var]))
    it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])

    # Set up array to hold the tuning curves
    tuning_curve = sp.zeros(
        (iter_vars_dims[0], iter_vars_dims[1], params['Nn'], params['Mm']))

    # Set up arrays to hold epsilons and Kk2
    epsilons = sp.zeros((iter_vars_dims[0], iter_vars_dims[1], params['Mm']))
    Kk2s = sp.zeros(
        (iter_vars_dims[0], iter_vars_dims[1], params['Mm'], params['Nn']))

    # Iterate tuning curve calculation over all iterable variables
    while not it.finished:
        iter_var_idxs = it.multi_index

        vars_to_pass = dict()
        vars_to_pass = parse_iterated_vars(iter_vars, iter_var_idxs,
                                           vars_to_pass)
        vars_to_pass = parse_relative_vars(rel_vars, iter_vars, vars_to_pass)
        vars_to_pass = merge_two_dicts(vars_to_pass, fixed_vars)
        vars_to_pass = merge_two_dicts(vars_to_pass, params)

        # Calculate tuning curve
        for iN in range(vars_to_pass['Nn']):
            vars_to_pass['manual_dSs_idxs'] = sp.array([iN])
            obj = single_encode_CS(vars_to_pass, run_specs)
            tuning_curve[iter_var_idxs[0], iter_var_idxs[1], iN, :] = obj.dYy

        epsilons[it.multi_index] = obj.eps
        Kk2s[it.multi_index] = obj.Kk2

        it.iternext()

    save_tuning_curve(tuning_curve, epsilons, Kk2s, data_flag)
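# `merge_two_dicts` as used above is the standard pre-Python-3.5 helper for
# combining dicts; a minimal sketch (that the second dict wins on key
# collisions is an assumption consistent with how fixed_vars and params are
# layered above):
def merge_two_dicts(x, y):
    z = x.copy()  # start from a shallow copy of the first dict
    z.update(y)   # entries of the second dict take precedence
    return z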
	def init_nn_frontend_adapted(self):
		"""
		Initialize the signal array and free energies of receptor complexes, 
		for a system that adapts its response to a background signal.
		"""
		
		signal_array = sp.zeros((self.Nn, self.num_signals, 
								len(self.mu_dSs_array)))
		eps_array = sp.zeros((self.Mm, self.num_signals, 
								len(self.mu_dSs_array)))
	
		# Iterate to get signal and energy in [# odor IDs, # odor concs]
		it = sp.nditer(signal_array[0,...], flags=['multi_index'])
		while not it.finished:
			
			self.seed_dSs = it.multi_index[0]
			self.mu_dSs = self.mu_dSs_array[it.multi_index[1]]
			
			# Just do background to get adapted epsilon to background
			self.mu_dSs_2 = 0
			self.set_sparse_signals()
			if (self.Kk1 is None):
				self.set_power_Kk()
			self.set_adapted_free_energy()
			
			# Now reset the Kk_2 components to re-create full fore+back signal
			self.mu_dSs_2 = self.mu_dSs_array[it.multi_index[1]]
			self.set_sparse_signals()
			
			# Now set the values in signal_array and eps_array
			full_idx = (slice(None),) + it.multi_index
			signal_array[full_idx] = self.Ss
			eps_array[full_idx] = self.eps
			
			it.iternext()
	
		# Signal array shape = (odor-D, (# odor IDs)*(# odor intensities))
		# Flattened -- inner loop is concentration, outer loop is ID
		self.Ss = sp.reshape(signal_array, (self.Nn, -1))
		self.eps = sp.reshape(eps_array, (self.Mm, -1))
    def incidence_matrix(self):
        """Computes incidence matrix of size |V|*|E|
        h(v,e)=1 if v in e
        h(v,e)=0 if v not in e
        
        Returns
        -------
        H: sparse incidence matrix
            sparse incidence matrix of size |V|*|E|
        """
        with Timer() as t_in:
            H = spsp.lil_matrix(
                (sp.shape(sp.unique(self.edge_list.flatten()))[0],
                 sp.shape(self.edge_list)[0]))

            it = sp.nditer(self.edge_list, flags=['multi_index', 'refs_ok'])
            while not it.finished:
                H[it[0], it.multi_index[0]] = 1.0
                it.iternext()

        self.incidence_matrix_timer = t_in.secs
        return H
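# A usage sketch for the incidence matrix, assuming `edge_list` is an
# (|E|, k) integer array whose rows list the vertex ids of each (hyper)edge,
# with ids running over 0..|V|-1. The data below is hypothetical:
import numpy as np
import scipy.sparse as spsp

edge_list = np.array([[0, 1, 2],
                      [1, 3, 0]])  # two edges over four vertices
n_vertices = np.unique(edge_list).size
H = spsp.lil_matrix((n_vertices, edge_list.shape[0]))
it = np.nditer(edge_list, flags=['multi_index', 'refs_ok'])
while not it.finished:
    H[it[0], it.multi_index[0]] = 1.0  # vertex it[0] belongs to this edge
    it.iternext()

assert (H.toarray() == [[1, 1], [1, 1], [1, 0], [0, 1]]).all()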
def aggregate_objects(data_flags, skip_missing=False):
    """
	Aggregate CS objects from separate .pklz files to a single .pklz file.
	
	Args:
		data_flags: Identifiers for saving and loading.
	"""

    if skip_missing == True:
        print("Skipping missing files...will populate with `None`")

    if isinstance(data_flags, str):
        data_flags = [data_flags]

    for data_flag in data_flags:
        list_dict = read_specs_file(data_flag)
        iter_vars = list_dict['iter_vars']
        iter_vars_dims = []
        for iter_var in iter_vars:
            iter_vars_dims.append(len(iter_vars[iter_var]))
        it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])

        obj_list = []
        while not it.finished:
            sys.stdout.flush()
            print(it.multi_index)
            if not skip_missing:
                CS_obj = load_objects(list(it.multi_index), data_flag)
            else:
                try:
                    CS_obj = load_objects(list(it.multi_index), data_flag)
                except (IOError, OSError):
                    print('Skipping item %s...' % list(it.multi_index))
                    CS_obj = None

            obj_list.append(CS_obj)
            it.iternext()

        save_aggregated_object_list(obj_list, data_flag)
def random_matrix(matrix_shape, params, sample_type='normal', seed=0):
    """
    Generate a random matrix with the given distribution.
    """

    if sample_type == 'normal':
        sp.random.seed(seed)
        mean, sigma = params[:2]
        if sigma != 0.:
            return sp.random.normal(mean, sigma, matrix_shape)
        else:
            return mean * sp.ones(matrix_shape)

    elif sample_type == "rank2_row_gaussian":
        sp.random.seed(seed)
        means, sigmas = params[:2]

        assert len(matrix_shape) == 2, \
          "rank2_row_gaussian method needs a 2-D matrix shape"
        nRows, nCols = matrix_shape
        assert len(means) == nRows, "rank2_row_gaussian needs " \
                "mu vector of proper length"
        assert len(sigmas) == nRows, "rank2_row_gaussian needs " \
                "sigma vector of proper length"
        out_matrix = sp.zeros(matrix_shape)

        for iRow in range(nRows):
            out_matrix[iRow, :] = sp.random.normal(means[iRow], sigmas[iRow],
                                                   nCols)
        return out_matrix

    elif sample_type == 'uniform':
        sp.random.seed(seed)
        lo, hi = params[:2]
        return sp.random.uniform(lo, hi, matrix_shape)

    elif sample_type == "rank2_row_uniform":
        sp.random.seed(seed)
        sigmas_lo, sigmas_hi = params[:2]

        assert len(matrix_shape) == 2, \
          "rank2_row_uniform method needs a 2-D matrix shape"
        nRows, nCols = matrix_shape
        assert len(sigmas_lo) == nRows, "rank2_row_uniform needs " \
                "sigma_lo vector of proper length"
        assert len(sigmas_hi) == nRows, "rank2_row_uniform needs " \
                "sigma_hi vector of proper length"
        out_matrix = sp.zeros(matrix_shape)

        for iRow in range(nRows):
            out_matrix[iRow, :] = sp.random.uniform(sigmas_lo[iRow],
                                                    sigmas_hi[iRow], nCols)
        return out_matrix

    elif sample_type == "gaussian_mixture":
        mean1, sigma1, mean2, sigma2, prob_1 = params[:5]
        assert prob_1 <= 1., "Gaussian mixture needs prob_1 <= 1"

        sp.random.seed(seed)
        mixture_idxs = sp.random.binomial(1, prob_1, matrix_shape)
        it = sp.nditer(mixture_idxs, flags=['multi_index'])
        out_vec = sp.zeros(matrix_shape)

        while not it.finished:
            if mixture_idxs[it.multi_index] == 1:
                out_vec[it.multi_index] = sp.random.normal(mean1, sigma1)
            else:
                out_vec[it.multi_index] = sp.random.normal(mean2, sigma2)
            it.iternext()

        return out_vec

    else:
        raise ValueError("Unknown matrix sample_type: %s" % sample_type)
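# Usage sketch for random_matrix; the shapes and parameter values below are
# illustrative only:
m_normal = random_matrix((3, 4), params=[0., 1.],
                         sample_type='normal', seed=1)
m_rows = random_matrix((2, 5), params=[[0., 1.], [0.5, 0.5]],
                       sample_type='rank2_row_gaussian', seed=2)
m_mix = random_matrix((3, 4), params=[0., 1., 5., 0.1, 0.25],
                      sample_type='gaussian_mixture', seed=3)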
def calculate_signal_decoding_weber_law(data_flags,
                                        nonzero_bounds=[0.75, 1.25],
                                        zero_bound=1. / 10.,
                                        threshold_pct_nonzero=100.0,
                                        threshold_pct_zero=100.0):

    for data_flag in data_flags:

        # Unpack the specs file; `iter_vars` and `params` are used below.
        list_dict = read_specs_file(data_flag)
        iter_vars = list_dict['iter_vars']
        params = list_dict['params']

        iter_vars_dims = []
        for iter_var in iter_vars:
            iter_vars_dims.append(len(iter_vars[iter_var]))

        print('Loading object list...')
        CS_object_array = load_aggregated_object_list(iter_vars_dims,
                                                      data_flag)
        print('...loaded.')

        # Data structures
        data = dict()
        errors_nonzero = sp.zeros(iter_vars_dims)
        errors_zero = sp.zeros(iter_vars_dims)

        Mm_shape = iter_vars_dims + [params['Mm']]
        Mm_Nn_shape = iter_vars_dims + [params['Mm'], params['Nn']]
        data['epsilons'] = sp.zeros(Mm_shape)
        data['dYys'] = sp.zeros(Mm_shape)
        data['Yys'] = sp.zeros(Mm_shape)
        data['gains'] = sp.zeros(Mm_Nn_shape)
        data['Kk2s'] = sp.zeros(Mm_Nn_shape)
        data['successes'] = sp.zeros(iter_vars_dims)

        # Calculate binary errors
        it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])
        while not it.finished:
            errors = binary_errors(CS_object_array[it.multi_index],
                                   nonzero_bounds=nonzero_bounds,
                                   zero_bound=zero_bound)

            errors_nonzero[it.multi_index] = errors['errors_nonzero']
            errors_zero[it.multi_index] = errors['errors_zero']
            it.iternext()

        # Calculate success ratios from binary errors
        it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])
        while not it.finished:
            data['successes'][it.multi_index] = binary_success(
                errors_nonzero[it.multi_index],
                errors_zero[it.multi_index],
                threshold_pct_nonzero=threshold_pct_nonzero,
                threshold_pct_zero=threshold_pct_zero)
            data['epsilons'][it.multi_index] = CS_object_array[
                it.multi_index].eps
            data['gains'][it.multi_index] = CS_object_array[it.multi_index].Rr
            data['dYys'][it.multi_index] = CS_object_array[it.multi_index].dYy
            data['Yys'][it.multi_index] = CS_object_array[it.multi_index].Yy
            data['Kk2s'][it.multi_index] = CS_object_array[it.multi_index].Kk2
            it.iternext()

        save_signal_decoding_weber_law(data, data_flag)
def aggregate_temporal_entropy_objects(data_flags):
    """
	Aggregate CS objects from separate .pklz files of temporal runs to a single
	.pklz object.
	
	Args:
		data_flags: Identifiers for saving and loading.
	"""

    temporal_structs_to_save = ['entropy']

    if isinstance(data_flags, str):
        data_flags = [data_flags]

    for data_flag in data_flags:
        list_dict = read_specs_file(data_flag)
        iter_vars = list_dict['iter_vars']
        iter_vars_dims = []
        for iter_var in iter_vars:
            iter_vars_dims.append(len(iter_vars[iter_var]))
        it = sp.nditer(sp.zeros(iter_vars_dims), flags=['multi_index'])

        CS_init_array = load_objects(list(it.multi_index), data_flag)

        # Dictionary to save all objects at time 0; this will contain all
        # non-temporal info for each iterated variable.
        data = dict()
        data['init_objs'] = []
        nT = len(CS_init_array[0].signal_trace_Tt)

        # Assign data structures of appropriate shape for the temporal variable
        structs = dict()
        for struct_name in temporal_structs_to_save:
            try:
                structs[struct_name] = getattr(CS_init_array[0], struct_name)
            except AttributeError:
                print('%s not an attribute of the CS object' % struct_name)
                continue

            # Shape is (num timesteps, iterated var ranges, variable shape);
            # if a float or integer, shape is just time and iter vars.
            struct_shape = (nT, ) + tuple(iter_vars_dims)
            if hasattr(structs[struct_name], 'shape'):
                struct_shape += structs[struct_name].shape
            data[struct_name] = sp.zeros(struct_shape)

        # Iterate over all objects to be aggregated
        structs = dict()
        while not it.finished:

            print('Loading index:', it.multi_index)
            temporal_CS_array = load_objects(list(it.multi_index), data_flag)

            # Save full object at time 0, contains non-temporal data.
            data['init_objs'].append(temporal_CS_array[0])

            # Grab all the temporal structures, timepoint-by-timepoint
            for iT in range(nT):

                full_idx = (iT, ) + it.multi_index

                for struct_name in temporal_structs_to_save:
                    structs[struct_name] = getattr(temporal_CS_array[iT],
                                                   struct_name)
                    data[struct_name][full_idx] = structs[struct_name]

            it.iternext()

        save_aggregated_temporal_objects(data, data_flag)
    def run(self, workspace):

        diagnostics = dict()

        cent = self.center.get_value()

        input_objects_name = self.input_objects_name.value
        object_set = workspace.object_set
        assert isinstance(object_set, cpo.ObjectSet)

        input_image_name = self.input_image_name.value
        image_set = workspace.image_set
        assert isinstance(image_set, cpi.ImageSet)
        output_image_name = self.output_image_name.value

        input_image = image_set.get_image(input_image_name)  # must_be_rgb=True
        pixels = input_image.pixel_data
        diagnostics['pixels'] = pixels

        input_objects = object_set.get_objects(input_objects_name)

        mask = input_objects.get_segmented()

        new_im = scipy.zeros(shape=pixels.shape)

        diagnostics['new_im'] = list()
        diagnostics['nucleus_processed'] = list()
        diagnostics['nucleus_pixels'] = list()
        diagnostics['ci'] = list()

        for x in range(1, mask.max()+1):

            nucleus_map = mask == x

            nucleus_pixels = scipy.zeros(shape=pixels.shape)
            # Copy each pixel that falls inside this nucleus' mask; the 2-D
            # mask is broadcast across the color axis via scipy.newaxis.
            for i, j, k in scipy.nditer([pixels,
                                         nucleus_map[:, :, scipy.newaxis],
                                         nucleus_pixels],
                                        op_flags=[['readonly'],
                                                  ['readonly'],
                                                  ['readwrite']]):
                if j:
                    k[...] = scipy.copy(i)

            diagnostics['nucleus_pixels'].append(nucleus_pixels)

            nucleus_pixels_t = scipy.transpose(nucleus_pixels)

            nucleus_ci_r = get_ci(nucleus_pixels_t[0],
                                  percentile=self.percentile_r.get_value(),
                                  center=cent,
                                  mod=self.scale_r.get_value())

            nucleus_ci_g = get_ci(nucleus_pixels_t[1],
                                  percentile=self.percentile_g.get_value(),
                                  center=cent,
                                  mod=self.scale_g.get_value())

            nucleus_ci_b = get_ci(nucleus_pixels_t[2],
                                  percentile=self.percentile_b.get_value(),
                                  center=cent,
                                  mod=self.scale_b.get_value())

            diagnostics['ci'].append((nucleus_ci_r, nucleus_ci_g,
                                      nucleus_ci_b))

            nucleus_processed = update_image(nucleus_pixels,
                                             nucleus_ci_r,
                                             nucleus_ci_g,
                                             nucleus_ci_b)

            diagnostics['nucleus_processed'].append(nucleus_processed)

            new_im = new_im + nucleus_processed

            diagnostics['new_im'].append(new_im)

            # Dump the diagnostics collected so far after every nucleus;
            # the pickle and text dumps are left disabled.
            # with open('/Users/lages/Documents/sauceda/pictures_processed/diagnostics'
            #           '.p', 'wb') as f:
            #     pickle.dump(diagnostics, f)

            from os.path import expanduser
            home = expanduser("~")

            # with open(home + '/ci_values.txt', 'wb') as f:
            #     writeListsToLines(diagnostics['ci'], f)

            sio.savemat(home + '/diagnostics.mat', diagnostics)

        output_image = cpi.Image(new_im, parent_image=input_image)
        image_set.add(output_image_name, output_image)
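# The per-element nditer copy in `run` above is equivalent to a boolean-mask
# assignment, which is simpler and much faster. A sketch assuming an RGB
# `pixels` array and a 2-D boolean `nucleus_map` (hypothetical data):
import numpy as np

pixels = np.random.rand(4, 4, 3)            # stand-in RGB image
nucleus_map = np.zeros((4, 4), dtype=bool)
nucleus_map[1:3, 1:3] = True                # stand-in nucleus mask
nucleus_pixels = np.zeros_like(pixels)
nucleus_pixels[nucleus_map] = pixels[nucleus_map]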