Example #1
def apply_adjustment(camera, vehicle_center_pixel, vehicle_height, true_vehicle_center):
    """ Returns unadjusted and adjusted ground centers of the vehicle"""

    # project center pixel onto ground
    rough_vehicle_center = camera.pixel_to_plane(*vehicle_center_pixel)

    #rough_vehicle_center = camera.pixel_to_plane(*camera.world_to_pixel(*true_vehicle_center))

    # height of camera position
    camera_position = camera.position
    camera_height = camera_position[2]

    multiplier = 1 if camera.model == 'perspective' else 1 # TODO play with between 2 and 1 depending on model and camera angle...?

    # get direction relative to the Z axis (i.e. the axis the camera sits on)
    dist = np.linalg.norm(rough_vehicle_center[:2]) # ground plane distance
    direction_vec = helper.normalize(rough_vehicle_center)
    # overshoot = (vehicle_height/2)/(camera_height/dist)
    overshoot = multiplier * dist*vehicle_height/(camera_height*2)
    adjusted_center_distance = dist - overshoot

    ground_plane_direction_vector = direction_vec.copy()
    ground_plane_direction_vector[2] = 0.0 # flatten onto ground plane
    ground_plane_direction_vector = helper.normalize(ground_plane_direction_vector)
    adjusted_center = ground_plane_direction_vector * adjusted_center_distance

    return (rough_vehicle_center, adjusted_center, overshoot) 
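The correction above is plain similar triangles: a camera at height H sees the vehicle's visual center (at height h/2) along a ray that overshoots the true ground center by overshoot = (h/2)/(H/dist) = dist*h/(2H). A quick numeric check of that formula, with made-up values:

# Hypothetical numbers: camera 10 m up, 2 m tall vehicle, rough ground hit 30 m out.
camera_height = 10.0   # H
vehicle_height = 2.0   # h
dist = 30.0            # ground-plane distance to the rough center

overshoot = dist * vehicle_height / (camera_height * 2)   # = (h/2) / (H/dist)
print(overshoot)           # 3.0
print(dist - overshoot)    # 27.0, the adjusted center distance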
Example #2
def main(image, template, methods):
    image_normalized = normalize(image)
    template_normalized = normalize(template)
    image_h, image_w = image.shape
    template_h, template_w = template.shape
    match_h, match_w = image_h - template_h + 1, image_w - template_w + 1
    match = np.zeros((match_h, match_w), dtype='float64')
    result = {method_name: match.copy() for method_name in methods}

    for i in range(match_h):
        for j in range(match_w):
            window = image[i:template_h + i, j:template_w + j]
            window_normalized = image_normalized[i:template_h + i,
                                                 j:template_w + j]
            for method_name, values in methods.items():
                func = values['method']
                normalized = values['normalized']
                if normalized:
                    result[method_name][i, j] = func(template_normalized,
                                                     window_normalized)
                else:
                    result[method_name][i, j] = func(template, window)

    for method_name, match in result.items():
        if methods[method_name]['argmax']:
            top_left = np.unravel_index(match.argmax(), match.shape)[::-1]
        else:
            top_left = np.unravel_index(match.argmin(), match.shape)[::-1]
        show_result(image, template_w, template_h, match, top_left,
                    method_name)
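A sketch of how main could be driven; the shape of the methods dict (keys 'method', 'normalized', 'argmax') is inferred from the loops above, and the SSD/NCC metrics are hypothetical stand-ins:

import numpy as np

def ssd(template, window):
    # sum of squared differences: best match minimizes it
    return ((template.astype('float64') - window) ** 2).sum()

def ncc(template_norm, window_norm):
    # normalized cross-correlation: best match maximizes it
    return (template_norm * window_norm).sum()

methods = {
    'SSD': {'method': ssd, 'normalized': False, 'argmax': False},
    'NCC': {'method': ncc, 'normalized': True, 'argmax': True},
}
# main(image, template, methods)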
Example #3
    def search_signatures(self, neighborhoods):
        """
        Search the database given a list of signature neighborhoods. Return the top NUMBER_OF_MATCHES matched files.
        """
        matched_files = {}
        total_signatures = 0
        # Iterate over all signatures of the file
        for _, hashes_lst in neighborhoods.items():
            for sig in hashes_lst:
                filehashes_lst = self.signatures_db.get(sig)
                if filehashes_lst is None:
                    continue
                anchor_hashes_lst = self.anchors_db.get(sig)
                # Get the list of filenames. Split by HASH_DIGEST_SIZE bytes.
                filehashes_lst = [ filehashes_lst[i * _hp.HASH_DIGEST_SIZE:(i + 1) * _hp.HASH_DIGEST_SIZE] for i in range((len(filehashes_lst) + _hp.HASH_DIGEST_SIZE - 1) // _hp.HASH_DIGEST_SIZE ) ]
                # Get the list of anchor hashes. Split by HASH_DIGEST_SIZE bytes.
                anchor_hashes_lst = [ anchor_hashes_lst[i * _hp.HASH_DIGEST_SIZE:(i + 1) * _hp.HASH_DIGEST_SIZE] for i in range((len(anchor_hashes_lst) + _hp.HASH_DIGEST_SIZE - 1) // _hp.HASH_DIGEST_SIZE ) ]
                # for filename_hash in filehashes_lst:
                for i in range(len(filehashes_lst)):
                    filename_hash = filehashes_lst[i]
                    filename = self.filenames_db.get(filename_hash).decode("utf-8")
                    anchor_hash = anchor_hashes_lst[i]
                    # If this is the first hash for that filename create a new inner dict
                    if filename not in matched_files:
                        matched_files[filename] = {}
                        matched_files[filename]['total'] = 1
                    # Count occurrences
                    if anchor_hash not in matched_files[filename]:
                        matched_files[filename][anchor_hash] = []
                        matched_files[filename]['anchors_matched'] = 0
                    matched_files[filename][anchor_hash].append(sig)
                    matched_files[filename]['total'] += 1
                total_signatures += 1
        # Check how many signatures and neighborhoods matched
        anchor_matches = {}
        signatures_matches = {}
        signatures_dict = {}
        for filename, anchors in matched_files.items():
            signatures_dict[filename] = {}
            signatures_dict[filename]['total'] = matched_files[filename]['total']
            # for each anchor and the hashes in its neighborhood
            for anch, sigs in anchors.items():
                # Skip the counters
                if anch == 'anchors_matched' or anch == 'total':
                    continue
                if len(sigs) >= _hp.MIN_SIGNATURES_TO_MATCH:
                    matched_files[filename]['anchors_matched'] += 1
                for s in sigs:
                    signatures_dict[filename][s] = 1
            anchor_matches[filename] = anchors['anchors_matched'] / len(neighborhoods)
            signatures_matches[filename] = (len(signatures_dict[filename]) - 2) / total_signatures
        # Find top-K matches with 2 different criteria.
        anchor_matches = sorted(anchor_matches.items(), key=lambda x: x[1], reverse=True)[:_hp.NUMBER_OF_MATCHES]
        signatures_matches = sorted(signatures_matches.items(), key=lambda x: x[1], reverse=True)[:_hp.NUMBER_OF_MATCHES]
        # return lists of tuples
        _hp.normalize(anchor_matches)
        _hp.normalize(signatures_matches)
        return anchor_matches, signatures_matches
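The chunking idiom used twice above (splitting one concatenated byte blob into fixed-size digests) in isolation, with a hypothetical 4-byte digest size:

HASH_DIGEST_SIZE = 4  # hypothetical; the real value lives in _hp
blob = b"aaaabbbbcccc"
digests = [blob[i * HASH_DIGEST_SIZE:(i + 1) * HASH_DIGEST_SIZE]
           for i in range((len(blob) + HASH_DIGEST_SIZE - 1) // HASH_DIGEST_SIZE)]
print(digests)  # [b'aaaa', b'bbbb', b'cccc']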
Example #4
def combine_scores(features, feature_weights):

    final_scores = features.dot(feature_weights)
    final_scores = pd.Series(
        np.array(final_scores.values.reshape(final_scores.size, )))
    final_scores.index = features.index
    final_scores = helper.normalize(final_scores.sort_values(ascending=False))

    return final_scores
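A minimal usage sketch for the core weighted-sum step (the feature table and weights are made up, and helper.normalize is left out since its definition isn't shown):

import pandas as pd

features = pd.DataFrame({'relevance': [0.9, 0.2, 0.5],
                         'freshness': [0.1, 0.8, 0.5]},
                        index=['a', 'b', 'c'])
feature_weights = pd.Series([0.7, 0.3], index=['relevance', 'freshness'])

final_scores = features.dot(feature_weights)      # weighted sum per row
print(final_scores.sort_values(ascending=False))  # a: 0.66, c: 0.50, b: 0.38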
Example #5
def actionProb(action, state, goal):
	index = None
	probs = []
	for i, a in enumerate(legalActions(state)):
		ns = (state[0]+a[0], state[1]+a[1])
		if a == action:
			index = i
		probs.append(math.exp(beta*q(a, state, goal)))
	probs = h.normalize(probs)
	return probs[index]
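actionProb is a softmax over Q-values, so h.normalize here has to divide by the sum to turn the exponentiated scores into a distribution. That step in isolation (beta and the Q-values are made up):

import math

beta = 2.0
q_values = [0.5, 1.0, 0.25]            # hypothetical q(a, state, goal) values
weights = [math.exp(beta * q) for q in q_values]
total = sum(weights)
probs = [w / total for w in weights]   # what h.normalize presumably does
print(probs, sum(probs))               # a distribution summing to 1.0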
Example #6
def update_text(selected_feature, selected_value):

    df_temp = df.loc[df[selected_feature] == selected_value]

    df_temp2 = df_temp.drop([selected_feature, 'first period grade',
                             'second period grade', 'third period grade'],
                            axis=1)
    df_temp2 = helper.handle_non_numerical_data(df_temp2)
    df_temp2 = helper.normalize(df_temp2)
    return 'Total number of students "{}"'.format(df_temp2.shape[0])
Example #7
def update_details(selected_feature, selected_value):

    trace = []

    df_temp = df.loc[df[selected_feature] == selected_value]

    df_temp2 = df_temp.drop([selected_feature, 'first period grade',
                             'second period grade', 'third period grade'],
                            axis=1)
    df_temp2 = helper.handle_non_numerical_data(df_temp2)
    df_temp2 = helper.normalize(df_temp2)

    for column in df_temp2.columns:
        trace.append(df_temp2[column].mean())

    print(df_temp2.columns)
    print(trace)

    graph = []
    graph.append(
        go.Bar(x=df_temp2.columns,
               y=trace,
               name='First Year Results',
               opacity=0.7,
               marker={'color': 'rgb(65,64,66)'}))

    return {
        'data': graph,
        'layout': go.Layout(
            xaxis={'title': 'Features'},
            yaxis={'title': 'Average value of each feature'},
            #margin={'l': 40, 'b': 30, 't': 0, 'r': 10},
            height=450,
            hovermode='closest')
    }
Example #8
def update_text(selected_feature, selected_value):

    df_temp = df.loc[df[selected_feature] == selected_value]

    df_temp2 = df_temp.drop([
        selected_feature, 'first period grade', 'second period grade',
        'third period grade'
    ],
                            axis=1)
    df_temp2 = helper.handle_non_numerical_data(df_temp2)

    df_temp2 = helper.normalize(df_temp2)
    return 'Total number of students "{}"'.format(df_temp2.shape[0])
Example #9
 def __init__(self, filename, numbasecorr=20, Normalize=True, plot=True):
     self.__filename = filename
     self.__numbasecorr = numbasecorr
     RawData = RawTOFData(filename)
     self.__RawData = RawData
     idx = RawData.TOF.argsort()
     self.__TOF = RawData.TOF[idx] / 1000
     Signal = (RawData.Baseline - RawData.Signal)[idx]
     self.__Signal = helper.subtract_baseline(Signal, left=self.__numbasecorr)
     if Normalize:
         self.__Signal = helper.normalize(self.__Signal)
         self.__ylabel = r"$\mathrm{Normalized\ REMPI\ Signal}$"
     else:
         self.__ylabel = r"$\mathrm{REMPI\ Signal}$"
     if plot:
         self.plot()
Example #10
def write(input_string, output_path):
    with open(output_path, "w", newline="") as f:
        # To write .csv and .tsv, you first open a
        # file, then you call csv.writer() and give
        # it the file as an argument as seen below.
        # (It doesn't work if you haven't imported
        # the csv module above.)
        # writer = csv.writer(f)
        writer = csv.writer(f, delimiter="\t")

        # The csv.writer() method can either just take
        # one argument, the file it will write to, in
        # which case it looks just as above. It can,
        # however, take a second argument, and look
        # as follows:
        # csv.writer(f, delimiter=",")
        # which will tell the csv module explicitly
        # that we want to use , as a symbol to separate
        # the individual fields.
        # TODO: Change the csv.writer() call above so
        # that the csv module uses the tab as a
        # delimiter. Check exercise-5-readme.md
        # if you are unsure.

        # Then, you can use writer.writerows() to write
        # your .csv file. The writerows() function
        # takes as argument a list of lists. For example,
        # calling writerows( [ [ a , b ] , [ c , d ] ])
        # will result in the following .csv file:
        # a,b
        # c,d

        # TODO: construct a list of lists in the
        # following form:
        # [ [ token1, normalised_form1 ] ,
        #   [ token2, normalised_form2 ] ,
        #  ... ]

        # using the helper.tokenize() and
        # helper.normalize() functions, then change
        # the below call to use your list of lists
        tokens = helper.tokenize(input_string)

        tokens_and_normalizations = [[token, helper.normalize(token)]
                                     for token in tokens]
        writer.writerows(tokens_and_normalizations)
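A hypothetical usage, assuming helper.tokenize splits on whitespace and helper.normalize lowercases:

write("The CAT sat", "tokens.tsv")
# tokens.tsv would then hold one tab-separated pair per row:
# The     the
# CAT     cat
# sat     sat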
Example #11
def update_details(selected_feature, selected_value):

    trace = []

    df_temp = df.loc[df[selected_feature] == selected_value]

    df_temp2 = df_temp.drop([
        selected_feature, 'first period grade', 'second period grade',
        'third period grade'
    ],
                            axis=1)
    df_temp2 = helper.handle_non_numerical_data(df_temp2)

    df_temp2 = helper.normalize(df_temp2)

    for column in df_temp2.columns:
        trace.append(df_temp2[column].mean())

    print(df_temp2.columns)
    print(trace)

    graph = []

    graph.append(
        go.Bar(x=df_temp2.columns,
               y=trace,
               name='First Year Results',
               opacity=0.7,
               marker={
                   'color': 'rgb(65,64,66 )',
               }))
    return {
        'data':
        graph,
        'layout':
        go.Layout(
            xaxis={
                'title': 'Features',
            },
            yaxis={
                'title': 'Average value of each feature',
            },
            #margin={'l': 40, 'b': 30, 't': 0, 'r': 10},
            height=450,
            hovermode='closest')
    }
Example #12
def write(input_string, output_path):
  with open(output_path, "w") as f:
    writer = csv.writer(f, delimiter="\t")
    tokens = helper.tokenize(input_string)
    normalizations = []
    tokens_and_normalizations = []

    for token in tokens: #normalizing and storing in list
      normalizations.append(helper.normalize(token))
    
    for i in range(len(tokens)): #creating pairs and storing
      temporary_pair = []        #them in a nested list
      temporary_pair.append(tokens[i])
      temporary_pair.append(normalizations[i])
      tokens_and_normalizations.append(temporary_pair)
    
    writer.writerows(tokens_and_normalizations)
Example #13
    def forward(self, x):
        x = normalize(x)
        x = self.conv1(x)
        x = x_1 = self.prelu(x)
        for block in self.residual_part:
            x = block(x)

        x = self.conv2(x)
        x = self.batch1(x)
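        # in-place add: fuses the residual trunk output with the pre-residual features (skip connection)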
        x.add_(x_1)
        for block in self.upsample:
            x = block(x)

        x = self.conv3(x)
        x = self.tanh(x)
        x = denormalize(x)

        return x
Example #14
    def point_visible_from_camera(self, point, camera_placement):

        dest = camera_placement.real_camera.position
        diff = dest - point
        dist_to_camera = np.linalg.norm(diff)

        if dist_to_camera is None or dist_to_camera < 0:
            return False

        unit_dir = h.normalize(diff)

        for obj in self.world_objects:
            dist_to_object = obj.time_to_intersection(point, unit_dir)
            if dist_to_object is None:
                continue
            if dist_to_object < dist_to_camera:
                return False

        return True
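time_to_intersection is not shown; for a sphere it would be the standard ray-sphere test. A minimal sketch under that assumption (the Sphere class is hypothetical):

import numpy as np

class Sphere:
    def __init__(self, center, radius):
        self.center = np.asarray(center, dtype=float)
        self.radius = float(radius)

    def time_to_intersection(self, origin, unit_dir):
        # solve |origin + t*unit_dir - center|^2 = r^2 for the nearest t >= 0
        oc = np.asarray(origin, dtype=float) - self.center
        b = np.dot(oc, unit_dir)               # half the linear coefficient
        c = np.dot(oc, oc) - self.radius ** 2
        disc = b * b - c
        if disc < 0:
            return None                        # ray misses the sphere
        t = -b - np.sqrt(disc)                 # nearest root
        return t if t >= 0 else None

# Sphere((0, 0, 5), 1).time_to_intersection((0, 0, 0), np.array([0., 0., 1.])) -> 4.0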
Example #15
 def __init__(
     self,
     Setup,
     filename,
     IRDelay,
     mode="Flux_vs_TOF",
     mass=28,
     verbose=False,
     plot=True,
     fit=True,
     numbasecorr=20,
     Normalize=True,
 ):
     """
     ...
     """
     self.__mass = mass
     self.__filename = filename
     self.__IRDelay = IRDelay
     self.__setup = Setup
     self.__l = Setup.FlightLength
     self.__offset = 400
     self.__Normalize = Normalize
     self.__numbasecorr = numbasecorr
     self.__RawData = RawTOFData(filename)
     TOF, Signal = self.__treat_data(self.__RawData)
     v, E = self.__invert(TOF)
     Flux = self.__density_to_flux(Signal, v)
     if Normalize:
         Flux = [helper.normalize(flux, 5) for flux in Flux]
     self.__TOF = (TOF, Flux[0])
     self.__v = (v, Flux[1])
     self.__E = (E, Flux[2])
     self.set_mode(mode)
     self.__fit = None
     self.__moments = None
     if fit:
         self.fit(verbose=verbose)
     if plot:
         self.plot()
Example #16
 def dataframe(self):
     stats = list()
     for tr in self.result_page.find("table", {
             "id": "sortableTable"
     }).find("tbody").find_all("tr", class_="row"):
         c = tr.find_all("td")
         stats.append([
             hp.normalize(c[0], "date"),
             hp.normalize(c[1], "text"),
             hp.normalize(c[2], "distance"),
             hp.normalize(c[3], "only_digits"),
             hp.normalize(c[4], "float"),
             hp.normalize(c[5], "bends"),
             hp.normalize(c[6], "only_digits"),
             hp.normalize(c[7], "by"),
             hp.normalize(c[9], "remarks"),
             hp.normalize(c[10], "float"),
             hp.normalize(c[12], "float"),
             hp.normalize(c[14], "text"),
             hp.normalize(c[15], "float"),
         ])
     self.df = pd.DataFrame(stats,
                            columns=[
                                "date", "local", "distance", "trap",
                                "split", "bends", "position", "by",
                                "remarks", "win_time", "weight", "grade",
                                "cal_time"
                            ])
     self.df = self.df[self.df["date"] < self.date]
     self.df["split"] = self.df["split"].fillna(self.df["split"].mean())
     self.df["bends"] = self.df["bends"].fillna(self.df["bends"].mean())
     self.df = self.df.dropna(subset=["position"], axis=0)
     self.total_df = len(self.df)
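Here hp.normalize dispatches on a type tag instead of scaling numbers. A plausible sketch of that pattern (the parsing rules shown are assumptions):

import re

def normalize(cell, kind):
    # cell is a BeautifulSoup <td>; clean it up according to its type tag
    text = cell.get_text(strip=True)
    if kind == "float":
        try:
            return float(text)
        except ValueError:
            return None
    if kind == "only_digits":
        digits = re.sub(r"\D", "", text)
        return int(digits) if digits else None
    return text or None  # "text" and the remaining tags: fall back to raw text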
Example #17
 def _normalize(value):
     "normalize and do unicodesub"
     return normalize(self.unicodesub(_repl, value))
Example #18
    # Throwing away column label
    data_x = data_x[1:]
    data_y = data_y[1:]

    # Normalising and splitting dataset
    for i in range(len(data_x)):  # |
        data_x[i].insert(0, data_y[i])  # |
    random.shuffle(data_x)  # |
    # |  This whole block exists just to shuffle the data
    mimic = copy(data_x)  # |
    data_x, data_y = [], []  # |
    for row in mimic:  # |
        data_x.append(row[1:])  # |
        data_y.append(row[0])  # |
    data_x = hp.normalize(data_x)  # normalize

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The THING itself
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    ratio = 0.40  # Known base %
    k = 3  # K
    distance_func = 'euclidean'
    bound = int(ratio * len(data_y))

    # The classification algorithm
    my_output_labels = knn(x_train=data_x[:bound],
                           y_train=data_y[:bound],
                           x_test=data_x[bound + 1:],
                           distance=distance_func,
Example #19
 def kps(self):
     if not hasattr(self, '_kps'):
         self._kps = normalize(self.Kinv, self.kp)
     return self._kps
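The hasattr guard is a hand-rolled lazily cached attribute (an @property decorator may have been lost in extraction). On Python 3.8+, functools.cached_property gives the same compute-once behavior; a sketch, where the Frame class name is hypothetical and Kinv, kp and normalize come from the surrounding code:

from functools import cached_property

class Frame:
    # ... __init__ sets self.Kinv and self.kp ...
    @cached_property
    def kps(self):
        return normalize(self.Kinv, self.kp)  # computed once, then cached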
Example #20

from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows

df1 = xl.parse('Script_Cat')
df = pd.DataFrame()

for i, row in enumerate(df1.values):
    print(i)

    codpost_ciudad = '{:02}'.format(int(str(row[10]).replace('<sp>', ' ').split('.')[0]))
    prov = helper.normalize(helper.getProvincia(codpost_ciudad + '0'))
    if prov is None:
        raise Exception('12', '-', 'LA PROVINCIA NO EXISTE')
    munic = helper.normalize(str(row[11]).replace('<sp>', ' '))
    calle = helper.normalize(str(row[12]).replace('<sp>', ' '))
    num = int(float(str(row[13]).replace('<sp>', ' ')))
    puerta = str(row[14]).replace('<sp>', ' ')
    piso = str(row[15]).replace('<sp>', ' ')

    try:
        datos_munic = catas.ConsultaMunicipio(provincia=prov, municipio=munic)
        data_calle = json.loads(json.dumps(catas.ConsultaVia(provincia=prov, municipio=munic, nombrevia=calle)))
        if (i == 48):
            print(i)
        cod_post = data_calle['consulta_callejero']['callejero']['calle']['loine']['cp']
        cod_munic = data_calle['consulta_callejero']['callejero']['calle']['loine']['cm']
Example #21
            for f_sub in os.listdir(path):
                path_sub = os.path.join(path, f_sub)
                if os.path.isfile(path_sub):
                    try:
                        img = cv2.imread(path_sub)
                        # rotate
                        img = imutils.rotate(
                            img, rotate) if rotate != 0 else img
                        # scale
                        img = zoomin(img, scale) if scale > 1 else img
                        # grayscale with 3 channels
                        img = cv2.cvtColor(
                            cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
                            cv2.COLOR_GRAY2RGB) if grayscale else img
                        # normalize and filter
                        img = normalize(img, EXPECTED_MAX, MEDIAN_VALUE)
                        # gather stat
                        stat[img.shape] = stat[
                            img.shape] + 1 if img.shape in stat else 1
                        r = resize(img, EXPECTED_SIZE)
                        append_data_and_label(r, i, data, labels)
                    except Exception as err:
                        print(err)
                        print(path_sub)
                        sys.exit(0)
            i += 1
        count += 1
        bar.update(count)
    bar.finish()

    print('{} records saved'.format(data.nrows))
Example #22
validation_file = '../traffic-signs-data/valid.p'
testing_file = '../traffic-signs-data/test.p'

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']

# pre-process data
X_train_norm, X_valid_norm, X_test_norm = helper.normalize(
    X_train, X_valid, X_test)

label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_valid = label_binarizer.fit_transform(y_valid)
y_one_hot_test = label_binarizer.fit_transform(y_test)

# fix the seed for reducing as much as possible variability in the results
seed = 10
np.random.seed(seed)


# Build Deep Network Model in Keras
def create_model():
    # create model
    model = Sequential()
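helper.normalize above takes all three splits together and returns three arrays, which suggests one consistent transform across train/validation/test. A plausible sketch using the common (pixel - 128) / 128 image scaling; this is an assumption, not necessarily the repo's actual helper:

import numpy as np

def normalize(*image_sets):
    # hypothetical: scale uint8 pixels into roughly [-1, 1] with the same
    # fixed transform for every split, so the splits stay comparable
    return tuple((x.astype(np.float32) - 128.0) / 128.0 for x in image_sets)

# X_train_norm, X_valid_norm, X_test_norm = normalize(X_train, X_valid, X_test)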
Example #23
            image, brain_atlas = medim_obj.compute_mask("{Brain}",
                                                        "full",
                                                        interp=True)
            _, ventricle_atlas = medim_obj.compute_mask("{Ventricles}",
                                                        "full",
                                                        interp=True)

            raw_image = copy.deepcopy(image)
            raw_image = helper.z_pad(raw_image)
            raw_image = helper.xy_pad(raw_image)
            np.save(
                "/data/public/Segmentation_Dataset/MR_Dataset_Atlas/train/raw_images/{name}"
                .format(name=file_name), raw_image.data)

            image.data = helper.normalize(image.data, brain_atlas.data)
            image.data = helper.extract_brain(image.data, brain_atlas.data)
            skull_stripped = helper.z_pad(image)
            skull_stripped = helper.xy_pad(skull_stripped)

            np.save(
                "/data/public/Segmentation_Dataset/MR_Dataset_Atlas/train/skull_stripped/{name}"
                .format(name=file_name), skull_stripped.data)

            ventricle_atlas = helper.z_pad(ventricle_atlas)
            ventricle_atlas = helper.xy_pad(ventricle_atlas)

            np.save(
                "/data/public/Segmentation_Dataset/MR_Dataset_Atlas/train/ventricle_atlas/{name}"
                .format(name=file_name), ventricle_atlas.data)
Example #24
 def normalize_phases(self):
     helper.normalize(self.xFracs)
     helper.normalize(self.yFracs)
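The return values are ignored, so this helper.normalize must mutate its argument in place. A minimal in-place sketch consistent with that call pattern (an assumption about the helper):

def normalize(fracs):
    # rescale a list in place so its entries sum to 1
    total = sum(fracs)
    for i, value in enumerate(fracs):
        fracs[i] = value / total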
Example #25
for goal in goalPos:
	y, x = goalPos[goal]
	for i in range(len(world)):
		for j in range(len(world[i])):
			if world[i][j] == -1:
				continue
			s = (i, j)
			stateVals[goal][s] = -1*lengthOfPath(s, goal)

# Update state values iteratively
for i in range(20):
	# policy is given by old q values
	oldValues = deepcopy(stateVals)
	for goal in goalPos:
		y, x = goalPos[goal]
		for state in oldValues[goal]:
			if state == (y, x):
				stateVals[goal][state] = 0
				continue
			probs = []
			vals = []
			for a in legalActions(state):
				ns = (state[0]+a[0], state[1]+a[1])
				qa = oldValues[goal][ns]-cost(a)
				probs.append(math.exp(beta*qa))
				vals.append(qa)
			probs = h.normalize(probs)
			stateVals[goal][state] = sum([v1*v2 for v1,v2 in zip(probs,vals)])
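Each backup above is a soft (Boltzmann) expectation: successor values are weighted by softmax(beta * q) rather than taking a hard max. One such step with made-up numbers:

import math

beta = 1.0
vals = [-3.0, -1.0, -2.0]                  # hypothetical action values
weights = [math.exp(beta * v) for v in vals]
total = sum(weights)
probs = [w / total for w in weights]
soft_value = sum(p * v for p, v in zip(probs, vals))
print(soft_value)  # about -1.43: between max(vals) and the mean, leaning toward the best action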


Example #26
        while frame is not None:
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (10, h-10)
            fontScale = 0.5
            fontColor = (0, 0, 255)
            lineType = 2

            # Since mhis are calculated by diffs of images, there will
            # necessarily be one less prediction than the number of frames
            img = frame
            if frame_index != 0 and binary_images[frame_index-1].sum() != 0:
                img = cv2.putText(frame, pred[frame_index-1],
                            bottomLeftCornerOfText,
                            font,
                            fontScale,
                            fontColor,
                            lineType)

            if GENERATE_IMAGE and frame_index == 100:
                cv2.imwrite('classify1_handwaving.png', helper.normalize(img))
            if GENERATE_IMAGE and frame_index == 110:
                cv2.imwrite('classify2_handwaving.png', helper.normalize(img))
            if GENERATE_IMAGE and frame_index == 148:
                cv2.imwrite('classify_handclapping.png', helper.normalize(img))

            video_out.write(img)
            frame = next(frame_gen)
            frame_index += 1

        video_out.release()
Example #27
 def __init__(self, point, normal):
     self.p = point
     self.n = h.normalize(normal)
Example #28
 def _normalize(value):
     "normalize and do unicodesub"
     return normalize(self.unicodesub(_repl, value))
Example #29
            print(f"{i}/{len(scans)}")
            medim_obj = MedImage.create_from_files(scan)
            file_name = medim_obj.name.split("/")[-1]
      
            image, brain_mask = medim_obj.compute_mask("{Brain}", "full", interp=True)
            _, ventricle_mask = medim_obj.compute_mask("{Ventricles}", "full", interp=True)
            _, brain_atlas = medim_obj.compute_mask("{BrainAtlas}", "full", interp=True)
            _, ventricle_atlas = medim_obj.compute_mask("{VentriclesAtlas}", "full", interp=True)

    
            raw_image = copy.deepcopy(image)
            raw_image = helper.z_pad(raw_image)
            raw_image = helper.xy_pad(raw_image)


            image.data = helper.normalize(image.data, brain_mask.data)
            image.data = helper.extract_brain(image.data, brain_mask.data)
            skull_stripped = helper.z_pad(image)
            skull_stripped = helper.xy_pad(skull_stripped)


            ventricle_mask = helper.z_pad(ventricle_mask)
            ventricle_mask = helper.xy_pad(ventricle_mask)

            brain_mask = helper.z_pad(brain_mask)
            brain_mask = helper.xy_pad(brain_mask)

            ventricle_atlas = helper.z_pad(ventricle_atlas)
            ventricle_atlas = helper.xy_pad(ventricle_atlas)

            brain_atlas = helper.z_pad(brain_atlas)