Example #1
def convert(path_list, annots, batch_size, max_idx, idx):
    start = idx * batch_size
    end = min(start + batch_size, max_idx)
    path_list = path_list[start:end]
    meta = pd.DataFrame(columns=['uid', 'flag', 'z_len', 'y_len', 'x_len'])
    labels = pd.DataFrame(columns=['uid', 'flag', 'z', 'y', 'x', 'diam', 'vol'])
    for i, path in enumerate(path_list):
        if os.path.basename(path) == 'b8bb02d229361a623a4dc57aa0e5c485':
            # ITK cannot read this file
            continue
        print('Converting %s' % path)
        scan_data, uid = read_scan(path)
        if scan_data is None:
            continue

        slices, spacing, origin = get_data(scan_data)
        skip = 0

        video.clip(slices, settings.low_thresh, settings.high_thresh)
        msk = mask.get_mask(slices, uid)
        # msk = mask.segment_lung_mask(slices, uid)
        slices = video.normalize(slices, settings.low_thresh, settings.high_thresh)
        mask.apply_mask(slices, msk)
        slices, starts = trim(slices)
        valid, flag = process_annotations(uid, annots, origin, labels, slices.shape, starts)
        if not valid:
            print('Ignoring %s - bad metadata' % path)
            continue

        video.write_data(slices, os.path.join(output_path, uid))
        meta.loc[meta.shape[0]] = dict(uid=uid, flag=flag, z_len=slices.shape[0],
                                       y_len=slices.shape[1], x_len=slices.shape[2])
    return meta, labels
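The start/end arithmetic above slices path_list into fixed-size batches selected by idx, so the whole conversion can be fanned out batch by batch. A minimal sketch of how such a driver might look (the convert_all name, the worker count, and the multiprocessing dispatch are assumptions for illustration, not taken from the original project):

import math
from multiprocessing import Pool

import pandas as pd

def convert_all(path_list, annots, batch_size=50, workers=4):
    # One convert() call per batch; max_idx caps the final, partial batch.
    max_idx = len(path_list)
    n_batches = math.ceil(max_idx / batch_size)
    args = [(path_list, annots, batch_size, max_idx, idx) for idx in range(n_batches)]
    with Pool(workers) as pool:
        results = pool.starmap(convert, args)
    # Each batch returns a (meta, labels) pair; stitch them back together.
    meta = pd.concat([m for m, _ in results], ignore_index=True)
    labels = pd.concat([l for _, l in results], ignore_index=True)
    return meta, labels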
Example #2
def get_sobel_mask(img, mask_size):
    mask_1, sum_1, m2_1, n2_1 = apply_sobel_edge_x(mask_size=mask_size)
    mask_2, sum_2, m2_2, n2_2 = apply_sobel_edge_y(mask_size=mask_size)

    mask_ = mask_1 + mask_2
    sobel = apply_mask(img, mask_, sum_1, m2_1, n2_1)

    return sobel
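The helper above sums an x-direction and a y-direction Sobel kernel from the project's apply_sobel_edge_x/apply_sobel_edge_y builders and convolves once with apply_mask. A small sketch of the more common variant, convolving with each 3x3 kernel separately and combining the results into a gradient magnitude, using scipy in place of the project helpers (assumed equivalents, for illustration only):

import numpy as np
from scipy import ndimage

# Standard 3x3 Sobel kernels; the project builds its own (possibly larger)
# kernels via apply_sobel_edge_x / apply_sobel_edge_y.
SOBEL_X = np.array([[-1, 0, 1],
                    [-2, 0, 2],
                    [-1, 0, 1]], dtype=float)
SOBEL_Y = SOBEL_X.T

def sobel_magnitude(img):
    # Convolve with both kernels and combine into a gradient magnitude.
    gx = ndimage.convolve(img.astype(float), SOBEL_X)
    gy = ndimage.convolve(img.astype(float), SOBEL_Y)
    return np.hypot(gx, gy)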
Example #3
    def save(self):
        # When the plot is closed we save the newly created label mask.
        save_im = image.AllSkyImage(self.name, None, None, self.mask)

        if self.camera.lower() == "kpno":
            # Gets the exposure for the saving location.
            exp_im = image.AllSkyImage(self.name, None, None, self.img)
            exp = image.get_exposure(exp_im)
            loc = os.path.join(os.path.dirname(__file__),
                               *["Images", "data", "labels",
                                 str(exp)])

            # Masks the antenna
            m = mask.generate_mask()
            save_im = mask.apply_mask(m, save_im)
        else:
            loc = os.path.join(os.path.dirname(__file__),
                               *["Images", "data", "labels-sw"])

        # Saves the image.
        image.save_image(save_im, loc)

        if not self.update:
            # Moves the downloaded image into the training folder.
            loc = os.path.join(os.path.dirname(__file__),
                               *["Images", "data", "to_label", self.name])
            if self.camera.lower() == "kpno":
                dest = os.path.join(
                    os.path.dirname(__file__),
                    *["Images", "data", "train",
                      str(exp), self.name])
            else:
                dest = os.path.join(
                    os.path.dirname(__file__),
                    *["Images", "data", "train", "sw", self.name])
            os.rename(loc, dest)
            print("Moved: " + loc)
Example #4
def transform(img):
    """Transform a circular all-sky image into an Eckert-IV projection of the
    visible night sky.

    Parameters
    ----------
    img : image.AllSkyImage
        The image.

    See Also
    --------
    eckertiv : Define the projection method.

    Notes
    -----
    First applies a mask generated by mask.generate_full_mask().
    From there, lists of the x and y pixel positions inside the image are built.
    These lists are converted to right ascension and declination
    representations of each pixel. These are passed to eckertiv(), which
    converts these points to x and y positions on the Eckert-IV projection.
    The map is then built as a scatter plot using these x and y positions,
    where the color of each dot is taken from the pixel originally used.
    Each point is the same size, which is a valid assumption since the
    Eckert-IV projection is an equal area projection.
    The plot is then saved to Images/transform/`img.date`/`img.name`.

    """
    time = img.time

    # Find the mask and black out those pixels.
    # Contrasting the clouds already masks.
    if img.camera == "KPNO":
        masking = mask.generate_full_mask()
        img = mask.apply_mask(masking, img)

    # Sets up the figure and axes objects
    fig = plt.figure(frameon=False)
    fig.set_size_inches(12, 6)

    # We just want the globe to be centered in an image so we turn off the axis
    ax1 = plt.Axes(fig, [0., 0., 1., 1.])
    ax1.set_axis_off()

    # Set up the black background.
    rapoints = []
    decpoints = []

    # Just a bunch of ra-dec points for the background.
    ra = 0
    while ra <= 360:
        dec = -90
        while dec <= 90:
            rapoints.append(ra)
            decpoints.append(dec)

            dec += .5
        ra += .5

    # Scatter for the background
    # (i.e. fills in the rest of the globular shape with black)
    x, y = eckertiv(rapoints, decpoints)
    ax1.scatter(x, y, s=2, color="black")

    # This is the image conversion
    xpoints = []
    ypoints = []

    center = center_kpno if img.camera == "KPNO" else center_sw
    max_r = 241 if img.camera == "KPNO" else 510
    for row in range(0, img.data.shape[0]):
        for column in range(0, img.data.shape[1]):

            x = column - center[0]
            y = center[1] - row
            r = math.hypot(x, y)

            # Only want points in the circle to convert
            if r <= max_r:
                xpoints.append(column)
                ypoints.append(row)

    # We need to add 0.5 to the x,y coords to get the center of the pixel
    # rather than the top left corner.
    # Convert the alt az to x,y
    x = np.add(np.asarray(xpoints), 0.5)
    y = np.add(np.asarray(ypoints), 0.5)
    rapoints, decpoints = coordinates.xy_to_radec(x, y, time, img.camera)

    # This block shifts the ra values so that the projection is centered at
    # ra = 360 - rot. This is done so the survey-area outline is drawn as 2
    # polygons rather than 3.
    rot = 60
    rapoints = np.where(rapoints > (360 - rot), rapoints + rot - 360,
                        rapoints + rot)

    # Finds colors for dots.
    colors = []
    for i, _ in enumerate(rapoints):
        x = xpoints[i]
        y = ypoints[i]

        if img.data.shape[-1] == 3:
            colors.append(img.data[y, x] / 255)
        else:
            colors.append(img.data[y, x])

    # Scatter for the image conversion
    x, y = eckertiv(rapoints, decpoints)
    ax1.scatter(x, y, s=1, c=colors, cmap="gray")

    # Add the contours
    ax1 = contours(ax1, time)

    # These coords (-265.300085635, -132.582101423) are the minimum x and y of
    # the projection.
    ax1.text(-290, -143, img.formatdate, style="italic")

    patches = desi_patch()
    for patch in patches:
        ax1.add_patch(patch)

    # Add the axes to the fig so it gets saved.
    fig.add_axes(ax1)

    # Make sure the folder location exists
    directory = os.path.join("Images", *["transform", img.date])
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Save name.
    conv = os.path.join(directory, img.name)

    # Want it to be 1920 wide.
    dpi = 1920 / (fig.get_size_inches()[0])
    plt.savefig(conv, dpi=dpi)

    print("Saved: " + conv)

    # Close the plot so we don't run out of memory.
    plt.close()
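The nested while loops in transform() that build the background ra/dec grid in 0.5 degree steps can also be written with numpy; an equivalent sketch:

import numpy as np

# 0.5 degree grid covering ra 0-360 and dec -90-90, flattened to point lists.
ra_grid, dec_grid = np.meshgrid(np.arange(0, 360.5, 0.5), np.arange(-90, 90.5, 0.5))
rapoints = ra_grid.ravel()
decpoints = dec_grid.ravel()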
Example #5
def run_test(num_run):
        
        load_dotenv()
        SAVE_PATH = os.getenv('SAVE_PATH')
        My_Filters = SAVE_PATH+'/My_Filters'
        OpenCV_Filters= SAVE_PATH+'/OpenCV_Filters'
        
        for x in [num_run]:
                images=load_image.open_multiple_images(x)
                count=0
                # Loops Through All Images Selected In Load_Images
                for img in images.values():
                        count+=1
                        # Stores Original Image To Results Folder
                        temp_filename=SAVE_PATH+'/Original/Original_'+str(count)+'.jpg'
                        cv2.imwrite(temp_filename, img)
                        
                        # Variable To Store Both Image Noises
                        img_noise=[]

                        # Applies Noise Provided In The Project Instructions
                        img_gaus=noise.add_gaussian_noise(50,100,img)
                        img_s_p=noise.add_saltpepper_noise(img)
                        
                        # Stores Gaussian Noise Image To Results Folder
                        temp_filename=SAVE_PATH+'/Gaussian_Noise/Gaussian_Noise_'+str(count)+'.jpg'
                        cv2.imwrite(temp_filename, img_gaus) 

                        # Stores Salt And Pepper Noise Image To Results Folder
                        temp_filename=SAVE_PATH+'/Salt_And_Pepper_Noise/Salt_And_Pepper_Noise_'+str(count)+'.jpg'
                        cv2.imwrite(temp_filename, img_s_p) 

                        # Store Image Noises In List By Tuples
                        # Use Tuple To Check String Which Noise Type 
                        img_noise.append((img_gaus,"Gaussian_Noise"))
                        img_noise.append((img_s_p,"Salt_And_Pepper_Noise"))
                        
                        # cv2.imshow("Original",img)
                        # cv2.imshow("Noise Gaus",img_gaus)
                        # cv2.imshow("Noise SP",img_s_p)

                        # Loops Through Both Gaussian Noise And Salt And Pepper Noise; Filters Images
                        for img_n in img_noise:
                                if img_n[1]=='Gaussian_Noise':
                                        # Changes Results Path To Gaussian_Filter
                                        filename=My_Filters+'/Gaussian_Filter/'
                                        openCV_filename=OpenCV_Filters+'/Gaussian_Filter/'

                                if img_n[1]=='Salt_And_Pepper_Noise':
                                        # Changes Results Path To Salt_And_Pepper_Filter
                                        filename=My_Filters+'/Salt_And_Pepper_Filter/'
                                        openCV_filename=OpenCV_Filters+'/Salt_And_Pepper_Filter/'

                                # Loops For A 3x3 Mask And A 5x5 Mask
                                for matrix_ in [3,5]:

                                        if matrix_==3:
                                                # Changes Results Path To 3x3 Matrix
                                                filename_=filename+'/3x3_Mask/'
                                                openCV_filename_=openCV_filename+'/3x3_Mask/'

                                        if matrix_==5:
                                                # Changes Results Path To 5x5 Matrix
                                                filename_=filename+'/5x5_Mask/'
                                                openCV_filename_=openCV_filename+'/5x5_Mask/'

                                        # CONVOLUTION ALGORITHM
                                        # Creates Masks
                                        mask_1,sum_1,m2_1,n2_1= convolution.create_unweighted(mask_size=matrix_) 
                                        mask_2,sum_2,m2_2,n2_2= convolution.create_weighted(mask_size=matrix_) 
                                        mask_3,sum_3,m2_3,n2_3= convolution.create_gaussian(mask_size=matrix_,sigma=1)
                                        
                                        # Applies Mask To Image
                                        unweighted= mask.apply_mask(img_n[0], mask_1,sum_1,m2_1,n2_1)
                                        weighted= mask.apply_mask(img_n[0], mask_2,sum_2,m2_2,n2_2)
                                        gaussian= mask.apply_mask(img_n[0], mask_3,sum_3,m2_3,n2_3)
                                        
                                        # Applies openCV Filters To Image
                                        openCV_unweighted= openCV_mask_filters.get_unweighted(img_n[0],mask_size=matrix_)
                                        openCV_gaussian=openCV_mask_filters.get_gauss(img_n[0],mask_size=matrix_,sigma=1)
                                        
                                        # Stores unweighted Image To Results Folder
                                        temp_filename=filename_+'/Unweighted/Unweighted_Average_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_filename, unweighted)
                                        
                                        # Stores openCV_unweighted Image To Results Folder 
                                        temp_openCV_filename=openCV_filename_+'/Unweighted/openCV_Unweighted_Average_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_openCV_filename, openCV_unweighted) 

                                        # Stores weighted Image To Results Folder
                                        temp_filename=filename_+'/Weighted/Weighted_Average_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_filename, weighted) 
                                        
                                        # Stores gaussian Image To Results Folder
                                        temp_filename=filename_+'/Gaussian/Gaussian_Mask_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_filename, gaussian) 
                                        
                                        # Stores openCV_gaussian Image To Results Folder
                                        temp_openCV_filename=openCV_filename_+'/Gaussian/openCV_Gaussian_Mask_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_openCV_filename, openCV_gaussian) 
                                        
                                        #MEDIAN FILTERING
                                        median= median_filtering.apply_median_filter(matrix_,img_n[0])

                                        # Applies openCV Median Filter To Image
                                        openCV_median=openCV_mask_filters.get_median(img_n[0],mask_size=matrix_)

                                        # Stores median Image To Results Folder
                                        temp_filename=filename_+'/Median/Median_Filtering_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_filename, median) 
                                        
                                        # Stores openCV_median Image To Results Folder
                                        temp_openCV_filename=openCV_filename_+'/Median/openCV_Median_Filtering_'+str(matrix_)+'x'+str(matrix_)+'_'+str(count)+'.jpg'
                                        cv2.imwrite(temp_openCV_filename, openCV_median) 
                                        
                                        # cv2.imshow("Unweighted Average Of "+img_n[1],unweighted)        
                                        # cv2.imshow("Weighted Average Of "+img_n[1],weighted) 
                                        # cv2.imshow("Gaussian Mask Of "+img_n[1],gaussian) 
                                        # cv2.imshow("Median Filtering Of "+img_n[1],median) 
                        
                        for num_ in [3,5]:
                                if num_==3:
                                        # Changes Results Path To 3x3 Matrix
                                        filename_=My_Filters+'/Sobel_Edge/3x3_Mask/'
                                        openCV_filename_=OpenCV_Filters+'/Sobel_Edge/3x3_Mask/'

                                if num_==5:
                                        # Changes Results Path To 5x5 Matrix
                                        filename_=My_Filters+'/Sobel_Edge/5x5_Mask/'
                                        openCV_filename_=OpenCV_Filters+'/Sobel_Edge/5x5_Mask/'

                                # SOBEL FILTERING
                                sobel= sobel_edge.get_sobel_mask(img,mask_size=num_)

                                # Applies openCV Sobel Filter To Image
                                openCV_sobel=openCV_mask_filters.get_sobel(img,mask_size=num_)

                                # Stores sobel Image To Results Folder
                                temp_filename=filename_+'Sobel_Edge_'+str(num_)+'x'+str(num_)+'_'+str(count)+'.jpg'
                                cv2.imwrite(temp_filename, sobel) 
                                
                                # Stores openCV_sobel Image To Results Folder
                                temp_openCV_filename=openCV_filename_+'openCV_Sobel_Edge_'+str(num_)+'x'+str(num_)+'_'+str(count)+'.jpg'
                                cv2.imwrite(temp_openCV_filename, openCV_sobel)

                        # SOBEL FILTERING
                        sobel= sobel_edge.apply_sobel_edge(img)

                        # Applies openCV Sobel Filter To Image
                        openCV_sobel=openCV_mask_filters.get_sobel(img,mask_size=1)

                        # Stores sobel Image To Results Folder
                        temp_filename=My_Filters+'/Sobel_Edge/Sobel_Edge_'+str(count)+'.jpg'
                        cv2.imwrite(temp_filename, sobel) 
                        
                        # Stores openCV_sobel Image To Results Folder
                        temp_openCV_filename=OpenCV_Filters+'/Sobel_Edge/openCV_Sobel_Edge_'+str(count)+'.jpg'
                        cv2.imwrite(temp_openCV_filename, openCV_sobel) 
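run_test compares the hand-written filters against OpenCV baselines through the project's openCV_mask_filters wrappers. A sketch of the standard OpenCV calls such wrappers typically reduce to (the opencv_baselines name is illustrative; the real wrapper module may differ):

import cv2

def opencv_baselines(img, k=3, sigma=1):
    # Box (unweighted average), Gaussian, and median smoothing.
    unweighted = cv2.blur(img, (k, k))
    gaussian = cv2.GaussianBlur(img, (k, k), sigma)
    median = cv2.medianBlur(img, k)
    # Sobel gradients in x and y, combined into an edge magnitude.
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=k)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=k)
    sobel = cv2.magnitude(gx, gy)
    return unweighted, gaussian, median, sobel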
Example #6
def zero_three_cloud_contrast(img):
    """Darken cloud pixels in an image taken with an exposure time of 0.3
    seconds.

    Parameters
    ----------
    img : image.AllSkyImage
        The image.

    Returns
    -------
    numpy.ndarray
        A higher contrast version of the original image.

    Notes
    -----
    In order to first determine which pixels should be considered clouds
    this method first finds the difference between the pixel at position (510,
    510) in the given image and the image taken at 05:29:36 on
    November 8, 2017. This difference is then subtracted from all the pixels,
    normalizing the image to have the same background pixel value. A greyscale
    closing is then performed, smudging out white pixel noise.

    The average value of all the pixels in this new normalized image is
    calculated and any pixel that is above this average value is considered a
    cloud pixel. This is because light reflected off the moon illuminates
    the clouds, raising them above the average pixel value.

    Once the cloud pixels are found, all non-cloud pixels are raised in value
    by 40, while the cloud pixels are reduced to 0.
    """
    # Temporary; I intend to change this slightly later.
    img2 = np.asarray(Image.open("Images/Original/KPNO/20171108/r_ut052936s31200.png").convert("L"))

    img3 = np.copy(img.data)
    img1 = np.int16(img.data)
    img2 = np.int16(img2)

    # Finds the difference from the "standard" 0.3s image.
    # Then subtracts that value from the entire image to normalize it to
    # standard image color.
    val = img1[510, 510] - img2[510, 510]
    img1 = img1 - val

    # Subtracts standard image from current image.
    # Performs closing to clean up some speckling in lower band of image.
    test = io_util.image_diff(img1, img2)
    test = ndimage.grey_closing(test, size=(2, 2))

    # Clouds are regions above the average value of the completed transform.
    avg = np.mean(test)
    cond = np.where(test > avg, 0, 1)

    # Increases black-sky brightness in images where the moon is alone (due to
    # the low dynamic range, the sky is black because the moon is so bright).
    img3 = np.where(img3 < 150, img3 + 40, img3)
    final = np.multiply(img3, cond)

    # Find the mask and black out those pixels.
    masking = mask.generate_mask()

    final = AllSkyImage(img.name, img.date, img.camera, final)
    final = mask.apply_mask(masking, final)

    return final
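A compact restatement of the thresholding step described in the notes above, with the same constants (the mean cutoff, the 150/40 sky boost), written as a standalone helper purely for illustration:

import numpy as np
from scipy import ndimage

def contrast_step(diff, img):
    # Smooth out speckle, call everything above the mean a cloud, zero those
    # pixels, and brighten the dim sky pixels by 40.
    closed = ndimage.grey_closing(diff, size=(2, 2))
    cloud = closed > np.mean(closed)
    boosted = np.where(img < 150, img + 40, img)
    return np.where(cloud, 0, boosted)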
Example #7
def six_cloud_contrast(img):
    """Darken cloud pixels in an image taken with an exposure time of 6 seconds.

    Parameters
    ----------
    img : image.AllSkyImage
        The image.

    Returns
    -------
    numpy.ndarray
        A higher contrast version of the original image.

    Notes
    -----
    At the start of this method, the dead pixels and horizon objects are
    masked out. The image is inverted, and subtracted from itself four times.
    This highly increases the contrast between the clouds (which fall close to
    0 in the original pixel value) and the background,
    which will get reduced to 0. A copy of this image is used later in a
    separate calculation. Meanwhile, a greyscale closing is performed on this
    resulting image, which smooths out stars that were turned into small black
    dots in the inversion process.

    This result is then thresholded, creating a two-tone, black and white
    version of the image: each pixel with a value above 10 becomes white and
    anything below becomes black.
    A binary opening is performed to remove any stray single white pixels
    this creates. The horizon items are once again masked out, and
    a buffer circle of black pixels is created around the image content. As
    a result, the image is filled with white regions
    that correspond to the original clouds, with the rest of the image being
    black.

    In some images, however, the center of the Milky Way is bright enough to
    be recorded at a pixel value approximately equal to the value at which
    the clouds appear. To account for this, for each white region in the binary
    image, the method counts the number of stars in the original image that
    fall within that region and removes any region where the density of stars
    is too high to be a cloud. This leaves a binary image with clouds in
    white, and everything else in black.

    From here, a scaling darkness fraction is determined from
    the original inversion image. Cloud pixels that are close to white
    in the inversion, which come from the darkest regions of the clouds, are
    scaled to 0, while the rest of the pixels are darkened less. This preserves
    the large scale structure of the clouds, but reduces them in brightness
    to nearly 0. The exact formula used to calculate this scaling darkness
    is 0.6 - (inverted pixel value) / 255.
    """

    # Find the mask and black out those pixels.
    masking = mask.generate_mask()
    img1 = mask.apply_mask(masking, img)

    # Inverts and subtracts 4 * the original image. This replicates the previous
    # behaviour in one step.
    # Previous workflow: invert, subtract, subtract, subtract.
    # If it goes negative I want it to be 0 rather than the absolute value.
    invert = 255 - 4 * np.int16(img1.data)
    invert = np.where(invert < 0, 0, invert)

    # Smooth out the black holes left where stars were in the original.
    # We need them to be "not black" so we can tell if they're in a region.
    closedimg = ndimage.grey_closing(invert, size=(2, 1))

    # Thresholds the image into black and white with a cutoff of 10.
    # Pixels brighter than greyscale 10 become white (1); darker pixels become 0.
    binimg = np.where(closedimg > 10, 1, 0)

    # Cleans up "floating" white pixels.
    binimg = ndimage.binary_opening(binimg)

    # Mask out the horizon objects so they don't mess with cloud calculations.
    img1.data = binimg
    binimg = mask.apply_mask(masking, img1).data

    # Expand the white areas to make sure they cover the items they represent
    # from the inverted image.
    binimg = ndimage.binary_dilation(binimg)

    # Creates a buffer circle keeping the image isolated from the background.
    for row in range(0, binimg.shape[1]):
        for column in range(0, binimg.shape[0]):
            x = column - center[0]
            y = center[1] - row
            r = math.hypot(x, y)
            if (r < 246) and (r > 241):
                binimg[row, column] = 0

    # This structure makes it so that diagonally connected pixels are part of
    # the same region.
    struct = [[True, True, True], [True, True, True], [True, True, True]]
    labeled, num_features = ndimage.label(binimg, structure=struct)
    regionsize = [0] * (num_features + 1)
    starnums = [0] * (num_features + 1)

    for row in range(0, binimg.shape[1]):
        for column in range(0, binimg.shape[0]):
            regionsize[labeled[row, column]] += 1

            # This finds stars in "cloud" regions
            # Basically, if somewhat bright, and the region is marked "cloud."
            if img1.data[row, column] >= (95) and binimg[row, column] == 1:
                x = column - center[0]
                y = center[1] - row
                r = math.hypot(x, y)
                if r <= 240:
                    regionnum = labeled[row, column]
                    starnums[regionnum] += 1

    # The reason why I use density is mainly because of very small non-clouds.
    # They contain few stars, which rules out a strictly star-count method.
    # This, however, is actually density^-1, i.e. it's size/stars rather than
    # stars/size. This is because stars/size is very small sometimes.
    # I'm aware of a division by 0 warning here. If a region has no stars, then
    # this divides by 0. In fact this np.where exists to ignore that and set
    # zero-star regions to a density of 0, since I ignore those later.
    # Hence I'm suppressing the divide by 0 warning for these two lines.
    with np.errstate(divide="ignore"):
        density = np.divide(regionsize, starnums)
        density = np.where(np.asarray(starnums) < 1, 0, density)

    # Zeroes out densities < 12
    density = np.where(density < 12, 0, density)
    density[0] = 350

    # Creates a density "image".
    # This is an image where each feature has its value set to its density.
    for row in range(0, labeled.shape[1]):
        for column in range(0, labeled.shape[0]):
            value = labeled[row, column]
            labeled[row, column] = density[value]

    # If the value is less than the mean density, we want to mask it in the
    # "map" image. Hence set it to 0, everything else to 1, and multipy.
    # This keeps the non masks (x*1 = x) and ignores the others (x*0 = 0)
    m = np.mean(density[np.nonzero(density)])
    masked = np.where(labeled < m, 0, 1)
    invert2 = np.multiply(invert, masked)

    # The thinking here is that the whiter it is in the contrast++ image, the
    # darker it should be in the original. Thus increasing cloud contrast
    # without making it look like sketchy black blobs.
    multiple = .6 - invert2 / 255

    # Resets the img1 data since I used the img1 object to mask the binary.
    img1 = mask.apply_mask(masking, img)
    newimg = np.multiply(img1.data, multiple)

    # Creates a new AllSkyImage so that we don't modify the original.
    new = AllSkyImage(img.name, img.date, img.camera, newimg)

    return new
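The per-region star-density bookkeeping in six_cloud_contrast can also be expressed with np.bincount instead of the explicit pixel loops. A vectorized sketch of just that step (the 95 brightness cutoff and the 12 size-per-star threshold mirror the values above; the later mean-density cut is omitted):

import numpy as np
from scipy import ndimage

def cloud_regions(binimg, star_img, min_ratio=12):
    # Label connected white regions, counting diagonal neighbours as connected.
    struct = np.ones((3, 3), dtype=bool)
    labeled, num = ndimage.label(binimg, structure=struct)
    # Pixels per region and "stars" (bright pixels) per region.
    sizes = np.bincount(labeled.ravel(), minlength=num + 1)
    stars = np.bincount(labeled.ravel(),
                        weights=(star_img >= 95).ravel(),
                        minlength=num + 1)
    # Size-per-star ratio; regions with no stars are dropped, as in the loop above.
    with np.errstate(divide="ignore", invalid="ignore"):
        ratio = np.where(stars > 0, sizes / stars, 0)
    keep = ratio >= min_ratio
    keep[0] = False  # label 0 is the background
    return keep[labeled]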