Example #1
def _getDatatype(driver):
    # Return the NumPy dtype for the widest data type the driver can create,
    # based on the driver's DMD_CREATIONDATATYPES metadata.
    tnames = tuple(
        driver.GetMetadata_Dict()["DMD_CREATIONDATATYPES"].split(" "))
    types = tuple(gdal.GetDataTypeByName(t) for t in tnames)
    tsizes = tuple((gdal.GetDataTypeSize(t), t) for t in types)
    otype = max(tsizes, key=lambda x: x[0])[-1]
    return np.dtype(_TYPEMAP[otype])
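`_TYPEMAP` is defined elsewhere in that codebase and maps GDAL data type codes to NumPy types. A hypothetical sketch of such a mapping (the exact contents of the original table are not shown here):

# Hypothetical reconstruction of a GDAL -> NumPy dtype map like _TYPEMAP;
# the real table lives elsewhere in the example's codebase.
from osgeo import gdal
import numpy as np

_TYPEMAP = {
    gdal.GDT_Byte: np.uint8,
    gdal.GDT_UInt16: np.uint16,
    gdal.GDT_Int16: np.int16,
    gdal.GDT_UInt32: np.uint32,
    gdal.GDT_Int32: np.int32,
    gdal.GDT_Float32: np.float32,
    gdal.GDT_Float64: np.float64,
}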
Example #2
    def create_paux_header(self, filename, datatype):
        (path, ext) = os.path.splitext(filename)
        auxname = path + ".aux"
        if os.path.isfile(auxname):
            resp = GtkExtra.message_box('Confirmation',
                                        auxname + ' exists. Overwrite?',
                                        ('Yes', 'No'))
            if resp == 'No':
                return

        # Read the image parameters
        header = int(self.header_entry.get_text())
        width = int(self.width_entry.get_text())
        height = int(self.height_entry.get_text())
        bands = int(self.bands_entry.get_text())
        aux_type_dict = {'Byte': '8U', 'Int16': '16S', 'UInt16': '16U',
                         'Float32': '32R'}
        aux_type_list = ['8U', '16S', '16U', '32R']
        aux_type = aux_type_dict[datatype]
        gdaltype = gdal.GetDataTypeByName(datatype)
        interleaving = self.interleave_list[self.interleave_menu.get_history()]
        # GetDataTypeSize() returns bits; convert to bytes per sample
        datasize = gdal.GetDataTypeSize(gdaltype) // 8

        # Calculate offsets
        pixel_offset = []
        line_offset = []
        image_offset = []
        if interleaving == 'Pixel':
            for i in range(bands):
                pixel_offset.append(datasize * bands)
                line_offset.append(datasize * width * bands)
                image_offset.append(header + datasize * i)
        elif interleaving == 'Band':
            for i in range(bands):
                pixel_offset.append(datasize)
                line_offset.append(datasize * width)
                image_offset.append(header + datasize * width * height * i)
        elif interleaving == 'Line':
            for i in range(bands):
                pixel_offset.append(datasize)
                line_offset.append(datasize * width * bands)
                image_offset.append(header + datasize * width * i)
        else:
            raise ValueError('Unsupported interleaving type!')

        aux_swap_list = ['Swapped', 'Unswapped']
        swap = aux_swap_list[self.swap_menu.get_history()]

        # Write out the auxiliary header file
        aux = open(auxname, "wt")
        aux.write("AuxilaryTarget: " + os.path.basename(filename) + '\n')
        aux.write("RawDefinition: " + str(width) + ' '
                  + str(height) + ' ' + str(bands) + '\n')
        for i in range(bands):
            aux.write("ChanDefinition-" + str(i + 1) + ': ' + aux_type + ' '
                      + str(image_offset[i]) + ' ' + str(pixel_offset[i])
                      + ' ' + str(line_offset[i]) + ' ' + swap + '\n')
        aux.close()
        aux = None
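For reference, the offset arithmetic above for the 'Pixel' interleaving case works out as follows for an assumed 3-band, 512-pixel-wide Byte image with no header (an illustration only, not part of the original dialog code):

# Illustration only: assumed 3-band, 512-pixel-wide Byte image, header = 0,
# pixel-interleaved, so datasize = gdal.GetDataTypeSize(gdal.GDT_Byte) // 8 = 1.
datasize, bands, width, header = 1, 3, 512, 0

pixel_offset = datasize * bands            # 3 bytes between samples of a band
line_offset = datasize * width * bands     # 1536 bytes between scanlines
image_offsets = [header + datasize * i for i in range(bands)]   # [0, 1, 2]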
Example #3
    def _load_image(self):
        # Loads the optical and segmented image data from disk. Should only be called from
        #   load_next_image method.
        full_image_name = self.available_images[self.im_index]

        self.im_name = os.path.splitext(os.path.split(full_image_name)[1])[0]

        src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)

        # Read the image date from the metadata
        metadata = src_ds.GetMetadata()
        self.im_date = pp.parse_metadata(metadata, self.im_type)

        # Determine the bit depth of the input data type
        src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)

        # Calculate the reference points from the image histogram
        lower, upper, wb_ref, br_ref = pp.histogram_threshold(
            src_ds, src_dtype)
        self.wb_ref = np.array(wb_ref, dtype=c_uint8)
        self.br_ref = np.array(br_ref, dtype=c_uint8)

        # Load the image data
        image_data = src_ds.ReadAsArray()

        # Close the GDAL dataset
        src_ds = None

        # Rescale the input dataset using a histogram stretch
        image_data = pp.rescale_band(image_data, lower, upper)

        # Apply a white balance to the image
        image_data = pp.white_balance(image_data, self.wb_ref.astype(np.float64),
                                      float(np.amax(self.wb_ref)))

        # Convert the input data to c_uint8
        self.original_image = np.ndarray.astype(image_data, c_uint8)

        print("Creating segments on provided image...")
        watershed_image = segment_image(image_data, image_type=self.im_type)
        # Convert the segmented image to c_uint32. This is needed for the
        # Cython methods that calculate attributes of segments.
        self.segmented_image = np.ndarray.astype(watershed_image, c_uint32)
        # Clear these from memory explicitly
        image_data = None
        watershed_image = None
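Note that `gdal.GetDataTypeSize()` returns the size of the data type in bits, so the `src_dtype` value passed to `pp.histogram_threshold()` above is really a bit depth (8 for Byte, 16 for UInt16), not a dtype object. A quick check:

# GetDataTypeSize() reports the size of a GDAL data type in bits.
from osgeo import gdal

assert gdal.GetDataTypeSize(gdal.GDT_Byte) == 8
assert gdal.GetDataTypeSize(gdal.GDT_UInt16) == 16
assert gdal.GetDataTypeSize(gdal.GDT_Float32) == 32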
Example #4
def _calculate_scale_offset(nodata, band):
    """
    This method comes from the old ULA codebase.
    """
    nbits = gdal.GetDataTypeSize(band.DataType)
    dfScaleDstMin, dfScaleDstMax = 0.0, 255.0
    if nbits == 16:
        count = 32767 + nodata
        histogram = band.GetHistogram(-32767, 32767, 65536)
    else:
        count = 0
        histogram = band.GetHistogram()
    dfScaleSrcMin = count
    total = 0
    cliplower = int(0.01 * (sum(histogram) - histogram[count]))
    clipupper = int(0.99 * (sum(histogram) - histogram[count]))
    while total < cliplower and count < len(histogram) - 1:
        count += 1
        total += int(histogram[count])
        dfScaleSrcMin = count
    if nbits == 16:
        count = 32767 + nodata
    else:
        count = 0
    total = 0
    dfScaleSrcMax = count
    while total < clipupper and count < len(histogram) - 1:
        count += 1
        total += int(histogram[count])
        dfScaleSrcMax = count
    if nbits == 16:
        dfScaleSrcMin -= 32768
        dfScaleSrcMax -= 32768

    # Determine gain and offset
    diff_ = dfScaleSrcMax - dfScaleSrcMin

    # From the old Jobmanager codebase: avoid divide by zero caused by some stats.
    if diff_ == 0:
        _LOG.warning("dfScaleSrc Min and Max are equal! Applying correction")
        diff_ = 1

    dfScale = (dfScaleDstMax - dfScaleDstMin) / diff_
    dfOffset = -1 * dfScaleSrcMin * dfScale + dfScaleDstMin

    return dfScale, dfOffset
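A minimal sketch of how the returned gain and offset might be applied to rescale a band into the 0-255 destination range; the `band` and `nodata` names are assumed caller-side inputs matching the function's signature, not code from the original codebase:

# Minimal sketch (not part of the original codebase): apply the gain/offset
# computed above to linearly rescale a raster band into 0..255.
import numpy as np

dfScale, dfOffset = _calculate_scale_offset(nodata, band)
data = band.ReadAsArray().astype(np.float64)
scaled = np.clip(data * dfScale + dfOffset, 0.0, 255.0).astype(np.uint8)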
Example #5
    def create_vrt_lines(self, filename):
        image_offset = int(self.header_entry.get_text())
        width = int(self.width_entry.get_text())
        height = int(self.height_entry.get_text())
        bands = int(self.bands_entry.get_text())
        interleaving = self.interleave_list[self.interleave_menu.get_history()]
        dtype = self.type_list[self.type_menu.get_history()]
        gdaltype = gdal.GetDataTypeByName(dtype)
        # GetDataTypeSize() returns bits; convert to bytes per sample
        datasize = gdal.GetDataTypeSize(gdaltype) // 8
        byteorder = ['LSB', 'MSB'][self.swap_menu.get_history()]

        vrtdsc = vrtutils.VRTDatasetConstructor(width, height)

        if interleaving == 'Pixel':
            pixoff = datasize * bands
            lineoff = pixoff * width
            for idx in range(bands):
                imoff = image_offset + idx * datasize
                vrtdsc.AddRawBand(filename, dtype, byteorder,
                                  imoff, pixoff, lineoff)

        elif interleaving == 'Line':
            pixoff = datasize
            lineoff = datasize * width * bands
            for idx in range(bands):
                imoff = image_offset + idx * lineoff
                vrtdsc.AddRawBand(filename, dtype, byteorder,
                                  imoff, pixoff, lineoff)

        else:  # Band interleaving
            pixoff = datasize
            lineoff = datasize * width
            for idx in range(bands):
                imoff = image_offset + datasize * width * height * idx
                vrtdsc.AddRawBand(filename, dtype, byteorder,
                                  imoff, pixoff, lineoff)

        return vrtdsc.GetVRTLines()
Example #6
def unmix_image(full_image_name, clsf_file, output_name):

    # Create the output folder if it does not exist
    output_folder = os.path.dirname(output_name)
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)

    # scale factor (f): 0.5m WV pixel * 1000 = 500m pseudo-MODIS
    f = 1000
    block_size = f*5        # size of each chunk to load into memory

    src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)
    clsf_ds = gdal.Open(clsf_file, gdal.GA_ReadOnly)

    y_dim = src_ds.RasterYSize
    x_dim = src_ds.RasterXSize

    y_blocks = range(0, y_dim, block_size)
    x_blocks = range(0, x_dim, block_size)

    # Find the dark point reference
    src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)
    stretch_params = pp.histogram_threshold(src_ds, src_dtype)
    dark_ref = stretch_params[3]
    # Calculate the dark point and white point for a
    #       2pt correction based on the image histogram
    for b in range(len(dark_ref)):
        # Add a ceiling to the amount of correction per band (b)
        offset = ((b+1)**2)
        if dark_ref[b] > 65 - offset and offset < 62:
            dark_ref[b] = 65 - offset
    white_pt = stretch_params[1]
    # Enforce a minimum white point, which caps the amount of white correction
    if white_pt < 235:
        white_pt = 235

    print(stretch_params)
    # We have to read the entire image ahead of time to calculate the whole image
    #   melt pond mean, which will be used in each block read below
    image_pmean, stage_two_srm = find_image_pmean(src_ds, clsf_ds, dark_ref, white_pt)

    # Load a previously trained RFC model from disk
    model_filename = './rfc_model_worldview.p'
    with open(model_filename, 'rb') as mf:
        model = pickle.load(mf)

    # Flag for setting variables on first iteration
    initial = True
    pbar = tqdm(total=len(y_blocks)*len(x_blocks), unit='block')

    for y in y_blocks:
        for x in x_blocks:

            read_size_y = check_read_size(y, block_size, y_dim)
            read_size_x = check_read_size(x, block_size, x_dim)
            if read_size_y != block_size or read_size_x != block_size:
                pbar.update()
                continue

            optic_data = src_ds.ReadAsArray(x, y, read_size_x, read_size_y)
            clsf_data = clsf_ds.ReadAsArray(x, y, read_size_x, read_size_y)

            # Skip mostly empty blocks
            if not valid_block(optic_data):
                pbar.update()
                continue

            print("Rescaling bands...")
            # Apply the 2pt correction (simple histogram stretch)
            for b in range(7):
                optic_data[b, :, :] = pp.rescale_band(optic_data[b, :, :], dark_ref[b], white_pt)
            print("Done")

            # "Downsample" the classified image. This finds the percentage of each
            #   pseudo-MODIS pixel that is each surface type.
            ds_clsf_data = block_classification(clsf_data, f)

            # Analyze the imagery to get relevant data
            (srm_list, refl_list, pond_hsv, ocean_hsv,
             true_fractions, pond_ocean_diff, pratio) = analyze_imagery(optic_data,
                                                                        clsf_data, ds_clsf_data,
                                                                        f, pmean=image_pmean)

            # refl_list is a list of all pseudo-MODIS pixels [[b1, b2, ...], [b1, b2, ...], ...]
            # srm_list is a list of all the stage 1 reflectance matrices
            # Apply stage 1 unmixing
            s1_fract, s1_error = stage_one_unmixing(refl_list, srm_list, true_fractions)

            # Apply stage 2 unmixing
            s2_fract, s2_error = stage_two_unmixing(refl_list, stage_two_srm)

            # Unmix using a 'global' srm (generally Rosel's)
            global_srm_rosel = np.array([[.08, .16, .95], [.08, .07, .87], [.08, .22, .95], [1, 1, 1]])
            # Average of a bunch of WV images, calculated elsewhere
            global_srm_wv = np.array([[0.024, 0.201, 0.651], [0.024, 0.152, 0.557], [0.043, 0.279, 0.735], [1, 1, 1]])

            s3_fract, s3_error = stage_three_unmixing(refl_list, global_srm_wv)

            # Unmix with a machine learning method
            s4_fract, s4_error = ml_estimation(refl_list, model)

            # Find the difference between the pond color of each individual SRM and the average of all of them
            srm_diff = calculate_srm_diff(srm_list, stage_two_srm)
            srm_diff2 = calculate_srm_diff(srm_list, global_srm_rosel)
            srm_diff3 = calculate_srm_diff(srm_list, global_srm_wv)

            # write_to_tds(refl_list, true_fractions)

            if initial:
                initial = False
                # Initialize the data output lists
                true_fraction_all = true_fractions
                s1_fraction_all = s1_fract
                s1_error_all = s1_error
                s2_fraction_all = s2_fract
                s2_error_all = s2_error
                s3_fraction_all = s3_fract
                s3_error_all = s3_error
                s4_fraction_all = s4_fract
                s4_error_all = s4_error
                pond_hsv_all = pond_hsv
                ocean_hsv_all = ocean_hsv
                pond_ocean_diff_all = pond_ocean_diff
                srm_diff_all = srm_diff
                pratio_all = pratio

                srm_diff2_all = srm_diff2
                srm_diff3_all = srm_diff3
            else:
                # Append the new data to the master lists
                true_fraction_all = np.append(true_fraction_all, true_fractions, axis=0)
                s1_fraction_all = np.append(s1_fraction_all, s1_fract, axis=0)
                s1_error_all = np.append(s1_error_all, s1_error, axis=0)
                s2_fraction_all = np.append(s2_fraction_all, s2_fract, axis=0)
                s2_error_all = np.append(s2_error_all, s2_error, axis=0)
                s3_fraction_all = np.append(s3_fraction_all, s3_fract, axis=0)
                s3_error_all = np.append(s3_error_all, s3_error, axis=0)
                s4_fraction_all = np.append(s4_fraction_all, s4_fract, axis=0)
                s4_error_all = np.append(s4_error_all, s4_error, axis=0)
                pond_hsv_all = np.append(pond_hsv_all, pond_hsv, axis=0)
                ocean_hsv_all = np.append(ocean_hsv_all, ocean_hsv, axis=0)
                pond_ocean_diff_all = np.append(pond_ocean_diff_all, pond_ocean_diff, axis=0)
                srm_diff_all = np.append(srm_diff_all, srm_diff, axis=0)
                pratio_all = np.append(pratio_all, pratio, axis=0)
                srm_diff2_all = np.append(srm_diff2_all, srm_diff2, axis=0)
                srm_diff3_all = np.append(srm_diff3_all, srm_diff3, axis=0)

            pbar.update()

    pbar.close()

    src_ds = None
    clsf_ds = None

    s1_rmse, s2_rmse, s3_rmse, s4_rmse = calculate_rmse(true_fraction_all, s1_fraction_all,
                                                        s2_fraction_all, s3_fraction_all, s4_fraction_all)

    write_results(output_name, true_fraction_all,
                  s1_fraction_all, s1_error_all, s1_rmse,
                  s2_fraction_all, s2_error_all, s2_rmse,
                  s3_fraction_all, s3_error_all, s3_rmse,
                  s4_fraction_all, s4_error_all, s4_rmse,
                  pond_hsv_all, ocean_hsv_all, pond_ocean_diff_all, srm_diff_all, pratio_all)

    print("Stage 1 RMSE: {}".format(np.average(s1_rmse)))
    print("Stage 2 RMSE: {}".format(np.average(s2_rmse)))
    print("Stage 3 RMSE: {}".format(np.average(s3_rmse)))
    print("Stage 4 RMSE: {}".format(np.average(s4_rmse)))

    print("Pond differences:")
    print("Diff from image mean")
    print("{0:0.4f}, {1:0.4f}".format(np.mean(srm_diff_all), np.std(srm_diff_all)))
    print("Diff from Rosel")
    print("{0:0.4f}, {1:0.4f}".format(np.mean(srm_diff2_all), np.std(srm_diff2_all)))
    print("Diff from WV global")
    print("{0:0.4f}, {1:0.4f}".format(np.mean(srm_diff3_all), np.std(srm_diff3_all)))
Example #7
    def guess_cb(self, *args):
        """Guess image geometry parameters."""

        def correlation(array1, array2):
            """Calculate the correlation coefficient of two arrays."""
            n_elems = float(array1.shape[0])
            M1 = Numeric.add.reduce(array1)
            M2 = Numeric.add.reduce(array2)
            D1 = Numeric.add.reduce(array1 * array1) - M1 * M1 / n_elems
            D2 = Numeric.add.reduce(array2 * array2) - M2 * M2 / n_elems
            K = (Numeric.add.reduce(array1 * array2) - M1 * M2 / n_elems) \
                / math.sqrt(D1 * D2)

            return K

        header = int(self.header_entry.get_text())
        width = int(self.width_entry.get_text())
        height = int(self.height_entry.get_text())
        bands = int(self.bands_entry.get_text())
        gdaltype = \
            gdal.GetDataTypeByName(self.type_list[self.type_menu.get_history()])
        numtype = gdalnumeric.GDALTypeCodeToNumericTypeCode(gdaltype)
        # GetDataTypeSize() returns bits; convert to bytes per pixel
        depth = gdal.GetDataTypeSize(gdaltype) // 8

        filename = self.open_entry.get_text()
        if not os.path.isfile(filename):
            gvutils.error('Input file ' + filename + ' does not exist!')
            return

        filesize = os.stat(filename)[ST_SIZE]
        if filesize < header:
            gvutils.error('Specified header size is larger than the file size!')
            return
        imagesize = (filesize - header) // bands // depth

        if width != 0 and height == 0:
            height = imagesize // width
        elif width == 0 and height != 0:
            width = imagesize // height
        else:
            rawfile = open(filename, 'rb')
            longt = 40.0  # maximum possible height/width ratio
            cor_coef = 0.0
            w = int(math.sqrt(imagesize / longt))
            w_max = int(math.sqrt(imagesize * longt))
            if ((self.swap_menu.get_history() == 0 and sys.byteorder == 'little')
                    or (self.swap_menu.get_history() == 1 and sys.byteorder == 'big')):
                swap = 0
            else:
                swap = 1
            while w < w_max:
                if imagesize % w == 0:
                    scanlinesize = w * depth
                    h = imagesize // w
                    # Read two adjacent scanlines from the middle of the image
                    rawfile.seek(header + h // 2 * scanlinesize)
                    buf1 = rawfile.read(scanlinesize)
                    buf2 = rawfile.read(scanlinesize)
                    a1 = Numeric.fromstring(buf1, numtype)
                    a2 = Numeric.fromstring(buf2, numtype)
                    if swap:
                        # byteswapped() returns a new array, so keep the result
                        a1 = a1.byteswapped()
                        a2 = a2.byteswapped()

                    try:
                        tmp = correlation(a1.astype(Numeric.Float32),
                                          a2.astype(Numeric.Float32))
                    except (ZeroDivisionError, ValueError):
                        # catch zero-division / math domain errors
                        gvutils.error('Unable to guess image geometry!')
                        return

                    if tmp > cor_coef:
                        cor_coef = tmp
                        width = w
                        height = h
                w += 1

        self.width_entry.set_text(str(width))
        self.height_entry.set_text(str(height))
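The guessing loop above relies on a simple heuristic: when the candidate scanline width is correct, two adjacent scanlines read from the middle of the raw file are strongly correlated. A rough equivalent of that check using modern NumPy instead of the legacy Numeric module (a sketch, not the original code):

# Sketch of the width-guessing heuristic with modern NumPy: compare two
# adjacent "scanlines" of raw data for a candidate width.
import numpy as np

def scanline_correlation(raw_bytes, width, dtype=np.uint8):
    data = np.frombuffer(raw_bytes, dtype=dtype)
    height = data.size // width
    mid = (height // 2) * width
    a1 = data[mid:mid + width].astype(np.float32)
    a2 = data[mid + width:mid + 2 * width].astype(np.float32)
    return np.corrcoef(a1, a2)[0, 1]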
Example #8
def main():
    # Set Up Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dir",
                        help='''directory path containing date directories of 
                        images to be processed''')
    parser.add_argument("image_type",
                        type=str,
                        choices=["srgb", "wv02_ms", "pan"],
                        help="image type: 'srgb', 'wv02_ms', 'pan'")
    parser.add_argument("training_dataset", help="training data file")
    parser.add_argument("--training_label",
                        type=str,
                        default=None,
                        help="name of training classification list")
    parser.add_argument("-o",
                        "--output_dir",
                        type=str,
                        default="default",
                        help="directory to place output results.")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="display text information and progress")
    parser.add_argument("-c",
                        "--stretch",
                        type=str,
                        choices=["hist", "pansh", "none"],
                        default='hist',
                        help='''Apply image correction/stretch to input: \n
                               hist: Histogram stretch \n
                               pansh: Orthorectify / Pansharpen for MS WV images \n
                               none: No correction''')
    parser.add_argument("--pgc_script",
                        type=str,
                        default=None,
                        help="Path for the pansharpening script if needed")
    parser.add_argument("-t",
                        "--threads",
                        type=int,
                        default=16,
                        help="Number of subprocesses to start")

    # Parse Arguments
    args = parser.parse_args()

    # System filepath that contains the directories or files for batch processing
    user_input = args.input_dir
    if os.path.isdir(user_input):
        src_dir = user_input
        src_file = ''
    elif os.path.isfile(user_input):
        src_dir, src_file = os.path.split(user_input)
    else:
        raise IOError('Invalid input')
    # Image type, choices are 'srgb', 'pan', or 'wv02_ms'
    image_type = args.image_type
    # File with the training data
    tds_file = args.training_dataset
    # Default tds label is the image type
    if args.training_label is None:
        tds_label = image_type
    else:
        tds_label = args.training_label
    # Default output directory
    #   (if not provided this gets set when the tasks are created)
    dst_dir = args.output_dir
    threads = args.threads
    verbose = args.verbose
    stretch = args.stretch

    # Use the given pansh script path, otherwise search for the correct folder
    #   in the same directory as this script.
    if args.pgc_script:
        pansh_script_path = args.pgc_script
    else:
        current_path = os.path.dirname(os.path.realpath(__file__))
        pansh_script_path = os.path.join(
            os.path.split(current_path)[0], 'imagery_utils')

    # For Ames OIB Processing:
    # White balance flag (To add as user option in future, presently only used on oib imagery)
    if image_type == 'srgb':
        assess_quality = True
        white_balance = True
    else:
        assess_quality = False
        white_balance = False
    # Set a default quality score until this value is calculated
    quality_score = 1.

    # Prepare a list of images to be processed based on the user input
    #   list of task objects based on the files in the input directory.
    #   Each task is an image to process, and has a subtask for each split
    #   of that image.
    task_list = utils.create_task_list(os.path.join(src_dir, src_file),
                                       dst_dir)

    for task in task_list:

        # ASP: Restrict processing to the frame range
        # try:
        #     frameNum = getFrameNumberFromFilename(file)
        # except Exception, e:
        #     continue
        # if (frameNum < args.min_frame) or (frameNum > args.max_frame):
        #     continue

        # Skip this task if it is already marked as complete
        if task.is_complete():
            continue

        # Make the output directory if it doesn't already exist
        if not os.path.isdir(task.get_dst_dir()):
            os.makedirs(task.get_dst_dir())

        # Run Ortho/Pan scripts if necessary
        if stretch == 'pansh':
            if verbose:
                print("Orthorectifying and Pansharpening image...")

            full_image_name = os.path.join(task.get_src_dir(), task.get_id())
            pansh_filepath = pp.run_pgc_pansharpen(pansh_script_path,
                                                   full_image_name,
                                                   task.get_dst_dir())

            # Set the image name/dir to the pan output name/dir
            task.set_src_dir(task.get_dst_dir())
            task.change_id(pansh_filepath)

        # Open the image dataset with gdal
        full_image_name = os.path.join(task.get_src_dir(), task.get_id())
        if os.path.isfile(full_image_name):
            if verbose:
                print("Loading image {}...".format(task.get_id()))
            src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)
        else:
            print("File not found: {}".format(full_image_name))
            continue

        # Read metadata to get image date and keep only the metadata we need
        metadata = src_ds.GetMetadata()
        image_date = pp.parse_metadata(metadata, image_type)
        metadata = [image_type, image_date]

        # For processing icebridge imagery:
        if image_type == 'srgb':
            if image_date <= 150:
                tds_label = 'spring'
                white_balance = True
            else:
                tds_label = 'summer'

        # Load Training Data
        tds = utils.load_tds(tds_file, tds_label, image_type)
        # tds = utils.load_tds(tds_file, 'srgb', image_type)

        if verbose:
            print("Size of training set: {}".format(len(tds[1])))

        # Set necessary parameters for reading image 1 block at a time
        x_dim = src_ds.RasterXSize
        y_dim = src_ds.RasterYSize
        desired_block_size = 1600

        # GetDataTypeSize() returns the bit depth of the input data type
        src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)
        # Analyze input image histogram (if applying correction)
        if stretch == 'hist':
            stretch_params = pp.histogram_threshold(src_ds, src_dtype)
        else:  # stretch == 'none':
            # WV Images are actually 11bit stored in 16bit files
            if src_dtype > 12:
                src_dtype = 11
            stretch_params = [
                1, 2**src_dtype - 1,
                [2**src_dtype - 1 for _ in range(src_ds.RasterCount)],
                [1 for _ in range(src_ds.RasterCount)]
            ]

        # Create a blank output image dataset
        # Save the classified image output as a geotiff
        fileformat = "GTiff"
        image_name_noext = os.path.splitext(task.get_id())[0]
        dst_filename = os.path.join(task.get_dst_dir(),
                                    image_name_noext + '_classified.tif')
        driver = gdal.GetDriverByName(fileformat)
        dst_ds = driver.Create(dst_filename,
                               xsize=x_dim,
                               ysize=y_dim,
                               bands=1,
                               eType=gdal.GDT_Byte,
                               options=["TILED=YES", "COMPRESS=LZW"])

        # Transfer the metadata from input image
        # dst_ds.SetMetadata(src_ds.GetMetadata())
        # Transfer the input projection and geotransform if they are different than the default
        if src_ds.GetGeoTransform() != (0, 1, 0, 0, 0, 1):
            dst_ds.SetGeoTransform(
                src_ds.GetGeoTransform())  # sets same geotransform as input
        if src_ds.GetProjection() != '':
            dst_ds.SetProjection(
                src_ds.GetProjection())  # sets same projection as input

        # Find the appropriate image block read size
        block_size_x, block_size_y = utils.find_blocksize(
            x_dim, y_dim, desired_block_size)
        if verbose:
            print("block size: [{},{}]".format(block_size_x, block_size_y))

        # close the source dataset so that it can be loaded by each thread separately
        src_ds = None
        lock = RLock()
        block_queue, qsize = construct_block_queue(block_size_x, block_size_y,
                                                   x_dim, y_dim)
        dst_queue = Queue()

        # Display a progress bar
        if verbose:
            try:
                from tqdm import tqdm
            except ImportError:
                print("Install tqdm to display progress bar.")
                verbose = False
            else:
                pbar = tqdm(total=qsize, unit='block')

        # Set an empty value for the pixel counter
        pixel_counts = [0, 0, 0, 0, 0]

        NUMBER_OF_PROCESSES = threads
        block_procs = [
            Process(target=process_block_queue,
                    args=(lock, block_queue, dst_queue, full_image_name,
                          assess_quality, stretch_params, white_balance, tds,
                          metadata)) for _ in range(NUMBER_OF_PROCESSES)
        ]

        for proc in block_procs:
            # Add a stop command to the end of the queue for each of the
            #   processes started. This will signal for the process to stop.
            block_queue.put('STOP')
            # Start the process
            proc.start()

        # Collect data from processes as they complete tasks
        finished_threads = 0
        while finished_threads < NUMBER_OF_PROCESSES:

            if not dst_queue.empty():
                val = dst_queue.get()
                if val is None:
                    finished_threads += 1
                else:
                    # Keep only the lowest quality score found
                    quality_score_block = val[0]
                    if quality_score_block < quality_score:
                        quality_score = quality_score_block
                    # Add the pixel counts to the master list
                    pixel_counts_block = val[1]
                    for i in range(len(pixel_counts)):
                        pixel_counts[i] += pixel_counts_block[i]
                    # Write image data to output dataset
                    x = val[2]
                    y = val[3]
                    classified_block = val[4]
                    dst_ds.GetRasterBand(1).WriteArray(classified_block,
                                                       xoff=x,
                                                       yoff=y)
                    dst_ds.FlushCache()
                    # Update the progress bar
                    if verbose: pbar.update()
            # Give the other threads some time to finish their tasks.
            else:
                time.sleep(10)

        # Update the progress bar
        if verbose: pbar.update()

        # Join all of the processes back together
        for proc in block_procs:
            proc.join()

        # Close dataset and write to disk
        dst_ds = None

        # Write extra data (total pixel counts and quality score) to the database (or csv)
        output_csv = os.path.join(task.get_dst_dir(),
                                  image_name_noext + '_md.csv')
        with open(output_csv, "w") as csvfile:
            writer = csv.writer(csvfile)
            # writer.writerow(["Quality Score", "White Ice", "Gray Ice", "Melt Ponds", "Open Water", "Shadow"])
            writer.writerow([
                "Quality Score", "Thick Ice", "Thin Ice", "Shadow",
                "Open Water", "Others"
            ])
            writer.writerow([
                quality_score, pixel_counts[0], pixel_counts[1],
                pixel_counts[2], pixel_counts[3], pixel_counts[4]
            ])

        thumbnail_result = resize_tif_result(dst_filename)
        print('thumbnail_result is produced.')
        # Close the progress bar
        if verbose:
            pbar.close()
            print("Finished Processing.")