def backup(args, fits_list):
    """ Save a copy of a list of images into a backup directory.

    The backup directory is called "original_data" and lives inside
    ``args.in_dir``; it is created if it does not already exist.

    :param args: parsed command-line arguments; only ``args.in_dir`` is used.
    :param fits_list: list of paths of the images to back up.
    :return: None
    """
    backup_dir = os.path.join(args.in_dir, "original_data")
    utils.if_dir_not_exists_create(backup_dir)
    for im in fits_list:
        shutil.copy(im, backup_dir)
def combine(args):
    """ Combine (mean/median stack) input images, grouped by filter.

    For every filter present among ``args.input`` (or all together when
    ``args.all_together`` is set) the images are scaled, stacked slice by
    slice to bound memory use, combined with the statistic selected by
    ``args.average`` ("mean" or "median"), and written to disk together
    with a mask image. Optionally each result is normalized by its median.

    :param args: parsed command-line arguments (input, output, filterk,
                 all_together, scale, mask_key, average, nmin, fill_val,
                 out_mask, norm).
    :return: dict mapping each filter name to the combined image filename.
    """
    # Create the folders that do not already exist for the output file
    outdir, outfile = os.path.split(os.path.abspath(args.output))
    if outdir == "":
        outdir = "."
    utils.if_dir_not_exists_create(outdir)

    # Build a list of the filter of each image
    images_filters = utils.collect_from_images(args.input, args.filterk)

    # If user wants all images to be combined together, regardless of filter:
    if args.all_together:
        images_filters = ["AllFilters"] * len(images_filters)

    # Create a default dictionary for the resulting images
    result = collections.defaultdict(str)

    # For each of the filters present combine the images (and write)
    for filt in set(images_filters):
        # List of images with the current filter (single group if all_together)
        list1 = [args.input[p] for p, f in enumerate(images_filters)
                 if f == filt]

        # Check that all images have same dimension. If not, exit program
        if not utils.check_dimensions(list1):
            sys.exit("Dimensions of images to combine are different!")

        # Calculate scale of images
        scales = compute_scales(list1, args.scale, args.mask_key)

        # Get the sizes of the images
        lx, ly = utils.get_from_header(list1[0], "NAXIS2", "NAXIS1")

        # In order to avoid loading many images in memory, slice the images
        # along the slow axis and combine one slice of all images at a time.
        n_slices = 32  # divide the slow axis in this many pieces
        # BUGFIX: integer step for range(); the original used lx/n_slices,
        # which is a float on Python 3 (TypeError) and could be 0 for small
        # images. Guarantee a step of at least one row.
        step = max(1, lx // n_slices)

        # Define the whole image and set all elements of mask to False
        whole_image = numpy.ma.zeros([lx, ly])
        whole_image.mask = numpy.zeros_like(whole_image.data)
        for xmin in range(0, lx, step):
            xmax = min(xmin + step, lx)
            # Build and sort a section of the cube with all the images
            cube = cube_images(list1, args.mask_key, scales,
                               limits=[xmin, 0, xmax, ly])
            cube.sort(axis=0)

            # The cube is sorted so that cube[0,ii,jj] < cube[1,ii,jj] and the
            # masked elements hold the highest values of all. We take
            # advantage of that for the median, because the masked median is
            # absurdly slow: https://github.com/numpy/numpy/issues/1811
            map_cube = numpy.ma.count(cube, axis=0)  # non-masked values per pixel
            if args.average == "mean":
                image = numpy.ma.mean(cube, axis=0)
                non_masked_equivalent = numpy.mean(cube.data, axis=0)
            elif args.average == "median":
                image = home_made_median(map_cube, cube)
                non_masked_equivalent = numpy.median(cube.data, axis=0)
            else:
                # BUGFIX: the original fell through with 'image' unbound
                # (NameError) for any other value of args.average.
                sys.exit("Unknown average method: " + args.average)

            # image is a masked array; pixels with fewer than args.nmin valid
            # values are masked out. Masked values are then replaced with
            # args.fill_val if the user provided one, otherwise with the
            # unmasked equivalent of the chosen operation.
            # NOTE(review): assumes image.mask is an array, not the scalar
            # nomask — appears guaranteed by the masked cube, but confirm.
            image.mask[map_cube < args.nmin] = 1
            mask = image.mask
            if args.fill_val != '':
                image = image.filled(args.fill_val)
            else:
                image.data[mask] = non_masked_equivalent[mask]
                image = image.data
            whole_image.data[xmin:xmax, 0:ly] = image[:, :]
            whole_image.mask[xmin:xmax, 0:ly] = mask[:, :]

        # Save images. If all_together is activated, use the file name given
        # by the user. If not, separate by filter: compose a new name from
        # the one given by the user plus the filter.
        if args.all_together:
            newfile = args.output
        else:
            newfile = os.path.join(outdir,
                                   utils.add_suffix_prefix(outfile,
                                                           suffix="_" + filt))
        if args.out_mask != "":
            name_mask = args.out_mask
        else:
            name_mask = newfile + ".msk"
        if os.path.isfile(newfile):
            os.remove(newfile)
        if os.path.isfile(name_mask):
            os.remove(name_mask)
        fits.writeto(newfile, whole_image.data)
        # BUGFIX: numpy.int was removed in NumPy >= 1.24; plain int is the
        # documented replacement and is what the alias always meant.
        fits.writeto(name_mask, whole_image.mask.astype(int))
        result[filt] = newfile

        # Add comments to the headers
        string1 = " - Image built from the combination of the images: " + \
                  ", ".join(list1)
        string2 = " combine = " + args.average + ", scale = " + args.scale
        utils.add_history_line(newfile, string1 + string2)
        utils.add_history_line(name_mask, " - Mask of image: " + newfile)
        if args.mask_key != "":
            utils.header_update_keyword(newfile, args.mask_key, name_mask,
                                        "Mask for this image")

        # To normalize calculate median and call arith_images to divide by it.
        if args.norm:
            median = compute_scales([newfile], args.scale, args.mask_key)[0]
            msg = "- NORMALIZED USING MEDIAN VALUE:"
            arith_images.main(arguments=["--message", msg, "--output", newfile,
                                         "--mask_key", args.mask_key,
                                         "--fill_val", args.fill_val,
                                         newfile, "/", str(median)])
    return result
def rename(args):
    """ Rename and sort FITS images by object type, name, date and filter.

    All ``.fit``/``.fits`` files in ``args.in_dir`` matching
    ``args.in_pattern`` are sorted chronologically, optionally backed up
    (``args.copy``), and renamed into ``args.out_dir/<object_type>/`` with a
    name of the form ``objname_date[_filter]_NNN.fits``. A history line
    recording the rename is added to each header. With ``args.overwrite``
    the original file is updated in place and moved; otherwise a new file
    is written and the original is left untouched.

    :param args: parsed command-line arguments (in_dir, in_pattern, copy,
                 out_dir, overwrite).
    :return: dict with keys "filename", "type", "objname", "time", each a
             numpy array describing the renamed images.
    """
    # List of fit and fits images in the directory
    fits_list1 = glob.glob(os.path.join(args.in_dir, args.in_pattern + "*.fits"))
    fits_list2 = glob.glob(os.path.join(args.in_dir, args.in_pattern + "*.fit"))
    fits_list = fits_list1 + fits_list2

    # Sort all images chronologically
    fits_list = sort_by_date(fits_list)

    # If --copy was selected, copy all those files into a directory called
    # original_data
    if args.copy:
        backup(args, fits_list)

    # The output of the whole code will be this dictionary, in which the
    # images are sorted in groups (bias, skyflats, domeflats, cigXXXX, ...).
    # NOTE(review): the "time" entry is declared but never filled below.
    empty_array = numpy.asarray([], dtype=object)
    final_dict = {"filename": numpy.asarray([], dtype="S150"),
                  "type": empty_array,
                  "objname": empty_array,
                  "time": empty_array}

    # Run through all images
    for im_name in fits_list:
        # Read image and header, extract name of object and filter.
        im = astroim.Astroim(im_name)
        object_name = re.sub('[\s\-_\(\)]', "", im.target.objname.lower())
        object_type = im.target.objtype
        object_filter = im.filter.__str__()
        object_date = dateutil.parser.parse(
            im.primary_header.get(im.primary_header.datek)).date()
        object_date = re.sub('[\s\-\_\:]', "", object_date.__str__())

        # If the subfolder out_dir/object_type does not exist, create it,
        # because we will create/move the new file there.
        newdir = os.path.join(args.out_dir, object_type)
        utils.if_dir_not_exists_create(newdir)

        # New name for the file is determined by the object type (for the
        # subfolder), object, date and filter. For bias frames the filter has
        # no meaning, so we don't put it; just add "_" to separate the
        # sequential number that follows.
        new_name = os.path.join(newdir,
                                "{0}_{1}".format(object_name, object_date))
        if object_type != "bias":
            new_name = "{0}_{1}_".format(new_name, object_filter)
        else:
            new_name = "{0}_".format(new_name)

        # Find the first sequential number whose file does not exist yet.
        jj = 1
        while True:
            newfile = new_name + str(jj).zfill(3) + '.fits'
            if not os.path.isfile(newfile):
                break
            jj += 1

        oldname_base = os.path.basename(im_name)
        newname_base = os.path.basename(newfile)

        # Add history comment into the header. If image is to be overwritten,
        # just update the image with the changes in the header and move it to
        # its new name. Otherwise, save it to the new file immediately.
        # BUGFIX: the original always opened in 'update' mode and never
        # closed the handle in the non-overwrite branch — a resource leak,
        # and an 'update' handle flushes header changes back into the
        # ORIGINAL file when garbage-collected. Open read-only unless we
        # really mean to modify the original, and close in both branches.
        mode = 'update' if args.overwrite else 'readonly'
        im = fits.open(im_name, mode)
        hdr = im[0].header
        hdr.add_history("- Image " + oldname_base + " renamed " + newname_base)
        if args.overwrite:
            im.flush()
            im.close()
            os.rename(im_name, newfile)
        else:
            im.writeto(newfile)
            im.close()

        # Add image to the dictionary for the output.
        final_dict["filename"] = numpy.append(final_dict["filename"], newfile)
        final_dict["objname"] = numpy.append(final_dict["objname"], object_name)
        final_dict["type"] = numpy.append(final_dict["type"], object_type)

    # Return the dictionary with the images sorted in groups.
    return final_dict