def main():
    # Note: the description below previously said "Convert set of images into
    # video.", which belongs to the next script; this one fills EXIF values
    # into a ROOT tree.
    parser = argparse.ArgumentParser(description="Fill exif tag values of a set of images into a ROOT tree.", parents=[logger.loggingParser])
    parser.add_argument("images", nargs="+", help="Image files.")
    parser.add_argument("-e", "--exif-tags", nargs="+", default=[], help="Exif tags to retrieve. [Default: %(default)s]")
    parser.add_argument("-r", "--reg-exps", nargs="+", default=[None], help="Regular expressions to retrieve values from exif infos. [Default: %(default)s]")
    parser.add_argument("-b", "--branch-names", nargs="+", default=[None], help="Names for branches. [Default: %(default)s]")
    parser.add_argument("-o", "--output", default="exif-stats.root", help="Output ROOT file. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    root_file = ROOT.TFile(args.output, "RECREATE")
    tree = ROOT.TTree("exif", "")

    values = []
    for branch_name in args.branch_names:
        values.append(numpy.zeros(1, dtype=float))
        tree.Branch(branch_name, values[-1], branch_name + "/D")

    for image in progressiterator.ProgressIterator(args.images, description="Processing images"):
        for index, (exif_tag, reg_exp) in enumerate(zip(args.exif_tags, args.reg_exps)):
            exif_result = phototools.load_exif_field(image, *["-d", "%Y:%m:%d %H:%M:%S", "-" + exif_tag])
            values[index][0] = float(exif_result.replace("mm", "").strip())
        tree.Fill()

    root_file.Write()
    log.info("Created tree \"exif\" in file \"%s\"." % args.output)

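# phototools.load_exif_field is called above with exiftool-style arguments
# ("-d", "<date format>", "-<TagName>") but is not part of this collection.
# A minimal sketch, assuming it shells out to the exiftool CLI and returns
# the raw field value as a string; the real helper may differ.
import subprocess

def load_exif_field(image, *exiftool_args):
    """Return the value of one EXIF field of `image` via exiftool (-s3 prints values only)."""
    output = subprocess.check_output(["exiftool", "-s3"] + list(exiftool_args) + [image])
    return output.decode("utf-8").strip()
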
def main():
    parser = argparse.ArgumentParser(description="Convert set of images into video.", parents=[logger.loggingParser])
    parser.add_argument("-i", "--images", nargs="+", help="Image files.")
    parser.add_argument("-o", "--output", default="output.mp4", help="Output video file. [Default: %(default)s]")
    parser.add_argument("-r", "--resolution", nargs=2, default=[1280, 720], type=int, help="Video resolution. [Default: %(default)s]")
    parser.add_argument("-f", "--frame-rate", default=25.0, type=float, help="Video frame rate. [Default: %(default)s]")
    parser.add_argument("-d", "--image-durations", nargs="+", default=[5.0], type=float, help="Image slide durations in seconds. [Default: %(default)s]")
    parser.add_argument("-t", "--transition-times", nargs="+", default=[1.0], type=float, help="Transition times in seconds. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    multivision = multivisiontools.Multivision(
        frame_rate=args.frame_rate,
        resolution=args.resolution,
        video_format=os.path.splitext(args.output)[-1]
    )
    output = multivision.images_to_video(
        args.images,
        video_file=args.output,
        image_durations=args.image_durations,
        transition_times=args.transition_times
    )
    log.info("Created video in \"%s\" from %d images." % (args.output, len(args.images)))

def main():
    parser = argparse.ArgumentParser(description="Set up directory structure for 2D photos.", parents=[logger.loggingParser])
    parser.add_argument("project_dir", help="Project directory containing photo/movie files")
    parser.add_argument("-j", "--jpg-dir", default="JPG", help="Subdirectory name for the JPG photos. [Default: %(default)s]")
    parser.add_argument("-r", "--raw-dir", default="RAW", help="Subdirectory name for the RAW photos. [Default: %(default)s]")
    parser.add_argument("-m", "--mov-dir", default="MOV", help="Subdirectory name for the movies. [Default: %(default)s]")
    parser.add_argument("--jpg-ext", nargs="+", default=["jpg", "jpeg", "JPG", "JPEG"], help="JPG file extensions. [Default: %(default)s]")
    parser.add_argument("--raw-ext", nargs="+", default=["cr2", "CR2"], help="RAW file extensions. [Default: %(default)s]")
    parser.add_argument("--mov-ext", nargs="+", default=["mov", "mpeg", "mp4", "MOV", "MPEG", "MP4"], help="MOV file extensions. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)
    args = vars(args)

    for file_type in ["JPG", "RAW", "MOV"]:
        files = filetools.get_files(args["project_dir"], args["%s_ext" % file_type.lower()])
        sub_dir = os.path.join(args["project_dir"], args["%s_dir" % file_type.lower()])
        if (len(files) > 0) and (not os.path.exists(sub_dir)):
            os.makedirs(sub_dir)
        for file in progressiterator.ProgressIterator(files, description="Move %s files" % file_type):
            os.rename(file, os.path.join(sub_dir, os.path.basename(file)))

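# filetools.get_files is used by several scripts in this collection but not
# defined here. A minimal sketch, inferred from the call sites: it takes a
# directory and a list of extensions (or "*" glob patterns) and returns the
# matching file paths. The real helper may recurse or filter differently.
import glob
import os

def get_files(directory, extensions):
    """Return files in `directory` whose names match any of the given extensions."""
    files = []
    for ext in extensions:
        pattern = ext if ext == "*" else ("*." + ext)
        files.extend(glob.glob(os.path.join(directory, pattern)))
    return sorted(files)
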
def main():
    parser = argparse.ArgumentParser(description="Photo tools.", parents=[logger.loggingParser])
    parser.add_argument("-s", "--step", type=int, default=0, help="Step number. [Default: %(default)s]")
    parser.add_argument("-m", "--mode", default="2D", choices=["2D", "3D"], help="Mode. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    steps = {
        "2D": [
            "move.py",
            "sync_jpg_raw.py",
            "cluster_panos.py",
            "rename.py",
        ],
        "3D": [],
    }

    if (args.step > 0) and (args.step - 1 < len(steps[args.mode])):
        log.info(steps[args.mode][args.step - 1])
    else:
        log.info("Steps for %s mode:" % args.mode)
        for index, step in enumerate(steps[args.mode]):
            log.info("\t%2d. %s" % (index + 1, step))

def main():
    parser = argparse.ArgumentParser(description="Rename set of photos.", parents=[logger.loggingParser])
    parser.add_argument("-j", "--jpg-dir", required=True, help="Directory containing JPG photos.")
    parser.add_argument("--jpg-ext", nargs="+", default=["jpg", "jpeg", "JPG", "JPEG"], help="JPG file extensions. [Default: %(default)s]")
    parser.add_argument("-r", "--raw-dir", help="Directory containing RAW photos.")
    parser.add_argument("--raw-ext", nargs="+", default=["cr2", "dng", "CR2", "DNG"], help="RAW file extensions. [Default: %(default)s]")
    parser.add_argument("-e", "--exif-date-tag", default="DateTimeOriginal", help="Exif tag for date of creation. [Default: %(default)s]")
    parser.add_argument("-n", "--name", required=True, help="File name prefix.")
    args = parser.parse_args()
    logger.initLogger(args)

    jpg_files = filetools.get_files(args.jpg_dir, args.jpg_ext)
    raw_files = [] if args.raw_dir is None else filetools.get_files(args.raw_dir, args.raw_ext)

    files_dates = []
    for files in [jpg_files, raw_files]:
        files_dates.extend(phototools.FileDate.get_files_dates(files, args.exif_date_tag))
    files_dates.sort()

    rename_files = phototools.FileDate.rename_files(files_dates, args.name)
    for original_file, new_file in progressiterator.ProgressIterator(rename_files, description="Rename files"):
        os.rename(original_file, new_file)

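# phototools.FileDate is used throughout this collection but never defined in
# it. A minimal sketch of the interface the call sites rely on: a (file, date)
# pair with .file and .date attributes that sorts and compares by date, plus
# the two class methods used above. load_exif_date is a hypothetical helper;
# only the names and behavior visible at the call sites are taken as given.
import os

class FileDate(object):
    def __init__(self, file, date):
        self.file = file  # path to the image
        self.date = date  # numeric timestamp, so float(date) works

    def __eq__(self, other):
        return self.date == other.date  # JPG/RAW pairs match by capture date

    def __lt__(self, other):
        return self.date < other.date

    @classmethod
    def get_files_dates(cls, files, exif_date_tag):
        """Read the given EXIF date tag for every file (sketch)."""
        return [cls(f, load_exif_date(f, exif_date_tag)) for f in files]  # load_exif_date: hypothetical

    @classmethod
    def rename_files(cls, files_dates, name):
        """Map date-sorted files onto '<name>_<index><ext>'; returns (old, new) pairs (sketch)."""
        digits = len(str(len(files_dates)))
        pairs = []
        for index, file_date in enumerate(files_dates):
            ext = os.path.splitext(file_date.file)[-1]
            new_name = "%s_%0*d%s" % (name, digits, index + 1, ext)
            pairs.append((file_date.file, os.path.join(os.path.dirname(file_date.file), new_name)))
        return pairs
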
def set_log(sLogLevel="info",isToFile="False"): ''' Set log level and if wite to file \n This method maybe not use. ''' logger.initLogger() logger.cfgToFile(isToFile) logger.cfgLevel(sLogLevel)
def eval():
    # Initialize Logger
    logger.initLogger(args.debug)

    # print iSeeBetter architecture
    utils.printNetworkArch(netG=model, netD=None)

    # load model
    modelPath = os.path.join(args.model)
    utils.loadPreTrainedModel(gpuMode=args.gpu_mode, model=model, modelPath=modelPath)
    model.eval()

    count = 0
    avg_psnr_predicted = 0.0
    for batch in testing_data_loader:
        input, target, neigbor, flow, bicubic = batch[0], batch[1], batch[2], batch[3], batch[4]

        with torch.no_grad():
            if cuda:
                input = Variable(input).cuda(gpus_list[0])
                bicubic = Variable(bicubic).cuda(gpus_list[0])
                neigbor = [Variable(j).cuda(gpus_list[0]) for j in neigbor]
                flow = [Variable(j).cuda(gpus_list[0]).float() for j in flow]
            else:
                input = Variable(input).to(device=device, dtype=torch.float)
                bicubic = Variable(bicubic).to(device=device, dtype=torch.float)
                neigbor = [Variable(j).to(device=device, dtype=torch.float) for j in neigbor]
                flow = [Variable(j).to(device=device, dtype=torch.float) for j in flow]

        t0 = time.time()
        if args.chop_forward:
            with torch.no_grad():
                prediction = chop_forward(input, neigbor, flow, model, args.upscale_factor)
        else:
            with torch.no_grad():
                prediction = model(input, neigbor, flow)

        if args.residual:
            prediction = prediction + bicubic

        t1 = time.time()
        print("==> Processing: %s || Timer: %.4f sec." % (str(count), (t1 - t0)))
        save_img(prediction.cpu().data, str(count), True)
        save_img(target, str(count), False)

        prediction = prediction.cpu()
        prediction = prediction.data[0].numpy().astype(np.float32)
        prediction = prediction * 255.

        target = target.squeeze().numpy().astype(np.float32)
        target = target * 255.

        psnr_predicted = PSNR(prediction, target, shave_border=args.upscale_factor)
        print("PSNR Predicted = ", psnr_predicted)
        avg_psnr_predicted += psnr_predicted
        count += 1

    print("Avg PSNR Predicted = ", avg_psnr_predicted / count)

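# The PSNR helper used above is not part of this snippet. A minimal sketch,
# assuming inputs already scaled to 0..255 (as done above) and a shave_border
# parameter that crops the border before evaluation, which is the common
# convention in super-resolution code:
import numpy as np

def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio between two images scaled to 0..255."""
    if shave_border > 0:
        pred = pred[..., shave_border:-shave_border, shave_border:-shave_border]
        gt = gt[..., shave_border:-shave_border, shave_border:-shave_border]
    mse = np.mean((pred - gt) ** 2)
    if mse == 0:
        return float("inf")
    return 20.0 * np.log10(255.0 / np.sqrt(mse))
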
def main():
    parser = argparse.ArgumentParser(description="Cluster panoramas from portrait-format photos.", parents=[logger.loggingParser])
    parser.add_argument("-j", "--jpg-dir", required=True, help="Directory containing JPG photos.")
    parser.add_argument("-r", "--raw-dir", help="Directory containing RAW photos.")
    parser.add_argument("-p", "--pano-dir", default="PANO", help="Subdirectory for panoramas. [Default: %(default)s]")
    parser.add_argument("-e", "--exif-date-tag", default="DateTimeOriginal", help="Exif tag for date of creation. [Default: %(default)s]")
    parser.add_argument("-n", "--name", help="Name for panorama directories.")
    parser.add_argument("--min-n-photos", type=int, default=3, help="Minimum number of photos in one panorama. [Default: %(default)s]")  # type=int added; command-line values were compared against len() as strings
    parser.add_argument("--dry-run", action="store_true", default=False, help="Testing mode without applying any changes to the files. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    jpg_files = filetools.get_files(args.jpg_dir, ["*"])
    raw_files = [] if args.raw_dir is None else filetools.get_files(args.raw_dir, ["*"])

    jpg_files_dates = phototools.FileDate.get_files_dates(jpg_files, args.exif_date_tag)
    raw_files_dates = phototools.FileDate.get_files_dates(raw_files, args.exif_date_tag)

    portrait_jpg_files_dates = filter(lambda file_date: phototools.is_portrait_format(file_date.file),
                                      progressiterator.ProgressIterator(jpg_files_dates, description="Read image dimensions"))
    portrait_jpg_files_dates.sort()

    # cluster on (date, file number) with a fixed distance threshold
    cluster_data = numpy.array([[float(file_date.date),
                                 float(re.search(".*_(?P<file_number>\d*)\.\w*", os.path.basename(file_date.file)).groupdict()["file_number"])]
                                for file_date in portrait_jpg_files_dates])
    cluster_indices = scipy.cluster.hierarchy.fclusterdata(cluster_data, 25.0, criterion="distance")

    clustered_jpg_files_dates = {}
    for index, cluster_index in enumerate(cluster_indices):
        clustered_jpg_files_dates.setdefault(cluster_index, []).append(portrait_jpg_files_dates[index])
    clustered_jpg_files_dates = [cluster for cluster in clustered_jpg_files_dates.values() if len(cluster) >= args.min_n_photos]
    clustered_jpg_files_dates.sort(key=lambda cluster: cluster[0].file)

    for index, jpg_files_dates_cluster in enumerate(progressiterator.ProgressIterator(clustered_jpg_files_dates, description="Move candidates of panoramic photos to subdirectories")):
        raw_files_dates_cluster = [raw_files_dates[raw_files_dates.index(jpg_file_date)]
                                   for jpg_file_date in jpg_files_dates_cluster if jpg_file_date in raw_files_dates]

        pano_sub_dir = os.path.join(args.pano_dir, ("%s_" % args.name if args.name else "") + str(index + 1))
        jpg_sub_dir = os.path.join(args.jpg_dir, pano_sub_dir)
        raw_sub_dir = os.path.join(args.raw_dir, pano_sub_dir)
        if (not args.dry_run) and (not os.path.exists(jpg_sub_dir)):
            os.makedirs(jpg_sub_dir)
        if (not args.dry_run) and (len(raw_files_dates_cluster) > 0) and (not os.path.exists(raw_sub_dir)):
            os.makedirs(raw_sub_dir)

        for file_date in jpg_files_dates_cluster + raw_files_dates_cluster:
            new_file = os.path.join(os.path.dirname(file_date.file), pano_sub_dir, os.path.basename(file_date.file))
            if args.dry_run:
                log.info("mv %s %s" % (file_date.file, new_file))
            else:
                os.rename(file_date.file, new_file)

def main():
    initLogger(LOG_PATH, DEBUG)
    app = tornado.web.Application(
        handlers=[
            (r'/query', QueryHandler),
        ]
    )
    httpserver = tornado.httpserver.HTTPServer(app)
    httpserver.listen(8000)
    tornado.ioloop.IOLoop.instance().start()

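# QueryHandler is routed above but not shown. A minimal sketch of a Tornado
# request handler that would fit the '/query' route; the echoed JSON body and
# the 'q' parameter name are purely illustrative.
import tornado.web

class QueryHandler(tornado.web.RequestHandler):
    def get(self):
        # echo the 'q' query parameter back as a JSON object
        self.write({"query": self.get_argument("q", default="")})
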
def test_defaultLevel(self):
    """
    The initLogger function must set the logger level to info
    if no APP_ENV is defined.
    """
    with patch('logger.logging.basicConfig') as mockedBasicCfg:
        initLogger()
        mockedBasicCfg.assert_called_once_with(level=logging.INFO,
                                               format='%(asctime)s'
                                                      ' %(levelname)s:'
                                                      '%(name)s:%(message)s')

def main():
    parser = argparse.ArgumentParser(description="Stereo Reconstruction.", parents=[logger.loggingParser])
    parser.add_argument("--camera-1-center", type=float, default=-10.0, help="Camera 1 center. [Default: %(default)s]")
    parser.add_argument("--camera-1-size", type=float, default=10.0, help="Camera 1 size. [Default: %(default)s]")
    parser.add_argument("--camera-1-n-pixels", type=int, default=20, help="Camera 1 number of pixels. [Default: %(default)s]")
    parser.add_argument("--camera-1-angle", type=float, default=45.0, help="Camera 1 angle. [Default: %(default)s]")
    parser.add_argument("--camera-2-center", type=float, default=10.0, help="Camera 2 center. [Default: %(default)s]")
    parser.add_argument("--camera-2-size", type=float, default=10.0, help="Camera 2 size. [Default: %(default)s]")
    parser.add_argument("--camera-2-n-pixels", type=int, default=20, help="Camera 2 number of pixels. [Default: %(default)s]")
    parser.add_argument("--camera-2-angle", type=float, default=45.0, help="Camera 2 angle. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    camera1 = Camera(args.camera_1_center, args.camera_1_size, args.camera_1_n_pixels, args.camera_1_angle)
    camera2 = Camera(args.camera_2_center, args.camera_2_size, args.camera_2_n_pixels, args.camera_2_angle)

    """
    real_object = RealObject(0.0, 100.0, 1.0)
    #real_object = RealObject(5.0, 100.0, 1.0)
    #real_object = RealObject(10.0, 100.0, 1.0)
    print real_object

    photo_object1 = camera1.to_photo_object(real_object)
    print photo_object1
    real_object1 = camera1.to_real_object(photo_object1, 100.0)
    print real_object1

    photo_object2 = camera2.to_photo_object(real_object)
    print photo_object2
    real_object2 = camera2.to_real_object(photo_object2, 100.0)
    print real_object2
    """

    reality = Reality([RealObject(index*10.0, 100, abs(index)) for index in xrange(-5, 6, 1)] +
                      [RealObject(index*10.0, 200, abs(index+100)) for index in xrange(-5, 6, 1)])
    photo1 = Photo(camera1, reality)
    photo2 = Photo(camera2, reality)

    print reality
    print ""
    print camera1
    print photo1
    print ""
    print camera2
    print photo2

def test_devLevel(self):
    """
    The initLogger function must set the logger level to debug
    if the APP_ENV is defined as dev.
    """
    with patch('logger.logging.basicConfig') as mockedBasicCfg:
        os.environ['APP_ENV'] = 'dev'
        initLogger()
        mockedBasicCfg.assert_called_once_with(level=logging.DEBUG,
                                               format='%(asctime)s'
                                                      ' %(levelname)s:'
                                                      '%(name)s:%(message)s')

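# An initLogger consistent with the two tests above would pick the level from
# APP_ENV and delegate to logging.basicConfig. This is a sketch reconstructed
# from the test expectations, not the tested module itself:
import logging
import os

def initLogger():
    level = logging.DEBUG if os.environ.get('APP_ENV') == 'dev' else logging.INFO
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
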
def main():
    parser = argparse.ArgumentParser(description="Convert Geeqie collection into web gallery.", parents=[logger.loggingParser])
    parser.add_argument("-i", "--input", required=True, help="Input Geeqie collection file.")
    parser.add_argument("-o", "--output", required=True, help="Output directory.")
    parser.add_argument("-t", "--title", help="Title. [Default: name of input file]")
    parser.add_argument("-f", "--favicon", help="Favicon.")
    args = parser.parse_args()
    logger.initLogger(args)

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    if args.title is None:
        args.title = os.path.splitext(os.path.basename(args.input))[0]

    input_files = []
    with open(args.input) as collection_file:
        for line in collection_file:
            line = line.strip()
            if line.startswith("\"") and line.endswith("\""):
                input_files.append(os.path.relpath(line.replace("\"", ""), "."))

    name_format = "image_%0" + str(int(math.floor(math.log10(len(input_files)))) + 1) + "d%s"

    images_html = ""
    template_image_html = string.Template("<div class=\"step slide photo\" data-x=\"$data_x\" data-y=\"0\" style=\"background-image:url($path);\"></div>")
    for index, input_file in enumerate(progressiterator.ProgressIterator(input_files, description="Copy and resize files")):
        output_file = os.path.join(args.output, name_format % (index + 1, os.path.splitext(input_file)[-1]))
        #logger.subprocessCall(shlex.split("convert %s -resize 1500x1000> %s" % (input_file, output_file)))
        image_html = template_image_html.safe_substitute(data_x=str(index * 1600), path=os.path.basename(output_file))
        images_html += ("\n" + image_html)

    if args.favicon:
        shutil.copy(args.favicon, os.path.join(args.output, os.path.basename(args.favicon)))

    template_index_html = None
    with open(os.path.join(os.path.dirname(__file__), "template.index.html")) as template_index_html_file:
        template_index_html = string.Template(template_index_html_file.read())
    index_html = template_index_html.safe_substitute(
        images=images_html,
        title=args.title,
        favicon=os.path.join(args.output, os.path.basename(args.favicon)) if args.favicon else ""
    )
    with open(os.path.join(args.output, "index.html"), "w") as index_html_file:
        index_html_file.write(index_html)

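# The name_format arithmetic above pads indices to the width of the largest
# index: floor(log10(n)) + 1 digits for n input files. A quick worked check
# with hypothetical numbers: for 250 files the width is 3, so the format is
# "image_%03d%s" and the first .jpg becomes "image_001.jpg".
import math

n = 250
width = int(math.floor(math.log10(n))) + 1  # -> 3
assert ("image_%0" + str(width) + "d%s") % (1, ".jpg") == "image_001.jpg"
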
def main(): """ Application main. """ logger = initLogger() app = AppComposer(logger) app.run()
def main():
    parser = argparse.ArgumentParser(description="Synchronise JPG and RAW photos, including options to remove supernumerary files.", parents=[logger.loggingParser])
    parser.add_argument("-j", "--jpg-dir", required=True, help="Directory containing JPG photos.")
    parser.add_argument("-r", "--raw-dir", required=True, help="Directory containing RAW photos.")
    parser.add_argument("-e", "--exif-date-tag", default="DateTimeOriginal", help="Exif tag for date of creation. [Default: %(default)s]")
    parser.add_argument("-m", "--modes", default=["print"], action="append", choices=["print", "rm_jpg", "rm_raw"], help="Modes. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    jpg_files = filetools.get_files(args.jpg_dir, ["*"])
    raw_files = filetools.get_files(args.raw_dir, ["*"])

    jpg_files_dates = phototools.FileDate.get_files_dates(jpg_files, args.exif_date_tag)
    raw_files_dates = phototools.FileDate.get_files_dates(raw_files, args.exif_date_tag)
    jpg_files_dates.sort()
    raw_files_dates.sort()

    # merge the two date-sorted lists and collect files present on one side only
    jpg_index, only_jpg_files = 0, []
    raw_index, only_raw_files = 0, []
    while jpg_index < len(jpg_files_dates) and raw_index < len(raw_files_dates):
        if jpg_files_dates[jpg_index] == raw_files_dates[raw_index]:
            jpg_index += 1
            raw_index += 1
        elif jpg_files_dates[jpg_index].date < raw_files_dates[raw_index].date:
            only_jpg_files.append(jpg_files_dates[jpg_index].file)
            jpg_index += 1
        else:
            only_raw_files.append(raw_files_dates[raw_index].file)
            raw_index += 1

    if "print" in args.modes:
        for file in only_jpg_files + only_raw_files:
            log.info(file)
    if "rm_jpg" in args.modes:
        for file in progressiterator.ProgressIterator(only_jpg_files, description="Remove supernumerary JPG files"):
            os.remove(file)
    if "rm_raw" in args.modes:
        for file in progressiterator.ProgressIterator(only_raw_files, description="Remove supernumerary RAW files"):
            os.remove(file)

def main():
    parser = argparse.ArgumentParser(description="Convert set of images into Kdenlive project.", parents=[logger.loggingParser])
    parser.add_argument("-k", "--kdenlive-template", required=True, help="Kdenlive template project file.")
    parser.add_argument("-i", "--images", nargs="+", help="Image files.")
    parser.add_argument("-o", "--output", default="project.kdenlive", help="Output Kdenlive project file. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    kdenliveProject = kdenlivetools.KdenliveProject(args.kdenlive_template)
    kdenliveProject.add_images(args.images)
    kdenliveProject.save(args.output)
    log.info("Created Kdenlive project \"%s\" from %d images." % (args.output, len(args.images)))

def main():
    parser = argparse.ArgumentParser(description="Convert Geeqie collection into directory containing the files.", parents=[logger.loggingParser])
    parser.add_argument("-i", "--input", required=True, help="Input Geeqie collection file.")
    parser.add_argument("-o", "--output", required=True, help="Output directory.")
    parser.add_argument("--copy", action="store_true", default=False, help="Copy files instead of creating symlinks.")
    parser.add_argument("-n", "--name", help="Prefix name of the files. [Default: name of input file]")
    args = parser.parse_args()
    logger.initLogger(args)

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    if args.name is None:
        args.name = os.path.splitext(os.path.basename(args.input))[0]

    input_files = []
    with open(args.input) as collection_file:
        for line in collection_file:
            line = line.strip()
            if line.startswith("\"") and line.endswith("\""):
                input_files.append(line.replace("\"", ""))

    name_format = args.name + "_%0" + str(int(math.floor(math.log10(len(input_files)))) + 1) + "d%s"
    for index, input_file in enumerate(progressiterator.ProgressIterator(input_files, description="Create files")):
        output_file = os.path.join(args.output, name_format % (index + 1, os.path.splitext(input_file)[-1]))
        if args.copy:
            shutil.copy(input_file, output_file)
        else:
            # store the symlink target relative to the link's directory
            input_file = os.path.relpath(input_file, os.path.dirname(output_file))
            os.symlink(input_file, output_file)

def main():
    parser = argparse.ArgumentParser(description="Post-process dual ISO images with Magic Lantern.", parents=[logger.loggingParser])
    parser.add_argument("images", nargs="+", help="Image files or directories.")
    parser.add_argument("--cr2hdr", default="$HOME/magic-lantern/modules/dual_iso/cr2hdr", help="Executable cr2hdr. [Default: %(default)s]")
    parser.add_argument("--raw-ext", nargs="+", default=["cr2", "CR2"], help="RAW file extensions. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)
    args.cr2hdr = os.path.expandvars(args.cr2hdr)

    images = tools.flatten(
        [tools.flatten([glob.glob(os.path.join(arg, "*." + ext)) for ext in args.raw_ext])
         if os.path.isdir(arg) else [arg] for arg in args.images]
    )

    """
    images_isos = [
        (image, phototools.load_exif_field(image, "-ISO"), phototools.load_exif_field(image, "-AutoISO"))
        for image in progressiterator.ProgressIterator(images, description="Load EXIF ISO infos")
    ]
    dual_iso_images = [image for image, iso, auto_iso in images_isos if iso != auto_iso]
    """
    dual_iso_images = images

    for image in progressiterator.ProgressIterator(dual_iso_images, description="Post-process dual ISO images"):
        command = "%s %s" % (args.cr2hdr, image)
        log.debug(command)
        logger.subprocessCall(command)

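# tools.flatten is used above to collapse one level of nesting. A minimal
# sketch matching that call pattern (a list of lists in, a flat list out);
# the real helper may handle deeper nesting:
def flatten(list_of_lists):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in list_of_lists for item in sublist]
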
def eval():
    # Initialize Logger
    logger.initLogger(args.debug)

    # print iSeeBetter architecture
    utils.printNetworkArch(netG=model, netD=None)

    # load model
    modelPath = os.path.join(args.model)
    utils.loadPreTrainedModel(gpuMode=args.gpu_mode, model=model, modelPath=modelPath)
    model.eval()

    count = 0
    avg_psnr_predicted = 0.0
    for batch in testing_data_loader:
        #import pdb; pdb.set_trace()
        input, target, neigbor, flow, bicubic = batch[0], batch[1], batch[2], batch[3], batch[4]

        with torch.no_grad():
            if cuda:
                input = Variable(input).cuda(gpus_list[0])
                bicubic = Variable(bicubic).cuda(gpus_list[0])
                neigbor = [Variable(j).cuda(gpus_list[0]) for j in neigbor]
                flow = [Variable(j).cuda(gpus_list[0]).float() for j in flow]
            else:
                input = Variable(input).to(device=device, dtype=torch.float)
                bicubic = Variable(bicubic).to(device=device, dtype=torch.float)
                neigbor = [Variable(j).to(device=device, dtype=torch.float) for j in neigbor]
                flow = [Variable(j).to(device=device, dtype=torch.float) for j in flow]

        t0 = time.time()
        if args.chop_forward:
            with torch.no_grad():
                prediction = chop_forward(input, neigbor, flow, model, args.upscale_factor)
        else:
            with torch.no_grad():
                # unroll lists
                neigbor0 = neigbor[0]
                neigbor1 = neigbor[1]
                neigbor2 = neigbor[2]
                neigbor3 = neigbor[3]
                neigbor4 = neigbor[4]
                neigbor5 = neigbor[5]
                flow0 = flow[0]
                flow1 = flow[1]
                flow2 = flow[2]
                flow3 = flow[3]
                flow4 = flow[4]
                flow5 = flow[5]
                #summary(model, (input, neigbor0, neigbor1, neigbor2, neigbor3, neigbor4, neigbor5, flow0, flow1, flow2, flow3, flow4, flow5))
                print(model)
                prediction = model(input, neigbor, flow)
                # traced_model = torch.jit.trace(model, (input, neigbor0, neigbor1, neigbor2, neigbor3, neigbor4, neigbor5, flow0, flow1, flow2, flow3, flow4, flow5))
                # mlmodel = ct.converters.convert(traced_model, inputs=
                #     [ct.TensorType(shape=input.shape), ct.TensorType(shape=neigbor0.shape),
                #      ct.TensorType(shape=neigbor1.shape), ct.TensorType(shape=neigbor2.shape),
                #      ct.TensorType(shape=neigbor3.shape), ct.TensorType(shape=neigbor4.shape),
                #      ct.TensorType(shape=neigbor5.shape), ct.TensorType(shape=flow0.shape),
                #      ct.TensorType(shape=flow1.shape), ct.TensorType(shape=flow2.shape),
                #      ct.TensorType(shape=flow3.shape), ct.TensorType(shape=flow4.shape),
                #      ct.TensorType(shape=flow5.shape)])  #[input, neigbor, flow])
                # mlmodel.save('gen.mlmodel')

        if args.residual:
            prediction = prediction + bicubic

        t1 = time.time()
        print("==> Processing: %s || Timer: %.4f sec." % (str(count), (t1 - t0)))
        save_img(prediction.cpu().data, str(count), True)
        save_img(target, str(count), False)

        prediction = prediction.cpu()
        prediction = prediction.data[0].numpy().astype(np.float32)
        prediction = prediction * 255.

        target = target.squeeze().numpy().astype(np.float32)
        target = target * 255.

        psnr_predicted = PSNR(prediction, target, shave_border=args.upscale_factor)
        print("PSNR Predicted = ", psnr_predicted)
        avg_psnr_predicted += psnr_predicted
        count += 1

    print("Avg PSNR Predicted = ", avg_psnr_predicted / count)

from __future__ import division

import logging
import numpy as np
import wx
import sys
import time
import timeit
import ctypes
from multiprocessing import Process, Manager, Lock, Queue
from threading import Timer
from collections import deque

from logger import initLogger
from ScreenCapture import ScreenCapture, get_all_window_names

LOGGER = initLogger('GUI')


class ImagePanel(wx.Panel):
    def __init__(self, parent, dproxy, dproxy_lock, fps=60):
        wx.Panel.__init__(self, parent)
        self.SetDoubleBuffered(True)

        self.fps_timer = 0
        self.fps_counter = 0
        self.fps_ring_buffer = deque(maxlen=30)
        self.fps = fps
        self.dproxy = dproxy
        self.dproxy_lock = dproxy_lock
        self.fps_label = wx.StaticText(self, label="FPS: 0", pos=(1, 0))

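# The panel above keeps a deque(maxlen=30) of recent frame timings, but the
# update logic is not part of this excerpt. A minimal sketch of how such a
# ring buffer is typically used to smooth an FPS read-out; the FpsCounter
# class and update_fps method are assumptions, not the original code.
import time
from collections import deque

class FpsCounter(object):
    def __init__(self, window=30):
        self.ring_buffer = deque(maxlen=window)  # last `window` frame intervals
        self.last_time = time.time()

    def update_fps(self):
        """Record one frame and return the FPS averaged over the window."""
        now = time.time()
        self.ring_buffer.append(now - self.last_time)
        self.last_time = now
        total = sum(self.ring_buffer)
        return len(self.ring_buffer) / total if total > 0 else 0.0
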
    traceback.print_exc()
    time.sleep(10)


def print_time(delay):
    count = 0
    while count < 5:
        time.sleep(delay)
        count += 1
        print("%s" % (time.ctime(time.time())))


###
# Initialisation
###
logger.initLogger(settings.LOG_FILENAME)

# Init signal handler, because otherwise Ctrl-C does not work
signal.signal(signal.SIGINT, signal_handler)

# Make the following devices accessible for the user
os.system("sudo chmod 666 %s" % settings.serialPortDevice)

# Give Home Assistant and Mosquitto the time to start up
time.sleep(2)

# First start the MQTT client
client = mqtt_client.Client()
client.message_callback_add(settings.MQTT_TOPIC_OUT, on_message_homelogic)
client.message_callback_add(settings.MQTT_TOPIC_CHECK, serviceReport.on_message_check)
client.on_connect = on_connect

from logger import initLogger

logger = initLogger(__name__)

NUM_EPOCHS = args.num_epochs
WIDTH = args.width
HEIGHT = args.height
batchSize = args.batchSize
dataset_size = args.dataset_size
lr = args.lr
express = args.express

# Load dataset
trainLoader, valLoader = DatasetLoader.get_data_loaders(batchSize, dataset_size=dataset_size, validation_split=0.1)
numTrainBatches = len(trainLoader)
numValBatches = len(valLoader)

# Initialize Logger
logger.initLogger(args.debug)

# Use Generator as FRVSR
netG = FRVSR(batchSize, lr_width=WIDTH, lr_height=HEIGHT)
print('# of Generator parameters:', sum(param.numel() for param in netG.parameters()))

# Use Discriminator from SRGAN
netD = Discriminator()
print('# of Discriminator parameters:', sum(param.numel() for param in netD.parameters()))

generatorCriterion = GeneratorLoss()

if torch.cuda.is_available():

# import model as mdl
import config as cfg
import fileUtils as fu
import logger as lg

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dset')
parser.add_argument('-a', '--algo')
# parser.add_argument('-r', action='store_true')
args = parser.parse_args()

cfg.config(args.dset, args.algo)
# cfg.config('energydata_complete')
# cfg.config('peugeot_207_01')

lg.initLogger(cfg.ds_name, cfg.algo)

X, y = pp.preprocess('../datasets/' + cfg.ds_name + '.csv', cfg.delimiter, cfg.skiprows,
                     cfg.endColumn, startColumn=cfg.startColumn, targetColumn=cfg.targetColumn,
                     pca=cfg.pca, decimal=cfg.decimal)

# cap the k-NN training set size if configured
train_size = None
if cfg.algo.lower() == 'k-nn':
    if cfg.training_set_cap is not None:
        if X.shape[0] * (1 - cfg.test_size) > cfg.training_set_cap:
            train_size = cfg.training_set_cap / X.shape[0]

X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_size,
                                                    test_size=cfg.test_size, random_state=1)

fu.cleanSIDirs('./out/')

if cfg.algo.lower() == 'ann':
    import ANN as ann
    ann.process(X_train, X_test, y_train, y_test)
elif cfg.algo.lower() == 'k-nn':

def main(): """ Lets begin the training process! """ args = parser.parse_args() # Initialize Logger logger.initLogger(args.debug) # Load dataset logger.info('==> Loading datasets') # print(args.file_list) # sys.exit() train_set = get_training_set(args.data_dir, args.nFrames, args.upscale_factor, args.data_augmentation, args.file_list, args.other_dataset, args.patch_size, args.future_frame) training_data_loader = DataLoader(dataset=train_set, num_workers=args.threads, batch_size=args.batchSize, shuffle=True) # Use generator as RBPN netG = RBPN(num_channels=3, base_filter=256, feat=64, num_stages=3, n_resblock=5, nFrames=args.nFrames, scale_factor=args.upscale_factor) logger.info('# of Generator parameters: %s', sum(param.numel() for param in netG.parameters())) # Use DataParallel? if args.useDataParallel: gpus_list = range(args.gpus) netG = torch.nn.DataParallel(netG, device_ids=gpus_list) # Use discriminator from SRGAN netD = Discriminator() logger.info('# of Discriminator parameters: %s', sum(param.numel() for param in netD.parameters())) # Generator loss generatorCriterion = nn.L1Loss() if not args.APITLoss else GeneratorLoss() # Specify device device = torch.device( "cuda:0" if torch.cuda.is_available() and args.gpu_mode else "cpu") if args.gpu_mode and torch.cuda.is_available(): utils.printCUDAStats() netG.cuda() netD.cuda() netG.to(device) netD.to(device) generatorCriterion.cuda() # Use Adam optimizer optimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-8) optimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-8) if args.APITLoss: logger.info( "Generator Loss: Adversarial Loss + Perception Loss + Image Loss + TV Loss" ) else: logger.info("Generator Loss: L1 Loss") # print iSeeBetter architecture utils.printNetworkArch(netG, netD) if args.pretrained: modelPath = os.path.join(args.save_folder + args.pretrained_sr) utils.loadPreTrainedModel(gpuMode=args.gpu_mode, model=netG, modelPath=modelPath) # sys.exit() for epoch in range(args.start_epoch, args.nEpochs + 1): runningResults = trainModel(epoch, training_data_loader, netG, netD, optimizerD, optimizerG, generatorCriterion, device, args) if (epoch + 1) % (args.snapshots) == 0: saveModelParams(epoch, runningResults, netG, netD)
def main():
    parser = argparse.ArgumentParser(description="Stereo Reconstruction.", parents=[logger.loggingParser])
    parser.add_argument("-l", "--input-left", help="Left input image. [Default: %(default)s]")
    parser.add_argument("-r", "--input-right", help="Right input image. [Default: %(default)s]")
    parser.add_argument("-o", "--output-dir", default="stereo_test", help="Output directory. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)

    """
    image = Image.open(args.input_left)
    print "image.size", image.size
    data_list = list(image.getdata())
    print "data_list", data_list
    data_array = numpy.array(data_list, dtype=numpy.uint8)
    print "data_array", data_array
    data_array_reshaped = numpy.reshape(data_array, tuple(list(image.size)[::-1]+[3]))
    print "data_array_reshaped", data_array_reshaped

    image2 = Image.fromarray(data_array_reshaped, mode=image.mode)
    #image2 = Image.frombytes(mode=image.mode, size=image.size, data=data_array_reshaped)
    print "image2.size", image2.size
    data_list = list(image2.getdata())
    print "data_list", data_list
    data_array = numpy.array(data_list, dtype=numpy.uint8)
    print "data_array", data_array
    data_array_reshaped = numpy.reshape(data_array, tuple(list(image2.size)[::-1]+[3]))
    print "data_array_reshaped", data_array_reshaped
    image2.save(args.output)
    """

    # create output dir
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        log.info("Created \"{output}\".".format(output=args.output_dir))

    root_output_filename = os.path.join(args.output_dir, "output.root")
    root_output_file = ROOT.TFile(root_output_filename, "RECREATE")

    # copy inputs
    left_extension = os.path.splitext(args.input_left)[-1]
    left_output_filename = os.path.join(args.output_dir, "01_left" + left_extension)
    shutil.copy(args.input_left, left_output_filename)
    left_image = Image.open(left_output_filename)

    right_extension = os.path.splitext(args.input_right)[-1]
    right_output_filename = os.path.join(args.output_dir, "01_right" + right_extension)
    shutil.copy(args.input_right, right_output_filename)
    right_image = Image.open(right_output_filename)

    # convert inputs to grayscale
    gray_left_image = left_image.convert("L")
    gray_left_output_filename = os.path.join(args.output_dir, "02_gray_left" + left_extension)
    save_image(gray_left_image, gray_left_output_filename)

    gray_right_image = right_image.convert("L")
    gray_right_output_filename = os.path.join(args.output_dir, "02_gray_right" + right_extension)
    save_image(gray_right_image, gray_right_output_filename)

    # superpose images
    superposition_image = ImageChops.add(left_image, right_image, scale=2.0, offset=0)
    superposition_filename = os.path.join(args.output_dir, "03_superposition" + left_extension)
    save_image(superposition_image, superposition_filename)

    gray_superposition_image = ImageChops.add(gray_left_image, gray_right_image, scale=2.0, offset=0)
    gray_superposition_filename = os.path.join(args.output_dir, "03_gray_superposition" + left_extension)
    save_image(gray_superposition_image, gray_superposition_filename)

    # FFT of superpositions
    get_fft_histogram(gray_superposition_image, root_output_file, "superposition_fft")
    gray_superposition_fft_image = get_fft_image(gray_superposition_image)
    gray_superposition_fft_filename = os.path.join(args.output_dir, "03_gray_superposition_fft" + left_extension)
    save_image(gray_superposition_fft_image, gray_superposition_fft_filename)

    # ratio images
    left_ratio_image, right_ratio_image = get_ratio_images(left_image, right_image)
    left_ratio_filename = os.path.join(args.output_dir, "04_left_ratio" + left_extension)
    save_image(left_ratio_image, left_ratio_filename)
    right_ratio_filename = os.path.join(args.output_dir, "04_right_ratio" + right_extension)
    save_image(right_ratio_image, right_ratio_filename)

    # grayscale ratio images
    gray_left_ratio_image, gray_right_ratio_image = get_ratio_images(gray_left_image, gray_right_image)
    gray_left_ratio_filename = os.path.join(args.output_dir, "05_gray_left_ratio" + left_extension)
    save_image(gray_left_ratio_image, gray_left_ratio_filename)
    gray_right_ratio_filename = os.path.join(args.output_dir, "05_gray_right_ratio" + right_extension)
    save_image(gray_right_ratio_image, gray_right_ratio_filename)

    # FFT of ratio images
    get_fft_histogram(gray_left_ratio_image, root_output_file, "left_ratio_fft")
    gray_left_ratio_fft_image = get_fft_image(gray_left_ratio_image)
    gray_left_ratio_fft_filename = os.path.join(args.output_dir, "05_gray_left_ratio_fft" + left_extension)
    save_image(gray_left_ratio_fft_image, gray_left_ratio_fft_filename)

    get_fft_histogram(gray_right_ratio_image, root_output_file, "right_ratio_fft")
    gray_right_ratio_fft_image = get_fft_image(gray_right_ratio_image)
    gray_right_ratio_fft_filename = os.path.join(args.output_dir, "05_gray_right_ratio_fft" + right_extension)
    save_image(gray_right_ratio_fft_image, gray_right_ratio_fft_filename)

    #root_output_file.Write()
    root_output_file.Close()
    log.info("Saved ROOT output \"{output}\".".format(output=root_output_filename))

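# get_fft_image and get_fft_histogram are called above but not included in
# this excerpt. A minimal sketch of what get_fft_image could look like for a
# grayscale PIL image: the log-magnitude of the centered 2D FFT, rescaled to
# 0..255. This is a plausible reconstruction, not the original helper.
import numpy
from PIL import Image

def get_fft_image(gray_image):
    """Return the log-magnitude FFT of a grayscale image as a new "L" image."""
    data = numpy.asarray(gray_image, dtype=float)
    magnitude = numpy.abs(numpy.fft.fftshift(numpy.fft.fft2(data)))
    log_magnitude = numpy.log1p(magnitude)  # compress the dynamic range
    scaled = (255.0 * log_magnitude / log_magnitude.max()).astype(numpy.uint8)
    return Image.fromarray(scaled, mode="L")
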
def main():
    config = conf.configure()

    # now configure our global settings
    set_setting('LOG_LOCATION', config.log_location)
    set_setting('DEBUG', config.debug)
    set_setting('VERBOSE', config.verbose)

    # init logging
    logger.initLogger()

    set_setting('REGISTRATION_HOST', config.reg_host)
    set_setting('REGISTRATION_PORT', config.reg_port)
    set_setting('PORT', config.port)
    set_setting('IP', config.ip)
    set_setting('DB_TYPE', config.db_type)
    set_setting('REDIS_SOCKET', config.redis_sock)
    set_setting('REDIS_HOST', config.redis_host)
    set_setting('REDIS_PORT', config.redis_port)
    set_setting('MONGO_NAME', config.mongo_name)
    set_setting('MONGO_HOST', config.mongo_host)
    set_setting('MONGO_PORT', config.mongo_port)
    set_setting('MONGO_READ', config.mongo_read)
    set_setting('MYSQL_NAME', config.mysql_name)
    set_setting('MYSQL_HOST', config.mysql_host)
    set_setting('MYSQL_PORT', config.mysql_port)
    set_setting('MYSQL_USER', config.mysql_user)
    set_setting('MYSQL_PASSWORD', config.mysql_password)
    set_setting('HTTP_SERVICE', config.server)
    set_setting('ROOT_KEY', config.root_key)

    logger.LOG.log('ReorJS service starting...')
    logger.LOG.log('Initializing API')
    logger.LOG.info('Starting with the following settings:')
    logger.LOG.info('LOG_LOCATION => %s' % settings.LOG_LOCATION)
    logger.LOG.info('DEBUG => %s' % settings.DEBUG)
    logger.LOG.info('VERBOSE => %s' % settings.VERBOSE)
    logger.LOG.info('REGISTRATION_HOST => %s' % settings.REGISTRATION_HOST)
    logger.LOG.info('REGISTRATION_PORT => %s' % settings.REGISTRATION_PORT)
    logger.LOG.info('PORT => %s' % settings.PORT)
    logger.LOG.info('IP => %s' % settings.IP)
    logger.LOG.info('DB_TYPE => %s' % settings.DB_TYPE)
    logger.LOG.info('REDIS_SOCKET => %s' % settings.REDIS_SOCKET)
    logger.LOG.info('REDIS_HOST => %s' % settings.REDIS_HOST)
    logger.LOG.info('REDIS_PORT => %s' % settings.REDIS_PORT)
    logger.LOG.info('MONGO_NAME => %s' % settings.MONGO_NAME)
    logger.LOG.info('MONGO_HOST => %s' % settings.MONGO_HOST)
    logger.LOG.info('MONGO_PORT => %s' % settings.MONGO_PORT)
    logger.LOG.info('MONGO_READ => %s' % settings.MONGO_READ)
    logger.LOG.info('MYSQL_NAME => %s' % settings.MYSQL_NAME)
    logger.LOG.info('MYSQL_HOST => %s' % settings.MYSQL_HOST)
    logger.LOG.info('MYSQL_PORT => %s' % settings.MYSQL_PORT)
    logger.LOG.info('MYSQL_USER => %s' % settings.MYSQL_USER)
    logger.LOG.info('MYSQL_PASSWORD => %s' % settings.MYSQL_PASSWORD)
    logger.LOG.info('HTTP_SERVICE => %s' % settings.HTTP_SERVICE)
    logger.LOG.info('ROOT_KEY => %s' % settings.ROOT_KEY)

    # we need to configure our API database from settings
    if api.connect():
        # and then our stacker
        logger.LOG.log('Initializing stacker')
        stack.initStacker()

        # next we need to create our query service
        logger.LOG.log('Initializing query service')
        service = query.QueryService()

        # and run it
        logger.LOG.log('Running service...')
        service.run()
    else:
        logger.LOG.log("Error connecting to API database, please check configuration.")

def initialize(config_file):
    with INIT_LOCK:
        global CONFIG
        global CONFIG_FILE
        global _INITIALIZED
        global CURRENT_VERSION
        global LATEST_VERSION
        global UMASK
        global POLLING_FAILOVER

        CONFIG = pacvert.config.Config(config_file)
        CONFIG_FILE = config_file

        assert CONFIG is not None

        if _INITIALIZED:
            return False

        #if CONFIG.HTTP_PORT < 21 or CONFIG.HTTP_PORT > 65535:
        #    pacvert.logger.warn(
        #        'HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
        #    CONFIG.HTTP_PORT = 8181

        #if not CONFIG.HTTPS_CERT:
        #    CONFIG.HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
        #if not CONFIG.HTTPS_KEY:
        #    CONFIG.HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')

        if not CONFIG.LOG_DIR:
            CONFIG.LOG_DIR = os.path.join(DATA_DIR, 'logs')

        if not os.path.exists(CONFIG.LOG_DIR):
            try:
                os.makedirs(CONFIG.LOG_DIR)
            except OSError:
                CONFIG.LOG_DIR = None

                if not QUIET:
                    sys.stderr.write("Unable to create the log directory. "
                                     "Logging to screen only.\n")

        if not CONFIG.OUTPUT_DIRECTORY:
            CONFIG.OUTPUT_DIRECTORY = os.path.join(DATA_DIR, 'output')

        if not os.path.exists(CONFIG.OUTPUT_DIRECTORY):
            try:
                os.makedirs(CONFIG.OUTPUT_DIRECTORY)
            except OSError:
                if not QUIET:
                    sys.stderr.write("Unable to create the output directory.")

        # Start the logger, disable console if needed
        logger.initLogger(console=not QUIET, log_dir=CONFIG.LOG_DIR, verbose=VERBOSE)

        if not CONFIG.BACKUP_DIR:
            CONFIG.BACKUP_DIR = os.path.join(DATA_DIR, 'backups')
        if not os.path.exists(CONFIG.BACKUP_DIR):
            try:
                os.makedirs(CONFIG.BACKUP_DIR)
            except OSError as e:
                logger.error("Could not create backup dir '%s': %s" % (CONFIG.BACKUP_DIR, e))

        #if not CONFIG.CACHE_DIR:
        #    CONFIG.CACHE_DIR = os.path.join(DATA_DIR, 'cache')
        #if not os.path.exists(CONFIG.CACHE_DIR):
        #    try:
        #        os.makedirs(CONFIG.CACHE_DIR)
        #    except OSError as e:
        #        logger.error("Could not create cache dir '%s': %s" % (CONFIG.CACHE_DIR, e))

        # Initialize the database
        #logger.info('Checking to see if the database has all tables....')
        #try:
        #    dbcheck()
        #except Exception as e:
        #    logger.error("Can't connect to the database: %s" % e)

        # Check if pacvert has a uuid
        #if CONFIG.PMS_UUID == '' or not CONFIG.PMS_UUID:
        #    my_uuid = generate_uuid()
        #    CONFIG.__setattr__('PMS_UUID', my_uuid)
        #    CONFIG.write()

        # Get the currently installed version. Returns None, 'win32' or the git hash.
        try:
            CURRENT_VERSION, CONFIG.GIT_BRANCH = versioncheck.getVersion()
        except TypeError as e:
            logger.error("Something went terribly wrong while checking for the current version: " + str(e))

        # Write the current version to a file, so we know which version did work.
        # This allows one to restore to that version. The idea is that if we
        # arrive here, most parts of pacvert seem to work.
        if CURRENT_VERSION:
            version_lock_file = os.path.join(DATA_DIR, "version.lock")

            try:
                with open(version_lock_file, "w") as fp:
                    fp.write(CURRENT_VERSION)
            except IOError as e:
                logger.error("Unable to write current version to file '%s': %s" %
                             (version_lock_file, e))

        # Check for new versions
        if CONFIG.CHECK_GITHUB_ON_STARTUP and CONFIG.CHECK_GITHUB:
            try:
                LATEST_VERSION = versioncheck.checkGithub()
            except:
                logger.exception("Unhandled exception")
                LATEST_VERSION = CURRENT_VERSION
        else:
            LATEST_VERSION = CURRENT_VERSION

        # Store the original umask
        UMASK = os.umask(0)
        os.umask(UMASK)

        _INITIALIZED = True
        return True

#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on Jun 28, 2018

@author: WiShen
'''
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select

import logger

logger.initLogger()


class webApp(object):
    def __init__(self):
        option = webdriver.ChromeOptions()
        option.add_argument('disable-infobars')
        self.driver = webdriver.Chrome(chrome_options=option)
        self.driver.maximize_window()  # maximize chrome
        self.result = False

    ################ login and open web functions #################
    def login(self, sLoginIp, sLoginPwd):
        loginUrl = "http://" + sLoginIp
        self.driver.get(loginUrl)
        self.driver.implicitly_wait(10)
        #print self.driver.title

def initialize(config_file):
    with INIT_LOCK:
        global CONFIG
        global CONFIG_FILE
        global _INITIALIZED
        global CURRENT_VERSION
        global LATEST_VERSION
        global UMASK
        global POLLING_FAILOVER

        CONFIG = plexpy.config.Config(config_file)
        CONFIG_FILE = config_file

        assert CONFIG is not None

        if _INITIALIZED:
            return False

        if CONFIG.HTTP_PORT < 21 or CONFIG.HTTP_PORT > 65535:
            plexpy.logger.warn(
                'HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
            CONFIG.HTTP_PORT = 8181

        if not CONFIG.HTTPS_CERT:
            CONFIG.HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
        if not CONFIG.HTTPS_KEY:
            CONFIG.HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')

        if not CONFIG.LOG_DIR:
            CONFIG.LOG_DIR = os.path.join(DATA_DIR, 'logs')

        if not os.path.exists(CONFIG.LOG_DIR):
            try:
                os.makedirs(CONFIG.LOG_DIR)
            except OSError:
                CONFIG.LOG_DIR = None

                if not QUIET:
                    sys.stderr.write("Unable to create the log directory. "
                                     "Logging to screen only.\n")

        # Start the logger, disable console if needed
        logger.initLogger(console=not QUIET, log_dir=CONFIG.LOG_DIR, verbose=VERBOSE)

        if not CONFIG.BACKUP_DIR:
            CONFIG.BACKUP_DIR = os.path.join(DATA_DIR, 'backups')
        if not os.path.exists(CONFIG.BACKUP_DIR):
            try:
                os.makedirs(CONFIG.BACKUP_DIR)
            except OSError as e:
                logger.error("Could not create backup dir '%s': %s" % (CONFIG.BACKUP_DIR, e))

        if not CONFIG.CACHE_DIR:
            CONFIG.CACHE_DIR = os.path.join(DATA_DIR, 'cache')
        if not os.path.exists(CONFIG.CACHE_DIR):
            try:
                os.makedirs(CONFIG.CACHE_DIR)
            except OSError as e:
                logger.error("Could not create cache dir '%s': %s" % (CONFIG.CACHE_DIR, e))

        # Initialize the database
        logger.info('Checking to see if the database has all tables....')
        try:
            dbcheck()
        except Exception as e:
            logger.error("Can't connect to the database: %s" % e)

        # Check if PlexPy has a uuid
        if CONFIG.PMS_UUID == '' or not CONFIG.PMS_UUID:
            my_uuid = generate_uuid()
            CONFIG.__setattr__('PMS_UUID', my_uuid)
            CONFIG.write()

        # Get the currently installed version. Returns None, 'win32' or the git hash.
        CURRENT_VERSION, CONFIG.GIT_BRANCH = versioncheck.getVersion()

        # Write the current version to a file, so we know which version did work.
        # This allows one to restore to that version. The idea is that if we
        # arrive here, most parts of PlexPy seem to work.
        if CURRENT_VERSION:
            version_lock_file = os.path.join(DATA_DIR, "version.lock")

            try:
                with open(version_lock_file, "w") as fp:
                    fp.write(CURRENT_VERSION)
            except IOError as e:
                logger.error("Unable to write current version to file '%s': %s" %
                             (version_lock_file, e))

        # Check for new versions
        if CONFIG.CHECK_GITHUB_ON_STARTUP and CONFIG.CHECK_GITHUB:
            try:
                LATEST_VERSION = versioncheck.checkGithub()
            except:
                logger.exception("Unhandled exception")
                LATEST_VERSION = CURRENT_VERSION
        else:
            LATEST_VERSION = CURRENT_VERSION

        # Get the real PMS urls for SSL and remote access
        if CONFIG.PMS_TOKEN and CONFIG.PMS_IP and CONFIG.PMS_PORT:
            plextv.get_real_pms_url()
            pmsconnect.get_server_friendly_name()

        # Refresh the users list on startup
        if CONFIG.PMS_TOKEN and CONFIG.REFRESH_USERS_ON_STARTUP:
            plextv.refresh_users()

        # Refresh the libraries list on startup
        if CONFIG.PMS_IP and CONFIG.PMS_TOKEN and CONFIG.REFRESH_LIBRARIES_ON_STARTUP:
            pmsconnect.refresh_libraries()

        # Store the original umask
        UMASK = os.umask(0)
        os.umask(UMASK)

        _INITIALIZED = True
        return True

#!/usr/bin/python
import ctypes
from ctypes.wintypes import BOOL, LONG, DWORD, WORD
import logging

import numpy as np
import cv2

from logger import initLogger

LOGGER = initLogger('ScreenCapture')

# Function constants
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(BOOL, ctypes.POINTER(LONG), ctypes.POINTER(LONG))
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible

GetDC = ctypes.windll.user32.GetDC
CreateCompatibleDC = ctypes.windll.gdi32.CreateCompatibleDC
SetStretchBltMode = ctypes.windll.gdi32.SetStretchBltMode
GetClientRect = ctypes.windll.user32.GetClientRect
CreateCompatibleBitmap = ctypes.windll.gdi32.CreateCompatibleBitmap
SelectObject = ctypes.windll.gdi32.SelectObject
StretchBlt = ctypes.windll.gdi32.StretchBlt
GetDIBits = ctypes.windll.gdi32.GetDIBits
DeleteDC = ctypes.windll.gdi32.DeleteDC
ReleaseDC = ctypes.windll.user32.ReleaseDC
DeleteObject = ctypes.windll.gdi32.DeleteObject

# Structures and Constants
COLORONCOLOR = 3