def process(o):
    # get current positions of six trackbars
    hLo = cv2.getTrackbarPos('hLo', 'image')
    hHi = cv2.getTrackbarPos('hHi', 'image')
    sLo = cv2.getTrackbarPos('sLo', 'image')
    sHi = cv2.getTrackbarPos('sHi', 'image')
    vLo = cv2.getTrackbarPos('vLo', 'image')
    vHi = cv2.getTrackbarPos('vHi', 'image')

    # publish the latest bounds for the rest of the script
    global currentHSV
    currentHSV = {
        'hLo': hLo, 'hHi': hHi,
        'sLo': sLo, 'sHi': sHi,
        'vLo': vLo, 'vHi': vHi
    }

    # get information about the image
    output, images = imageprocessor.process_image(im, currentHSV, markup=1)
    logger.info("output = %s", output)
    cv2.imshow('image', im)
    for n, img in images.items():
        cv2.imshow(n, img)
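# The callback above only fires once trackbars exist. A minimal sketch of the
# setup this script presumably does elsewhere: the window name 'image' and the
# six trackbar names match the getTrackbarPos calls; the 0-179 hue range and
# 0-255 saturation/value ranges are OpenCV's HSV convention. The initial
# values below are placeholders, not taken from the original source.
def create_hsv_trackbars():
    cv2.namedWindow('image')
    # (name, initial value, maximum) -- initial values are assumed defaults
    for name, initial, maximum in (('hLo', 0, 179), ('hHi', 179, 179),
                                   ('sLo', 0, 255), ('sHi', 255, 255),
                                   ('vLo', 0, 255), ('vHi', 255, 255)):
        # every trackbar shares one callback; `process` re-reads all six
        cv2.createTrackbar(name, 'image', initial, maximum, process)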
def predict(use_gpu, checkpoint, input, top_k):
    # fall back to CPU when CUDA is unavailable or not requested
    device = torch.device(
        "cuda" if use_gpu and torch.cuda.is_available() else "cpu")

    image_model = imagemodel.ImageModel()
    image_model.load(checkpoint)

    image_input = imageprocessor.process_image(input)
    probs, classes = image_model.predict(image_input, device, top_k)

    # optionally map class ids to human-readable names
    category_names_path = args.category_names
    cat_to_name = None
    if category_names_path is not None:
        with open(category_names_path, 'r') as f:
            cat_to_name = json.load(f)

    print_results(classes, probs, cat_to_name)
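# `predict` reads `args.category_names` from module scope, so the script
# presumably builds `args` with argparse before calling it. A hedged sketch of
# what that entry point could look like; apart from --category_names, the flag
# names and defaults here are assumptions for illustration.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Predict an image class from a saved checkpoint')
    parser.add_argument('input', help='path to the image to classify')
    parser.add_argument('checkpoint', help='path to a saved model checkpoint')
    parser.add_argument('--top_k', type=int, default=5,
                        help='number of top classes to report')
    parser.add_argument('--category_names', default=None,
                        help='JSON file mapping category ids to names')
    parser.add_argument('--gpu', action='store_true', dest='use_gpu',
                        help='use CUDA if available')
    args = parser.parse_args()
    predict(args.use_gpu, args.checkpoint, args.input, args.top_k)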
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL, format=LOG_FORMAT)

###############################################################################
# get the vision settings
#
with open('videoSettings.json', 'r') as start:
    videoSettings = json.load(start)

###############################################################################
# read the file
#
print("Processing file %s" % args.file)
im = cv2.imread(args.file)

# process the image
output, images = imageprocessor.process_image(im, videoSettings, markup=1)
print(output)
for n, img in images.items():
    cv2.imshow(n, img)

# wait for a key
k = cv2.waitKey()

# clean up
cv2.destroyAllWindows()
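# The contents of videoSettings.json are opaque here; given that
# process_image accepts the same dict shape as currentHSV above, the file
# plausibly holds the six HSV bounds. A hedged sketch that writes such a
# file -- the key names come from this document, the values are placeholders:
import json

default_settings = {
    'hLo': 0, 'hHi': 179,   # hue bounds (OpenCV hue runs 0-179)
    'sLo': 0, 'sHi': 255,   # saturation bounds
    'vLo': 0, 'vHi': 255,   # value bounds
}
with open('videoSettings.json', 'w') as f:
    json.dump(default_settings, f, indent=2)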
def process_url(url, updatedConfig):
    STATUSES[threading.get_ident()] = 'Beginning to Process ' + url

    # unpack configuration
    MinBytes = int(updatedConfig['minbytes'])
    MinSaving = int(float(updatedConfig['minsaving']))
    SsoLink = updatedConfig['ssolink']
    PngquantCommand = updatedConfig['pngquantcommand']
    PillowQuality = int(updatedConfig['pillowquality'])
    ThumbnailPath = updatedConfig['thumbnailpath']
    RootPath = updatedConfig['rootpath']

    # string flags arrive as '0'/'1'; normalize '0' to False
    WholeSite = updatedConfig['wholesite']
    if WholeSite == '0':
        WholeSite = False
    FlattenOutputDirectory = updatedConfig['flattenoutputdirectory']
    if FlattenOutputDirectory == '0':
        FlattenOutputDirectory = False
    DownloadCSSFiles = updatedConfig['downloadcssfiles']
    if DownloadCSSFiles == '0':
        DownloadCSSFiles = False
    printlog(str(DownloadCSSFiles))

    total_size = 0
    new_size = 0
    old_size = 0
    images_attempted = 0
    total_bytes_saved = 0
    images_compressed = 0

    # extract the hostname and scheme+host prefix from the URL
    host_regex = r'(?:http[s]?:)\/\/([^\/?#]+)'
    host_url_regex = r'(?:http[s]?:)\/\/(?:[^\/?#]+)'
    hostname = re.match(host_regex, url).group(1)
    host_url = re.match(host_url_regex, url).group()

    all_pages_visited[host_url] = []
    link_errors[host_url] = []
    image_errors[host_url] = {}
    image_to_page_map[host_url] = {}
    pages_crawled = 0

    # create output and backup directories for this host
    current_path = os.path.realpath(RootPath)
    if not os.path.exists(current_path):
        os.makedirs(current_path)
    new_path = os.path.join(current_path, hostname)
    backup_path_root = os.path.join(current_path, '_original')
    if not os.path.exists(backup_path_root):
        os.mkdir(backup_path_root)
    backup_path = os.path.join(backup_path_root, hostname)
    if not os.path.exists(backup_path):
        os.mkdir(backup_path)
    if not os.path.exists(new_path):
        os.mkdir(new_path)

    images = []
    if not WholeSite:
        STATUSES[threading.get_ident()] = 'Downloading ' + url
        response = requests.get(url, timeout=TIMEOUT)
        images = get_image_urls_from_page(response.text, url, host_url)
    else:
        try:
            STATUSES[threading.get_ident()] = 'Crawling site: ' + host_url
            images = crawl_all_images(url, host_url,
                                      download_css=DownloadCSSFiles)
            pages_crawled = len(all_pages_visited[host_url])
        except Exception as err:
            printlog(repr(err))

    STATUSES[threading.get_ident()] = 'Creating shortcut for: ' + url
    shortcut_path = os.path.join(new_path, "View Online.url")
    info_file = os.path.join(new_path, 'Compression Info.txt')
    shortcut_target = url + '/' + SsoLink

    images_on_site = 0
    images_found = 0
    images_compressed = 0
    old_size = 0
    new_size = 0
    images_skipped = {}

    with open(shortcut_path, 'w') as shortcut:
        shortcut.write('[InternetShortcut]\n')
        shortcut.write('URL=%s' % shortcut_target)

    images = list(dict.fromkeys(images))  # remove duplicates, preserving order

    for image_url in images:
        STATUSES[threading.get_ident()] = 'Processing image: ' + image_url
        try:
            info = process_image(image_url, updatedConfig, host_url,
                                 new_path, backup_path)
        except Exception as err:
            printlog('Image error: ' + repr(err))
            continue  # `info` is undefined on failure; skip to the next image
        if info.to_remove is not None:
            os.remove(info.to_remove)
        images_found += 1
        if info.compressed:
            images_compressed += 1
            old_size += info.old_size
            new_size += info.new_size
        if info.attempted:
            images_attempted += 1
        if info.exclusion_reason != 'NONE':
            images_skipped[info.exclusion_reason] = images_skipped.get(
                info.exclusion_reason, 0) + 1

    if images_compressed == 0:
        os.remove(shortcut_path)  # nothing compressed, remove the shortcut

    # percentage saved relative to the original size
    ratio = 0
    if old_size > 0:
        ratio = round(100 * (1 - new_size / old_size), 2)

    # write the summary report
    try:
        with open(info_file, 'w') as file:
            file.write('Compression Ratio: ' + str(ratio) + '%\n')
            if pages_crawled > 0:
                file.write('Pages Crawled: ' + str(pages_crawled) + '\n')
            file.write('Images found: ' + str(images_found) + '\n')
            for reason in images_skipped:
                file.write('\t' + str(REASONS[reason]) + ': ' +
                           str(images_skipped[reason]) + '\n')
            file.write('Images attempted: ' + str(images_attempted) + '\n')
            file.write('Images Compressed: ' + str(images_compressed) + '\n')
            file.write('Amount saved: ' +
                       str(sizeof_fmt(old_size - new_size)) + '\n')
            file.write('\nLink Errors\n')
            for error in link_errors[host_url]:
                try:
                    printlog(error)
                except Exception as err:
                    printlog(repr(err))
                file.write('\t' + error + '\n')
            if not len(link_errors[host_url]):
                file.write('\tNo Link Errors! :)\n')
            file.write('\nImage Errors\n')
            for image_url in image_errors[host_url]:
                file.write('Error with ' + image_url + ': ' +
                           image_errors[host_url][image_url] +
                           ' - on the following page(s): \n')
                for page in image_to_page_map[host_url][image_url]:
                    file.write('\t' + page + '\n')
            if not len(image_errors[host_url]):
                file.write('\tNo Image Errors! :)\n')
    except Exception as error:
        printlog(repr(error))

    # clean up empty directories and this thread's status entry
    del_dirs(new_path)
    del_dirs(backup_path)
    del STATUSES[threading.get_ident()]
    return url
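# process_url keys STATUSES by threading.get_ident(), which implies it runs on
# worker threads. A minimal sketch of dispatching it that way; the helper name,
# the pool size, and the semaphore-based cap are assumptions, not part of the
# original source.
import threading

def process_urls_concurrently(urls, updatedConfig, max_workers=4):
    semaphore = threading.Semaphore(max_workers)  # cap concurrent crawls

    def worker(u):
        with semaphore:
            try:
                process_url(u, updatedConfig)
            except Exception as err:
                printlog('Thread error for ' + u + ': ' + repr(err))

    threads = [threading.Thread(target=worker, args=(u,)) for u in urls]
    for t in threads:
        t.start()
    for t in threads:
        t.join()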
###############################################################################
# process frame from camera
#
lastJSON = None
newLog = True
while True:
    # take each frame
    _, frame = cap.read()
    # cv2.imshow('frame', frame)

    # get information about the image
    output, images = imageprocessor.process_image(frame, videoSettings,
                                                  markup=0)

    # convert to JSON
    jsontote = json.dumps(output)
    logger.debug("JSON: %s", jsontote)

    if sock is not None:
        # send to network (the send itself is currently disabled)
        try:
            pass  # sock.sendto(jsontote, (UDP_IP, UDP_PORT))
        except Exception as foo:
            print(type(foo))
            print("Huh, had trouble making the socket")
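# The loop above assumes `sock`, UDP_IP, and UDP_PORT already exist. A hedged
# sketch of that setup with the standard library -- the address and port are
# placeholders. Note json.dumps returns str, so the payload must be encoded
# to bytes when the sendto call is re-enabled:
import socket

UDP_IP = '127.0.0.1'   # placeholder receiver address
UDP_PORT = 5005        # placeholder receiver port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# re-enabled send would look like:
# sock.sendto(jsontote.encode('utf-8'), (UDP_IP, UDP_PORT))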