def main(): """requests the continual recording of a a particular alertwildfire camera and saves images to outputDir Args: -c cameraID (str): time in hours to store data -i interval (flt): minutes between observations -d duration (flt): duration of observation (minutes) -o outputDir (str): directory to save the output image Returns: None """ reqArgs = [["c", "cameraID", "ID (code name) of camera"]] optArgs = [["i", "interval", "minutes between observations"], ["d", "duration", "duration of observation (minutes)"], ["o", "outputDir", "directory to save the output image"]] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) if not args.outputDir: args.outputDir = settings.downloadDir if not args.duration: args.duration = 30 if not args.interval: args.interval = 1 list_of_downloaded_img_paths = [] start_time = time.time() end_time = start_time + float(args.duration) * 60 next_interval = start_time while True: path = alertwildfire_API.request_current_image(args.outputDir, args.cameraID) list_of_downloaded_img_paths.append(path) next_interval += float(args.interval) * 60 if next_interval > end_time: return list_of_downloaded_img_paths time.sleep(int(next_interval - time.time()))
def main(): reqArgs = [ ["f", "fire", "ID of the fire in the images"], ["c", "camera", "ID of the camera used in the images"], ] optArgs = [ ["z", "zipFile", "Name of the zip file containing the images"], ["d", "imgDirectory", "Name of the directory containing the images or ask:dir"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) imgDirectory = None if args.imgDirectory: imgDirectory = args.imgDirectory if imgDirectory == 'ask:dir': imgDirectory = askdirectory() elif args.zipFile: tempDir = unzipFile(args.zipFile) imgDirectory = tempDir.name if not imgDirectory: logging.error('Must specify either zipFile or imgDirectory') exit(1) googleServices = goog_helper.getGoogleServices(settings, args) processFolder(imgDirectory, args.camera, args.fire, googleServices)
def main():
    reqArgs = [
        ['o', 'outputFile', 'filename for output CSV of fire x camera matches with available archives'],
    ]
    optionalArgs = [
        ['g', 'longitude', 'longitude of fire', float],
        ['t', 'latitude', 'latitude of fire', float],
        ['s', 'startTime', 'start time of fire'],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optionalArgs, parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    outputFile = open(args.outputFile, 'w', newline='')
    outputCsv = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
    locMatches = getLocationMatches(dbManager, args.longitude, args.latitude, args.startTime)
    totalMatches = len(locMatches)
    numOutput = 0
    for rowNum, locMatch in enumerate(locMatches):
        timeDT = datetime.datetime.fromtimestamp(locMatch['timestamp'])
        cams = locMatch['cameraids'].split(',')
        availCams = []
        for cameraID in cams:
            if isCamArchiveAvailable(camArchives, cameraID, timeDT):
                availCams.append(cameraID)
        # logging.warning('availCams %d: %s', len(availCams), availCams)
        if len(availCams) > 0:
            outputRow(outputCsv, locMatch, timeDT, availCams)
            numOutput += 1
        if (rowNum % 10) == 0:
            logging.warning('Processing %d of %d, output %d', rowNum, totalMatches, numOutput)
    logging.warning('Processed %d, output %d', totalMatches, numOutput)
def main(): reqArgs = [ ["c", "camera", "name of camera (part of URL)"], ["d", "date", "date in YYYYMMDD format"], ["s", "startTime", "starting time in HH:mm format"], ["e", "endTime", "ending time in HH:mm format"], ["o", "output", "path to folder where images are stored"], ] optArgs = [ ["n", "numProcesses", "number of child prcesses to start (default 1)"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) numProcesses = int(args.numProcesses) if args.numProcesses else 1 camera_dir = args.output + '/' + args.camera + '/' if not os.path.exists(camera_dir): os.makedirs(camera_dir) startTime = list(map(lambda x: int(x), args.startTime.split(':'))) endTime = list(map(lambda x: int(x), args.endTime.split(':'))) timeSpan = timeDiff(startTime[0], startTime[1], endTime[0], endTime[1]) timePerProcess = int(timeSpan / numProcesses) allArgs = [] for i in range(numProcesses): procStartTime = calcTime(startTime[0], startTime[1], timePerProcess * i) procEndTime = calcTime(startTime[0], startTime[1], timePerProcess * (i + 1)) if i == (numProcesses - 1): # special handling to deal with rounding errors procEndTime = endTime procArgs = [ args.camera, args.date, procStartTime, procEndTime, camera_dir ] allArgs.append(procArgs) with Pool(numProcesses) as pool: pool.map(usgs_puller, allArgs)
def main(): reqArgs = [["l", "imgClass", "image class (smoke, nonSmoke, motion)"], [ "o", "outputDir", "local directory to save images and segments" ]] optArgs = [["c", "cellRange", "cells to read and process"], ["i", "image", "file name of the image in google drive"]] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleServices = goog_helper.getGoogleServices(settings, args) if args.cellRange: values = readFromMainSheet(googleServices['sheet'], args.cellRange) for [fileName] in values: print(fileName) goog_helper.downloadClassImage(googleServices['drive'], settings.IMG_CLASSES, args.imgClass, fileName, args.outputDir) if args.image: goog_helper.downloadClassImage(googleServices['drive'], settings.IMG_CLASSES, args.imgClass, args.image, args.outputDir)
def main(): argDefs = [ ["i", "image", "filename of the image"], ["o", "output", "output directory name"], ] args = collect_args.collectArgs(argDefs) print(args) imageDisplay(args.image, args.output)
def main(): reqArgs = [ ["o", "outputDir", "local directory to save images and segments"], ["i", "inputCsv", "csvfile with contents of Fuego Cropped Images"], ] optArgs = [["s", "startRow", "starting row"], ["e", "endRow", "ending row"], [ "d", "display", "(optional) specify any value to display image and boxes" ]] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 googleServices = goog_helper.getGoogleServices(settings, args) cameraCache = {} with open(args.inputCsv) as csvFile: csvreader = csv.reader(csvFile) for (rowIndex, csvRow) in enumerate(csvreader): if rowIndex < startRow: continue if rowIndex > endRow: print('Reached end row', rowIndex, endRow) exit(0) [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6] minX = int(minX) minY = int(minY) maxX = int(maxX) maxY = int(maxY) dirID = getCameraDir(googleServices['drive'], cameraCache, fileName) localFilePath = os.path.join(args.outputDir, fileName) if not os.path.isfile(localFilePath): goog_helper.downloadFile(googleServices['drive'], dirID, fileName, localFilePath) imgOrig = Image.open(localFilePath) squareCoords = rect_to_squares.rect_to_squares( minX, minY, maxX, maxY, imgOrig.size[0], imgOrig.size[1], MIN_SIZE) # print(squareCoords) imgNameNoExt = str(os.path.splitext(fileName)[0]) for coords in squareCoords: cropImgName = imgNameNoExt + '_Crop_' + 'x'.join( list(map(lambda x: str(x), coords))) + '.jpg' cropImgPath = os.path.join(args.outputDir, 'cropped', cropImgName) cropped_img = imgOrig.crop(coords) cropped_img.save(cropImgPath, format='JPEG') print('Processed row: %s, file: %s, num squares: %d' % (rowIndex, fileName, len(squareCoords))) if args.display: squareCoords.append((minX, minY, maxX, maxY)) displayImageWithScores(imgOrig, squareCoords) imageDisplay(imgOrig)
def main():
    reqArgs = []
    args = collect_args.collectArgs(reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()])
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    fires = getUnparsedFires(dbManager)
    parseDates(dbManager, fires)
def main(): reqArgs = [ ["f", "fileName", "name of file containing fire_coords.py output"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()]) dbManager = db_manager.DbManager(sqliteFile=settings.db_file) insertFires(dbManager, args.fileName)
def main(): reqArgs = [ ["f", "fileName", "name of file containing 'md5sum |sort' output "], ["d", "destDir", "name of directory where to move dupes"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()]) checkDupes(args.fileName, args.destDir)
def main(): reqArgs = [ ["k", "key", "api key for google geocoding service"], ["f", "fileName", "name of file containing calfire_parse.py output"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()]) gmaps = googlemaps.Client(key=args.key) getCoords(gmaps, args.fileName)
def main(): reqArgs = [ ["o", "outputDir", "local directory to save images and segments"], ["i", "inputCsv", "csvfile with contents of Fuego Cropped Images"], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], [ "d", "display", "(optional) specify any value to display image and boxes" ], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 googleServices = goog_helper.getGoogleServices(settings, args) cameraCache = {} with open(args.inputCsv) as csvFile: csvreader = csv.reader(csvFile) for (rowIndex, csvRow) in enumerate(csvreader): if rowIndex < startRow: continue if rowIndex > endRow: logging.warning('Reached end row: %d, %d', rowIndex, endRow) exit(0) logging.warning('row %d: %s', rowIndex, csvRow[:2]) [cameraName, cropName] = csvRow[:2] if not cameraName: continue fileName = re.sub('_Crop[^.]+', '', cropName) # get back filename for whole image dirID = getCameraDir(googleServices['drive'], cameraCache, fileName) localFilePath = os.path.join(args.outputDir, fileName) if not os.path.isfile(localFilePath): goog_helper.downloadFile(googleServices['drive'], dirID, fileName, localFilePath) logging.warning('local %s', fileName) cropInfo = re.findall('_Crop_(\d+)x(\d+)x(\d+)x(\d+)', cropName) if len(cropInfo) != 1: logging.error('Failed to parse crop info %s, %s', cropName, cropInfo) exit(1) cropInfo = list(map(lambda x: int(x), cropInfo[0])) logging.warning('Dims: %s', cropInfo) imgOrig = Image.open(localFilePath) rect_to_squares.cutBoxesFixed(imgOrig, args.outputDir, fileName, lambda x: checkCoords(x, cropInfo))
def test():
    argDefs = [
        ["a", "X0", "X coord of first corner", int],
        ["b", "Y0", "Y coord of first corner", int],
        ["c", "X1", "X coord of opposite corner", int],
        ["d", "Y1", "Y coord of opposite corner", int],
    ]
    args = collect_args.collectArgs(argDefs)
    print('Rect:', (args.X0, args.Y0, args.X1, args.Y1))
    coords = rect_to_squares(args.X0, args.Y0, args.X1, args.Y1, 1000, 1000, MIN_SQUARE_SIZE)
    print('Squares:', coords)
def main(): reqArgs = [ ["a", "imgA", "image to subtract from"], ["b", "imgB", "image to subtract"], ["o", "imgOutput", "output image"], ] optArgs = [] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) imgA = Image.open(args.imgA) imgB = Image.open(args.imgB) imgOut = img_archive.diffImages(imgA, imgB) imgOut.save(args.imgOutput, format='JPEG')
def main(): reqArgs = [ ["c", "cameraID", "ID (code name) of camera"], [ "s", "startTime", "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], ] optArgs = [ [ "e", "endTime", "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], [ "g", "gapMinutes", "override default of 1 minute gap between images to download" ], ["o", "outputDir", "directory to save the output image"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleServices = goog_helper.getGoogleServices(settings, args) gapMinutes = int(args.gapMinutes) if args.gapMinutes else 1 outputDir = args.outputDir if args.outputDir else settings.downloadDir startTimeDT = dateutil.parser.parse(args.startTime) if args.endTime: endTimeDT = dateutil.parser.parse(args.endTime) else: endTimeDT = startTimeDT assert startTimeDT.year == endTimeDT.year assert startTimeDT.month == endTimeDT.month assert startTimeDT.day == endTimeDT.day assert endTimeDT >= startTimeDT camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings) files = img_archive.getHpwrenImages(googleServices, settings, outputDir, camArchives, args.cameraID, startTimeDT, endTimeDT, gapMinutes) if files: logging.warning('Found %d files.', len(files)) else: logging.error('No matches for camera ID %s', args.cameraID)
def main(): optArgs = [ [ "e", "entireImage", "csv filename with data on entire images (Fuego Images)" ], [ "c", "croppedImages", "csv filename with data on cropped images (Fuego Cropped Images)" ], ] args = collect_args.collectArgs([], optionalArgs=optArgs) if args.entireImage: with open(args.entireImage) as csvfile: insert_entire_images(csvfile) if args.croppedImages: with open(args.croppedImages) as csvfile: insert_cropped_images(csvfile)
def main(): reqArgs = [ ["g", "geoTiffName", "File name of geotiff"], ["a", "lat", "latitude of desired point", float], ["o", "long", "longtitude of desired point", float], ] args = collect_args.collectArgs(reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()]) tiffData = gdal.Open(args.geoTiffName) logging.warning('x: %d, y: %d', tiffData.RasterXSize, tiffData.RasterYSize) metadata = tiffData.GetGeoTransform() logging.warning('metadata: %s', metadata) specs = tiffData.ReadAsArray(xoff=0, yoff=0) logging.warning('specs: %s', specs) coordX = mapping_with_bounds(args.long, metadata[0], metadata[1], tiffData.RasterXSize) coordY = mapping_with_bounds(args.lat, metadata[3], metadata[5], tiffData.RasterYSize) if coordX != None and coordY != None: val = specs[coordX,coordY] logging.warning("The value is (%s)", val)
def main(): reqArgs = [ ["c", "cameraID", "ID (code name) of camera"], ] optArgs = [ ["l", "localhost", "localhost for testing"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleCreds = goog_helper.getCreds(settings, args) googleServices = goog_helper.getGoogleServices(settings, args) folderName = str(uuid.uuid4()) folderID = goog_helper.createFolder(googleServices['drive'], settings.ffmpegFolder, folderName) url = settings.ffmpegUrl if args.localhost: url = 'http://localhost:8080' respData = callGCF(url, googleCreds, args.cameraID, folderID) logging.warning('GCF Result: %s', respData) logging.warning('New folder %s (%s) should be cleaned up', folderName, folderID)
def main(): reqArgs = [ [ "i", "inputDir", "local directory containing both smoke and nonSmoke images" ], ["o", "outputDir", "local directory to write out TFRecords files"], ] optArgs = [[ "t", "trainPercentage", "percentage of data to use for training vs. validation (default 90)" ]] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) trainPercentage = int(args.trainPercentage) if args.trainPercentage else 90 writeTFRecords(args.inputDir, args.outputDir, trainPercentage)
def main(): optArgs = [ ["n", "numProcesses", "number of child prcesses to start (default 1)"], ["g", "useGpu", "(optional) specify any value to use gpu (default off)"], ["c", "collectPositves", "collect positive segments for training data"], ["r", "restrictType", "Only process images from cameras of given type"], ] args = collect_args.collectArgs([], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) numProcesses = int(args.numProcesses) if args.numProcesses else 1 useGpu = True if args.useGpu else False if not useGpu: os.environ["CUDA_VISIBLE_DEVICES"]="-1" scriptName = 'detect_fire.py' procInfos = [] for i in range(numProcesses): heartbeatFile = tempfile.NamedTemporaryFile() heartbeatFileName = heartbeatFile.name proc = startProcess(scriptName, heartbeatFileName, args.collectPositves, args.restrictType) procInfos.append({ 'proc': proc, 'heartbeatFile': heartbeatFile, 'heartbeatFileName': heartbeatFileName, }) time.sleep(10) # 10 seconds per process to allow startup while True: for procInfo in procInfos: lastTS = lastHeartbeat(procInfo['heartbeatFileName']) # check heartbeat timestamp = int(time.time()) proc = procInfo['proc'] logging.debug('DBG: Process %d: %s: %d seconds since last image scanned, %d', proc.pid, procInfo['heartbeatFileName'], timestamp - lastTS, lastTS) if (timestamp - lastTS) > 2*60: # warn if stuck more than 2 minutes logging.warning('Process %d: %d seconds since last image scanned', proc.pid, timestamp - lastTS) if (timestamp - lastTS) > 4*60: # kill if stuck more than 4 minutes logging.warning('Killing %d', proc.pid) proc.kill() procInfo['proc'] = startProcess(scriptName, procInfo['heartbeatFileName'], args.collectPositves, args.restrictType) time.sleep(30)
def main(): reqArgs = [ ["i", "image", "filename of the image"], ["o", "output", "output directory name"], ] optArgs = [["l", "labels", "labels file generated during retraining"], ["m", "model", "model file generated during retraining"], [ "d", "display", "(optional) specify any value to display image and boxes" ]] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) model_file = args.model if args.model else settings.model_file labels_file = args.labels if args.labels else settings.labels_file os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' graph = tf_helper.load_graph(model_file) labels = tf_helper.load_labels(labels_file) segments = [] with tf.Session(graph=graph) as tfSession: if True: # chops image in segment files and classifies each segment imgOrig = Image.open(args.image) segments = rect_to_squares.cutBoxes(imgOrig, args.output, args.image) tf_helper.classifySegments(tfSession, graph, labels, segments) if False: # version that classifies entire image without cropping imgOrig = Image.open(args.image) segments = [{'imgPath': args.image}] tf_helper.classifySegments(tfSession, graph, labels, segments) if False: # chops image into in-memory segments and classifies each segment calcScoresInMemory(args.model, args.labels, args.image) for segmentInfo in segments: print(segmentInfo['imgPath'], segmentInfo['score']) if args.display: drawBoxesAndScores(imgOrig, segments) displayImageWithScores(imgOrig, [])
def main():
    global fen
    reqArgs = [
        ["i", "image", "filename of the image"],
        ["o", "output", "output directory name"],
    ]
    optArgs = [
        ["d", "display", "(optional) specify any value to display image and boxes"],
        ["f", "fixedSize", "(optional) use 299x299 segments"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    # print(args)
    imgOrig = Image.open(args.image)
    callBackFn = None
    if args.display:
        imageDisplay(imgOrig)
        callBackFn = showSquares
    if args.fixedSize:
        rect_to_squares.cutBoxesFixed(imgOrig, args.output, args.image, callBackFn)
    else:
        rect_to_squares.cutBoxes(imgOrig, args.output, args.image, callBackFn)
    if args.display:
        fen.mainloop()
def main(): """directs the funtionality of the process ie start a cleanup, record all cameras on 2min refresh, record a subset of cameras, manage multiprocessed recording of cameras Args: -c cleaning_threshold" (flt): time in hours to store data -o cameras_overide (str): list of specific cameras to watch -a agents (int): number of agents to assign for parallelization -f full_system (Bool):monitor full system with as many agents as needed Returns: None """ reqArgs = [] optArgs = [ ["c", "cleaning_threshold", "time in hours to store data"], ["o", "cameras_overide", "specific cameras to watch"], ["a", "agents", "number of agents to assign for parallelization"], [ "f", "full_system", "toggle to cover all of alert wildfire with unrestrained parallelization" ] ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleServices = goog_helper.getGoogleServices(settings, args) dbManager = db_manager.DbManager(sqliteFile=settings.db_file, psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) if args.cleaning_threshold: cleaning_threshold = float(args.cleaning_threshold) cleanup_archive(googleServices, dbManager, cleaning_threshold) if args.cameras_overide: listofRotatingCameras = list( args.cameras_overide.replace(" ", "").strip('[]').split(',')) else: listofCameras = alertwildfire_API.get_all_camera_info() listofRotatingCameras = [ camera["name"] for camera in listofCameras if (camera["name"][-1] == '2') ] if args.agents: agents = int(args.agents) #num of camera's per process toc_avg = test_System_response_time(googleServices, dbManager, trial_length=10) # target estimate of camera refresh time target_refresh_time_per_camera = 12 #secs num_cameras_per_process = math.floor(target_refresh_time_per_camera / toc_avg) #future ability to re-adjust as needed #divy the cameras camera_bunchs = [] num_of_processes_needed = math.ceil( len(listofRotatingCameras) / num_cameras_per_process) if num_of_processes_needed > agents: logging.warning( 'unable to process all given cameras on this machine with %s agents and maintain a target refresh rate of %s seconds, please reduce number of cameras to less than %s or increase number of agents to %s', agents, target_refresh_time_per_camera, num_cameras_per_process * agents, num_of_processes_needed) return num_cameras_per_process = math.floor( len(listofRotatingCameras) / agents) for num in range(0, num_of_processes_needed): split_start = num_cameras_per_process * num split_stop = num_cameras_per_process * num + num_cameras_per_process camera_bunchs.append(listofRotatingCameras[split_start:split_stop]) with Pool(processes=agents) as pool: result = pool.map(fetchAllCameras, camera_bunchs) pool.close() else: if args.full_system: response_time_per_camera = test_System_response_time( googleServices, dbManager, trial_length=10) listofCameras = alertwildfire_API.get_all_camera_info() target_refresh_time_per_camera, listofTargetCameras, num_of_processes_needed, num_cameras_per_process, num_of_agents_needed = {},{},{},{},0 # target estimate of camera refresh time target_refresh_time_per_camera["rotating"] = 12 #secs target_refresh_time_per_camera["stationary"] = 60 #secs #separation of data by type listofTargetCameras["rotating"] = [ camera["name"] for camera in listofCameras if (camera["name"][-1] == '2') ] listofTargetCameras["stationary"] = [ camera["name"] for camera in listofCameras if (camera["name"][-1] != '2') ] camera_bunchs = [] for type in ["rotating", 
"stationary"]: num_cameras_per_process[type] = math.floor( target_refresh_time_per_camera[type] / response_time_per_camera) #divy up cameras rotating and stationary to maximize efficiency num_of_processes_needed[type] = math.ceil( len(listofTargetCameras[type]) / num_cameras_per_process[type]) num_cameras_per_process[type] = math.floor( len(listofTargetCameras[type]) / num_of_processes_needed[type]) num_of_agents_needed += num_of_processes_needed[type] for num in range(0, num_of_processes_needed[type]): split_start = num_cameras_per_process[type] * num split_stop = num_cameras_per_process[ type] * num + num_cameras_per_process[type] camera_bunchs.append( listofTargetCameras[type][split_start:split_stop]) with Pool(processes=num_of_agents_needed) as pool: result = pool.map(fetchAllCameras, camera_bunchs) pool.close() else: fetchAllCameras(listofRotatingCameras)
def main(): reqArgs = [ ["d", "directory", "directory containing the image sets"], ["o", "outputFile", "output file name"], ] optArgs = [ ["l", "labels", "labels file generated during retraining"], ["m", "model", "model file generated during retraining"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) model_file = args.model if args.model else settings.model_file labels_file = args.labels if args.labels else settings.labels_file test_data = [] image_name = [] crop_name = [] score_name = [] class_name = [] image_name += ["Image"] crop_name += ["Crop"] score_name += ["Score"] class_name += ["Class"] os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # quiet down tensorflow logging graph = tf_helper.load_graph(model_file) labels = tf_helper.load_labels(labels_file) smokeDir = os.path.join(args.directory, 'test_set_smoke') smoke_image_list = listJpegs(smokeDir) logging.warning('Found %d images of smoke', len(smoke_image_list)) nonSmokeDir = os.path.join(args.directory, 'test_set_other') other_image_list = listJpegs(nonSmokeDir) logging.warning('Found %d images of nonSmoke', len(other_image_list)) smokeFile = os.path.join(args.directory, 'test_smoke.txt') np.savetxt(smokeFile, smoke_image_list, fmt="%s") nonSmokeFile = os.path.join(args.directory, 'test_other.txt') np.savetxt(nonSmokeFile, other_image_list, fmt="%s") (i, cr, s, cl, numPositive) = classifyImages(graph, labels, smoke_image_list, 'smoke', args.outputFile) image_name += i crop_name += cr score_name += s class_name += cl logging.warning('Done with smoke images') truePositive = numPositive falseNegative = len(smoke_image_list) - numPositive logging.warning('True Positive: %d', truePositive) logging.warning('False Negative: %d', falseNegative) (i, cr, s, cl, numPositive) = classifyImages(graph, labels, other_image_list, 'other', args.outputFile) image_name += i crop_name += cr score_name += s class_name += cl logging.warning('Done with nonSmoke images') falsePositive = numPositive trueNegative = len(other_image_list) - numPositive logging.warning('False Positive: %d', falsePositive) logging.warning('True Negative: %d', trueNegative) accuracy = (truePositive + trueNegative) / (truePositive + trueNegative + falsePositive + falseNegative) logging.warning('Accuracy: %f', accuracy) precision = truePositive / (truePositive + falsePositive) logging.warning('Precision: %f', precision) recall = truePositive / (truePositive + falseNegative) logging.warning('Recall: %f', recall) f1 = 2 * precision * recall / (precision + recall) logging.warning('F1: %f', f1) test_data = [image_name, crop_name, score_name, class_name] np.savetxt(args.outputFile, np.transpose(test_data), fmt="%s") print("DONE")
def main(): optArgs = [ ["b", "heartbeat", "filename used for heartbeating check"], [ "c", "collectPositves", "collect positive segments for training data" ], ["d", "imgDirectory", "Name of the directory containing the images"], ["t", "time", "Time breakdown for processing images"], [ "m", "minusMinutes", "(optional) subtract images from given number of minutes ago" ], [ "r", "restrictType", "Only process images from cameras of given type" ], [ "s", "startTime", "(optional) performs search with modifiedTime > startTime" ], [ "e", "endTime", "(optional) performs search with modifiedTime < endTime" ], ] args = collect_args.collectArgs( [], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0 googleServices = goog_helper.getGoogleServices(settings, args) dbManager = db_manager.DbManager(sqliteFile=settings.db_file, psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) cameras = dbManager.get_sources(activeOnly=True, restrictType=args.restrictType) startTimeDT = dateutil.parser.parse( args.startTime) if args.startTime else None endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None timeRangeSeconds = None useArchivedImages = False camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings) constants = { # dictionary of constants to reduce parameters in various functions 'args': args, 'googleServices': googleServices, 'camArchives': camArchives, 'dbManager': dbManager, } if startTimeDT or endTimeDT: assert startTimeDT and endTimeDT timeRangeSeconds = (endTimeDT - startTimeDT).total_seconds() assert timeRangeSeconds > 0 assert args.collectPositves useArchivedImages = True deferredImages = [] processingTimeTracker = initializeTimeTracker() graph = tf_helper.load_graph(settings.model_file) labels = tf_helper.load_labels(settings.labels_file) config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.1 #hopefully reduces segfaults with tf.Session(graph=graph, config=config) as tfSession: while True: classifyImgPath = None timeStart = time.time() if useArchivedImages: (cameraID, timestamp, imgPath, classifyImgPath) = \ getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes) elif minusMinutes: (queueFull, deferredImageInfo) = getDeferrredImgageInfo( deferredImages, processingTimeTracker, minusMinutes, timeStart) if not queueFull: # queue is not full, so add more to queue addToDeferredImages(dbManager, cameras, deferredImages) if deferredImageInfo: # we have a deferred image ready to process, now get latest image and subtract (cameraID, timestamp, imgPath, classifyImgPath) = \ genDiffImageFromDeferred(dbManager, cameras, deferredImageInfo, deferredImages, minusMinutes) if not cameraID: continue # skip to next camera without deleting deferred image which may be reused later os.remove(deferredImageInfo['imgPath']) # no longer needed else: continue # in diff mode without deferredImage, nothing more to do # elif args.imgDirectory: unused functionality -- to delete? 
# (cameraID, timestamp, imgPath, md5) = getNextImageFromDir(args.imgDirectory) else: # regular (non diff mode), grab image and process (cameraID, timestamp, imgPath, md5) = getNextImage(dbManager, cameras) classifyImgPath = imgPath if not cameraID: continue # skip to next camera timeFetch = time.time() segments = segmentAndClassify(classifyImgPath, tfSession, graph, labels) timeClassify = time.time() recordFilterReport(constants, cameraID, timestamp, classifyImgPath, imgPath, segments, minusMinutes, googleServices['drive'], useArchivedImages) timePost = time.time() updateTimeTracker(processingTimeTracker, timePost - timeStart) if args.time: logging.warning( 'Timings: fetch=%.2f, classify=%.2f, post=%.2f', timeFetch - timeStart, timeClassify - timeFetch, timePost - timeClassify)
def main(): reqArgs = [ ["d", "dirID", "ID of google drive directory"], ["f", "fileName", "fileName of google drive file"], ] optArgs = [ ["u", "upload", "(optional) performs upload vs. download"], [ "s", "startTime", "(optional) performs search with modifiedTime > startTime" ], [ "e", "endTime", "(optional) performs search with modifiedTime < endTime" ], ["l", "listOnly", "(optional) list vs. download"], [ "r", "remove", "(optional) performs remove/delete vs. download (value must be 'delete')" ], [ "m", "maxFiles", "override default of 100 for max number of files to operate on" ], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) maxFiles = int(args.maxFiles) if args.maxFiles else 100 googleServices = goog_helper.getGoogleServices(settings, args) # default mode is to download a single file operation = 'download' multipleFiles = False batchMode = True MAX_BATCH_SIZE = 25 # increasing size beyond 60 tends to generate http 500 errors if args.upload: operation = 'upload' elif args.remove: if args.remove != 'delete': logging.error( "value for remove must be 'delete', but instead is %s", args.remove) exit(1) operation = 'delete' elif args.listOnly: operation = 'list' if args.startTime or args.endTime: multipleFiles = True if not multipleFiles: if operation == 'upload': goog_helper.uploadFile(googleServices['drive'], args.dirID, args.fileName) else: assert operation == 'download' goog_helper.downloadFile(googleServices['drive'], args.dirID, args.fileName, args.fileName) else: nextPageToken = 'init' processedFiles = 0 while True: batch = None batchCount = 0 (items, nextPageToken) = goog_helper.searchFiles(googleServices['drive'], args.dirID, args.startTime, args.endTime, args.fileName, npt=nextPageToken) firstLast = '' if len(items) > 0: firstLast = str(items[0]) + ' to ' + str(items[-1]) logging.warning('Found %d files: %s', len(items), firstLast) if operation == 'list': logging.warning('All files: %s', items) for item in items: if operation == 'delete': if batchMode: if not batch: batch = googleServices[ 'drive'].new_batch_http_request( callback=delete_file) batch.add(googleServices['drive'].files().delete( fileId=item["id"], supportsTeamDrives=True)) batchCount += 1 if batchCount == MAX_BATCH_SIZE: logging.warning('Execute batch with %d items', batchCount) batch.execute() batch = None batchCount = 0 else: googleServices['drive'].files().delete( fileId=item["id"], supportsTeamDrives=True).execute() elif operation == 'download': goog_helper.downloadFileByID(googleServices['drive'], item['id'], item['name']) if batch and batchCount: batch.execute() batch = None processedFiles += len(items) logging.warning('Processed %d of max %d. NextToken: %s', processedFiles, maxFiles, bool(nextPageToken)) if (processedFiles >= maxFiles) or not nextPageToken: break # exit if we processed enough files or no files left logging.warning('Done')
def main(): reqArgs = [ ["o", "operation", "add (includes update), delete, list"], ] optArgs = [ ["n", "name", "name (ID) of user"], ["m", "email", "email address of user"], ["p", "phone", "phone number of user"], [ "s", "startTime", "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], [ "e", "endTime", "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) startTime = parseTimeStr(args.startTime) if args.startTime else None endTime = parseTimeStr(args.endTime) if args.endTime else None dbManager = db_manager.DbManager(sqliteFile=settings.db_file, psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) notifications = dbManager.getNotifications() activeEmails = dbManager.getNotifications(filterActiveEmail=True) activePhones = dbManager.getNotifications(filterActivePhone=True) logging.warning( 'Num all notifications: %d. Active emails: %d. Active phones: %d', len(notifications), len(activeEmails), len(activePhones)) if args.operation == 'list': for n in notifications: printNoficiation(n) return assert args.name matching = list(filter(lambda x: x['name'] == args.name, notifications)) logging.warning('Found %d matching for name %s', len(matching), args.name) if matching: printNoficiation(matching[0]) if args.operation == 'add': assert startTime and endTime assert endTime >= startTime assert args.email or args.phone if not matching: # insert new entry dbRow = { 'name': args.name, } if args.email: dbRow['email'] = args.email dbRow['EmailStartTime'] = startTime dbRow['EmailEndTime'] = endTime if args.phone: dbRow['phone'] = args.phone dbRow['PhoneStartTime'] = startTime dbRow['PhoneEndTime'] = endTime dbManager.add_data('notifications', dbRow) logging.warning('Successfully added notification for %s', args.name) else: # update existing entry if args.email: sqlTemplate = """UPDATE notifications SET email='%s',EmailStartTime=%s,EmailEndTime=%s WHERE name = '%s' """ sqlStr = sqlTemplate % (args.email, startTime, endTime, args.name) dbManager.execute(sqlStr) if args.phone: sqlTemplate = """UPDATE notifications SET phone='%s',PhoneStartTime=%s,PhoneEndTime=%s WHERE name = '%s' """ sqlStr = sqlTemplate % (args.phone, startTime, endTime, args.name) dbManager.execute(sqlStr) logging.warning('Successfully updated notification for %s', args.name) notifications = dbManager.getNotifications() matching = list(filter(lambda x: x['name'] == args.name, notifications)) printNoficiation(matching[0]) elif args.operation == 'delete': sqlTemplate = """DELETE FROM notifications WHERE name = '%s' """ sqlStr = sqlTemplate % (args.name) dbManager.execute(sqlStr) else: logging.error('Unexpected operation: %s', args.operation)
def main(): """directs the funtionality of the process ie start a cleanup, record all cameras on 2min refresh, record a subset of cameras, manage multiprocessed recording of cameras Args: -c cleaning_threshold" (flt): time in hours to store data -o cameras_overide (str): list of specific cameras to watch -a agents (int): number of agents to assign for parallelization Returns: None """ reqArgs = [] optArgs = [ ["c", "cleaning_threshold", "time in hours to store data"], ["o", "cameras_overide", "specific cameras to watch"], ["a", "agents", "number of agents to assign for parallelization"] ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleServices = goog_helper.getGoogleServices(settings, args) dbManager = db_manager.DbManager(sqliteFile=settings.db_file, psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) if args.cleaning_threshold: cleaning_threshold = float(args.cleaning_threshold) cleanup_archive(googleServices, dbManager, cleaning_threshold) if args.cameras_overide: listofRotatingCameras = list(args.cameras_overide.replace(" ", "").strip('[]').split(',')) else: listofCameras = alertwildfire_API.get_all_camera_info() listofRotatingCameras = [camera["name"] for camera in listofCameras if (camera["name"][-1]=='2') ] if args.agents: agents = int(args.agents) #num of camera's per process test = "Axis-Briar2" temporaryDir = tempfile.TemporaryDirectory() trial = [x for x in range(0,10)] tic = time.time() for x in trial: capture_and_record(googleServices, dbManager, temporaryDir.name, test) toc =time.time()-tic toc_avg = toc/len(trial) # target estimate of camera refresh time target_refresh_time_per_camera = 12#secs num_cameras_per_process = math.floor(target_refresh_time_per_camera / toc_avg) #future ability to re-adjust as needed #divy the cameras camera_bunchs= [] num_of_processes_needed = math.ceil(len(listofRotatingCameras)/num_cameras_per_process) if num_of_processes_needed>agents: logging.warning('unable to process all given cameras on this machine with %s agents and maintain a target refresh rate of %s seconds, please reduce number of cameras to less than %s',agents, target_refresh_time_per_camera,num_cameras_per_process*agents) return for num in range(0, num_of_processes_needed): split_start = num_cameras_per_process*num split_stop = num_cameras_per_process*num+num_cameras_per_process camera_bunchs.append(listofRotatingCameras[split_start:split_stop]) with Pool(processes=agents) as pool: result = pool.map(fetchAllCameras, camera_bunchs) pool.close() else: fetchAllCameras(listofRotatingCameras)
def main(): reqArgs = [ ["o", "outputDir", "local directory to save diff image segments"], ["i", "inputDir", "input local directory containing nonSmoke image segments"], ["m", "minusMinutes", "subtract images from given number of minutes ago"], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) minusMinutes = int(args.minusMinutes) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 googleServices = goog_helper.getGoogleServices(settings, args) cookieJar = None camArchives = None cookieJar = img_archive.loginAjax() camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings) timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes) skippedBadParse = [] skippedArchive = [] imageFileNames = sorted(os.listdir(args.inputDir)) rowIndex = -1 for fileName in imageFileNames: rowIndex += 1 if rowIndex < startRow: continue if rowIndex > endRow: print('Reached end row', rowIndex, endRow) break if (fileName[:3] == 'v2_') or (fileName[:3] == 'v3_'): continue # skip replicated files logging.warning('Processing row %d, file: %s', rowIndex, fileName) parsedName = img_archive.parseFilename(fileName) if (not parsedName) or parsedName['diffMinutes'] or ('minX' not in parsedName): logging.warning('Skipping file with unexpected parsed data: %s, %s', fileName, str(parsedName)) skippedBadParse.append((rowIndex, fileName, parsedName)) continue # skip files without crop info or with diff matchingCams = list(filter(lambda x: parsedName['cameraID'] == x['id'], camArchives)) if len(matchingCams) != 1: logging.warning('Skipping camera without archive: %d, %s', len(matchingCams), str(matchingCams)) skippedArchive.append((rowIndex, fileName, matchingCams)) continue archiveDirs = matchingCams[0]['dirs'] logging.warning('Found %s directories', archiveDirs) earlierImgPath = None dt = datetime.datetime.fromtimestamp(parsedName['unixTime']) dt -= timeGapDelta for dirName in archiveDirs: logging.warning('Searching for files in dir %s', dirName) imgPaths = img_archive.getFilesAjax(cookieJar, settings.downloadDir, parsedName['cameraID'], dirName, dt, dt, 1) if imgPaths: earlierImgPath = imgPaths[0] break # done if not earlierImgPath: logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName) skippedArchive.append((rowIndex, fileName, dt)) continue logging.warning('Subtracting old image %s', earlierImgPath) earlierImg = Image.open(earlierImgPath) print('CR', (parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])) croppedEarlyImg = earlierImg.crop((parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])) imgOrig = Image.open(os.path.join(args.inputDir, fileName)) diffImg = img_archive.diffImages(imgOrig, croppedEarlyImg) parsedName['diffMinutes'] = minusMinutes diffImgPath = os.path.join(args.outputDir, img_archive.repackFileName(parsedName)) logging.warning('Saving new image %s', diffImgPath) diffImg.save(diffImgPath, format='JPEG') logging.warning('Skipped bad parse %d, %s', len(skippedBadParse), str(skippedBadParse)) logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
def main(): reqArgs = [ ["m", "mode", "add, delete, enable, disable, stats, or list"], ] optArgs = [ ["c", "cameraID", "ID of the camera (e.g., mg-n-mobo-c)"], ["u", "url", "url to get images from camera"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs) if settings.db_file: logging.warning('using sqlite %s', settings.db_file) dbManager = db_manager.DbManager(sqliteFile=settings.db_file) else: logging.warning('using postgres %s', settings.psqlHost) dbManager = db_manager.DbManager(psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) cameraInfos = dbManager.get_sources(activeOnly=False) logging.warning('Num all cameras: %d', len(cameraInfos)) logging.warning( 'Num active cameras: %d', len(list(filter(lambda x: x['dormant'] == 0, cameraInfos)))) if args.mode == 'list': logging.warning('All cameras: %s', list(map(lambda x: x['name'], cameraInfos))) return matchingCams = list( filter(lambda x: x['name'] == args.cameraID, cameraInfos)) logging.warning('Found %d matching cams for ID %s', len(matchingCams), args.cameraID) if args.mode == 'add': if len(matchingCams) != 0: logging.error('Camera with ID %s already exists: %s', args.cameraID, matchingCams) exit(1) dbRow = { 'name': args.cameraID, 'url': args.url, 'dormant': 0, 'randomID': random.random(), 'last_date': datetime.datetime.now().isoformat() } dbManager.add_data('sources', dbRow) logging.warning('Successfully added camera %s', args.cameraID) return if len(matchingCams) != 1: logging.error('Cannot find camera with ID %s: %s', args.cameraID, matchingCams) exit(1) camInfo = matchingCams[0] logging.warning('Cam details: %s', camInfo) if args.mode == 'del': sqlTemplate = """DELETE FROM sources WHERE name = '%s' """ execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False) return if args.mode == 'enable': if camInfo['dormant'] == 0: logging.error('Camera already enabled: dormant=%d', camInfo['dormant']) exit(1) sqlTemplate = """UPDATE sources SET dormant=0 WHERE name = '%s' """ execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False) return if args.mode == 'disable': if camInfo['dormant'] == 1: logging.error('Camera already disabled: dormant=%d', camInfo['dormant']) exit(1) sqlTemplate = """UPDATE sources SET dormant=1 WHERE name = '%s' """ execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False) return if args.mode == 'stats': sqlTemplate = """SELECT max(timestamp) as maxtime FROM scores WHERE CameraName = '%s' """ dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True) logging.warning('Most recent image scanned: %s', getTime(dbResult)) sqlTemplate = """SELECT max(timestamp) as maxtime FROM detections WHERE CameraName = '%s' """ dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True) logging.warning('Most recent smoke detection: %s', getTime(dbResult)) sqlTemplate = """SELECT max(timestamp) as maxtime FROM alerts WHERE CameraName = '%s' """ dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True) logging.warning('Most recent smoke alert: %s', getTime(dbResult)) return logging.error('Unexpected mode: %s', args.mode) exit(1)