def main():
    reqArgs = [
        ['o', 'outputFile', 'filename for output CSV of fire x camera matches with available archives'],
    ]
    optionalArgs = [
        ['g', 'longitude', 'longitude of fire', float],
        ['t', 'latitude', 'latitude of fire', float],
        ['s', 'startTime', 'start time of fire'],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optionalArgs, parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    outputFile = open(args.outputFile, 'w', newline='')
    outputCsv = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
    locMatches = getLocationMatches(dbManager, args.longitude, args.latitude, args.startTime)
    totalMatches = len(locMatches)
    numOutput = 0
    for rowNum, locMatch in enumerate(locMatches):
        timeDT = datetime.datetime.fromtimestamp(locMatch['timestamp'])
        cams = locMatch['cameraids'].split(',')
        availCams = []
        for cameraID in cams:
            if isCamArchiveAvailable(camArchives, cameraID, timeDT):
                availCams.append(cameraID)
        # logging.warning('availCams %d: %s', len(availCams), availCams)
        if len(availCams) > 0:
            outputRow(outputCsv, locMatch, timeDT, availCams)
            numOutput += 1
        if (rowNum % 10) == 0:
            logging.warning('Processing %d of %d, output %d', rowNum, totalMatches, numOutput)
    logging.warning('Processed %d, output %d', totalMatches, numOutput)
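# A minimal sketch of the isCamArchiveAvailable() helper called above -- an
# assumption, not the actual implementation.  It assumes each entry returned by
# img_archive.getHpwrenCameraArchives() is a dict with an 'id' (camera ID) plus
# 'startTime'/'endTime' datetime bounds; the real archive entries may differ.
def isCamArchiveAvailable(camArchives, cameraID, timeDT):
    for archive in camArchives:
        if archive['id'] != cameraID:
            continue
        # assumed keys: 'startTime' and 'endTime' are datetime bounds of the archive
        if archive['startTime'] <= timeDT <= archive['endTime']:
            return True
    return False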
def main():
    reqArgs = []
    optArgs = [
        ["n", "notes", "(optional) notes/comments (e.g., test) to associate with data"],
        ["z", "zipFile", "Name of the zip file containing the images"],
        ["d", "imgDirectory", "Name of the directory containing the images or ask:dir"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    imgDirectory = None
    if args.imgDirectory:
        imgDirectory = args.imgDirectory
        if imgDirectory == 'ask:dir':
            imgDirectory = askdirectory()
    elif args.zipFile:
        tempDir = unzipFile(args.zipFile)
        imgDirectory = tempDir.name
    if not imgDirectory:
        logging.error('Must specify either zipFile or imgDirectory')
        exit(1)
    googleServices = goog_helper.getGoogleServices(settings, args)
    processFolder(imgDirectory, googleServices, args.notes)
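# A minimal sketch of the unzipFile() helper called above -- not necessarily the
# actual implementation.  The caller reads tempDir.name, so this version returns
# a tempfile.TemporaryDirectory object (kept alive by the caller's reference).
import tempfile
import zipfile

def unzipFile(zipFilePath):
    tempDir = tempfile.TemporaryDirectory()
    with zipfile.ZipFile(zipFilePath, 'r') as zipRef:
        zipRef.extractall(tempDir.name)
    return tempDir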
def main(): reqArgs = [ ["o", "outputDir", "local directory to save images and segments"], ["i", "inputCsv", "csvfile with contents of Cropped Images"], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], [ "d", "display", "(optional) specify any value to display image and boxes" ], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 googleServices = goog_helper.getGoogleServices(settings, args) cameraCache = {} with open(args.inputCsv) as csvFile: csvreader = csv.reader(csvFile) for (rowIndex, csvRow) in enumerate(csvreader): if rowIndex < startRow: continue if rowIndex > endRow: logging.warning('Reached end row: %d, %d', rowIndex, endRow) exit(0) logging.warning('row %d: %s', rowIndex, csvRow[:2]) [cameraName, cropName] = csvRow[:2] if not cameraName: continue fileName = re.sub('_Crop[^.]+', '', cropName) # get back filename for whole image # TODO: update to img_archive.download... # dirID = getCameraDir(googleServices['drive'], cameraCache, fileName) # localFilePath = os.path.join(args.outputDir, fileName) # if not os.path.isfile(localFilePath): # goog_helper.downloadFile(googleServices['drive'], dirID, fileName, localFilePath) # logging.warning('local %s', fileName) cropInfo = re.findall('_Crop_(\d+)x(\d+)x(\d+)x(\d+)', cropName) if len(cropInfo) != 1: logging.error('Failed to parse crop info %s, %s', cropName, cropInfo) exit(1) cropInfo = list(map(lambda x: int(x), cropInfo[0])) logging.warning('Dims: %s', cropInfo) imgOrig = Image.open(localFilePath) rect_to_squares.cutBoxesFiles(imgOrig, args.outputDir, fileName, lambda x: checkCoords(x, cropInfo))
def main(): reqArgs = [ [ "s", "startTime", "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], ] optArgs = [ ["c", "cameraID", "ID (code name) of camera"], ['n', 'longitude', 'longitude of fire', float], ['t', 'latitude', 'latitude of fire', float], [ 'm', 'maxDistance', '(optional default=20) max distance in miles from fire', float ], [ "e", "endTime", "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)" ], [ "d", "durationMinutes", "alternative spec for endTime as start + duration", int ], [ "g", "gapMinutes", "override default of 1 minute gap between images to download" ], ["o", "outputDir", "directory to save the output image"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) googleServices = goog_helper.getGoogleServices(settings, args) gapMinutes = int(args.gapMinutes) if args.gapMinutes else 1 distanceMiles = float(args.maxDistance if args.maxDistance else 20) outputDir = args.outputDir if args.outputDir else settings.downloadDir startTimeDT = dateutil.parser.parse(args.startTime) if args.endTime: endTimeDT = dateutil.parser.parse(args.endTime) elif args.durationMinutes: durationDelta = datetime.timedelta(seconds=60 * args.durationMinutes) endTimeDT = startTimeDT + durationDelta else: endTimeDT = startTimeDT assert startTimeDT.year == endTimeDT.year assert startTimeDT.month == endTimeDT.month assert startTimeDT.day == endTimeDT.day assert endTimeDT >= startTimeDT if args.cameraID: assert (not args.latitude) and (not args.longitude) cameras = [args.cameraID] else: assert args.latitude and args.longitude dbManager = db_manager.DbManager(sqliteFile=settings.db_file, psqlHost=settings.psqlHost, psqlDb=settings.psqlDb, psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd) cameras = getNearbyCameras(dbManager, args.latitude, args.longitude, distanceMiles) logging.warning('Matched cmaeras: %s', cameras) camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives) allFiles = [] for cameraID in cameras: camFiles = img_archive.getHpwrenImages(googleServices, settings, outputDir, camArchives, cameraID, startTimeDT, endTimeDT, gapMinutes) if camFiles: allFiles += camFiles if allFiles: logging.warning('Found %d files.', len(allFiles)) else: logging.error('No filed matched')
def main(): reqArgs = [ ["o", "outputDir", "local directory to save diff image segments"], [ "i", "inputDir", "input local directory containing nonSmoke image segments" ], [ "m", "minusMinutes", "subtract images from given number of minutes ago" ], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], ] args = collect_args.collectArgs( reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) minusMinutes = int(args.minusMinutes) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 googleServices = goog_helper.getGoogleServices(settings, args) camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives) timeGapDelta = datetime.timedelta(seconds=60 * minusMinutes) skippedBadParse = [] skippedArchive = [] imageFileNames = sorted(os.listdir(args.inputDir)) rowIndex = -1 for fileName in imageFileNames: rowIndex += 1 if rowIndex < startRow: continue if rowIndex > endRow: print('Reached end row', rowIndex, endRow) break if (fileName[:3] == 'v2_') or (fileName[:3] == 'v3_') or (not 'mobo-c' in fileName): continue # skip replicated files logging.warning('Processing row %d, file: %s', rowIndex, fileName) parsedName = img_archive.parseFilename(fileName) if (not parsedName) or parsedName['diffMinutes'] or ( 'minX' not in parsedName): logging.warning( 'Skipping file with unexpected parsed data: %s, %s', fileName, str(parsedName)) skippedBadParse.append((rowIndex, fileName, parsedName)) continue # skip files without crop info or with diff parsedName['unixTime'] -= 60 * minusMinutes earlierName = img_archive.repackFileName(parsedName) earlierImgPath = os.path.join(settings.downloadDir, earlierName) if not os.path.isfile( earlierImgPath ): # if file has not been downloaded by a previous iteration dt = datetime.datetime.fromtimestamp(parsedName['unixTime']) dt -= timeGapDelta files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, parsedName['cameraID'], dt, dt, 1) if files: earlierImgPath = files[0] else: logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName) skippedArchive.append((rowIndex, fileName, dt)) continue logging.warning('Subtracting old image %s', earlierImgPath) earlierImg = Image.open(earlierImgPath) croppedEarlyImg = earlierImg.crop( (parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])) imgOrig = Image.open(os.path.join(args.inputDir, fileName)) diffImg = img_archive.diffImages(imgOrig, croppedEarlyImg) extremas = diffImg.getextrema() if extremas[0][0] == 128 or extremas[0][1] == 128 or extremas[1][ 0] == 128 or extremas[1][1] == 128 or extremas[2][ 0] == 128 or extremas[2][1] == 128: logging.warning('Skipping no diffs %s, name=%s', str(extremas), fileName) skippedBadParse.append((rowIndex, fileName, extremas)) continue parsedName['diffMinutes'] = minusMinutes diffImgPath = os.path.join(args.outputDir, img_archive.repackFileName(parsedName)) logging.warning('Saving new image %s', diffImgPath) diffImg.save(diffImgPath, format='JPEG') logging.warning('Skipped bad parse %d, %s', len(skippedBadParse), str(skippedBadParse)) logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
def main(): reqArgs = [ ["o", "outputDir", "local directory to save images segments"], ["i", "inputCsv", "csvfile with contents of Cropped Images"], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], ["d", "display", "(optional) specify any value to display image and boxes"], ["x", "minDiffX", "(optional) override default minDiffX of 299"], ["y", "minDiffY", "(optional) override default minDiffY of 299"], ["a", "minArea", "(optional) override default throw away areas < 1/100 of 299x299"], ["t", "throwSize", "(optional) override default throw away size of 598x598"], ["g", "growRatio", "(optional) override default grow ratio of 1.2"], ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"], ["r", "review", "(optional) download original crops without augmentation"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 minDiffX = int(args.minDiffX) if args.minDiffX else 299 minDiffY = int(args.minDiffY) if args.minDiffY else 299 throwSize = int(args.throwSize) if args.throwSize else 299*2 growRatio = float(args.growRatio) if args.growRatio else 1.2 minArea = int(args.minArea) if args.minArea else int(299*2.99) minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0 googleServices = goog_helper.getGoogleServices(settings, args) camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives) if minusMinutes: timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes) cameraCache = {} skippedTiny = [] skippedHuge = [] skippedArchive = [] with open(args.inputCsv) as csvFile: csvreader = csv.reader(csvFile) for (rowIndex, csvRow) in enumerate(csvreader): if rowIndex < startRow: continue if rowIndex > endRow: print('Reached end row', rowIndex, endRow) break [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6] minX = int(minX) minY = int(minY) maxX = int(maxX) maxY = int(maxY) oldCoords = (minX, minY, maxX, maxY) if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize): logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName) skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY)) continue if ((maxX - minX) * (maxY - minY)) < minArea: logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName) skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY))) continue nameParsed = img_archive.parseFilename(fileName) imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime']) localFilePath = os.path.join(settings.downloadDir, fileName) if not os.path.isfile(localFilePath):# if file has not been downloaded by a previous iteration files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], imgDT, imgDT, 1) localFilePath = files[0] imgOrig = Image.open(localFilePath) # if in subracted images mode, download an earlier image and subtract if minusMinutes: dt = imgDT - timeGapDelta nameParsed['unixTime'] -= 60*minusMinutes earlierName = img_archive.repackFileName(nameParsed) earlierImgPath = os.path.join(settings.downloadDir, earlierName) if not os.path.isfile(earlierImgPath):# if file has not been downloaded by a previous iteration files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], dt, dt, 1) if files: earlierImgPath = files[0] 
                    else:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                logging.warning('Subtracting old image %s', earlierImgPath)
                earlierImg = Image.open(earlierImgPath)
                diffImg = img_archive.diffImages(imgOrig, earlierImg)
                extremas = diffImg.getextrema()
                if (extremas[0][0] == 128 or extremas[0][1] == 128 or
                        extremas[1][0] == 128 or extremas[1][1] == 128 or
                        extremas[2][0] == 128 or extremas[2][1] == 128):
                    logging.warning('Skipping no diffs %s, name=%s', str(extremas), fileName)
                    skippedTiny.append((rowIndex, fileName, extremas))
                    continue
                # realImgOrig = imgOrig  # is this useful?
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            if args.review:
                cropCoords = [oldCoords]
            else:
                # crop the full sized image to show just the smoke, but shifted and flipped
                # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
                cropCoords = getCropCoords((minX, minY, maxX, maxY), minDiffX, minDiffY, growRatio,
                                           (imgOrig.size[0], imgOrig.size[1]))
            for newCoords in cropCoords:
                # XXXX - save work if old=new?
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                imgNameNoExt = str(os.path.splitext(fileName)[0])
                cropImgName = imgNameNoExt + '_Crop_' + 'x'.join(map(str, newCoords)) + '.jpg'
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop(newCoords)
                cropped_img.save(cropImgPath, format='JPEG')
                if not args.review:
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = imgNameNoExt + '_Crop_' + 'x'.join(map(str, newCoords)) + '_Flip.jpg'
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG')
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
            if args.display:
                displayCoords = [oldCoords] + cropCoords
                displayImageWithScores(imgOrig, displayCoords)
                imageDisplay(imgOrig)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
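# A simplified sketch of the getCropCoords() helper used above -- the real
# version also emits shifted variants for augmentation; this only shows the
# core step implied by its parameters: grow the smoke box by growRatio, pad it
# to at least minDiffX x minDiffY, and clamp it to the image bounds.
def getCropCoords(smokeCoords, minDiffX, minDiffY, growRatio, imgSize):
    (minX, minY, maxX, maxY) = smokeCoords
    (imgSizeX, imgSizeY) = imgSize
    centerX = (minX + maxX) / 2
    centerY = (minY + maxY) / 2
    sizeX = max((maxX - minX) * growRatio, minDiffX)
    sizeY = max((maxY - minY) * growRatio, minDiffY)
    # center the grown box, then clamp it inside the image
    newMinX = int(max(min(centerX - sizeX / 2, imgSizeX - sizeX), 0))
    newMinY = int(max(min(centerY - sizeY / 2, imgSizeY - sizeY), 0))
    newMaxX = int(min(newMinX + sizeX, imgSizeX))
    newMaxY = int(min(newMinY + sizeY, imgSizeY))
    return [(newMinX, newMinY, newMaxX, newMaxY)]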
def main(): reqArgs = [ ["o", "outputDir", "local directory to save images segments"], ["i", "inputCsv", "csvfile with contents of Cropped Images"], ] optArgs = [ ["s", "startRow", "starting row"], ["e", "endRow", "ending row"], ["x", "minSizeX", "(optional) override default minSizeX of 299"], ["y", "minSizeY", "(optional) override default minSizeY of 299"], ["a", "minArea", "(optional) override default 0 for minimum area"], ["t", "throwSize", "(optional) override default throw away size of 598x598"], ["g", "growRatio", "(optional) override default grow ratio of 1.2"], ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"], ["r", "recropType", "recrop type: 'raw', 'center', 'full', 'shift', 'augment' (default)"], ] args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()]) startRow = int(args.startRow) if args.startRow else 0 endRow = int(args.endRow) if args.endRow else 1e9 minSizeX = int(args.minSizeX) if args.minSizeX else 299 minSizeY = int(args.minSizeY) if args.minSizeY else 299 throwSize = int(args.throwSize) if args.throwSize else 299*2 growRatio = float(args.growRatio) if args.growRatio else 1.2 minArea = int(args.minArea) if args.minArea else 0 minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0 recropType = args.recropType if args.recropType else 'augment' random.seed(0) googleServices = goog_helper.getGoogleServices(settings, args) camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives) downloadDirCache = img_archive.cacheDir(settings.downloadDir, settings.downloadDir) if minusMinutes: timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes) cameraCache = {} skippedTiny = [] skippedHuge = [] skippedArchive = [] with open(args.inputCsv) as csvFile: csvreader = csv.reader(csvFile) for (rowIndex, csvRow) in enumerate(csvreader): if rowIndex < startRow: continue if rowIndex > endRow: print('Reached end row', rowIndex, endRow) break [_unused_cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6] minX = int(minX) minY = int(minY) maxX = int(maxX) maxY = int(maxY) oldCoords = (minX, minY, maxX, maxY) if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize): logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName) skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY)) continue if ((maxX - minX) * (maxY - minY)) < minArea: logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName) skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY))) continue nameParsed = img_archive.parseFilename(fileName) imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime']) (imgOrig, imgFilePath) = getArchiveImage(googleServices, downloadDirCache, camArchives, nameParsed['cameraID'], fileName, imgDT) if not imgOrig: logging.warning('Skip image without archive: %s', fileName) skippedArchive.append((rowIndex, fileName, imgDT)) continue # find coordinates for cropping if recropType == 'raw': cropCoords = [oldCoords] elif recropType == 'full': # useful for generating full diffs cropCoords = [(0, 0, imgOrig.size[0], imgOrig.size[1])] else: # crop the full sized image to show just the smoke, but shifted and flipped # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data cropCoords = getCropCoords((minX, minY, maxX, maxY), minSizeX, minSizeY, growRatio, (imgOrig.size[0], imgOrig.size[1]), recropType) fullImage = False if 
            assert fullImage or ('minX' not in nameParsed)  # disallow crops of crops

            # find extrema (min/max) crop coordinates to crop the original image to speed up processing
            extremaCoords = list(cropCoords[0])
            for coords in cropCoords:
                extremaCoords[0] = min(extremaCoords[0], coords[0])
                extremaCoords[1] = min(extremaCoords[1], coords[1])
                extremaCoords[2] = max(extremaCoords[2], coords[2])
                extremaCoords[3] = max(extremaCoords[3], coords[3])
            imgOrig = imgOrig.crop(extremaCoords)

            # if in subtracted images mode, download an earlier image and subtract
            if minusMinutes:
                if not img_archive.findCameraInArchive(camArchives, nameParsed['cameraID']):
                    earlierImg = None
                    files = img_archive.cacheFetchRange(downloadDirCache, nameParsed['cameraID'],
                                                        nameParsed['unixTime'], -minusMinutes * 60,
                                                        -10 * minusMinutes * 60)
                    if files:
                        earlierImg = findAlignedImage(imgFilePath, files, fullImage)
                    if not files or not earlierImg:
                        logging.warning('Skipping image without prior image: %s', fileName)
                        skippedArchive.append((rowIndex, fileName, None))
                        continue
                else:
                    nameParsed['unixTime'] -= 60 * minusMinutes
                    earlierName = img_archive.repackFileName(nameParsed)
                    dt = imgDT - timeGapDelta
                    (earlierImg, _) = getArchiveImage(googleServices, downloadDirCache, camArchives,
                                                      nameParsed['cameraID'], earlierName, dt)
                    if not earlierImg:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                    logging.warning('Subtracting old image %s', earlierName)
                earlierImg = earlierImg.crop(extremaCoords)
                diffImg = img_archive.diffWithChecks(imgOrig, earlierImg)
                if not diffImg:
                    skippedTiny.append((rowIndex, fileName))
                    continue
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            for newCoords in cropCoords:
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                parsed = img_archive.parseFilename(fileName)
                if not fullImage:
                    parsed['minX'] = newCoords[0]
                    parsed['minY'] = newCoords[1]
                    parsed['maxX'] = newCoords[2]
                    parsed['maxY'] = newCoords[3]
                if minusMinutes:
                    parsed['diffMinutes'] = 1
                cropImgName = img_archive.repackFileName(parsed)
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop((newCoords[0] - extremaCoords[0], newCoords[1] - extremaCoords[1],
                                            newCoords[2] - extremaCoords[0], newCoords[3] - extremaCoords[1]))
                cropped_img.save(cropImgPath, format='JPEG', quality=95)
                if recropType == 'augment':
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = cropImgName.replace('.jpg', '_Flip.jpg')
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG', quality=95)
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
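# Example invocations of the recrop script above (the script name is
# hypothetical; the flags match the reqArgs/optArgs defined in main()):
#   python recrop_images.py -i croppedImages.csv -o segments/ -r raw
#   python recrop_images.py -i croppedImages.csv -o segments/ -m 1 -r augment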