Example #1
def main():
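    # Download images of a given class (smoke, nonSmoke, motion) from Google Drive:
    # either every file name listed in a sheet cell range (-c), or a single named image (-i).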
    reqArgs = [["l", "imgClass", "image class (smoke, nonSmoke, motion)"],
               [
                   "o", "outputDir",
                   "local directory to save images and segments"
               ]]
    optArgs = [["c", "cellRange", "cells to read and process"],
               ["i", "image", "file name of the image in google drive"]]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])

    googleServices = goog_helper.getGoogleServices(settings, args)
    if args.cellRange:
        values = readFromMainSheet(googleServices['sheet'], args.cellRange)
        for [fileName] in values:
            print(fileName)
            goog_helper.downloadClassImage(googleServices['drive'],
                                           settings.IMG_CLASSES, args.imgClass,
                                           fileName, args.outputDir)
    if args.image:
        goog_helper.downloadClassImage(googleServices['drive'],
                                       settings.IMG_CLASSES, args.imgClass,
                                       args.image, args.outputDir)
Example #2
def main():
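    # Process a folder of fire images, taken either from a local directory (-d)
    # or an unzipped archive (-z), for the given fire and camera IDs.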
    reqArgs = [
        ["f", "fire", "ID of the fire in the images"],
        ["c", "camera", "ID of the camera used in the images"],
    ]
    optArgs = [
        ["z", "zipFile", "Name of the zip file containing the images"],
        ["d", "imgDirectory", "Name of the directory containing the images or ask:dir"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    imgDirectory = None
    if args.imgDirectory:
        imgDirectory = args.imgDirectory
        if imgDirectory == 'ask:dir':
            imgDirectory = askdirectory()
    elif args.zipFile:
        tempDir = unzipFile(args.zipFile)
        imgDirectory = tempDir.name

    if not imgDirectory:
        logging.error('Must specify either zipFile or imgDirectory')
        exit(1)

    googleServices = goog_helper.getGoogleServices(settings, args)
    processFolder(imgDirectory, args.camera, args.fire, googleServices)
Example #3
def main():
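    # For each row of the input CSV, download the referenced image from Google Drive,
    # cut the labeled rectangle into square segments, and save the crops locally.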
    reqArgs = [
        ["o", "outputDir", "local directory to save images and segments"],
        ["i", "inputCsv", "csvfile with contents of Fuego Cropped Images"],
    ]
    optArgs = [["s", "startRow", "starting row"],
               ["e", "endRow", "ending row"],
               [
                   "d", "display",
                   "(optional) specify any value to display image and boxes"
               ]]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9

    googleServices = goog_helper.getGoogleServices(settings, args)
    cameraCache = {}
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                exit(0)
            [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            dirID = getCameraDir(googleServices['drive'], cameraCache,
                                 fileName)
            localFilePath = os.path.join(args.outputDir, fileName)
            if not os.path.isfile(localFilePath):
                goog_helper.downloadFile(googleServices['drive'], dirID,
                                         fileName, localFilePath)
            imgOrig = Image.open(localFilePath)
            squareCoords = rect_to_squares.rect_to_squares(
                minX, minY, maxX, maxY, imgOrig.size[0], imgOrig.size[1],
                MIN_SIZE)
            # print(squareCoords)
            imgNameNoExt = str(os.path.splitext(fileName)[0])
            for coords in squareCoords:
                cropImgName = imgNameNoExt + '_Crop_' + 'x'.join(
                    list(map(lambda x: str(x), coords))) + '.jpg'
                cropImgPath = os.path.join(args.outputDir, 'cropped',
                                           cropImgName)
                cropped_img = imgOrig.crop(coords)
                cropped_img.save(cropImgPath, format='JPEG')
            print('Processed row: %s, file: %s, num squares: %d' %
                  (rowIndex, fileName, len(squareCoords)))
            if args.display:
                squareCoords.append((minX, minY, maxX, maxY))
                displayImageWithScores(imgOrig, squareCoords)
                imageDisplay(imgOrig)
Example #4
def main():
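    # Parse dates for all fires in the database that have not been parsed yet.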
    reqArgs = []
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=[],
        parentParsers=[goog_helper.getParentParser()])
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    fires = getUnparsedFires(dbManager)
    parseDates(dbManager, fires)
Example #5
def main():
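    # Insert fires from a fire_coords.py output file into the database.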
    reqArgs = [
        ["f", "fileName", "name of file containing fire_coords.py output"],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=[],
        parentParsers=[goog_helper.getParentParser()])
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    insertFires(dbManager, args.fileName)
Example #6
def main():
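    # Find duplicate files via sorted md5sum output and move them into destDir.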
    reqArgs = [
        ["f", "fileName", "name of file containing 'md5sum |sort' output "],
        ["d", "destDir", "name of directory where to move dupes"],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=[],
        parentParsers=[goog_helper.getParentParser()])
    checkDupes(args.fileName, args.destDir)
Example #7
def main():
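    # Geocode the fire locations in a calfire_parse.py output file using the
    # Google geocoding service.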
    reqArgs = [
        ["k", "key", "api key for google geocoding service"],
        ["f", "fileName", "name of file containing calfire_parse.py output"],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=[],
        parentParsers=[goog_helper.getParentParser()])
    gmaps = googlemaps.Client(key=args.key)
    getCoords(gmaps, args.fileName)
Example #8
def main():
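    # For each crop name in the input CSV, download the original whole image from
    # Google Drive and re-cut it into fixed-size boxes around the crop coordinates.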
    reqArgs = [
        ["o", "outputDir", "local directory to save images and segments"],
        ["i", "inputCsv", "csvfile with contents of Fuego Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        [
            "d", "display",
            "(optional) specify any value to display image and boxes"
        ],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9

    googleServices = goog_helper.getGoogleServices(settings, args)
    cameraCache = {}
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                logging.warning('Reached end row: %d, %d', rowIndex, endRow)
                exit(0)
            logging.warning('row %d: %s', rowIndex, csvRow[:2])
            [cameraName, cropName] = csvRow[:2]
            if not cameraName:
                continue
            fileName = re.sub('_Crop[^.]+', '',
                              cropName)  # get back filename for whole image
            dirID = getCameraDir(googleServices['drive'], cameraCache,
                                 fileName)
            localFilePath = os.path.join(args.outputDir, fileName)
            if not os.path.isfile(localFilePath):
                goog_helper.downloadFile(googleServices['drive'], dirID,
                                         fileName, localFilePath)
            logging.warning('local %s', fileName)
            cropInfo = re.findall(r'_Crop_(\d+)x(\d+)x(\d+)x(\d+)', cropName)
            if len(cropInfo) != 1:
                logging.error('Failed to parse crop info %s, %s', cropName,
                              cropInfo)
                exit(1)
            cropInfo = list(map(lambda x: int(x), cropInfo[0]))
            logging.warning('Dims: %s', cropInfo)
            imgOrig = Image.open(localFilePath)
            rect_to_squares.cutBoxesFixed(imgOrig, args.outputDir, fileName,
                                          lambda x: checkCoords(x, cropInfo))
Example #9
def main():
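    # Subtract image B from image A and save the difference as a JPEG.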
    reqArgs = [
        ["a", "imgA", "image to subtract from"],
        ["b", "imgB", "image to subtract"],
        ["o", "imgOutput", "output image"],
    ]
    optArgs = []

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    imgA = Image.open(args.imgA)
    imgB = Image.open(args.imgB)

    imgOut = img_archive.diffImages(imgA, imgB)
    imgOut.save(args.imgOutput, format='JPEG')
Example #10
def main():
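    # Download archived HPWREN images for the given camera over the given time
    # range (single time if no end time), spaced gapMinutes apart.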
    reqArgs = [
        ["c", "cameraID", "ID (code name) of camera"],
        [
            "s", "startTime",
            "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"
        ],
    ]
    optArgs = [
        [
            "e", "endTime",
            "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"
        ],
        [
            "g", "gapMinutes",
            "override default of 1 minute gap between images to download"
        ],
        ["o", "outputDir", "directory to save the output image"],
    ]

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    gapMinutes = int(args.gapMinutes) if args.gapMinutes else 1
    outputDir = args.outputDir if args.outputDir else settings.downloadDir
    startTimeDT = dateutil.parser.parse(args.startTime)
    if args.endTime:
        endTimeDT = dateutil.parser.parse(args.endTime)
    else:
        endTimeDT = startTimeDT
    assert startTimeDT.year == endTimeDT.year
    assert startTimeDT.month == endTimeDT.month
    assert startTimeDT.day == endTimeDT.day
    assert endTimeDT >= startTimeDT

    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'],
                                                      settings)
    files = img_archive.getHpwrenImages(googleServices, settings, outputDir,
                                        camArchives, args.cameraID,
                                        startTimeDT, endTimeDT, gapMinutes)
    if files:
        logging.warning('Found %d files.', len(files))
    else:
        logging.error('No matches for camera ID %s', args.cameraID)
Example #11
def main():
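    # Look up the raster value at the given latitude/longitude in a GeoTIFF file.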
    reqArgs = [
        ["g", "geoTiffName", "File name of geotiff"],
        ["a", "lat", "latitude of desired point", float],
        ["o", "long", "longtitude of desired point", float],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=[], parentParsers=[goog_helper.getParentParser()])
    tiffData = gdal.Open(args.geoTiffName)
    logging.warning('x: %d, y: %d', tiffData.RasterXSize, tiffData.RasterYSize)
    metadata = tiffData.GetGeoTransform()
    logging.warning('metadata: %s', metadata)
    specs = tiffData.ReadAsArray(xoff=0, yoff=0)
    logging.warning('specs: %s', specs)

    coordX = mapping_with_bounds(args.long, metadata[0], metadata[1], tiffData.RasterXSize)
    coordY = mapping_with_bounds(args.lat, metadata[3], metadata[5], tiffData.RasterYSize)
    if coordX is not None and coordY is not None:
        val = specs[coordY, coordX]  # numpy arrays index as [row, col], i.e., [y, x] (assumes a single-band GeoTIFF)
        logging.warning("The value is (%s)", val)
Example #12
def main():
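    # Split the smoke/nonSmoke images in inputDir into training and validation
    # sets and write them out as TFRecords files.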
    reqArgs = [
        [
            "i", "inputDir",
            "local directory containing both smoke and nonSmoke images"
        ],
        ["o", "outputDir", "local directory to write out TFRecords files"],
    ]
    optArgs = [[
        "t", "trainPercentage",
        "percentage of data to use for training vs. validation (default 90)"
    ]]

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    trainPercentage = int(args.trainPercentage) if args.trainPercentage else 90

    writeTFRecords(args.inputDir, args.outputDir, trainPercentage)
Example #13
def main():
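    # Invoke the ffmpeg Google Cloud Function for the given camera, pointing it
    # at a newly created Drive folder (which must be cleaned up afterward).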
    reqArgs = [
        ["c", "cameraID", "ID (code name) of camera"],
    ]
    optArgs = [
        ["l", "localhost", "localhost for testing"],
    ]
    
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    googleCreds = goog_helper.getCreds(settings, args)
    googleServices = goog_helper.getGoogleServices(settings, args)

    folderName = str(uuid.uuid4())
    folderID = goog_helper.createFolder(googleServices['drive'], settings.ffmpegFolder, folderName)
    url = settings.ffmpegUrl
    if args.localhost:
        url = 'http://localhost:8080'
    respData = callGCF(url, googleCreds, args.cameraID, folderID)
    logging.warning('GCF Result: %s', respData)
    logging.warning('New folder %s (%s) should be cleaned up', folderName, folderID)
Example #14
def main():
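    # Supervisor: start numProcesses detect_fire.py children, monitor their
    # heartbeat files, and kill/restart any child stuck for more than 4 minutes.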
    optArgs = [
        ["n", "numProcesses", "number of child prcesses to start (default 1)"],
        ["g", "useGpu", "(optional) specify any value to use gpu (default off)"],
        ["c", "collectPositves", "collect positive segments for training data"],
        ["r", "restrictType", "Only process images from cameras of given type"],
    ]
    args = collect_args.collectArgs([], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    numProcesses = int(args.numProcesses) if args.numProcesses else 1
    useGpu = bool(args.useGpu)

    if not useGpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    scriptName = 'detect_fire.py'
    procInfos = []
    for i in range(numProcesses):
        heartbeatFile = tempfile.NamedTemporaryFile()
        heartbeatFileName = heartbeatFile.name
        proc = startProcess(scriptName, heartbeatFileName, args.collectPositves, args.restrictType)
        procInfos.append({
            'proc': proc,
            'heartbeatFile': heartbeatFile,
            'heartbeatFileName': heartbeatFileName,
        })
        time.sleep(10) # 10 seconds per process to allow startup

    while True:
        for procInfo in procInfos:
            lastTS = lastHeartbeat(procInfo['heartbeatFileName']) # check heartbeat
            timestamp = int(time.time())
            proc = procInfo['proc']
            logging.debug('DBG: Process %d: %s: %d seconds since last image scanned, %d',
                            proc.pid, procInfo['heartbeatFileName'], timestamp - lastTS, lastTS)
            if (timestamp - lastTS) > 2*60: # warn if stuck more than 2 minutes
                logging.warning('Process %d: %d seconds since last image scanned', proc.pid, timestamp - lastTS)
            if (timestamp - lastTS) > 4*60: # kill if stuck more than 4 minutes
                logging.warning('Killing %d', proc.pid)
                proc.kill()
                procInfo['proc'] = startProcess(scriptName, procInfo['heartbeatFileName'], args.collectPositves, args.restrictType)
        time.sleep(30)
Example #15
def main():
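    # For each labeled smoke region in the input CSV, download the source image,
    # optionally subtract an earlier image (-m), and save shifted/flipped crops of
    # the region as training segments.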
    reqArgs = [
        ["o", "outputDir", "local directory to save images segments"],
        ["i", "inputCsv", "csvfile with contents of Fuego Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["d", "display", "(optional) specify any value to display image and boxes"],
        ["x", "minDiffX", "(optional) override default minDiffX of 299"],
        ["y", "minDiffY", "(optional) override default minDiffY of 299"],
        ["a", "minArea", "(optional) override default throw away areas < 1/100 of 299x299"],
        ["t", "throwSize", "(optional) override default throw away size of 1000x1000"],
        ["g", "growRatio", "(optional) override default grow ratio of 1.2"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    minDiffX = int(args.minDiffX) if args.minDiffX else 299
    minDiffY = int(args.minDiffY) if args.minDiffY else 299
    throwSize = int(args.throwSize) if args.throwSize else 1000
    growRatio = float(args.growRatio) if args.growRatio else 1.2
    minArea = int(args.minArea) if args.minArea else int(299*2.99)
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0

    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
    if minusMinutes:
        timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes)
    cameraCache = {}
    skippedTiny = []
    skippedHuge = []
    skippedArchive = []
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            oldCoords = (minX, minY, maxX, maxY)
            if ((maxX - minX) > throwSize) and ((maxY - minY) > throwSize):
                logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName)
                skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY))
                continue
            if ((maxX - minX) * (maxY - minY)) < minArea:
                logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName)
                skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY)))
                continue
            # get base image from google drive that was uploaded by sort_images.py
            # (lines tagged "REPLACE DEP. GDRIVE W HPWREN" mark the planned migration from
            # the deprecated Google Drive source to the HPWREN archives: "-" lines go away,
            # and the commented "+" block below replaces them)
            dirID = getCameraDir(googleServices['drive'], cameraCache, fileName)  # -REPLACE DEP. GDRIVE W HPWREN
            localFilePath = os.path.join(settings.downloadDir, fileName)  # path for the image; possibly not yet downloaded by this iteration
            print('local', localFilePath)
            if not os.path.isfile(localFilePath):  # skip download if a previous iteration already fetched the file
                print('download', fileName)
                # +REPLACE DEP. GDRIVE W HPWREN:
                # nameParsed = img_archive.parseFilename(fileName)  # parse file name into dict of parts (name, unixTime, etc.)
                # matchingCams = list(filter(lambda x: nameParsed['cameraID'] == x['id'], camArchives))  # filter camArchives for ids matching cameraID
                # if len(matchingCams) != 1:  # cannot determine where the image comes from, so cannot use it
                #     logging.warning('Skipping camera without archive: %d, %s', len(matchingCams), str(matchingCams))
                #     skippedArchive.append((rowIndex, fileName, matchingCams))
                #     continue
                # archiveDirs = matchingCams[0]['dirs']
                # logging.warning('Found %s directories', archiveDirs)
                # time = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
                # for dirName in archiveDirs:  # search the camera's directories for a nearby time
                #     logging.warning('Searching for files in dir %s', dirName)
                #     imgPaths = img_archive.downloadFilesHpwren(settings.downloadDir, nameParsed['cameraID'], dirName, time, time, 1, 0)
                #     if imgPaths:
                #         localFilePath = imgPaths[0]
                #         break
                # if not imgPaths:
                #     logging.warning('Skipping image not found: %s', fileName)
                #     skippedArchive.append((rowIndex, fileName, time))  # record that the image was skipped
                #     continue
                goog_helper.downloadFile(googleServices['drive'], dirID, fileName, localFilePath)  # -REPLACE DEP. GDRIVE W HPWREN
            imgOrig = Image.open(localFilePath)

            # if in subtracted-images mode, download an earlier image and subtract
            if minusMinutes:
                nameParsed = img_archive.parseFilename(fileName)  # parse file name into dict of parts (name, unixTime, etc.)
                dt = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
                dt -= timeGapDelta
                earlierImgPath = None
                files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], dt, dt, 1)
                if files:
                    earlierImgPath = files[0]
                else:
                    logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                    skippedArchive.append((rowIndex, fileName, dt))
                    continue
                logging.warning('Subtracting old image %s', earlierImgPath)
                earlierImg = Image.open(earlierImgPath)
                diffImg = img_archive.diffImages(imgOrig, earlierImg)
                # realImgOrig = imgOrig # is this useful?
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            # crop the full sized image to show just the smoke, but shifted and flipped
            # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
            cropCoords = getCropCoords((minX, minY, maxX, maxY), minDiffX, minDiffY, growRatio, (imgOrig.size[0], imgOrig.size[1]))
            for newCoords in cropCoords:
                # XXXX - save work if old=new?
                print('coords old,new', oldCoords, newCoords)
                imgNameNoExt = str(os.path.splitext(fileName)[0])
                cropImgName = imgNameNoExt + '_Crop_' + 'x'.join(list(map(lambda x: str(x), newCoords))) + '.jpg'
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop(newCoords)
                cropped_img.save(cropImgPath, format='JPEG')
                flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                flipImgName = imgNameNoExt + '_Crop_' + 'x'.join(list(map(lambda x: str(x), newCoords))) + '_Flip.jpg'
                flipImgPath = os.path.join(args.outputDir, flipImgName)
                flipped_img.save(flipImgPath, format='JPEG')
            print('Processed row: %s, file: %s' % (rowIndex, fileName))
            if args.display:
                displayCoords = [oldCoords] + cropCoords
                displayImageWithScores(imgOrig, displayCoords)
                imageDisplay(imgOrig)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
Example #16
def main():
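    # Find fires in the database matching the given location/time, check which of
    # their cameras have HPWREN archives available, and write the fire x camera
    # matches to a CSV.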
    reqArgs = [
        ['o', 'outputFile', 'filename for output CSV of fire x camera matches with available archives'],
    ]
    optionalArgs = [
        ['g', 'longitude', 'longitude of fire', float],
        ['t', 'latitude', 'latitude of fire', float],
        ['s', 'startTime', 'start time of fire'],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optionalArgs, parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file)
    outputFile = open(args.outputFile, 'w', newline='')
    outputCsv = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)

    locMatches = getLocationMatches(dbManager, args.longitude, args.latitude, args.startTime)
    totalMatches = len(locMatches)
    numOutput = 0
    for rowNum,locMatch in enumerate(locMatches):
        timeDT = datetime.datetime.fromtimestamp(locMatch['timestamp'])
        cams = locMatch['cameraids'].split(',')
        availCams = []
        for cameraID in cams:
            if isCamArchiveAvailable(camArchives, cameraID, timeDT):
                availCams.append(cameraID)
        # logging.warning('availCams %d: %s', len(availCams), availCams)
        if len(availCams) > 0:
            outputRow(outputCsv, locMatch, timeDT, availCams)
            numOutput += 1
        if (rowNum % 10) == 0:
            logging.warning('Processing %d of %d, output %d', rowNum, totalMatches, numOutput)

    logging.warning('Processed %d, output %d', totalMatches, numOutput)
Example #17
def main():
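    # Main detection loop: repeatedly fetch the next image (live or archived),
    # run the configured detection policy on it, and send alerts when fire is detected.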
    optArgs = [
        ["b", "heartbeat", "filename used for heartbeating check"],
        ["c", "collectPositves", "collect positive segments for training data"],
        ["d", "imgDirectory", "Name of the directory containing the images"],
        ["t", "time", "Time breakdown for processing images"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "restrictType", "Only process images from cameras of given type"],
        ["s", "startTime", "(optional) performs search with modifiedTime > startTime"],
        ["e", "endTime", "(optional) performs search with modifiedTime < endTime"],
    ]
    args = collect_args.collectArgs([], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
    tfConfig = tf.ConfigProto()
    tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.1  # hopefully reduces segfaults
    cameras = dbManager.get_sources(activeOnly=True, restrictType=args.restrictType)
    startTimeDT = dateutil.parser.parse(args.startTime) if args.startTime else None
    endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None
    timeRangeSeconds = None
    useArchivedImages = False
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
    DetectionPolicyClass = policies.get_policies()[settings.detectionPolicy]
    detectionPolicy = DetectionPolicyClass(settings, args, googleServices, dbManager, tfConfig, camArchives, minusMinutes, useArchivedImages)
    constants = { # dictionary of constants to reduce parameters in various functions
        'args': args,
        'googleServices': googleServices,
        'camArchives': camArchives,
        'dbManager': dbManager,
    }

    if startTimeDT or endTimeDT:
        assert startTimeDT and endTimeDT
        timeRangeSeconds = (endTimeDT-startTimeDT).total_seconds()
        assert timeRangeSeconds > 0
        assert args.collectPositves
        useArchivedImages = True
        random.seed(0) # fixed seed guarantees same randomized ordering.  Should make this optional argument in future

    processingTimeTracker = initializeTimeTracker()
    while True:
        classifyImgPath = None
        timeStart = time.time()
        if useArchivedImages:
            (cameraID, timestamp, imgPath, classifyImgPath) = \
                getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes)
        # elif minusMinutes: to be resurrected using archive functionality
        # elif args.imgDirectory:  unused functionality -- to delete?
        #     (cameraID, timestamp, imgPath, md5) = getNextImageFromDir(args.imgDirectory)
        else: # regular (non diff mode), grab image and process
            (cameraID, timestamp, imgPath, md5) = getNextImage(dbManager, cameras)
            classifyImgPath = imgPath
        if not cameraID:
            continue # skip to next camera
        timeFetch = time.time()

        image_spec = [{}]
        image_spec[-1]['path'] = classifyImgPath
        image_spec[-1]['timestamp'] = timestamp
        image_spec[-1]['cameraID'] = cameraID

        detectionResult = detectionPolicy.detect(image_spec)
        timeDetect = time.time()
        if detectionResult['fireSegment']:
            if checkAndUpdateAlerts(dbManager, cameraID, timestamp, detectionResult['driveFileIDs']):
                alertFire(constants, cameraID, imgPath, detectionResult['annotatedFile'], detectionResult['driveFileIDs'], detectionResult['fireSegment'], timestamp)
        deleteImageFiles(imgPath, imgPath, detectionResult['annotatedFile'])
        if (args.heartbeat):
            heartBeat(args.heartbeat)

        timePost = time.time()
        updateTimeTracker(processingTimeTracker, timePost - timeStart)
        if args.time:
            if not detectionResult['timeMid']:
                detectionResult['timeMid'] = timeDetect
            logging.warning('Timings: fetch=%.2f, detect0=%.2f, detect1=%.2f post=%.2f',
                timeFetch-timeStart, detectionResult['timeMid']-timeFetch, timeDetect-detectionResult['timeMid'], timePost-timeDetect)
Example #18
def main():
    """records the Forestry fire database
    Args:
        -c continual monitoring: continually monitor the forestry system
        -s start time override:  override the start time
        -e end time override:    override the end time
    Returns:
        None
    """
    reqArgs = []
    optArgs = [[
        "c", "continual_monitoring", "continually monitor the forestry system"
    ], ["s", "start_time_override", "override the start time"],
               ["e", "end_time_override", "override the end time"]]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    startTimeDT = None
    endTimeDT = None
    if args.start_time_override and args.end_time_override:
        startTimeDT = args.start_time_override
        endTimeDT = args.end_time_override
    elif args.start_time_override or args.end_time_override:
        logging.warning(
            "Please provide both a start (-s) and end (-e) time in the format 'YYYY-MM-DDThh:mm:ss' for your search"
        )
        return

    if args.continual_monitoring:
        csvFile = open('forestryDB_continual.csv', 'w')
        writer = csv.writer(csvFile)
        header1 = [
            "properties", "", "", "", "", "", "", "geometry", "", "", "type"
        ]
        header2 = [
            "ig_test", "ig_date", "created", "id", "ig_time", "ig_confidence",
            "ig_identity", "coordinates", "", "type"
        ]
        writer.writerow(header1)
        writer.writerow(header2)
        csvFile.close()
        while True:
            csvFile = open('forestryDB_continual.csv', 'a')
            writer = csv.writer(csvFile)
            startTimeDT = time.strftime('%Y-%m-%dT%H:%M:%S',
                                        time.localtime(time.time() - 10 * 60))
            endTimeDT = time.strftime('%Y-%m-%dT%H:%M:%S',
                                      time.localtime(time.time()))
            data = get_forestryDB(timefrom=startTimeDT, timeto=endTimeDT)
            tic = time.time()
            logging.warning('found %s fires', len(data))
            for elem in data:
                #upload_data
                values = unpack_forestryDB(elem)
                writer.writerow(values)
            csvFile.close()
            time.sleep(max(0, 10 * 60 - (time.time() - tic)))  # wait out the rest of the 10 minute interval
    else:
        if not (startTimeDT and endTimeDT):
            logging.warning("Please provide a start (-s) and end (-e) time, or use continual monitoring (-c)")
            return
        data = get_forestryDB(timefrom=startTimeDT, timeto=endTimeDT)
        if data is None:
            logging.warning("request could not be made")
            return
        file_name = 'forestryDBfrom' + startTimeDT + 'to' + endTimeDT + '.csv'
        csvFile = open(file_name.replace(":", ";").replace(" ", "T"), 'w')
        writer = csv.writer(csvFile)
        header1 = [
            "properties", "", "", "", "", "", "", "geometry", "", "", "type"
        ]
        header2 = [
            "ig_test", "ig_date", "created", "id", "ig_time", "ig_confidence",
            "ig_identity", "coordinates", "", "type"
        ]
        writer.writerow(header1)
        writer.writerow(header2)
        for elem in data:
            #upload_data
            values = unpack_forestryDB(elem)
            writer.writerow(values)

        csvFile.close()
Example #19
def main():
    """directs the funtionality of the process ie start a cleanup, record all cameras on 2min refresh, record a subset of cameras, manage multiprocessed recording of cameras
    Args:
        -c  cleaning_threshold" (flt): time in hours to store data
        -o  cameras_overide    (str): list of specific cameras to watch
        -a  agents            (int): number of agents to assign for parallelization
    Returns:
        None
    """
    reqArgs = []
    optArgs = [
        ["c", "cleaning_threshold", "time in hours to store data"],
        ["o", "cameras_overide", "specific cameras to watch"],
        ["a", "agents", "number of agents to assign for parallelization"]
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
    
    if args.cleaning_threshold:
        cleaning_threshold = float(args.cleaning_threshold)
        cleanup_archive(googleServices, dbManager, cleaning_threshold)
    if args.cameras_overide:
        listofRotatingCameras = list(args.cameras_overide.replace(" ", "").strip('[]').split(','))
    else:
        listofCameras = alertwildfire_API.get_all_camera_info()
        listofRotatingCameras = [camera["name"] for camera in listofCameras if (camera["name"][-1]=='2') ]
    if args.agents:
        agents = int(args.agents)
        # num of cameras per process
        test = "Axis-Briar2"

        temporaryDir = tempfile.TemporaryDirectory()
        trial = [x for x in range(0,10)]
        tic = time.time()
        for x in trial:
            capture_and_record(googleServices, dbManager, temporaryDir.name, test)
        toc = time.time() - tic
        toc_avg = toc / len(trial)
        # target estimate of camera refresh time
        target_refresh_time_per_camera = 12  # secs
        num_cameras_per_process = math.floor(target_refresh_time_per_camera / toc_avg)
        #future ability to re-adjust as needed

        # divvy up the cameras
        camera_bunchs = []
        num_of_processes_needed = math.ceil(len(listofRotatingCameras) / num_cameras_per_process)
        if num_of_processes_needed > agents:
            logging.warning('Unable to process all given cameras on this machine with %s agents while maintaining a target refresh rate of %s seconds; please reduce the number of cameras to fewer than %s', agents, target_refresh_time_per_camera, num_cameras_per_process * agents)
            return

        for num in range(0, num_of_processes_needed):
            split_start = num_cameras_per_process * num
            split_stop = split_start + num_cameras_per_process
            camera_bunchs.append(listofRotatingCameras[split_start:split_stop])

        with Pool(processes=agents) as pool:
            result = pool.map(fetchAllCameras, camera_bunchs)
            pool.close()
    else:
        fetchAllCameras(listofRotatingCameras)
Example #20
def main():
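    # For each nonSmoke image segment in inputDir, fetch the image from
    # minusMinutes earlier out of the HPWREN archive, subtract it, and save the
    # resulting diff segment to outputDir.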
    reqArgs = [
        ["o", "outputDir", "local directory to save diff image segments"],
        ["i", "inputDir", "input local directory containing nonSmoke image segments"],
        ["m", "minusMinutes", "subtract images from given number of minutes ago"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes)
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9

    googleServices = goog_helper.getGoogleServices(settings, args)
    cookieJar = img_archive.loginAjax()
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'], settings)
    timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes)
    skippedBadParse = []
    skippedArchive = []
    imageFileNames = sorted(os.listdir(args.inputDir))
    rowIndex = -1
    for fileName in imageFileNames:
        rowIndex += 1

        if rowIndex < startRow:
            continue
        if rowIndex > endRow:
            print('Reached end row', rowIndex, endRow)
            break

        if (fileName[:3] == 'v2_') or (fileName[:3] == 'v3_'):
            continue # skip replicated files
        logging.warning('Processing row %d, file: %s', rowIndex, fileName)
        parsedName = img_archive.parseFilename(fileName)

        if (not parsedName) or parsedName['diffMinutes'] or ('minX' not in parsedName):
            logging.warning('Skipping file with unexpected parsed data: %s, %s', fileName, str(parsedName))
            skippedBadParse.append((rowIndex, fileName, parsedName))
            continue # skip files without crop info or with diff
        matchingCams = list(filter(lambda x: parsedName['cameraID'] == x['id'], camArchives))
        if len(matchingCams) != 1:
            logging.warning('Skipping camera without archive: %d, %s', len(matchingCams), str(matchingCams))
            skippedArchive.append((rowIndex, fileName, matchingCams))
            continue
        archiveDirs = matchingCams[0]['dirs']
        logging.warning('Found %s directories', archiveDirs)
        earlierImgPath = None
        dt = datetime.datetime.fromtimestamp(parsedName['unixTime'])
        dt -= timeGapDelta
        for dirName in archiveDirs:
            logging.warning('Searching for files in dir %s', dirName)
            imgPaths = img_archive.getFilesAjax(cookieJar, settings.downloadDir, parsedName['cameraID'], dirName, dt, dt, 1)
            if imgPaths:
                earlierImgPath = imgPaths[0]
                break # done
        if not earlierImgPath:
            logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
            skippedArchive.append((rowIndex, fileName, dt))
            continue
        logging.warning('Subtracting old image %s', earlierImgPath)
        earlierImg = Image.open(earlierImgPath)
        print('CR', (parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY']))
        croppedEarlyImg = earlierImg.crop((parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY']))

        imgOrig = Image.open(os.path.join(args.inputDir, fileName))
        diffImg = img_archive.diffImages(imgOrig, croppedEarlyImg)
        parsedName['diffMinutes'] = minusMinutes
        diffImgPath = os.path.join(args.outputDir, img_archive.repackFileName(parsedName))
        logging.warning('Saving new image %s', diffImgPath)
        diffImg.save(diffImgPath, format='JPEG')
    logging.warning('Skipped bad parse %d, %s', len(skippedBadParse), str(skippedBadParse))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
Example #21
def main():
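    # Main detection loop (TensorFlow session variant): fetch the next image
    # (live, archived, or diffed against a deferred image), segment and classify
    # it, and record the filtered results.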
    optArgs = [
        ["b", "heartbeat", "filename used for heartbeating check"],
        [
            "c", "collectPositves",
            "collect positive segments for training data"
        ],
        ["d", "imgDirectory", "Name of the directory containing the images"],
        ["t", "time", "Time breakdown for processing images"],
        [
            "m", "minusMinutes",
            "(optional) subtract images from given number of minutes ago"
        ],
        [
            "r", "restrictType",
            "Only process images from cameras of given type"
        ],
        [
            "s", "startTime",
            "(optional) performs search with modifiedTime > startTime"
        ],
        [
            "e", "endTime",
            "(optional) performs search with modifiedTime < endTime"
        ],
    ]
    args = collect_args.collectArgs(
        [],
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost,
                                     psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser,
                                     psqlPasswd=settings.psqlPasswd)
    cameras = dbManager.get_sources(activeOnly=True,
                                    restrictType=args.restrictType)
    startTimeDT = dateutil.parser.parse(
        args.startTime) if args.startTime else None
    endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None
    timeRangeSeconds = None
    useArchivedImages = False
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'],
                                                      settings)
    constants = { # dictionary of constants to reduce parameters in various functions
        'args': args,
        'googleServices': googleServices,
        'camArchives': camArchives,
        'dbManager': dbManager,
    }
    if startTimeDT or endTimeDT:
        assert startTimeDT and endTimeDT
        timeRangeSeconds = (endTimeDT - startTimeDT).total_seconds()
        assert timeRangeSeconds > 0
        assert args.collectPositves
        useArchivedImages = True

    deferredImages = []
    processingTimeTracker = initializeTimeTracker()
    graph = tf_helper.load_graph(settings.model_file)
    labels = tf_helper.load_labels(settings.labels_file)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.1  # hopefully reduces segfaults
    with tf.Session(graph=graph, config=config) as tfSession:
        while True:
            classifyImgPath = None
            timeStart = time.time()
            if useArchivedImages:
                (cameraID, timestamp, imgPath, classifyImgPath) = \
                    getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes)
            elif minusMinutes:
                (queueFull, deferredImageInfo) = getDeferrredImgageInfo(
                    deferredImages, processingTimeTracker, minusMinutes,
                    timeStart)
                if not queueFull:  # queue is not full, so add more to queue
                    addToDeferredImages(dbManager, cameras, deferredImages)
                if deferredImageInfo:  # we have a deferred image ready to process, now get latest image and subtract
                    (cameraID, timestamp, imgPath, classifyImgPath) = \
                        genDiffImageFromDeferred(dbManager, cameras, deferredImageInfo, deferredImages, minusMinutes)
                    if not cameraID:
                        continue  # skip to next camera without deleting deferred image which may be reused later
                    os.remove(deferredImageInfo['imgPath'])  # no longer needed
                else:
                    continue  # in diff mode without deferredImage, nothing more to do
            # elif args.imgDirectory:  unused functionality -- to delete?
            #     (cameraID, timestamp, imgPath, md5) = getNextImageFromDir(args.imgDirectory)
            else:  # regular (non diff mode), grab image and process
                (cameraID, timestamp, imgPath,
                 md5) = getNextImage(dbManager, cameras)
                classifyImgPath = imgPath
            if not cameraID:
                continue  # skip to next camera
            timeFetch = time.time()

            segments = segmentAndClassify(classifyImgPath, tfSession, graph,
                                          labels)
            timeClassify = time.time()
            recordFilterReport(constants, cameraID, timestamp, classifyImgPath,
                               imgPath, segments, minusMinutes,
                               googleServices['drive'], useArchivedImages)
            timePost = time.time()
            updateTimeTracker(processingTimeTracker, timePost - timeStart)
            if args.time:
                logging.warning(
                    'Timings: fetch=%.2f, classify=%.2f, post=%.2f',
                    timeFetch - timeStart, timeClassify - timeFetch,
                    timePost - timeClassify)
Example #22
def main():
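    # Google Drive utility: upload, download, list, or batch-delete files in the
    # given directory, optionally filtered by modification time range.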
    reqArgs = [
        ["d", "dirID", "ID of google drive directory"],
        ["f", "fileName", "fileName of google drive file"],
    ]
    optArgs = [
        ["u", "upload", "(optional) performs upload vs. download"],
        [
            "s", "startTime",
            "(optional) performs search with modifiedTime > startTime"
        ],
        [
            "e", "endTime",
            "(optional) performs search with modifiedTime < endTime"
        ],
        ["l", "listOnly", "(optional) list vs. download"],
        [
            "r", "remove",
            "(optional) performs remove/delete vs. download (value must be 'delete')"
        ],
        [
            "m", "maxFiles",
            "override default of 100 for max number of files to operate on"
        ],
    ]

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    maxFiles = int(args.maxFiles) if args.maxFiles else 100
    googleServices = goog_helper.getGoogleServices(settings, args)

    # default mode is to download a single file
    operation = 'download'
    multipleFiles = False
    batchMode = True
    MAX_BATCH_SIZE = 25  # increasing size beyond 60 tends to generate http 500 errors

    if args.upload:
        operation = 'upload'
    elif args.remove:
        if args.remove != 'delete':
            logging.error(
                "value for remove must be 'delete', but instead is %s",
                args.remove)
            exit(1)
        operation = 'delete'
    elif args.listOnly:
        operation = 'list'

    if args.startTime or args.endTime:
        multipleFiles = True

    if not multipleFiles:
        if operation == 'upload':
            goog_helper.uploadFile(googleServices['drive'], args.dirID,
                                   args.fileName)
        else:
            assert operation == 'download'
            goog_helper.downloadFile(googleServices['drive'], args.dirID,
                                     args.fileName, args.fileName)
    else:
        nextPageToken = 'init'
        processedFiles = 0
        while True:
            batch = None
            batchCount = 0

            (items,
             nextPageToken) = goog_helper.searchFiles(googleServices['drive'],
                                                      args.dirID,
                                                      args.startTime,
                                                      args.endTime,
                                                      args.fileName,
                                                      npt=nextPageToken)
            firstLast = ''
            if len(items) > 0:
                firstLast = str(items[0]) + ' to ' + str(items[-1])
            logging.warning('Found %d files: %s', len(items), firstLast)

            if operation == 'list':
                logging.warning('All files: %s', items)
            for item in items:
                if operation == 'delete':
                    if batchMode:
                        if not batch:
                            batch = googleServices[
                                'drive'].new_batch_http_request(
                                    callback=delete_file)
                        batch.add(googleServices['drive'].files().delete(
                            fileId=item["id"], supportsTeamDrives=True))
                        batchCount += 1
                        if batchCount == MAX_BATCH_SIZE:
                            logging.warning('Execute batch with %d items',
                                            batchCount)
                            batch.execute()
                            batch = None
                            batchCount = 0
                    else:
                        googleServices['drive'].files().delete(
                            fileId=item["id"],
                            supportsTeamDrives=True).execute()
                elif operation == 'download':
                    goog_helper.downloadFileByID(googleServices['drive'],
                                                 item['id'], item['name'])
            if batch and batchCount:
                batch.execute()
                batch = None
            processedFiles += len(items)
            logging.warning('Processed %d of max %d. NextToken: %s',
                            processedFiles, maxFiles, bool(nextPageToken))
            if (processedFiles >= maxFiles) or not nextPageToken:
                break  # exit if we processed enough files or no files left
        logging.warning('Done')
Example #23
def main():
    """directs the funtionality of the process ie start a cleanup, record all cameras on 2min refresh, record a subset of cameras, manage multiprocessed recording of cameras
    Args:
        -c  cleaning_threshold" (flt): time in hours to store data
        -o  cameras_overide    (str): list of specific cameras to watch
        -a  agents            (int): number of agents to assign for parallelization
        -f  full_system       (Bool):monitor full system with as many agents as needed
    Returns:
        None
    """
    reqArgs = []
    optArgs = [
        ["c", "cleaning_threshold", "time in hours to store data"],
        ["o", "cameras_overide", "specific cameras to watch"],
        ["a", "agents", "number of agents to assign for parallelization"],
        [
            "f", "full_system",
            "toggle to cover all of alert wildfire with unrestrained parallelization"
        ]
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost,
                                     psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser,
                                     psqlPasswd=settings.psqlPasswd)

    if args.cleaning_threshold:
        cleaning_threshold = float(args.cleaning_threshold)
        cleanup_archive(googleServices, dbManager, cleaning_threshold)
    if args.cameras_overide:
        listofRotatingCameras = list(
            args.cameras_overide.replace(" ", "").strip('[]').split(','))
    else:
        listofCameras = alertwildfire_API.get_all_camera_info()
        listofRotatingCameras = [
            camera["name"] for camera in listofCameras
            if (camera["name"][-1] == '2')
        ]
    if args.agents:
        agents = int(args.agents)
        # num of cameras per process

        toc_avg = test_System_response_time(googleServices,
                                            dbManager,
                                            trial_length=10)
        # target estimate of camera refresh time
        target_refresh_time_per_camera = 12  #secs
        num_cameras_per_process = math.floor(target_refresh_time_per_camera /
                                             toc_avg)
        #future ability to re-adjust as needed

        #divy the cameras
        camera_bunchs = []
        num_of_processes_needed = math.ceil(
            len(listofRotatingCameras) / num_cameras_per_process)
        if num_of_processes_needed > agents:
            logging.warning(
                'unable to process all given cameras on this machine with %s agents and maintain a target refresh rate of %s seconds, please reduce number of cameras to less than %s or increase number of agents to %s',
                agents, target_refresh_time_per_camera,
                num_cameras_per_process * agents, num_of_processes_needed)
            return
        num_cameras_per_process = math.floor(
            len(listofRotatingCameras) / agents)
        for num in range(0, num_of_processes_needed):
            split_start = num_cameras_per_process * num
            split_stop = num_cameras_per_process * num + num_cameras_per_process
            camera_bunchs.append(listofRotatingCameras[split_start:split_stop])

        with Pool(processes=agents) as pool:
            result = pool.map(fetchAllCameras, camera_bunchs)
            pool.close()
    else:
        if args.full_system:
            response_time_per_camera = test_System_response_time(
                googleServices, dbManager, trial_length=10)
            listofCameras = alertwildfire_API.get_all_camera_info()
            target_refresh_time_per_camera = {}
            listofTargetCameras = {}
            num_of_processes_needed = {}
            num_cameras_per_process = {}
            num_of_agents_needed = 0
            # target estimate of camera refresh time
            target_refresh_time_per_camera["rotating"] = 12  #secs
            target_refresh_time_per_camera["stationary"] = 60  #secs
            #separation of data by type
            listofTargetCameras["rotating"] = [
                camera["name"] for camera in listofCameras
                if (camera["name"][-1] == '2')
            ]
            listofTargetCameras["stationary"] = [
                camera["name"] for camera in listofCameras
                if (camera["name"][-1] != '2')
            ]
            camera_bunchs = []
            for camType in ["rotating", "stationary"]:  # avoid shadowing the builtin 'type'
                num_cameras_per_process[camType] = math.floor(
                    target_refresh_time_per_camera[camType] /
                    response_time_per_camera)
                # divvy up rotating and stationary cameras to maximize efficiency
                num_of_processes_needed[camType] = math.ceil(
                    len(listofTargetCameras[camType]) /
                    num_cameras_per_process[camType])
                num_cameras_per_process[camType] = math.floor(
                    len(listofTargetCameras[camType]) /
                    num_of_processes_needed[camType])
                num_of_agents_needed += num_of_processes_needed[camType]
                for num in range(0, num_of_processes_needed[camType]):
                    split_start = num_cameras_per_process[camType] * num
                    split_stop = split_start + num_cameras_per_process[camType]
                    camera_bunchs.append(
                        listofTargetCameras[camType][split_start:split_stop])

            with Pool(processes=num_of_agents_needed) as pool:
                result = pool.map(fetchAllCameras, camera_bunchs)
                pool.close()

        else:
            fetchAllCameras(listofRotatingCameras)