Example #1
def main():
    reqArgs = [
        ["o", "outputFile", "output file name"],
        ["i", "inputCsv", "csvfile with fire/detection data"],
        ['m', "mode", "mode: votepoly or camdir or pruned"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    mode = args.mode
    assert mode in ('votepoly', 'camdir', 'pruned')
    outFile = open(args.outputFile, 'w')
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)

    lastCam = None
    lastTime = None
    random.seed(0)
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            if mode == 'votepoly':
                [cameraID, timestamp, score, polygon, sourcePolygons, isRealFire] = csvRow[:6]
                timestamp = int(timestamp)
                logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
                if cameraID == lastCam and timestamp == lastTime:
                    logging.warning('Duplicate row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
                lastCam = cameraID
                lastTime = timestamp
                centroid = getCentroid(polygon)
                if timestamp < 1607786165: #sourcePolygons didn't exist before this
                    if isRealFire:
                        numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
                    else:
                        numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyOther))
                else:
                    numPolys = 1
                    if sourcePolygons:
                        sourcePolygonsArr = json.loads(sourcePolygons)
                        numPolys = len(sourcePolygonsArr)
                cameraID = patchCameraId(cameraID)
                camInfo = dbManager.getCameraMapLocation(cameraID)
                if camInfo is None:
                    logging.warning('Skipping row with camera without meta %s', cameraID)
                    continue
                (mapImgGCS, camLatitude, camLongitude) = camInfo
            else:
                if mode == 'camdir':
                    [cameraID, isoTime, direction] = csvRow[:3]
                    logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, isoTime)
                    timestamp = time.mktime(dateutil.parser.parse(isoTime).timetuple())
                    if 'center left' in direction:
                        offset = -20
                    elif 'center right' in direction:
                        offset = 20
                    elif 'center' in direction:
                        offset = 0
                    elif 'left' in direction:
                        offset = -40
                    elif 'right' in direction:
                        offset = 40
                    else:
                        logging.error('Unexpected dir row: %d, dir: %s', rowIndex, direction)
                        continue
                elif mode == 'pruned':
                    [_cropName, minX, _minY, maxX, _maxY, fileName] = csvRow[:6]
                    minX = int(minX)
                    maxX = int(maxX)
                    nameParsed = img_archive.parseFilename(fileName)
                    cameraID = nameParsed['cameraID']
                    cameraID = patchCameraId(cameraID)
                    timestamp = nameParsed['unixTime']
                    dateStr = nameParsed['isoStr'][:nameParsed['isoStr'].index('T')]
                    if dateStr == lastTime and cameraID == lastCam:
                        # logging.warning('Skip same fire. row %s', rowIndex)
                        continue
                    lastCam = cameraID
                    lastTime = dateStr
                    localFilePath = os.path.join(settings.downloadDir, fileName)
                    if not os.path.isfile(localFilePath):
                        logging.warning('Skip missing file %s, row %s', fileName, rowIndex)
                        continue
                    img = Image.open(localFilePath)
                    degreesInView = img_archive.getCameraFov(cameraID)
                    centerX = (minX + maxX) / 2
                    offset = centerX / img.size[0] * degreesInView - degreesInView/2
                    img.close()
                (mapImgGCS, camLatitude, camLongitude) = dbManager.getCameraMapLocation(cameraID)
                camHeading = img_archive.getHeading(cameraID)
                heading = (camHeading + offset) % 360
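                # The lines below convert a compass heading (0 = north, clockwise)
                # to a math angle (0 = east, counterclockwise) and project the fire
                # location outward using a flat-earth approximation (longitude is
                # not scaled by cos(latitude), so east-west offsets stretch at
                # higher latitudes).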
                angle = 90 - heading
                distanceDegrees = 0.2 # approx 14 miles
                fireLat = camLatitude + math.sin(angle*math.pi/180)*distanceDegrees
                fireLong = camLongitude + math.cos(angle*math.pi/180)*distanceDegrees
                centroid = (fireLat, fireLong)
                score = getRandInterpolatedVal(settings.percentilesScoreFire)
                numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
                isRealFire = 1
                logging.warning('Processing row: %d, heading: %s, centroid: %s, score: %s, numpoly: %s', rowIndex, heading, centroid, score, numPolys)
            if not keepData(score, centroid, numPolys, isRealFire):
                logging.warning('Skipping Mexico fire row %d, camera %s', rowIndex, cameraID)
                continue
            (weatherCentroid, weatherCamera) = weather.getWeatherData(dbManager, cameraID, timestamp, centroid, (camLatitude, camLongitude))
            if not weatherCentroid:
                logging.warning('Skipping row %d', rowIndex)
                continue
            # logging.warning('Weather %s', weatherCentroid)
            outputWithWeather(outFile, score, timestamp, centroid, numPolys, weatherCentroid, weatherCamera, isRealFire)

            logging.warning('Processed row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
    outFile.close()
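
Example #1 leans on module helpers (getCentroid, patchCameraId, keepData, getRandInterpolatedVal) defined elsewhere in the file. As a rough illustration of the sampling step, here is a minimal sketch of what getRandInterpolatedVal might look like, assuming each settings.percentiles* value is a list of evenly spaced percentile cutoffs:

import random

def getRandInterpolatedVal(percentiles):
    # Hypothetical sketch: pick a uniformly random point along the percentile
    # curve and linearly interpolate between the two neighboring cutoffs.
    sample = random.random() * (len(percentiles) - 1)
    low = int(sample)
    frac = sample - low
    return percentiles[low] * (1 - frac) + percentiles[low + 1] * frac
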
Example #2
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save images segments"],
        ["i", "inputCsv", "csvfile with contents of Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["d", "display", "(optional) specify any value to display image and boxes"],
        ["x", "minDiffX", "(optional) override default minDiffX of 299"],
        ["y", "minDiffY", "(optional) override default minDiffY of 299"],
        ["a", "minArea", "(optional) override default throw away areas < 1/100 of 299x299"],
        ["t", "throwSize", "(optional) override default throw away size of 598x598"],
        ["g", "growRatio", "(optional) override default grow ratio of 1.2"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "review", "(optional) download original crops without augmentation"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    minDiffX = int(args.minDiffX) if args.minDiffX else 299
    minDiffY = int(args.minDiffY) if args.minDiffY else 299
    throwSize = int(args.throwSize) if args.throwSize else 299*2
    growRatio = float(args.growRatio) if args.growRatio else 1.2
    minArea = int(args.minArea) if args.minArea else int(299*2.99)
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0

    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    if minusMinutes:
        timeGapDelta = datetime.timedelta(seconds=60 * minusMinutes)
    cameraCache = {}
    skippedTiny = []
    skippedHuge = []
    skippedArchive = []
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            oldCoords = (minX, minY, maxX, maxY)
            if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize):
                logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName)
                skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY))
                continue
            if ((maxX - minX) * (maxY - minY)) < minArea:
                logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName)
                skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY)))
                continue

            nameParsed = img_archive.parseFilename(fileName)
            imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
            localFilePath = os.path.join(settings.downloadDir, fileName)
            if not os.path.isfile(localFilePath):  # if file has not been downloaded by a previous iteration
                files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], imgDT, imgDT, 1)
                localFilePath = files[0]
            imgOrig = Image.open(localFilePath)

            # if in subtracted-images mode, download an earlier image and subtract
            if minusMinutes:
                dt = imgDT - timeGapDelta
                nameParsed['unixTime'] -= 60*minusMinutes
                earlierName = img_archive.repackFileName(nameParsed)
                earlierImgPath = os.path.join(settings.downloadDir, earlierName)
                if not os.path.isfile(earlierImgPath):  # if file has not been downloaded by a previous iteration
                    files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], dt, dt, 1)
                    if files:
                        earlierImgPath = files[0]
                    else:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                logging.warning('Subtracting old image %s', earlierImgPath)
                earlierImg = Image.open(earlierImgPath)
                diffImg = img_archive.diffImages(imgOrig, earlierImg)
                extremas = diffImg.getextrema()
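                # diffImages presumably re-centers pixel differences at mid-gray
                # (128); if any band's min or max is still exactly 128, no pixel
                # changed in that direction, so treat the frame as having no diff.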
                if any(bandMin == 128 or bandMax == 128 for (bandMin, bandMax) in extremas[:3]):
                    logging.warning('Skipping no diffs %s, name=%s', str(extremas), fileName)
                    skippedTiny.append((rowIndex, fileName, extremas))
                    continue
                # realImgOrig = imgOrig # is this useful?
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            if args.review:
                cropCoords = [oldCoords]
            else:
                # crop the full sized image to show just the smoke, but shifted and flipped
                # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
                cropCoords = getCropCoords((minX, minY, maxX, maxY), minDiffX, minDiffY, growRatio, (imgOrig.size[0], imgOrig.size[1]))
            for newCoords in cropCoords:
                # XXXX - save work if old=new?
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                imgNameNoExt = str(os.path.splitext(fileName)[0])
                cropImgName = imgNameNoExt + '_Crop_' + 'x'.join(map(str, newCoords)) + '.jpg'
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop(newCoords)
                cropped_img.save(cropImgPath, format='JPEG')
                if not args.review:
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = imgNameNoExt + '_Crop_' + 'x'.join(map(str, newCoords)) + '_Flip.jpg'
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG')
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
            if args.display:
                displayCoords = [oldCoords] + cropCoords
                displayImageWithScores(imgOrig, displayCoords)
                imageDisplay(imgOrig)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
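
getCropCoords, displayImageWithScores, and imageDisplay are helpers from the same module. A minimal sketch of the grow-and-shift idea behind getCropCoords, assuming the grown box still fits inside the image:

import random

def getCropCoords(box, minDiffX, minDiffY, growRatio, imgSize):
    # Hypothetical sketch: grow the labeled box by growRatio, enforce the
    # minimum crop size, and emit a few randomly shifted variants clamped
    # to the image bounds.
    (minX, minY, maxX, maxY) = box
    sizeX = max(int((maxX - minX) * growRatio), minDiffX)
    sizeY = max(int((maxY - minY) * growRatio), minDiffY)
    centerX = (minX + maxX) // 2
    centerY = (minY + maxY) // 2
    coords = []
    for _ in range(3):
        shiftX = random.randint(-sizeX // 4, sizeX // 4)
        shiftY = random.randint(-sizeY // 4, sizeY // 4)
        newMinX = min(max(centerX + shiftX - sizeX // 2, 0), imgSize[0] - sizeX)
        newMinY = min(max(centerY + shiftY - sizeY // 2, 0), imgSize[1] - sizeY)
        coords.append((newMinX, newMinY, newMinX + sizeX, newMinY + sizeY))
    return coords
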
Example #3
def main():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet down tensorflow logging

    reqArgs = [
        ["d", "directory", "directory containing the image sets"],
        ["o", "outputFile", "output file name"],
    ]
    optArgs = [
        ["l", "labels", "labels file generated during retraining"],
        ["m", "model", "model file generated during retraining"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    model_file = args.model if args.model else settings.model_file
    labels_file = args.labels if args.labels else settings.labels_file
    DetectionPolicyClass = policies.get_policies()[settings.detectionPolicy]
    detectionPolicy = DetectionPolicyClass(args,
                                           None,
                                           0,
                                           stateless=True,
                                           modelLocation=model_file)

    image_name = ["Image"]
    crop_name = ["Crop"]
    score_name = ["Score"]
    class_name = ["Class"]

    smokeDir = os.path.join(args.directory, 'test_set_smoke')
    smoke_image_list = listJpegs(smokeDir)
    logging.warning('Found %d images of smoke', len(smoke_image_list))
    nonSmokeDir = os.path.join(args.directory, 'test_set_other')
    other_image_list = listJpegs(nonSmokeDir)
    logging.warning('Found %d images of nonSmoke', len(other_image_list))

    smokeFile = os.path.join(args.directory, 'test_smoke.txt')
    np.savetxt(smokeFile, smoke_image_list, fmt="%s")
    nonSmokeFile = os.path.join(args.directory, 'test_other.txt')
    np.savetxt(nonSmokeFile, other_image_list, fmt="%s")
    outFile = open(args.outputFile, 'w')

    (i, cr, s, cl, positives,
     negatives) = classifyImages(detectionPolicy, smoke_image_list, 'smoke',
                                 args.outputFile)
    image_name += i
    crop_name += cr
    score_name += s
    class_name += cl
    logging.warning('Done with smoke images')
    truePositive = len(positives)
    falseNegative = len(smoke_image_list) - len(positives)
    logging.warning('True Positive: %d', truePositive)
    logging.warning('False Negative: %d', falseNegative)
    outFile.write('True Positives: ' + ', '.join(positives) + '\n')
    outFile.write('False Negatives: ' + ', '.join(negatives) + '\n')

    (i, cr, s, cl, positives,
     negatives) = classifyImages(detectionPolicy, other_image_list, 'other',
                                 args.outputFile)
    image_name += i
    crop_name += cr
    score_name += s
    class_name += cl
    logging.warning('Done with nonSmoke images')
    falsePositive = len(positives)
    trueNegative = len(other_image_list) - len(positives)
    logging.warning('False Positive: %d', falsePositive)
    logging.warning('True Negative: %d', trueNegative)
    outFile.write('False Positives: ' + ', '.join(positives) + '\n')
    outFile.write('True Negatives: ' + ', '.join(negatives) + '\n')

    accuracy = safeDiv(
        truePositive + trueNegative,
        truePositive + trueNegative + falsePositive + falseNegative)
    logging.warning('Accuracy: %f', accuracy)
    precision = safeDiv(truePositive, truePositive + falsePositive)
    logging.warning('Precision: %f', precision)
    recall = safeDiv(truePositive, truePositive + falseNegative)
    logging.warning('Recall: %f', recall)
    f1 = safeDiv(2 * precision * recall, precision + recall)
    logging.warning('F1: %f', f1)

    test_data = [image_name, crop_name, score_name, class_name]
    np.savetxt(outFile, np.transpose(test_data), fmt="%s")
    outFile.close()
    print("DONE")
Example #4
def main():
    reqArgs = [
        ["i", "inputDir", "directory containing TFRecord files"],
        [
            "o", "outputDir",
            "directory to write out checkpoints and tensorboard logs"
        ],
        ["a", "algorithm", "adam, nadam, or rmsprop"],
    ]
    optArgs = [
        [
            "m", "maxEpochs", "(optional) max number of epochs (default 1000)",
            int
        ],
        ["r", "resumeModel", "resume training from given saved model"],
        ["s", "startEpoch", "epoch to resume from (epoch from resumeModel)"],
        ["t", "stepsPerEpoch", "(optional) number of steps per epoch", int],
        [
            "v", "valStepsPerEpoch",
            "(optional) number of validation steps per epoch", int
        ],
    ]

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])

    batch_size = 64
    max_epochs = args.maxEpochs if args.maxEpochs else 1000
    steps_per_epoch = args.stepsPerEpoch if args.stepsPerEpoch else 2000
    overshoot_epochs = 30  # stop training if validation loss hasn't decreased within this many epochs
    val_steps = args.valStepsPerEpoch if args.valStepsPerEpoch else 200
    # val_steps is only needed for now because of a bug in TF 2.0 that should be fixed in the next version.
    # TODO: either set this to (number of validation examples / batch size), which requires counting the
    # validation examples, or upgrade to TF 2.1 when it's ready and automatically go through the whole set.

    train_filenames = glob.glob(
        os.path.join(args.inputDir, 'firecam_train_*.tfrecord'))
    val_filenames = glob.glob(
        os.path.join(args.inputDir, 'firecam_validation_*.tfrecord'))
    logging.warning('Found %d training files, and %d validation files',
                    len(train_filenames), len(val_filenames))
    if (len(train_filenames) == 0) or (len(val_filenames) == 0):
        logging.error('Could not find data in %s', args.inputDir)
        exit(1)

    raw_dataset_train = tf.data.TFRecordDataset(train_filenames)
    raw_dataset_val = tf.data.TFRecordDataset(val_filenames)

    dataset_train = raw_dataset_train.map(_parse_function).repeat(
        max_epochs * steps_per_epoch).shuffle(batch_size * 5).batch(batch_size)
    dataset_val = raw_dataset_val.map(_parse_function).repeat().batch(
        batch_size)

    if args.resumeModel:
        inception = tf_helper.loadModel(args.resumeModel)
        assert int(args.startEpoch) > 0
        initial_epoch = int(args.startEpoch)
    else:
        inception = keras.applications.inception_v3.InceptionV3(
            weights=None, include_top=True, input_tensor=None, classes=2)
        initial_epoch = 0
    if args.algorithm == "adam":
        # optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
        optimizer = tf.keras.optimizers.Adam(decay=1e-06, amsgrad=True)
    elif args.algorithm == "nadam":
        optimizer = tf.keras.optimizers.Nadam()
    elif args.algorithm == "rmsprop":
        optimizer = tf.keras.optimizers.RMSprop(decay=1e-06)
    else:
        logging.error('Unsupported algo %s', args.algorithm)
        exit(1)

    inception.compile(optimizer=optimizer,
                      loss=tf.keras.losses.BinaryCrossentropy(),
                      metrics=['accuracy'])

    logdir = os.path.join(args.outputDir,
                          datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    callbacks = [
        keras.callbacks.EarlyStopping(monitor='val_loss',
                                      patience=overshoot_epochs),
        keras.callbacks.ModelCheckpoint(filepath=os.path.join(
            args.outputDir, 'model_{epoch}'),
                                        monitor='val_loss',
                                        save_best_only=True),
        LRTensorBoard(log_dir=logdir)
    ]

    logging.warning('Start training')
    inception.fit(dataset_train,
                  validation_data=dataset_val,
                  epochs=max_epochs,
                  initial_epoch=initial_epoch,
                  steps_per_epoch=steps_per_epoch,
                  validation_steps=val_steps,
                  callbacks=callbacks)
    logging.warning('Done training')
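
_parse_function decodes each TFRecord into an (image, label) pair for the datasets above. A minimal sketch, assuming each record stores a JPEG under 'image' and an integer class under 'label' (the actual feature names and shapes are assumptions):

import tensorflow as tf

def _parse_function(example_proto):
    # Hypothetical sketch: the real feature spec may differ.
    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    image = tf.image.decode_jpeg(parsed['image'], channels=3)
    image = tf.image.resize(image, [299, 299]) / 255.0  # InceptionV3 input size
    label = tf.one_hot(parsed['label'], depth=2)  # two classes: smoke / other
    return (image, label)
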
Example #5
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save images segments"],
        ["i", "inputCsv", "csvfile with contents of Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["x", "minSizeX", "(optional) override default minSizeX of 299"],
        ["y", "minSizeY", "(optional) override default minSizeY of 299"],
        ["a", "minArea", "(optional) override default 0 for minimum area"],
        ["t", "throwSize", "(optional) override default throw away size of 598x598"],
        ["g", "growRatio", "(optional) override default grow ratio of 1.2"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "recropType", "recrop type: 'raw', 'center', 'full', 'shift', 'augment' (default)"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    minSizeX = int(args.minSizeX) if args.minSizeX else 299
    minSizeY = int(args.minSizeY) if args.minSizeY else 299
    throwSize = int(args.throwSize) if args.throwSize else 299*2
    growRatio = float(args.growRatio) if args.growRatio else 1.2
    minArea = int(args.minArea) if args.minArea else 0
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    recropType = args.recropType if args.recropType else 'augment'

    random.seed(0)
    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    downloadDirCache = img_archive.cacheDir(settings.downloadDir, settings.downloadDir)

    if minusMinutes:
        timeGapDelta = datetime.timedelta(seconds=60 * minusMinutes)
    cameraCache = {}
    skippedTiny = []
    skippedHuge = []
    skippedArchive = []
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            [_unused_cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            oldCoords = (minX, minY, maxX, maxY)
            if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize):
                logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName)
                skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY))
                continue
            if ((maxX - minX) * (maxY - minY)) < minArea:
                logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName)
                skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY)))
                continue

            nameParsed = img_archive.parseFilename(fileName)
            imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
            (imgOrig, imgFilePath) = getArchiveImage(googleServices, downloadDirCache, camArchives, nameParsed['cameraID'], fileName, imgDT)
            if not imgOrig:
                logging.warning('Skip image without archive: %s', fileName)
                skippedArchive.append((rowIndex, fileName, imgDT))
                continue

            # find coordinates for cropping
            if recropType == 'raw':
                cropCoords = [oldCoords]
            elif recropType == 'full': # useful for generating full diffs
                cropCoords = [(0, 0, imgOrig.size[0], imgOrig.size[1])]
            else:
                # crop the full sized image to show just the smoke, but shifted and flipped
                # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
                cropCoords = getCropCoords((minX, minY, maxX, maxY), minSizeX, minSizeY, growRatio, (imgOrig.size[0], imgOrig.size[1]), recropType)
            fullImage = False
            if len(cropCoords) == 1 and cropCoords[0][0] == 0 and cropCoords[0][1] == 0 and cropCoords[0][2] == imgOrig.size[0] and cropCoords[0][3] == imgOrig.size[1]:
                fullImage = True
            assert fullImage or ('minX' not in nameParsed) # disallow crops of crops
            # find extrema (min/max) crop coordinates to crop the original image to speed up processing
            extremaCoords = list(cropCoords[0])
            for coords in cropCoords:
                extremaCoords[0] = min(extremaCoords[0], coords[0])
                extremaCoords[1] = min(extremaCoords[1], coords[1])
                extremaCoords[2] = max(extremaCoords[2], coords[2])
                extremaCoords[3] = max(extremaCoords[3], coords[3])
            imgOrig = imgOrig.crop(extremaCoords)

            # if in subtracted-images mode, download an earlier image and subtract
            if minusMinutes:
                if not img_archive.findCameraInArchive(camArchives, nameParsed['cameraID']):
                    earlierImg = None
                    files = img_archive.cacheFetchRange(downloadDirCache, nameParsed['cameraID'], nameParsed['unixTime'], -minusMinutes*60, -10*minusMinutes*60)
                    if files:
                        earlierImg = findAlignedImage(imgFilePath, files, fullImage)
                    if not files or not earlierImg:
                        logging.warning('Skipping image without prior image: %s', fileName)
                        skippedArchive.append((rowIndex, fileName, None))
                        continue
                else:
                    nameParsed['unixTime'] -= 60*minusMinutes
                    earlierName = img_archive.repackFileName(nameParsed)
                    dt = imgDT - timeGapDelta
                    (earlierImg, _) = getArchiveImage(googleServices, downloadDirCache, camArchives, nameParsed['cameraID'], earlierName, dt)
                    if not earlierImg:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                    logging.warning('Subtracting old image %s', earlierName)

                earlierImg = earlierImg.crop(extremaCoords)
                diffImg = img_archive.diffWithChecks(imgOrig, earlierImg)
                if not diffImg:
                    skippedTiny.append((rowIndex, fileName))
                    continue
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            for newCoords in cropCoords:
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                parsed = img_archive.parseFilename(fileName)
                if not fullImage:
                    parsed['minX'] = newCoords[0]
                    parsed['minY'] = newCoords[1]
                    parsed['maxX'] = newCoords[2]
                    parsed['maxY'] = newCoords[3]
                if minusMinutes:
                    parsed['diffMinutes'] = 1
                cropImgName = img_archive.repackFileName(parsed)
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop((newCoords[0] - extremaCoords[0], newCoords[1] - extremaCoords[1],
                                            newCoords[2] - extremaCoords[0], newCoords[3] - extremaCoords[1]))
                cropped_img.save(cropImgPath, format='JPEG', quality=95)
                if recropType == 'augment':
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = cropImgName.replace('.jpg', '_Flip.jpg')
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG', quality=95)
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
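
getArchiveImage wraps the download-if-missing logic from Example #2. A minimal sketch using the same img_archive and settings modules (cache handling simplified):

import os
from PIL import Image

def getArchiveImage(googleServices, downloadDirCache, camArchives, cameraID, fileName, imgDT):
    # Hypothetical sketch: return (PIL image, local path), downloading the
    # archived image first if it is not already cached locally.
    localFilePath = os.path.join(settings.downloadDir, fileName)
    if not os.path.isfile(localFilePath):
        files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir,
                                            camArchives, cameraID, imgDT, imgDT, 1)
        if not files:
            return (None, None)
        localFilePath = files[0]
    return (Image.open(localFilePath), localFilePath)
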
Example #6
def main():
    reqArgs = [
        ["o", "operation", "add (includes update), delete, list"],
    ]
    optArgs = [
        ["n", "name", "name (ID) of user"],
        ["m", "email", "email address of user"],
        ["p", "phone", "phone number of user"],
        ["s", "startTime", "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"],
        ["e", "endTime", "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    startTime = parseTimeStr(args.startTime) if args.startTime else None
    endTime = parseTimeStr(args.endTime) if args.endTime else None
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                    psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                    psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
    notifications = dbManager.getNotifications()
    activeEmails = dbManager.getNotifications(filterActiveEmail=True)
    activePhones = dbManager.getNotifications(filterActivePhone=True)
    logging.warning('Num all notifications: %d.  Active emails: %d.  Active phones: %d',
                     len(notifications), len(activeEmails), len(activePhones))
    if args.operation == 'list':
        for n in notifications:
            printNoficiation(n)
        return
    assert args.name
    matching = list(filter(lambda x: x['name'] == args.name, notifications))
    logging.warning('Found %d matching for name %s', len(matching), args.name)
    if matching:
        printNoficiation(matching[0])
    if args.operation == 'add':
        assert startTime and endTime
        assert endTime >= startTime
        assert args.email or args.phone
        if not matching:
            # insert new entry
            dbRow = {
                'name': args.name,
            }
            if args.email:
                dbRow['email'] = args.email
                dbRow['EmailStartTime'] = startTime
                dbRow['EmailEndTime'] = endTime
            if args.phone:
                dbRow['phone'] = args.phone
                dbRow['PhoneStartTime'] = startTime
                dbRow['PhoneEndTime'] = endTime
            dbManager.add_data('notifications', dbRow)
            logging.warning('Successfully added notification for %s', args.name)
        else:
            # update existing entry
            if args.email:
                sqlTemplate = """UPDATE notifications SET email='%s',EmailStartTime=%s,EmailEndTime=%s WHERE name = '%s' """
                sqlStr = sqlTemplate % (args.email, startTime, endTime, args.name)
                dbManager.execute(sqlStr)
            if args.phone:
                sqlTemplate = """UPDATE notifications SET phone='%s',PhoneStartTime=%s,PhoneEndTime=%s WHERE name = '%s' """
                sqlStr = sqlTemplate % (args.phone, startTime, endTime, args.name)
                dbManager.execute(sqlStr)
            logging.warning('Successfully updated notification for %s', args.name)
        notifications = dbManager.getNotifications()
        matching = list(filter(lambda x: x['name'] == args.name, notifications))
        printNoficiation(matching[0])
    elif args.operation == 'delete':
        sqlTemplate = """DELETE FROM notifications WHERE name = '%s' """
        sqlStr = sqlTemplate % (args.name)
        dbManager.execute(sqlStr)
    else:
        logging.error('Unexpected operation: %s', args.operation)
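
Note that the UPDATE and DELETE statements above splice user-supplied values into SQL with % formatting, which breaks on embedded quotes and is open to SQL injection. If DbManager.execute can forward bound parameters to the underlying DB-API cursor (an assumption; only the raw-string form appears here), the safer equivalent would be:

# Hypothetical sketch, assuming execute() forwards bound parameters.
sqlStr = 'UPDATE notifications SET email=%s, EmailStartTime=%s, EmailEndTime=%s WHERE name = %s'
dbManager.execute(sqlStr, (args.email, startTime, endTime, args.name))
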
Example #7
def main():
    reqArgs = [
        ["m", "mode", "add, delete, enable, disable, stats, or list"],
    ]
    optArgs = [
        ["c", "cameraID", "ID of the camera (e.g., mg-n-mobo-c)"],
        ["u", "url", "url to get images from camera"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
    cameraInfos = dbManager.get_sources(activeOnly=False)
    logging.warning('Num all cameras: %d', len(cameraInfos))
    logging.warning('Num active cameras: %d', len(list(filter(lambda x: x['dormant'] == 0, cameraInfos))))
    if args.mode == 'list':
        logging.warning('All cameras: %s', list(map(lambda x: x['name'], cameraInfos)))
        return
    matchingCams = list(filter(lambda x: x['name'] == args.cameraID, cameraInfos))
    logging.warning('Found %d matching cams for ID %s', len(matchingCams), args.cameraID)

    if args.mode == 'add':
        if len(matchingCams) != 0:
            logging.error('Camera with ID %s already exists: %s', args.cameraID, matchingCams)
            exit(1)
        dbRow = {
            'name': args.cameraID,
            'url': args.url,
            'dormant': 0,
            'randomID': random.random(),
            'last_date': datetime.datetime.now().isoformat()
        }
        dbManager.add_data('sources', dbRow)
        logging.warning('Successfully added camera %s', args.cameraID)
        return

    if len(matchingCams) != 1:
        logging.error('Cannot find camera with ID %s: %s', args.cameraID, matchingCams)
        exit(1)
    camInfo = matchingCams[0]
    logging.warning('Cam details: %s', camInfo)

    if args.mode == 'delete':
        sqlTemplate = """DELETE FROM sources WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'enable':
        if camInfo['dormant'] == 0:
            logging.error('Camera already enabled: dormant=%d', camInfo['dormant'])
            exit(1)
        sqlTemplate = """UPDATE sources SET dormant=0 WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'disable':
        if camInfo['dormant'] == 1:
            logging.error('Camera already disabled: dormant=%d', camInfo['dormant'])
            exit(1)
        sqlTemplate = """UPDATE sources SET dormant=1 WHERE name = '%s' """
        execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=False)
        return

    if args.mode == 'stats':
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM scores WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent image scanned: %s', getTime(dbResult))
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM detections WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent smoke detection: %s', getTime(dbResult))
        sqlTemplate = """SELECT max(timestamp) as maxtime FROM alerts WHERE CameraName = '%s' """
        dbResult = execCameraSql(dbManager, sqlTemplate, args.cameraID, isQuery=True)
        logging.warning('Most recent smoke alert: %s', getTime(dbResult))
        return

    logging.error('Unexpected mode: %s', args.mode)
    exit(1)
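
execCameraSql fills the camera ID into each template and runs it. A minimal sketch (dbManager.query as the SELECT accessor is an assumption):

def execCameraSql(dbManager, sqlTemplate, cameraID, isQuery):
    # Hypothetical sketch: substitute the camera ID and run the statement.
    sqlStr = sqlTemplate % cameraID
    if isQuery:
        return dbManager.query(sqlStr)  # assumed query API
    dbManager.execute(sqlStr)
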
Example #8
def main():
    optArgs = [
        ["b", "heartbeat", "filename used for heartbeating check"],
        ["c", "collectPositves", "collect positive segments for training data"],
        ["t", "time", "Time breakdown for processing images"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "restrictType", "Only process images from cameras of given type"],
        ["s", "startTime", "(optional) performs search with modifiedTime > startTime"],
        ["e", "endTime", "(optional) performs search with modifiedTime < endTime"],
    ]
    args = collect_args.collectArgs([], optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    # TODO: Fix googleServices auth to resurrect email alerts
    # googleServices = goog_helper.getGoogleServices(settings, args)
    googleServices = None
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                    psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                    psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
    cameras = dbManager.get_sources(activeOnly=True, restrictType=args.restrictType)
    startTimeDT = dateutil.parser.parse(args.startTime) if args.startTime else None
    endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None
    timeRangeSeconds = None
    useArchivedImages = False
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    DetectionPolicyClass = policies.get_policies()[settings.detectionPolicy]
    detectionPolicy = DetectionPolicyClass(args, dbManager, minusMinutes, stateless=useArchivedImages)
    constants = { # dictionary of constants to reduce parameters in various functions
        'args': args,
        'googleServices': googleServices,
        'camArchives': camArchives,
        'dbManager': dbManager,
    }

    if startTimeDT or endTimeDT:
        assert startTimeDT and endTimeDT
        timeRangeSeconds = (endTimeDT-startTimeDT).total_seconds()
        assert timeRangeSeconds > 0
        assert args.collectPositves
        useArchivedImages = True
        random.seed(0)  # fixed seed guarantees the same randomized ordering; should make this an optional argument in the future

    processingTimeTracker = initializeTimeTracker()
    while True:
        classifyImgPath = None
        timeStart = time.time()
        if useArchivedImages:
            (cameraID, timestamp, imgPath, classifyImgPath) = \
                getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes)
        # elif minusMinutes: to be resurrected using archive functionality
        else: # regular (non diff mode), grab image and process
            (cameraID, timestamp, imgPath, md5) = getNextImage(dbManager, cameras)
            classifyImgPath = imgPath
        if not cameraID:
            continue # skip to next camera
        timeFetch = time.time()

        image_spec = [{}]
        image_spec[-1]['path'] = classifyImgPath
        image_spec[-1]['timestamp'] = timestamp
        image_spec[-1]['cameraID'] = cameraID

        detectionResult = detectionPolicy.detect(image_spec)
        timeDetect = time.time()
        if detectionResult['fireSegment']:
            if not isDuplicateAlert(dbManager, cameraID, timestamp):
                alertFire(constants, cameraID, timestamp, imgPath, detectionResult['fireSegment'])
        deleteImageFiles(imgPath, imgPath)
        if args.heartbeat:
            heartBeat(args.heartbeat)

        timePost = time.time()
        updateTimeTracker(processingTimeTracker, timePost - timeStart)
        if args.time:
            if not detectionResult['timeMid']:
                detectionResult['timeMid'] = timeDetect
            logging.warning('Timings: fetch=%.2f, detect0=%.2f, detect1=%.2f post=%.2f',
                timeFetch-timeStart, detectionResult['timeMid']-timeFetch, timeDetect-detectionResult['timeMid'], timePost-timeDetect)
        # free all memory for current iteration and trigger GC to prevent memory growth
        detectionResult = None
        gc.collect()
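
initializeTimeTracker and updateTimeTracker maintain the per-image timing stats used in the loop above. A minimal sketch of one plausible implementation:

import logging
import time

def initializeTimeTracker():
    # Hypothetical sketch: track a running average of per-image processing time.
    return {'totalTime': 0.0, 'numSamples': 0, 'lastLog': time.time()}

def updateTimeTracker(tracker, seconds):
    tracker['totalTime'] += seconds
    tracker['numSamples'] += 1
    if time.time() - tracker['lastLog'] > 60:  # log at most once per minute
        logging.warning('Average processing time: %.2f sec over %d images',
                        tracker['totalTime'] / tracker['numSamples'], tracker['numSamples'])
        tracker['lastLog'] = time.time()
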
Example #9
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save diff image segments"],
        [
            "i", "inputDir",
            "input local directory containing nonSmoke image segments"
        ],
        [
            "m", "minusMinutes",
            "subtract images from given number of minutes ago"
        ],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["f", "fullImages", "(optional) process full images vs cropped"],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes)
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    fullImages = bool(args.fullImages)

    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    timeGapDelta = datetime.timedelta(seconds=60 * minusMinutes)
    skippedBadParse = []
    skippedArchive = []
    imageFileNames = sorted(os.listdir(args.inputDir))
    rowIndex = -1
    for fileName in imageFileNames:
        rowIndex += 1

        if rowIndex < startRow:
            continue
        if rowIndex > endRow:
            print('Reached end row', rowIndex, endRow)
            break

        if fileName.startswith(('v2_', 'v3_')) or ('mobo-c' not in fileName):
            continue  # skip replicated files and non mobo-c cameras
        logging.warning('Processing row %d, file: %s', rowIndex, fileName)
        parsedName = img_archive.parseFilename(fileName)

        if (not parsedName) or parsedName['diffMinutes'] or (
            ('minX' not in parsedName) and not fullImages):
            logging.warning(
                'Skipping file with unexpected parsed data: %s, %s', fileName,
                str(parsedName))
            skippedBadParse.append((rowIndex, fileName, parsedName))
            continue  # skip files without crop info or with diff
        parsedName['unixTime'] -= 60 * minusMinutes
        earlierName = img_archive.repackFileName(parsedName)
        earlierImgPath = os.path.join(settings.downloadDir, earlierName)
        if not os.path.isfile(earlierImgPath):  # if file has not been downloaded by a previous iteration
            dt = datetime.datetime.fromtimestamp(parsedName['unixTime'])
            dt -= timeGapDelta
            files = img_archive.getHpwrenImages(googleServices, settings,
                                                settings.downloadDir,
                                                camArchives,
                                                parsedName['cameraID'], dt, dt,
                                                1)
            if files:
                earlierImgPath = files[0]
            else:
                logging.warning('Skipping image without prior image: %s, %s',
                                str(dt), fileName)
                skippedArchive.append((rowIndex, fileName, dt))
                continue
        logging.warning('Subtracting old image %s', earlierImgPath)
        imgOrig = Image.open(os.path.join(args.inputDir, fileName))
        earlierImg = Image.open(earlierImgPath)
        if fullImages:
            diffImg = img_archive.diffSmoothImages(imgOrig, earlierImg)
        else:
            croppedEarlyImg = earlierImg.crop(
                (parsedName['minX'], parsedName['minY'], parsedName['maxX'],
                 parsedName['maxY']))
            diffImg = img_archive.diffImages(imgOrig, croppedEarlyImg)
        extremas = diffImg.getextrema()
        if any(bandMin == 128 or bandMax == 128 for (bandMin, bandMax) in extremas[:3]):
            logging.warning('Skipping no diffs %s, name=%s', str(extremas), fileName)
            skippedBadParse.append((rowIndex, fileName, extremas))
            continue
        parsedName['diffMinutes'] = minusMinutes
        diffImgPath = os.path.join(args.outputDir,
                                   img_archive.repackFileName(parsedName))
        logging.warning('Saving new image %s', diffImgPath)
        diffImg.save(diffImgPath, format='JPEG', quality=95)
    logging.warning('Skipped bad parse %d, %s', len(skippedBadParse),
                    str(skippedBadParse))
    logging.warning('Skipped images without archives %d, %s',
                    len(skippedArchive), str(skippedArchive))
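
The 128 checks above assume img_archive.diffImages produces differences re-centered at mid-gray. A minimal numpy sketch of that convention (the real helper may smooth or scale differently):

import numpy as np
from PIL import Image

def diffImages(imgA, imgB):
    # Hypothetical sketch: halve the signed difference and re-center at 128,
    # so identical pixels map to mid-gray, and getextrema() returning 128 as
    # a band's min or max means no pixel moved in that direction.
    arrA = np.asarray(imgA, dtype=np.int16)
    arrB = np.asarray(imgB, dtype=np.int16)
    diff = ((arrA - arrB) // 2 + 128).clip(0, 255).astype(np.uint8)
    return Image.fromarray(diff)
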
Example #10
def main():
    reqArgs = [
        [
            "s", "startTime",
            "starting date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"
        ],
    ]
    optArgs = [
        ["c", "cameraID", "ID (code name) of camera"],
        ['n', 'longitude', 'longitude of fire', float],
        ['t', 'latitude', 'latitude of fire', float],
        [
            'm', 'maxDistance',
            '(optional default=20) max distance in miles from fire', float
        ],
        [
            "e", "endTime",
            "ending date and time in ISO format (e.g., 2019-02-22T14:34:56 in Pacific time zone)"
        ],
        [
            "d", "durationMinutes",
            "alternative spec for endTime as start + duration", int
        ],
        [
            "g", "gapMinutes",
            "override default of 1 minute gap between images to download"
        ],
        ["o", "outputDir", "directory to save the output image"],
    ]

    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    googleServices = goog_helper.getGoogleServices(settings, args)
    gapMinutes = int(args.gapMinutes) if args.gapMinutes else 1
    distanceMiles = float(args.maxDistance if args.maxDistance else 20)
    outputDir = args.outputDir if args.outputDir else settings.downloadDir
    startTimeDT = dateutil.parser.parse(args.startTime)
    if args.endTime:
        endTimeDT = dateutil.parser.parse(args.endTime)
    elif args.durationMinutes:
        durationDelta = datetime.timedelta(seconds=60 * args.durationMinutes)
        endTimeDT = startTimeDT + durationDelta
    else:
        endTimeDT = startTimeDT
    assert startTimeDT.year == endTimeDT.year
    assert startTimeDT.month == endTimeDT.month
    assert startTimeDT.day == endTimeDT.day
    assert endTimeDT >= startTimeDT
    if args.cameraID:
        assert (not args.latitude) and (not args.longitude)
        cameras = [args.cameraID]
    else:
        assert args.latitude and args.longitude
        dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                         psqlHost=settings.psqlHost,
                                         psqlDb=settings.psqlDb,
                                         psqlUser=settings.psqlUser,
                                         psqlPasswd=settings.psqlPasswd)
        cameras = getNearbyCameras(dbManager, args.latitude, args.longitude,
                                   distanceMiles)
        logging.warning('Matched cameras: %s', cameras)

    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    allFiles = []
    for cameraID in cameras:
        camFiles = img_archive.getHpwrenImages(googleServices, settings,
                                               outputDir, camArchives,
                                               cameraID, startTimeDT,
                                               endTimeDT, gapMinutes)
        if camFiles:
            allFiles += camFiles
    if allFiles:
        logging.warning('Found %d files.', len(allFiles))
    else:
        logging.error('No files matched')
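
getNearbyCameras filters the camera table by distance from the given fire location. A minimal haversine sketch (dbManager.getCameraLocations returning (id, lat, long) tuples is an assumed accessor):

import math

def getNearbyCameras(dbManager, latitude, longitude, distanceMiles):
    # Hypothetical sketch: keep cameras within distanceMiles (great-circle distance).
    nearby = []
    for (cameraID, camLat, camLong) in dbManager.getCameraLocations():
        dLat = math.radians(camLat - latitude)
        dLong = math.radians(camLong - longitude)
        a = (math.sin(dLat / 2) ** 2 +
             math.cos(math.radians(latitude)) * math.cos(math.radians(camLat)) *
             math.sin(dLong / 2) ** 2)
        if 2 * 3959 * math.asin(math.sqrt(a)) < distanceMiles:  # earth radius ~3959 miles
            nearby.append(cameraID)
    return nearby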