Example #1
def getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes):
    """Get random images from HPWREN archive matching given constraints and optionally subtract them

    Args:
        constants (dict): "global" constants
        cameras (list): list of cameras
        startTimeDT (datetime): starting time of time range
        timeRangeSeconds (int): number of seconds in time range
        minusMinutes (int): number of desired minutes between images to subtract

    Returns:
        Tuple containing camera name, current timestamp, filepath of regular image, and filepath of difference image
    """
    if getArchivedImages.tmpDir is None:
        getArchivedImages.tmpDir = tempfile.TemporaryDirectory()
        logging.warning('TempDir %s', getArchivedImages.tmpDir.name)

    cameraID = cameras[int(len(cameras)*random.random())]['name']
    timeDT = startTimeDT + datetime.timedelta(seconds = random.random()*timeRangeSeconds)
    if minusMinutes:
        prevTimeDT = timeDT + datetime.timedelta(seconds = -60 * minusMinutes)
    else:
        prevTimeDT = timeDT
    files = img_archive.getHpwrenImages(constants['googleServices'], settings, getArchivedImages.tmpDir.name,
                                        constants['camArchives'], cameraID, prevTimeDT, timeDT, minusMinutes or 1)
    # logging.warning('files %s', str(files))
    if not files:
        return (None, None, None, None)
    if minusMinutes:
        if len(files) > 1:
            if files[0] >= files[1]: # files[0] is supposed to be earlier than files[1]
                logging.warning('unexpected file order %s', str(files))
                for file in files:
                    os.remove(file)
                return (None, None, None, None)
            imgDiffPath = genDiffImage(files[1], files[0], minusMinutes)
            os.remove(files[0]) # no longer needed
            parsedName = img_archive.parseFilename(files[1])
            return (cameraID, parsedName['unixTime'], files[1], imgDiffPath)
        else:
            logging.warning('unexpected file count %s', str(files))
            for file in files:
                os.remove(file)
            return (None, None, None, None)
    elif len(files) > 0:
        parsedName = img_archive.parseFilename(files[0])
        return (cameraID, parsedName['unixTime'], files[0], files[0])
    return (None, None, None, None)
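
# The random sampling at the top of getArchivedImages (uniform random camera,
# uniform random time inside the range) can be illustrated standalone. This is
# a hypothetical sketch, not project code; random.choice is equivalent to the
# cameras[int(len(cameras)*random.random())] indexing used above.
import datetime
import random

def pickRandomCameraAndTime(cameras, startTimeDT, timeRangeSeconds):
    cameraID = random.choice(cameras)['name']  # uniform random camera
    # uniform random offset within the requested time range
    timeDT = startTimeDT + datetime.timedelta(seconds=random.random() * timeRangeSeconds)
    return (cameraID, timeDT)

# illustrative camera list and a one-hour range
print(pickRandomCameraAndTime([{'name': 'cam-a'}, {'name': 'cam-b'}],
                              datetime.datetime(2021, 6, 1, 12), 3600))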
Example #2
def parseToCsv(inputFile, output, startRow, endRow):
    skipped=[]
    outFile = open(output, 'w')
    with open(inputFile, 'r') as myfile:
        for (rowIndex, line) in enumerate(myfile):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                logging.warning('Reached end row %d', rowIndex)
                break

            # print("raw", line)
            parsed = img_archive.parseFilename(line)
            if not parsed:
                skipped.append((rowIndex, line.rstrip()))
                continue
            # print("parsed", parsed)
            outArray = [
                line.rstrip(),
                str(parsed['minX']),
                str(parsed['minY']),
                str(parsed['maxX']),
                str(parsed['maxY']),
            ]
            del parsed['minX']
            outArray.append(img_archive.repackFileName(parsed))
            # print("parsed2", ','.join(outArray))
            outFile.write(','.join(outArray) + '\n')

    outFile.close()
    print('Skipped:', skipped)
Example #3
    def detect(self,
               image_spec,
               checkShifts=False,
               silent=False,
               fetchDiff=None):
        last_image_spec = image_spec[-1]
        timestamp = last_image_spec['timestamp']
        cameraID = last_image_spec['cameraID']
        parsedName = img_archive.parseFilename(last_image_spec['path'])
        base_image_spec = image_spec
        diffImgPath = None
        if not parsedName['diffMinutes']:
            outputDirName = self.outputDirObj.name
            diffImg = fetchDiff(outputDirName)
            if not diffImg:
                logging.warning('Failed to fetch diff image for %s',
                                last_image_spec['path'])
                return {'fireSegment': None, 'timeMid': 0}
            diffImgPath = img_archive.getImgPath(outputDirName,
                                                 cameraID,
                                                 timestamp,
                                                 diffMinutes=1)
            diffImg.save(diffImgPath, format='JPEG', quality=95)
            last_image_spec = last_image_spec.copy()
            last_image_spec['path'] = diffImgPath
            base_image_spec = [last_image_spec]

        detectionResult = self.basePolicy.detect(base_image_spec,
                                                 checkShifts=checkShifts,
                                                 silent=silent)
        if diffImgPath:
            os.remove(diffImgPath)
        return detectionResult
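
# The detect() method above (and the classifier below) consume image_spec: a
# list of dicts where the last entry describes the most recent frame. Field
# names are taken from the surrounding code; the values here are illustrative.
image_spec_example = [{
    'path': '/tmp/cam-a__2021-06-01T12_00_00.jpg',  # hypothetical local file path
    'timestamp': 1622574000,                        # unix time the image was taken
    'cameraID': 'cam-a',
    # optional crop overrides honored by the policies, e.g. to trim sky/foreground:
    'startY': 140,
    'endY': -140,
}]
# detectionResult = detectionPolicy.detect(image_spec_example, checkShifts=False, silent=True)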
def classifyImages(detectionPolicy, checkShifts, imageList, className,
                   outFile):
    count = 0
    positives = []
    negatives = []
    mixed = []
    for image in imageList:
        t0 = time.time()
        ppath = pathlib.PurePath(image)
        nameParsed = img_archive.parseFilename(image)

        image_spec = [{}]
        image_spec[-1]['path'] = image
        image_spec[-1]['timestamp'] = nameParsed['unixTime']
        image_spec[-1]['cameraID'] = nameParsed['cameraID']

        detectionResult = detectionPolicy.detect(image_spec,
                                                 checkShifts=checkShifts,
                                                 silent=True)
        # logging.warning('dr %s', str(detectionResult))
        image_spec[-1]['startY'] = 140
        image_spec[-1]['endY'] = -140
        detectionResultOffset = detectionPolicy.detect(image_spec,
                                                       checkShifts=checkShifts,
                                                       silent=True)
        # happens with tiny images
        if len(detectionResultOffset['segments']) == 0:
            detectionResultOffset = detectionResult
        scores = [
            detectionResult['segments'][0]['score'],
            detectionResultOffset['segments'][0]['score']
        ]
        if detectionResult['fireSegment'] and detectionResultOffset[
                'fireSegment']:
            status = 'smoke'
            positives.append(ppath.name)
        elif (not detectionResult['fireSegment']) and (
                not detectionResultOffset['fireSegment']):
            status = 'other'
            negatives.append(ppath.name)
        else:
            status = 'mixed'
            mixed.append(ppath.name)

        t2 = time.time()
        count += 1
        sys.stdout.write('\r>> Calculated %d/%d of class %s' %
                         (count, len(imageList), className))
        # logging.warning('Timing %f: %f, %f' % (t2-t0, t1-t0, t2-t1))
        sys.stdout.flush()
        outFile.write('%s file %s classified as %s: %s\n' %
                      (className, ppath.name, status, str(scores)))

        detectionResult = None
        detectionResultOffset = None
        gc.collect()
    sys.stdout.write('\n')
    sys.stdout.flush()
    return (positives, negatives, mixed)
Example #5
def processFolder(imgDirectory, googleServices, notes):
    temporaryDir = tempfile.TemporaryDirectory()
    imageFileNames = os.listdir(imgDirectory)
    # print('images', len(imageFileNames), imageFileNames)
    # discard files that don't match the expected file name pattern (e.g. .DS_Store)
    imageFileNames = list(filter(img_archive.parseFilename, imageFileNames))
    # print('images2', len(imageFileNames), imageFileNames)
    # we want to process in time order, so first create tuples with associated time
    tuples = list(map(lambda x: (x, img_archive.parseFilename(x)['unixTime']), imageFileNames))
    for (imgName, _unixTime) in sorted(tuples, key=lambda x: x[1]):
        imgPath = os.path.join(imgDirectory, imgName)
        nameParsed = img_archive.parseFilename(imgName)
        assert nameParsed['cameraID']
        result = crop_single.imageDisplay(imgPath, temporaryDir.name)
        if len(result) > 0:
            for entry in result:
                print('crop data', entry['coords'])
                uploadCoords(entry['coords'], imgName, googleServices, notes)
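
# The time-ordered iteration above (build (name, unixTime) tuples, then sort by
# the time element) can be shown standalone. getUnixTime below is a hypothetical
# stand-in for img_archive.parseFilename(x)['unixTime'].
def getUnixTime(name):
    return int(name.split('_')[-1].split('.')[0])

names = ['cam-a_1000.jpg', 'cam-a_500.jpg', 'cam-a_750.jpg']
for imgName in sorted(names, key=getUnixTime):
    print(imgName)  # oldest first: cam-a_500.jpg, cam-a_750.jpg, cam-a_1000.jpg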
Example #6
def genAnnotatedImages(constants, cameraID, timestamp, imgPath, fireSegment):
    """Generate annotated images (one cropped video, and other full size image)

    Args:
        constants (dict): "global" constants
        cameraID (str): camera name
        timestamp (int): time.time() value when image was taken
        imgPath (str): filepath of the image
        fireSegment (dict): dict describing segment with fire

    Returns:
        Tuple (str, str): filepaths of cropped and full size annotated images
    """
    filePathParts = os.path.splitext(imgPath)
    img = Image.open(imgPath)
    x0 = fireSegment['MinX'] if 'MinX' in fireSegment else 0
    y0 = fireSegment['MinY'] if 'MinY' in fireSegment else 0
    x1 = fireSegment['MaxX'] if 'MaxX' in fireSegment else img.size[0]
    y1 = fireSegment['MaxY'] if 'MaxY' in fireSegment else img.size[1]

    (cropX0, cropX1) = stretchBounds(x0, x1, img.size[0])
    (cropY0, cropY1) = stretchBounds(y0, y1, img.size[1])
    cropCoords = (cropX0, cropY0, cropX1, cropY1)
    # get images spanning a few minutes so reviewers can evaluate based on progression
    startTimeDT = datetime.datetime.fromtimestamp(timestamp - 5*60)
    endTimeDT = datetime.datetime.fromtimestamp(timestamp - 1*60)

    with tempfile.TemporaryDirectory() as tmpDirName:
        oldImages = img_archive.getHpwrenImages(constants['googleServices'], settings, tmpDirName,
                                                constants['camArchives'], cameraID, startTimeDT, endTimeDT, 1)
        imgSequence = oldImages or []
        imgSequence.append(imgPath)
        for (i, imgFile) in enumerate(imgSequence):
            imgParsed = img_archive.parseFilename(imgFile)
            cropName = 'img' + ("%03d" % i) + filePathParts[1]
            croppedPath = os.path.join(tmpDirName, cropName)
            imgSeq = Image.open(imgFile)
            croppedImg = imgSeq.crop(cropCoords)
            drawFireBox(croppedImg, croppedPath, fireSegment, x0 - cropX0, y0 - cropY0, x1 - cropX0, y1 - cropY0, timestamp=imgParsed['unixTime'])
            imgSeq.close()
            croppedImg.close()
        # now make movie from this sequence of cropped images
        moviePath = filePathParts[0] + '_AnnCrop_' + 'x'.join(list(map(lambda x: str(x), cropCoords))) + '.mp4'
        (
            ffmpeg.input(os.path.join(tmpDirName, 'img%03d.jpg'), framerate=1)
                .filter('fps', fps=25, round='up')
                .output(moviePath, pix_fmt='yuv420p').run()
        )

    annotatedPath = filePathParts[0] + '_Ann' + filePathParts[1]
    drawFireBox(img, annotatedPath, fireSegment, x0, y0, x1, y1)
    img.close()

    return (moviePath, annotatedPath)
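
# stretchBounds is not defined in this excerpt. A plausible, purely illustrative
# sketch of such a helper: widen the [minVal, maxVal] span (so reviewers see
# context around the fire box) while clamping to the image dimension.
def stretchBoundsSketch(minVal, maxVal, limit, growFactor=2):
    center = (minVal + maxVal) / 2
    newSize = (maxVal - minVal) * growFactor
    newMin = max(0, int(center - newSize / 2))
    newMax = min(limit, int(center + newSize / 2))
    return (newMin, newMax)

print(stretchBoundsSketch(100, 200, 1920))  # -> (50, 250)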
Example #7
def main():
    reqArgs = [
        ["i", "image", "filename of the image"],
    ]
    optArgs = [["m", "model", "model file generated during retraining"],
               ["y", "startY", "(optional) startY override", int],
               ["z", "endY", "(optional) endY override", int],
               ["x", "startX", "(optional) startY override", int],
               ["e", "endX", "(optional) endY override", int],
               [
                   "d", "display",
                   "(optional) specify any value to display image and boxes"
               ]]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    model_file = args.model if args.model else settings.model_file
    DetectionPolicyClass = policies.get_policies()[settings.detectionPolicy]
    detectionPolicy = DetectionPolicyClass(args,
                                           None,
                                           stateless=True,
                                           modelLocation=model_file)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    nameParsed = img_archive.parseFilename(args.image)
    image_spec = [{}]
    image_spec[-1]['path'] = args.image
    image_spec[-1]['timestamp'] = nameParsed['unixTime']
    image_spec[-1]['cameraID'] = nameParsed['cameraID']
    image_spec[-1]['heading'] = 0  # fake heading
    if args.startY:
        image_spec[-1]['startY'] = args.startY
    if args.endY:
        image_spec[-1]['endY'] = args.endY
    if args.startX:
        image_spec[-1]['startX'] = args.startX
    if args.endX:
        image_spec[-1]['endX'] = args.endX
    detectionResult = detectionPolicy.detect(image_spec, checkShifts=True)

    for segmentInfo in detectionResult['segments']:
        # print(segmentInfo['imgPath'], segmentInfo['score'])
        print(segmentInfo['MinX'], segmentInfo['MinY'], segmentInfo['score'])
    if args.display:
        imgOrig = Image.open(args.image)
        drawBoxesAndScores(imgOrig, detectionResult['segments'])
        displayImageWithScores(imgOrig, [])
Example #8
def genDiffImage(imgPath, earlierImgPath, minusMinutes):
    """Subtract the two given images and store result in new difference image file

    Args:
        imgPath (str): filepath of the current image (to subtract from)
        earlierImgPath (str): filepath of the earlier image (value to subtract)
        minusMinutes (int): number of minutes separating subtracted images

    Returns:
        file path to the difference image
    """
    imgA = Image.open(imgPath)
    imgB = Image.open(earlierImgPath)
    imgDiff = img_archive.diffImages(imgA, imgB)
    parsedName = img_archive.parseFilename(imgPath)
    parsedName['diffMinutes'] = minusMinutes
    imgDiffName = img_archive.repackFileName(parsedName)
    ppath = pathlib.PurePath(imgPath)
    imgDiffPath = os.path.join(str(ppath.parent), imgDiffName)
    imgDiff.save(imgDiffPath, format='JPEG')
    return imgDiffPath
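
# img_archive.diffImages itself is not shown here. Judging by the extrema checks
# elsewhere in these examples (a band min/max of exactly 128 means "no change"),
# the subtraction appears to be re-centered around mid-gray. A Pillow-based
# sketch of that idea (an assumption, not the project's actual implementation):
from PIL import Image, ImageChops

def diffImagesSketch(imgA, imgB):
    # (imgA - imgB) offset by 128 so both darkening and brightening survive
    # in an unsigned 8-bit JPEG
    return ImageChops.subtract(imgA, imgB, scale=1.0, offset=128)

# usage with illustrative paths:
# imgDiff = diffImagesSketch(Image.open('current.jpg'), Image.open('earlier.jpg'))
# imgDiff.save('current_Diff1.jpg', format='JPEG')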
Example #9
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save diff image segments"],
        [
            "i", "inputDir",
            "input local directory containing nonSmoke image segments"
        ],
        [
            "m", "minusMinutes",
            "subtract images from given number of minutes ago"
        ],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
    ]
    args = collect_args.collectArgs(
        reqArgs,
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes)
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9

    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    timeGapDelta = datetime.timedelta(seconds=60 * minusMinutes)
    skippedBadParse = []
    skippedArchive = []
    imageFileNames = sorted(os.listdir(args.inputDir))
    rowIndex = -1
    for fileName in imageFileNames:
        rowIndex += 1

        if rowIndex < startRow:
            continue
        if rowIndex > endRow:
            print('Reached end row', rowIndex, endRow)
            break

        if fileName.startswith(('v2_', 'v3_')) or ('mobo-c' not in fileName):
            continue  # skip replicated files and non mobo-c cameras
        logging.warning('Processing row %d, file: %s', rowIndex, fileName)
        parsedName = img_archive.parseFilename(fileName)

        if (not parsedName) or parsedName['diffMinutes'] or (
                'minX' not in parsedName):
            logging.warning(
                'Skipping file with unexpected parsed data: %s, %s', fileName,
                str(parsedName))
            skippedBadParse.append((rowIndex, fileName, parsedName))
            continue  # skip files without crop info or with diff
        parsedName['unixTime'] -= 60 * minusMinutes
        earlierName = img_archive.repackFileName(parsedName)
        earlierImgPath = os.path.join(settings.downloadDir, earlierName)
        # if file has not been downloaded by a previous iteration
        if not os.path.isfile(earlierImgPath):
            dt = datetime.datetime.fromtimestamp(parsedName['unixTime'])
            dt -= timeGapDelta
            files = img_archive.getHpwrenImages(googleServices, settings,
                                                settings.downloadDir,
                                                camArchives,
                                                parsedName['cameraID'], dt, dt,
                                                1)
            if files:
                earlierImgPath = files[0]
            else:
                logging.warning('Skipping image without prior image: %s, %s',
                                str(dt), fileName)
                skippedArchive.append((rowIndex, fileName, dt))
                continue
        logging.warning('Subtracting old image %s', earlierImgPath)
        earlierImg = Image.open(earlierImgPath)
        croppedEarlyImg = earlierImg.crop(
            (parsedName['minX'], parsedName['minY'], parsedName['maxX'],
             parsedName['maxY']))

        imgOrig = Image.open(os.path.join(args.inputDir, fileName))
        diffImg = img_archive.diffImages(imgOrig, croppedEarlyImg)
        extremas = diffImg.getextrema()
        if any(value == 128 for band in extremas[:3] for value in band):
            logging.warning('Skipping no diffs %s, name=%s', str(extremas),
                            fileName)
            skippedBadParse.append((rowIndex, fileName, extremas))
            continue
        parsedName['diffMinutes'] = minusMinutes
        diffImgPath = os.path.join(args.outputDir,
                                   img_archive.repackFileName(parsedName))
        logging.warning('Saving new image %s', diffImgPath)
        diffImg.save(diffImgPath, format='JPEG')
    logging.warning('Skipped bad parse %d, %s', len(skippedBadParse),
                    str(skippedBadParse))
    logging.warning('Skipped images without archives %d, %s',
                    len(skippedArchive), str(skippedArchive))
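
# Self-contained illustration of the "no diffs" test above: for a diff image
# centered at mid-gray, a band whose min or max is exactly 128 saw no change in
# that direction, so the crop is considered uninformative and skipped.
from PIL import Image

diffImg = Image.new('RGB', (50, 50), (128, 128, 128))  # a perfectly unchanged diff
extremas = diffImg.getextrema()  # ((128, 128), (128, 128), (128, 128))
noChange = any(value == 128 for band in extremas[:3] for value in band)
print(extremas, noChange)  # True -> this row would be appended to skippedBadParse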
Example #10
def main():
    reqArgs = [
        ["o", "outputFile", "output file name"],
        ["i", "inputCsv", "csvfile with fire/detection data"],
        ['m', "mode", "mode: votepoly or camdir or pruned"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    mode = args.mode
    assert mode in ('votepoly', 'camdir', 'pruned')
    outFile = open(args.outputFile, 'w')
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)

    lastCam = None
    lastTime = None
    random.seed(0)
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            if mode == 'votepoly':
                [cameraID, timestamp, score, polygon, sourcePolygons, isRealFire] = csvRow[:6]
                timestamp = int(timestamp)
                logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
                if cameraID == lastCam and timestamp == lastTime:
                    logging.warning('Duplicate row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
                lastCam = cameraID
                lastTime = timestamp
                centroid = getCentroid(polygon)
                if timestamp < 1607786165: #sourcePolygons didn't exist before this
                    if isRealFire:
                        numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
                    else:
                        numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyOther))
                else:
                    numPolys = 1
                    if sourcePolygons:
                        sourcePolygonsArr = json.loads(sourcePolygons)
                        numPolys = len(sourcePolygonsArr)
                cameraID = patchCameraId(cameraID)
                camInfo = dbManager.getCameraMapLocation(cameraID)
                if camInfo is None:
                    logging.warning('Skipping row with camera without meta %s', cameraID)
                    continue
                (mapImgGCS, camLatitude, camLongitude) = camInfo
            else:
                if mode == 'camdir':
                    [cameraID, isoTime, direction] = csvRow[:3]
                    logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, isoTime)
                    timestamp = time.mktime(dateutil.parser.parse(isoTime).timetuple())
                    if 'center left' in direction:
                        offset = -20
                    elif 'center right' in direction:
                        offset = 20
                    elif 'center' in direction:
                        offset = 0
                    elif 'left' in direction:
                        offset = -40
                    elif 'right' in direction:
                        offset = 40
                    else:
                        logging.error('Unexpected dir row: %d, dir: %s', rowIndex, direction)
                        continue
                elif mode == 'pruned':
                    [_cropName, minX, _minY, maxX, _maxY, fileName] = csvRow[:6]
                    minX = int(minX)
                    maxX = int(maxX)
                    nameParsed = img_archive.parseFilename(fileName)
                    cameraID = nameParsed['cameraID']
                    cameraID = patchCameraId(cameraID)
                    timestamp = nameParsed['unixTime']
                    dateStr = nameParsed['isoStr'][:nameParsed['isoStr'].index('T')]
                    if dateStr == lastTime and cameraID == lastCam:
                        # logging.warning('Skip same fire. row %s', rowIndex)
                        continue
                    lastCam = cameraID
                    lastTime = dateStr
                    localFilePath = os.path.join(settings.downloadDir, fileName)
                    if not os.path.isfile(localFilePath):
                        logging.warning('Skip missing file %s, row %s', fileName, rowIndex)
                        continue
                    img = Image.open(localFilePath)
                    degreesInView = img_archive.getCameraFov(cameraID)
                    centerX = (minX + maxX) / 2
                    offset = centerX / img.size[0] * degreesInView - degreesInView/2
                    img.close()
                (mapImgGCS, camLatitude, camLongitude) = dbManager.getCameraMapLocation(cameraID)
                camHeading = img_archive.getHeading(cameraID)
                heading = (camHeading + offset) % 360
                angle = 90 - heading
                distanceDegrees = 0.2 # approx 14 miles
                fireLat = camLatitude + math.sin(angle*math.pi/180)*distanceDegrees
                fireLong = camLongitude + math.cos(angle*math.pi/180)*distanceDegrees
                centroid = (fireLat, fireLong)
                score = getRandInterpolatedVal(settings.percentilesScoreFire)
                numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
                isRealFire = 1
                logging.warning('Processing row: %d, heading: %s, centroid: %s, score: %s, numpoly: %s', rowIndex, heading, centroid, score, numPolys)
            if not keepData(score, centroid, numPolys, isRealFire):
                logging.warning('Skipping Mexico fire row %d, camera %s', rowIndex, cameraID)
                continue
            (weatherCentroid, weatherCamera) = weather.getWeatherData(dbManager, cameraID, timestamp, centroid, (camLatitude, camLongitude))
            if not weatherCentroid:
                logging.warning('Skipping row %d', rowIndex)
                continue
            # logging.warning('Weather %s', weatherCentroid)
            outputWithWeather(outFile, score, timestamp, centroid, numPolys, weatherCentroid, weatherCamera, isRealFire)

            logging.warning('Processed row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
    outFile.close()
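
# Worked, standalone version of the heading-to-location step above: convert a
# compass heading (degrees clockwise from north) plus an in-image offset into a
# point about distanceDegrees away from the camera. Like the code above, this
# treats a degree of longitude as equal to a degree of latitude, which is only
# an approximation. Function name and sample values are illustrative.
import math

def projectFireLocation(camLatitude, camLongitude, camHeading, offset, distanceDegrees=0.2):
    heading = (camHeading + offset) % 360  # compass heading toward the smoke
    angle = 90 - heading                   # convert to math convention (CCW from east)
    fireLat = camLatitude + math.sin(angle * math.pi / 180) * distanceDegrees
    fireLong = camLongitude + math.cos(angle * math.pi / 180) * distanceDegrees
    return (fireLat, fireLong)

print(projectFireLocation(32.84, -116.42, 90, 20))  # camera looking east, smoke 20 degrees right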
Example #11
def classifyImages(detectionPolicy, imageList, className, outFile):
    count = 0
    image_name = []
    crop_name = []
    score_name = []
    class_name = []
    positives = []
    negatives = []
    try:
        for image in imageList:
            t0 = time.time()
            isPositive = False
            ppath = pathlib.PurePath(image)
            nameParsed = img_archive.parseFilename(image)

            image_spec = [{}]
            image_spec[-1]['path'] = image
            image_spec[-1]['timestamp'] = nameParsed['unixTime']
            image_spec[-1]['cameraID'] = nameParsed['cameraID']

            try:
                detectionResult = detectionPolicy.detect(image_spec)
                if detectionResult['fireSegment']:
                    isPositive = True
                if detectionResult['segments']:
                    segments = detectionResult['segments']
                    for i in range(len(segments)):
                        image_name += [ppath.name]
                        crop_name += [segments[i]['coordStr']]
                        # for testing
                        # segments[i]['score'] = random.random()*.55
                        score_name += [segments[i]['score']]
                        class_name += [className]
                        if segments[i]['score'] > .5:
                            isPositive = True

            except Exception as e:
                logging.error('FAILURE processing %s. Count: %d, Error: %s',
                              image, count, str(e))
                test_data = [image_name, crop_name, score_name, class_name]
                np.savetxt(outFile + '-ERROR-' + image + '.txt',
                           np.transpose(test_data),
                           fmt="%s")
                sys.exit()

            t2 = time.time()

            count += 1
            if isPositive:
                positives.append(ppath.name)
            else:
                negatives.append(ppath.name)
            sys.stdout.write('\r>> Calculated %d/%d of class %s' %
                             (count, len(imageList), className))
            # logging.warning('Timing %f: %f, %f' % (t2-t0, t1-t0, t2-t1))
            sys.stdout.flush()
    except Exception as e:
        logging.error('Failure after %d images of class %s. Error: %s', count,
                      className, str(e))
        try:
            test_data = [image_name, crop_name, score_name, class_name]
            np.savetxt(outFile + '-ERROR.txt',
                       np.transpose(test_data),
                       fmt="%s")
        except Exception as e:
            logging.error('Total Failure, Moving On. Error: %s', str(e))
    sys.stdout.write('\n')
    sys.stdout.flush()
    return (image_name, crop_name, score_name, class_name, positives,
            negatives)
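
# The error handlers above checkpoint the accumulated parallel lists with
# numpy.savetxt. Standalone illustration (file name and values are illustrative):
import numpy as np

image_name = ['a.jpg', 'b.jpg']
crop_name = ['0x0x299x299', '50x50x349x349']
score_name = [0.91, 0.12]
class_name = ['smoke', 'smoke']
test_data = [image_name, crop_name, score_name, class_name]
# np.transpose turns the 4 parallel lists into one row per image;
# fmt="%s" lets strings and floats share the same text file
np.savetxt('scores-checkpoint.txt', np.transpose(test_data), fmt="%s")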
Example #12
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save images segments"],
        ["i", "inputCsv", "csvfile with contents of Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["d", "display", "(optional) specify any value to display image and boxes"],
        ["x", "minDiffX", "(optional) override default minDiffX of 299"],
        ["y", "minDiffY", "(optional) override default minDiffY of 299"],
        ["a", "minArea", "(optional) override default throw away areas < 1/100 of 299x299"],
        ["t", "throwSize", "(optional) override default throw away size of 598x598"],
        ["g", "growRatio", "(optional) override default grow ratio of 1.2"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "review", "(optional) download original crops without augmentation"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    minDiffX = int(args.minDiffX) if args.minDiffX else 299
    minDiffY = int(args.minDiffY) if args.minDiffY else 299
    throwSize = int(args.throwSize) if args.throwSize else 299*2
    growRatio = float(args.growRatio) if args.growRatio else 1.2
    minArea = int(args.minArea) if args.minArea else int(299*2.99)
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0

    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    if minusMinutes:
        timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes)
    cameraCache = {}
    skippedTiny = []
    skippedHuge = []
    skippedArchive = []
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            [cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            oldCoords = (minX, minY, maxX, maxY)
            if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize):
                logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName)
                skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY))
                continue
            if ((maxX - minX) * (maxY - minY)) < minArea:
                logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName)
                skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY)))
                continue

            nameParsed = img_archive.parseFilename(fileName)
            imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
            localFilePath = os.path.join(settings.downloadDir, fileName)
            if not os.path.isfile(localFilePath):# if file has not been downloaded by a previous iteration
                files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], imgDT, imgDT, 1)
                localFilePath = files[0]
            imgOrig = Image.open(localFilePath)

            # if in subtracted images mode, download an earlier image and subtract
            if minusMinutes:
                dt = imgDT - timeGapDelta
                nameParsed['unixTime'] -= 60*minusMinutes
                earlierName = img_archive.repackFileName(nameParsed)
                earlierImgPath = os.path.join(settings.downloadDir, earlierName)
                if not os.path.isfile(earlierImgPath):# if file has not been downloaded by a previous iteration
                    files = img_archive.getHpwrenImages(googleServices, settings, settings.downloadDir, camArchives, nameParsed['cameraID'], dt, dt, 1)
                    if files:
                        earlierImgPath = files[0]
                    else:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                logging.warning('Subtracting old image %s', earlierImgPath)
                earlierImg = Image.open(earlierImgPath)
                diffImg = img_archive.diffImages(imgOrig, earlierImg)
                extremas = diffImg.getextrema()
                if any(value == 128 for band in extremas[:3] for value in band):
                    logging.warning('Skipping no diffs %s, name=%s', str(extremas), fileName)
                    skippedTiny.append((rowIndex, fileName, extremas))
                    continue
                # realImgOrig = imgOrig # is this useful?
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            if args.review:
                cropCoords = [oldCoords]
            else:
                # crop the full sized image to show just the smoke, but shifted and flipped
                # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
                cropCoords = getCropCoords((minX, minY, maxX, maxY), minDiffX, minDiffY, growRatio, (imgOrig.size[0], imgOrig.size[1]))
            for newCoords in cropCoords:
                # XXXX - save work if old=new?
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                imgNameNoExt = str(os.path.splitext(fileName)[0])
                cropImgName = imgNameNoExt + '_Crop_' + 'x'.join(list(map(lambda x: str(x), newCoords))) + '.jpg'
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop(newCoords)
                cropped_img.save(cropImgPath, format='JPEG')
                if not args.review:
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = imgNameNoExt + '_Crop_' + 'x'.join(list(map(lambda x: str(x), newCoords))) + '_Flip.jpg'
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG')
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
            if args.display:
                displayCoords = [oldCoords] + cropCoords
                displayImageWithScores(imgOrig, displayCoords)
                imageDisplay(imgOrig)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
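
# Standalone illustration of the crop-and-flip augmentation step above, using
# only Pillow (image, coordinates, and file names are illustrative).
from PIL import Image

imgOrig = Image.new('RGB', (1920, 1080))   # stand-in for the downloaded frame
newCoords = (600, 300, 899, 599)           # one 299x299 crop window
cropped_img = imgOrig.crop(newCoords)
flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)  # horizontal mirror
cropped_img.save('sample_Crop_600x300x899x599.jpg', format='JPEG')
flipped_img.save('sample_Crop_600x300x899x599_Flip.jpg', format='JPEG')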
def main():
    reqArgs = [
        ["o", "outputDir", "local directory to save images segments"],
        ["i", "inputCsv", "csvfile with contents of Cropped Images"],
    ]
    optArgs = [
        ["s", "startRow", "starting row"],
        ["e", "endRow", "ending row"],
        ["x", "minSizeX", "(optional) override default minSizeX of 299"],
        ["y", "minSizeY", "(optional) override default minSizeY of 299"],
        ["a", "minArea", "(optional) override default 0 for minimum area"],
        ["t", "throwSize", "(optional) override default throw away size of 598x598"],
        ["g", "growRatio", "(optional) override default grow ratio of 1.2"],
        ["m", "minusMinutes", "(optional) subtract images from given number of minutes ago"],
        ["r", "recropType", "recrop type: 'raw', 'center', 'full', 'shift', 'augment' (default)"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
    startRow = int(args.startRow) if args.startRow else 0
    endRow = int(args.endRow) if args.endRow else 1e9
    minSizeX = int(args.minSizeX) if args.minSizeX else 299
    minSizeY = int(args.minSizeY) if args.minSizeY else 299
    throwSize = int(args.throwSize) if args.throwSize else 299*2
    growRatio = float(args.growRatio) if args.growRatio else 1.2
    minArea = int(args.minArea) if args.minArea else 0
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    recropType = args.recropType if args.recropType else 'augment'

    random.seed(0)
    googleServices = goog_helper.getGoogleServices(settings, args)
    camArchives = img_archive.getHpwrenCameraArchives(settings.hpwrenArchives)
    downloadDirCache = img_archive.cacheDir(settings.downloadDir, settings.downloadDir)

    if minusMinutes:
        timeGapDelta = datetime.timedelta(seconds = 60*minusMinutes)
    cameraCache = {}
    skippedTiny = []
    skippedHuge = []
    skippedArchive = []
    with open(args.inputCsv) as csvFile:
        csvreader = csv.reader(csvFile)
        for (rowIndex, csvRow) in enumerate(csvreader):
            if rowIndex < startRow:
                continue
            if rowIndex > endRow:
                print('Reached end row', rowIndex, endRow)
                break
            [_unused_cropName, minX, minY, maxX, maxY, fileName] = csvRow[:6]
            minX = int(minX)
            minY = int(minY)
            maxX = int(maxX)
            maxY = int(maxY)
            oldCoords = (minX, minY, maxX, maxY)
            if ((maxX - minX) > throwSize) or ((maxY - minY) > throwSize):
                logging.warning('Skip large image: dx=%d, dy=%d, name=%s', maxX - minX, maxY - minY, fileName)
                skippedHuge.append((rowIndex, fileName, maxX - minX, maxY - minY))
                continue
            if ((maxX - minX) * (maxY - minY)) < minArea:
                logging.warning('Skipping tiny image with area: %d, name=%s', (maxX - minX) * (maxY - minY), fileName)
                skippedTiny.append((rowIndex, fileName, (maxX - minX) * (maxY - minY)))
                continue

            nameParsed = img_archive.parseFilename(fileName)
            imgDT = datetime.datetime.fromtimestamp(nameParsed['unixTime'])
            (imgOrig, imgFilePath) = getArchiveImage(googleServices, downloadDirCache, camArchives, nameParsed['cameraID'], fileName, imgDT)
            if not imgOrig:
                logging.warning('Skip image without archive: %s', fileName)
                skippedArchive.append((rowIndex, fileName, imgDT))
                continue

            # find coordinates for cropping
            if recropType == 'raw':
                cropCoords = [oldCoords]
            elif recropType == 'full': # useful for generating full diffs
                cropCoords = [(0, 0, imgOrig.size[0], imgOrig.size[1])]
            else:
                # crop the full sized image to show just the smoke, but shifted and flipped
                # shifts and flips increase number of segments for training and also prevent overfitting by perturbing data
                cropCoords = getCropCoords((minX, minY, maxX, maxY), minSizeX, minSizeY, growRatio, (imgOrig.size[0], imgOrig.size[1]), recropType)
            fullImage = False
            if len(cropCoords) == 1 and cropCoords[0][0] == 0 and cropCoords[0][1] == 0 and cropCoords[0][2] == imgOrig.size[0] and cropCoords[0][3] == imgOrig.size[1]:
                fullImage = True
            assert fullImage or ('minX' not in nameParsed) # disallow crops of crops
            # find extrema (min/max) crop coordinates to crop the original image to speed up processing
            extremaCoords = list(cropCoords[0])
            for coords in cropCoords:
                extremaCoords[0] = min(extremaCoords[0], coords[0])
                extremaCoords[1] = min(extremaCoords[1], coords[1])
                extremaCoords[2] = max(extremaCoords[2], coords[2])
                extremaCoords[3] = max(extremaCoords[3], coords[3])
            imgOrig = imgOrig.crop(extremaCoords)

            # if in subtracted images mode, download an earlier image and subtract
            if minusMinutes:
                if not img_archive.findCameraInArchive(camArchives, nameParsed['cameraID']):
                    earlierImg = None
                    files = img_archive.cacheFetchRange(downloadDirCache, nameParsed['cameraID'], nameParsed['unixTime'], -minusMinutes*60, -10*minusMinutes*60)
                    if files:
                        earlierImg = findAlignedImage(imgFilePath, files, fullImage)
                    if not files or not earlierImg:
                        logging.warning('Skipping image without prior image: %s', fileName)
                        skippedArchive.append((rowIndex, fileName, None))
                        continue
                else:
                    nameParsed['unixTime'] -= 60*minusMinutes
                    earlierName = img_archive.repackFileName(nameParsed)
                    dt = imgDT - timeGapDelta
                    (earlierImg, _) = getArchiveImage(googleServices, downloadDirCache, camArchives, nameParsed['cameraID'], earlierName, dt)
                    if not earlierImg:
                        logging.warning('Skipping image without prior image: %s, %s', str(dt), fileName)
                        skippedArchive.append((rowIndex, fileName, dt))
                        continue
                    logging.warning('Subtracting old image %s', earlierName)

                earlierImg = earlierImg.crop(extremaCoords)
                diffImg = img_archive.diffWithChecks(imgOrig, earlierImg)
                if not diffImg:
                    skippedTiny.append((rowIndex, fileName))
                    continue
                imgOrig = diffImg
                fileNameParts = os.path.splitext(fileName)
                fileName = str(fileNameParts[0]) + ('_Diff%d' % minusMinutes) + fileNameParts[1]

            for newCoords in cropCoords:
                logging.warning('coords old %s, new %s', str(oldCoords), str(newCoords))
                parsed = img_archive.parseFilename(fileName)
                if not fullImage:
                    parsed['minX'] = newCoords[0]
                    parsed['minY'] = newCoords[1]
                    parsed['maxX'] = newCoords[2]
                    parsed['maxY'] = newCoords[3]
                if minusMinutes:
                    parsed['diffMinutes'] = 1
                cropImgName = img_archive.repackFileName(parsed)
                cropImgPath = os.path.join(args.outputDir, cropImgName)
                cropped_img = imgOrig.crop((newCoords[0] - extremaCoords[0], newCoords[1] - extremaCoords[1],
                                            newCoords[2] - extremaCoords[0], newCoords[3] - extremaCoords[1]))
                cropped_img.save(cropImgPath, format='JPEG', quality=95)
                if recropType == 'augment':
                    flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                    flipImgName = cropImgName.replace('.jpg', '_Flip.jpg')
                    flipImgPath = os.path.join(args.outputDir, flipImgName)
                    flipped_img.save(flipImgPath, format='JPEG', quality=95)
            logging.warning('Processed row: %d, file: %s', rowIndex, fileName)
    logging.warning('Skipped tiny images %d, %s', len(skippedTiny), str(skippedTiny))
    logging.warning('Skipped huge images %d, %s', len(skippedHuge), str(skippedHuge))
    logging.warning('Skipped images without archives %d, %s', len(skippedArchive), str(skippedArchive))
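
# Illustration of the extremaCoords optimization above: take the union of all
# crop windows, crop the full image once, then express each window relative to
# that union for the per-window crop (coordinates are illustrative).
cropCoords = [(100, 50, 399, 349), (150, 80, 449, 379)]
extremaCoords = [
    min(coords[0] for coords in cropCoords),
    min(coords[1] for coords in cropCoords),
    max(coords[2] for coords in cropCoords),
    max(coords[3] for coords in cropCoords),
]
print(extremaCoords)  # [100, 50, 449, 379]
relativeCoords = [(c[0] - extremaCoords[0], c[1] - extremaCoords[1],
                   c[2] - extremaCoords[0], c[3] - extremaCoords[1]) for c in cropCoords]
print(relativeCoords)  # [(0, 0, 299, 299), (50, 30, 349, 329)]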