def __init__(self, settings, args, google_services, dbManager, tfConfig,
             camArchives, minusMinutes, useArchivedImages):
    self.dbManager = dbManager
    self.args = args
    self.google_services = google_services
    self.camArchives = camArchives
    self.minusMinutes = minusMinutes
    self.useArchivedImages = useArchivedImages
    # load the retrained model and labels once and keep a single session open for reuse
    self.graph = tf_helper.load_graph(settings.model_file)
    self.labels = tf_helper.load_labels(settings.labels_file)
    self.tfSession = tf.Session(graph=self.graph, config=tfConfig)
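
# A minimal sketch of building the tfConfig argument this constructor expects,
# mirroring the session configuration used in Example 4 below (assuming the
# caller wants the same per-process GPU memory cap):
import tensorflow as tf

tfConfig = tf.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.1  # cap GPU memory used by this process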
Example 2
def main():
    # the CSV header row was written once with the code below (now commented out);
    # scores are appended to the same file in 'a' mode further down
    #header = np.array(['Filename','Score','Class'])
    #with open('/home/fuego/Desktop/training_set_scores.csv', 'w', newline = '') as f:
    #    writer = csv.writer(f)
    #    writer.writerow(header)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet down tensorflow logging
    graph = tf_helper.load_graph(settings.model_file)
    labels = tf_helper.load_labels(settings.labels_file)

    smoke_dir = '/home/fuego/Desktop/training_set_smoke'
    # os.walk yields (dirpath, dirnames, filenames) tuples; only the top-level filenames are needed
    smoke_files = next(os.walk(smoke_dir))[2]
    smoke_image_list = [smoke_dir + '/' + name for name in smoke_files
                        if name.endswith('.jpg')]

    other_dir = '/home/fuego/Desktop/training_set_other'
    other_files = next(os.walk(other_dir))[2]
    other_image_list = [other_dir + '/' + name for name in other_files
                        if name.endswith('.jpg')]

    with open('/home/fuego/Desktop/training_set_scores.csv', 'a',
              newline='') as fd:
        writer = csv.writer(fd)
        with tf.Session(graph=graph) as tfSession:
            for smoke_image in smoke_image_list:
                smoke_score = smoke_check(tfSession, graph, labels,
                                          smoke_image)
                # the [39:] slice drops the fixed directory prefix from the path
                writer.writerow([smoke_image[39:], smoke_score, 'smoke'])

            for other_image in other_image_list:
                segments = segmentImage(other_image)
                tf_helper.classifySegments(tfSession, graph, labels, segments)
                for segment in segments:
                    writer.writerow([segment['imgPath'][39:],
                                     segment['score'], 'other'])
                # remove the temporary segment crops now that their scores are recorded
                deleteImageFiles(other_image, segments)

    print("DONE")
Example 3
def main():
    reqArgs = [
        ["i", "image", "filename of the image"],
        ["o", "output", "output directory name"],
    ]
    optArgs = [["l", "labels", "labels file generated during retraining"],
               ["m", "model", "model file generated during retraining"],
               [
                   "d", "display",
                   "(optional) specify any value to display image and boxes"
               ]]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    model_file = args.model if args.model else settings.model_file
    labels_file = args.labels if args.labels else settings.labels_file

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    graph = tf_helper.load_graph(model_file)
    labels = tf_helper.load_labels(labels_file)
    segments = []
    with tf.Session(graph=graph) as tfSession:
        if True:  # chops image in segment files and classifies each segment
            imgOrig = Image.open(args.image)
            segments = rect_to_squares.cutBoxes(imgOrig, args.output,
                                                args.image)
            tf_helper.classifySegments(tfSession, graph, labels, segments)

        if False:  # version that classifies entire image without cropping
            imgOrig = Image.open(args.image)
            segments = [{'imgPath': args.image}]
            tf_helper.classifySegments(tfSession, graph, labels, segments)

        if False:  # chops image into in-memory segments and classifies each segment
            calcScoresInMemory(args.model, args.labels, args.image)

        for segmentInfo in segments:
            print(segmentInfo['imgPath'], segmentInfo['score'])
        if args.display:
            drawBoxesAndScores(imgOrig, segments)
            displayImageWithScores(imgOrig, [])
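
# drawBoxesAndScores and displayImageWithScores are not shown in this snippet.
# A minimal sketch of the drawing step using PIL, assuming each segment dict
# carries its crop coordinates under keys like 'MinX'/'MinY'/'MaxX'/'MaxY'
# (hypothetical names here) alongside 'score':
from PIL import ImageDraw

def drawBoxesAndScoresSketch(imgOrig, segments):
    draw = ImageDraw.Draw(imgOrig)
    for segment in segments:
        box = (segment['MinX'], segment['MinY'], segment['MaxX'], segment['MaxY'])
        draw.rectangle(box, outline='red')
        # label each box with its classification score
        draw.text((segment['MinX'], segment['MinY']), '%.3f' % segment['score'], fill='red')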
Example 4
def main():
    optArgs = [
        ["b", "heartbeat", "filename used for heartbeating check"],
        [
            "c", "collectPositves",
            "collect positive segments for training data"
        ],
        ["d", "imgDirectory", "Name of the directory containing the images"],
        ["t", "time", "Time breakdown for processing images"],
        [
            "m", "minusMinutes",
            "(optional) subtract images from given number of minutes ago"
        ],
        [
            "r", "restrictType",
            "Only process images from cameras of given type"
        ],
        [
            "s", "startTime",
            "(optional) performs search with modifiedTime > startTime"
        ],
        [
            "e", "endTime",
            "(optional) performs search with modifiedTime < endTime"
        ],
    ]
    args = collect_args.collectArgs(
        [],
        optionalArgs=optArgs,
        parentParsers=[goog_helper.getParentParser()])
    minusMinutes = int(args.minusMinutes) if args.minusMinutes else 0
    googleServices = goog_helper.getGoogleServices(settings, args)
    dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
                                     psqlHost=settings.psqlHost,
                                     psqlDb=settings.psqlDb,
                                     psqlUser=settings.psqlUser,
                                     psqlPasswd=settings.psqlPasswd)
    cameras = dbManager.get_sources(activeOnly=True,
                                    restrictType=args.restrictType)
    startTimeDT = dateutil.parser.parse(
        args.startTime) if args.startTime else None
    endTimeDT = dateutil.parser.parse(args.endTime) if args.endTime else None
    timeRangeSeconds = None
    useArchivedImages = False
    camArchives = img_archive.getHpwrenCameraArchives(googleServices['sheet'],
                                                      settings)
    constants = { # dictionary of constants to reduce parameters in various functions
        'args': args,
        'googleServices': googleServices,
        'camArchives': camArchives,
        'dbManager': dbManager,
    }
    if startTimeDT or endTimeDT:
        assert startTimeDT and endTimeDT
        timeRangeSeconds = (endTimeDT - startTimeDT).total_seconds()
        assert timeRangeSeconds > 0
        assert args.collectPositves
        useArchivedImages = True

    deferredImages = []
    processingTimeTracker = initializeTimeTracker()
    graph = tf_helper.load_graph(settings.model_file)
    labels = tf_helper.load_labels(settings.labels_file)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.1  #hopefully reduces segfaults
    with tf.Session(graph=graph, config=config) as tfSession:
        while True:
            classifyImgPath = None
            timeStart = time.time()
            if useArchivedImages:
                (cameraID, timestamp, imgPath, classifyImgPath) = \
                    getArchivedImages(constants, cameras, startTimeDT, timeRangeSeconds, minusMinutes)
            elif minusMinutes:
                (queueFull, deferredImageInfo) = getDeferrredImgageInfo(
                    deferredImages, processingTimeTracker, minusMinutes,
                    timeStart)
                if not queueFull:  # queue is not full, so add more to queue
                    addToDeferredImages(dbManager, cameras, deferredImages)
                if deferredImageInfo:  # we have a deferred image ready to process, now get latest image and subtract
                    (cameraID, timestamp, imgPath, classifyImgPath) = \
                        genDiffImageFromDeferred(dbManager, cameras, deferredImageInfo, deferredImages, minusMinutes)
                    if not cameraID:
                        continue  # skip to next camera without deleting deferred image which may be reused later
                    os.remove(deferredImageInfo['imgPath'])  # no longer needed
                else:
                    continue  # in diff mode without deferredImage, nothing more to do
            # elif args.imgDirectory:  unused functionality -- to delete?
            #     (cameraID, timestamp, imgPath, md5) = getNextImageFromDir(args.imgDirectory)
            else:  # regular (non diff mode), grab image and process
                (cameraID, timestamp, imgPath,
                 md5) = getNextImage(dbManager, cameras)
                classifyImgPath = imgPath
            if not cameraID:
                continue  # skip to next camera
            timeFetch = time.time()

            segments = segmentAndClassify(classifyImgPath, tfSession, graph,
                                          labels)
            timeClassify = time.time()
            recordFilterReport(constants, cameraID, timestamp, classifyImgPath,
                               imgPath, segments, minusMinutes,
                               googleServices['drive'], useArchivedImages)
            timePost = time.time()
            updateTimeTracker(processingTimeTracker, timePost - timeStart)
            if args.time:
                logging.warning(
                    'Timings: fetch=%.2f, classify=%.2f, post=%.2f',
                    timeFetch - timeStart, timeClassify - timeFetch,
                    timePost - timeClassify)
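
# initializeTimeTracker and updateTimeTracker are not defined in this snippet.
# A minimal sketch, assuming the tracker simply smooths per-image processing time
# with an exponential moving average (the real helpers may track more state):
def initializeTimeTrackerSketch():
    return {'timePerImage': None}

def updateTimeTrackerSketch(tracker, seconds):
    if tracker['timePerImage'] is None:
        tracker['timePerImage'] = seconds
    else:
        # weight the newest measurement at 10% to smooth out spikes
        tracker['timePerImage'] = 0.9 * tracker['timePerImage'] + 0.1 * seconds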
Example 5
def main():
    reqArgs = [
        ["d", "directory", "directory containing the image sets"],
        ["o", "outputFile", "output file name"],
    ]
    optArgs = [
        ["l", "labels", "labels file generated during retraining"],
        ["m", "model", "model file generated during retraining"],
    ]
    args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs)
    model_file = args.model if args.model else settings.model_file
    labels_file = args.labels if args.labels else settings.labels_file

    # column accumulators for the output report, seeded with their header labels
    image_name = ["Image"]
    crop_name = ["Crop"]
    score_name = ["Score"]
    class_name = ["Class"]

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet down tensorflow logging
    graph = tf_helper.load_graph(model_file)
    labels = tf_helper.load_labels(labels_file)

    smokeDir = os.path.join(args.directory, 'test_set_smoke')
    smoke_image_list = listJpegs(smokeDir)
    logging.warning('Found %d images of smoke', len(smoke_image_list))
    nonSmokeDir = os.path.join(args.directory, 'test_set_other')
    other_image_list = listJpegs(nonSmokeDir)
    logging.warning('Found %d images of nonSmoke', len(other_image_list))

    smokeFile = os.path.join(args.directory, 'test_smoke.txt')
    np.savetxt(smokeFile, smoke_image_list, fmt="%s")
    nonSmokeFile = os.path.join(args.directory, 'test_other.txt')
    np.savetxt(nonSmokeFile, other_image_list, fmt="%s")

    (i, cr, s, cl, numPositive) = classifyImages(graph, labels,
                                                 smoke_image_list, 'smoke',
                                                 args.outputFile)
    image_name += i
    crop_name += cr
    score_name += s
    class_name += cl
    logging.warning('Done with smoke images')
    truePositive = numPositive
    falseNegative = len(smoke_image_list) - numPositive
    logging.warning('True Positive: %d', truePositive)
    logging.warning('False Negative: %d', falseNegative)

    (i, cr, s, cl, numPositive) = classifyImages(graph, labels,
                                                 other_image_list, 'other',
                                                 args.outputFile)
    image_name += i
    crop_name += cr
    score_name += s
    class_name += cl
    logging.warning('Done with nonSmoke images')
    falsePositive = numPositive
    trueNegative = len(other_image_list) - numPositive
    logging.warning('False Positive: %d', falsePositive)
    logging.warning('True Negative: %d', trueNegative)

    accuracy = (truePositive + trueNegative) / (truePositive + trueNegative +
                                                falsePositive + falseNegative)
    logging.warning('Accuracy: %f', accuracy)
    precision = truePositive / (truePositive + falsePositive)
    logging.warning('Precision: %f', precision)
    recall = truePositive / (truePositive + falseNegative)
    logging.warning('Recall: %f', recall)
    f1 = 2 * precision * recall / (precision + recall)
    logging.warning('F1: %f', f1)

    test_data = [image_name, crop_name, score_name, class_name]
    np.savetxt(args.outputFile, np.transpose(test_data), fmt="%s")
    print("DONE")