Code Example #1
    def test_buildImage_GiveAwayWithEmptyText(self):
        """
		Test if buildImage works normally for GiveAway with empty text
		:return: void
		"""
        lg = LabelGenerator()
        lg.buildImage("give_away", "", outputfile=None)
Code Example #2
    def test_buildImage_Normal(self):
        """
		Test if buildImage works when called normally
		:return: void
		"""
        lg = LabelGenerator()
        lg.buildImage("give_away", "Text", outputfile=None)
Code Example #3
    def test_buildImage_WrongLabel(self):
        """
		Test if buildImage throws an error for unknown label type
		:return: void
		"""
        lg = LabelGenerator()
        with self.assertRaises(Exception):
            lg.buildImage("beef", "Text", outputfile=None)
Code Example #4
    def test_buildImage_UndocumentedWithEmptyText(self):
        """
		Test if buildImage throws an error for labels different to
		GiveAway and an empty text.
		:return: void
		"""
        lg = LabelGenerator()
        with self.assertRaises(Exception):
            lg.buildImage("undocumented", "", outputfile=None)
Code Example #5
def commandlineInterface():
    lg = LabelGenerator()
    argv = sys.argv[1:]

    opts, args = getopt.getopt(argv, 'vho:t:l:')

    # parse arguments
    outputfile = None
    text = None
    label = None

    for opt in opts:
        if opt[0] == "-o":
            outputfile = opt[1]
        elif opt[0] == "-t":
            text = opt[1]
        elif opt[0] == "-l":
            label = opt[1]
        elif opt[0] == "-v":
            printVersions()
            return
        elif opt[0] == "-h":
            print(
                """This is a tool to generate vspace.one e.V. labels with the names of the owners on them.
                \nPlease use the following options:
                -t: Text to print on the label (optional)
                -o: Outputfile for the generated image (optional)
                -l: Label: owner_only, instructed, documented, public, give_away
                -h: Help
                -v: Print version
                """)
            return

    # validating inputs
    if label is None:
        raise Exception(
            "\nPlease define a label (-l) \n\nPossible labels: \nowner_only\ninstructed\ndocumented\npublic\ngive_away\n"
        )

    if label not in lg.POSSIBLE_LABELS:
        raise Exception(
            "\n" + l +
            " is a unknown label \n\nPossible labels: \nowner_only\ninstructed\ndocumented\npublic\ngive_away\n"
        )

    if text is None:
        text = ""

    # call actual functionality
    lg.buildImage(label, text, outputfile)
Code Example #6
def generate(label, text, fileformat):
    lg = LabelGenerator()
    fileformats = ["png", "jpeg"]
    if fileformat not in fileformats:
        raise Exception("Unknown fileformat ", fileformat)

    # validate inputs
    if label not in lg.POSSIBLE_LABELS:
        raise Exception(
            "Label " + label +
            " not found! Only given_away, instructed, public, owner_only and documented are possible labels!"
        )

    # generate image
    img = lg.buildImage(label, text)
    if img is None:
        raise Exception("Image is none!")

    # return image png
    retval, buffer = cv2.imencode("." + fileformat, img)
    response = flask.make_response(buffer.tobytes())
    response.headers['Content-Type'] = "image/" + fileformat
    return response
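generate() builds and returns a Flask response, so it is presumably called from a request handler elsewhere in the service. Below is a minimal sketch of how it could be exposed as a route; the app object, route path, and query-parameter names are assumptions, not taken from the source:

import flask

app = flask.Flask(__name__)  # hypothetical application object


@app.route("/label")  # hypothetical route
def label_endpoint():
    # Query-parameter names are assumptions for illustration only.
    label = flask.request.args.get("label", "give_away")
    text = flask.request.args.get("text", "")
    fileformat = flask.request.args.get("format", "png")
    return generate(label, text, fileformat)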
Code Example #7
import numpy as np
import keras
from DataGeneratorSimple import DataGenerator
from LabelGenerator import LabelGenerator

#%%
prefix = "C:/Users/Eiden/Desktop/BrainScanMotionDetection/CNN/DataArrays/under/"
labelGenerator = LabelGenerator()
labelGenerator.setSliceStart(124)
labelGenerator.setSliceEnd(132)
labelGenerator.generateLabels()
idList = labelGenerator.get_idList()
labels = labelGenerator.get_labels()
maxVals = labelGenerator.get_maxVals()

#%%

# Parameters
params = {
    'labels': labels,
    'max_brightness': maxVals,
    'dim': (128, 128),
    'batch_size': 32,
    'n_classes': 2,
    'n_channels': 1,
    'shuffle': True
}

# Generators
dataGen = DataGenerator(idList, **params)
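Code Example #10 imports keras.utils.Sequence, so DataGenerator is presumably a Sequence subclass, and a generator built this way can be handed straight to Keras for training. A minimal sketch, assuming a compiled model named model is defined elsewhere:

# Minimal training sketch: `model` is assumed to be a compiled Keras model
# (see Code Example #10). fit_generator matches the older standalone-Keras
# API used there; newer Keras accepts the Sequence directly in fit().
model.fit_generator(generator=dataGen, epochs=10, verbose=1)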
Code Example #8
def processVideo(dp: DataPoint, vehicleDetector, laneLineDetector,
                 progressTracker, stopEvent):
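    # Runs vehicle detection, tracking, lane-line extraction and label
    # generation over every frame of dp's video, updating progressTracker
    # as it goes, and returns the DataPoint with predictedLabels filled in.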

    video = Video(dp.videoPath)
    totalNumFrames = video.getTotalNumFrames()

    videoFeaturesPath = dp.videoPath.replace('videos', 'features').replace(
        '.avi', '.pkl')

    if CONFIG.USE_PRECOMPUTED_FEATURES:
        vehicleDetector.loadFeaturesFromDisk(videoFeaturesPath)
        laneLineDetector.loadFeaturesFromDisk(videoFeaturesPath)

    tracker = VehicleTracker()
    labelGen = LabelGenerator(video.getFps())

    if CONFIG.MAKE_PRECOMPUTED_FEATURES:
        allboxes, allboxscores, allvehicles, alllines = [], [], [], []

    frames = []
    for frameIndex in range(totalNumFrames):

        if stopEvent.is_set():
            print("Classifier process exited.", flush=True)
            return dp

        if CONFIG.SHOULD_LOAD_VID_FROM_DISK:
            isFrameAvail, frame = video.getFrame(vehicleDetector.wantsRGB)
        else:
            isFrameAvail, frame = True, None

        if not isFrameAvail:
            print('Video=' + dp.videoPath + ' returned no frame for index=' +
                  str(frameIndex) + ' but totalNumFrames=' +
                  str(totalNumFrames))
            rawboxes, boxscores, vehicles, lines = [], [], [], [[], []]
        else:

            rawboxes, boxscores = vehicleDetector.getFeatures(frame)
            vehicles = tracker.getVehicles(frame, rawboxes, boxscores)
            lines = laneLineDetector.getLines(frame)
            try:
                labelGen.processFrame(vehicles, lines, frameIndex)
            except Exception as e:
                print(e)

        if CONFIG.MAKE_PRECOMPUTED_FEATURES:
            allboxes.append(rawboxes)
            allboxscores.append(boxscores)
            allvehicles.append(vehicles)
            alllines.append(lines)

        _updateDataPoint(dp, rawboxes, vehicles, lines)
        progressTracker.setCurVidProgress(frameIndex / totalNumFrames)
        progressTracker.incrementNumFramesProcessed()

    if CONFIG.MAKE_PRECOMPUTED_FEATURES:
        import pickle
        with open(videoFeaturesPath, 'wb') as file:
            pickle.dump([allboxes, allboxscores, alllines, allvehicles], file)

    dp.predictedLabels = labelGen.getLabels()
    dp.hasBeenProcessed = True
    return dp
Code Example #9
'''
Randomly chooses nii files to be part of train and test
Pulls appropriate slices from the test and train nii files for test and train sets
'''
import numpy as np
import keras
from DataGeneratorSimple import DataGenerator
from LabelGenerator import LabelGenerator
import random
#%%
gap = 50
labelGenerator = LabelGenerator()
labelGenerator.setSliceStart(128 - gap)
labelGenerator.setSliceEnd(128 + gap)
labelGenerator.generateLabels()
idList = labelGenerator.get_idList()
labels = labelGenerator.get_labels()
maxVals = labelGenerator.get_maxVals()

#%%

# Parameters
params = {
    'labels': labels,
    'max_brightness': maxVals,
    'dim': (128, 128),
    'batch_size': 32,
    'n_classes': 2,
    'n_channels': 1,
    'shuffle': True
}
Code Example #10
import os
os.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'

import numpy as np
import random
import time
from keras.models import Sequential
from keras.utils import Sequence
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization
from DataGenerator import DataGenerator
from LabelGenerator import LabelGenerator
from keras.optimizers import Adam
# Use the standalone-Keras TensorBoard callback to match the plaidml.keras backend
from keras.callbacks import ModelCheckpoint, TensorBoard

labelGenerator = LabelGenerator()
labelGenerator.generateLabels()
idList = labelGenerator.get_idList()
labels = labelGenerator.get_labels()
maxVals = labelGenerator.get_maxVals()

random.seed(1)
random.shuffle(idList)

train_listIDs = idList[:int(len(idList) * 0.05)]
val_listIDs = idList[int(len(idList) * 0.05):]

# Parameters
params = {
    'labels': labels,
    'max_brightness': maxVals,