Example #1
    def __init__(self, formal, caps, group):
        # Load the dataset
        data = DatasetLoader()
        # Create and train the models
        modelz = Models(data)
        # modelz.showPerformances()
        modelz.setSingleModel()  # this will set the multiNB model

        # Wait for the models to finish loading
        while not modelz.endLoading:
            time.sleep(1)

        # Initialize the chat and run the dialogs
        chat = ChatManager(modelz, group, formal, caps)
        chat.run()
Example #2
def main():
    # dataset, input_shapes = DatasetLoader().load_dataset("CIFAR10")
    # dataset, input_shapes = DatasetLoader().load_dataset("CIFAR100")
    # dataset, input_shapes = DatasetLoader().load_dataset("LLD")
    # dataset, input_shapes = DatasetLoader().load_dataset("MNIST")
    dataset, input_shapes = DatasetLoader().load_dataset("Fashion_MNIST")
    visualizers = [(image_tile, 20), (log_GAN_loss, 10), (image_tile_data, 20)]
    model = ModelClassLoader.load_model_class("GAN")
    InstanceManagerHelper.build_and_train(model=model,
                                          input_shapes=input_shapes,
                                          visualizers=visualizers,
                                          dataset=dataset,
                                          epoch_time=2)

Example #3
def main():
    savePath = os.path.join("models", "SoM_GRU_1")
    datasetLoader = DatasetLoader()
    datasetLoader.dataset = Datasets().audio_features_mfcc_functionals
    datasetLoader.loadDataset()
    datasetLoader.colomnsSeeked = ["Valance", "Valence_0"]
    # the target for which the model will get trained; depends on how it is loaded from the dataset
    tarsFunc = lambda tars: tars[:, 0] - tars[:, 1]

    saveDescription = datasetLoader.dataset

    wrapper = ModelWrapper([],
                           numModels=2,
                           loadFromPath=os.path.join(savePath, "best"),
                           device='cuda:0')

    _, evalLoss = wrapper.testCompute(datasetLoader.devDataset,
                                      verbose=True,
                                      computeLossFor=len(
                                          datasetLoader.devDataset),
                                      tarsFunc=tarsFunc,
                                      plusTar=-1)
    _, evalLoss2 = wrapper.testCompute(datasetLoader.devDataset,
                                       verbose=True,
                                       computeLossFor=len(
                                           datasetLoader.devDataset),
                                       tarsFunc=tarsFunc,
                                       plusTar=1)
    _, testLoss = wrapper.testCompute(datasetLoader.testDataset,
                                      verbose=True,
                                      computeLossFor=len(
                                          datasetLoader.testDataset),
                                      tarsFunc=tarsFunc,
                                      plusTar=-1)
    _, testLoss2 = wrapper.testCompute(datasetLoader.testDataset,
                                       verbose=True,
                                       computeLossFor=len(
                                           datasetLoader.testDataset),
                                       tarsFunc=tarsFunc,
                                       plusTar=1)

    writeLineToCSV(os.path.join("models", "results.csv"), [
        "savePath", "saveDescription", "evalLoss", "evalLoss2", "evalCCC",
        "evalCCC2", "testLoss", "testLoss2", "testCCC", "testCCC2"
    ], [
        savePath, saveDescription, evalLoss, evalLoss2, 1 - evalLoss,
        1 - evalLoss2, testLoss, testLoss2, 1 - testLoss, 1 - testLoss2
    ])
Example #4
# usage
from DatasetLoader import DatasetLoader
from InstanceManger import InstanceManager

# load dataset by calling DatasetLoader
# input_shapes is used for the tf.placeholder shapes
# needed to build the instance
dataset, input_shapes = DatasetLoader().load_dataset("dataset_name")

# apply to train model
instanceManager = InstanceManager()
instanceManager.train_instance(epoch_time,
                               dataset=dataset,
                               check_point_interval=check_point_interval)

# 1. add dataset folder path in **env_setting.py**
EXAMPLE_DATASET_PATH = os.path.join(DATA_PATH, 'example_dataset')

# 2. add input_shapes_keys in **dict_keys.dataset_batch_keys.py**
INPUT_SHAPE_KEY_DATA_X = "INPUT_SHAPE_KEY_DATA_X"
INPUT_SHAPE_KEY_LABEL = "INPUT_SHAPE_KEY_LABEL"
INPUT_SHAPE_KEY_LABEL_SIZE = "INPUT_SHAPE_KEY_LABEL_SIZE"

# 3. add dataset_batch_keys in **dict_keys.dataset_batch_keys.py**
BATCH_KEY_EXAMPLE_TRAIN_X = "BATCH_KEY_EXAMPLE_TRAIN_X"
BATCH_KEY_EXAMPLE_TEST_X = "BATCH_KEY_EXAMPLE_TEST_X"
BATCH_KEY_EXAMPLE_TRAIN_LABEL = "BATCH_KEY_EXAMPLE_TRAIN_LABEL"
BATCH_KEY_EXAMPLE_TEST_LABEL = "BATCH_KEY_EXAMPLE_TEST_LABEL"

# 4. implement dataset class in **data_handler.dataset_name.py**
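
A minimal sketch of what step 4 might look like, assuming the conventions from steps 1-3 above; the class name, method names, and array shapes are illustrative placeholders, not the repo's actual data_handler API.

import numpy as np


class ExampleDataset:
    # Hypothetical dataset class for data_handler.example_dataset.py;
    # it reuses the path and key constants defined in steps 1-3.
    def __init__(self, path=EXAMPLE_DATASET_PATH):
        self.path = path          # dataset folder registered in env_setting.py
        self.batch = {}           # batch arrays keyed by the BATCH_KEY_* constants
        self.input_shapes = {}    # shapes keyed by the INPUT_SHAPE_KEY_* constants

    def load(self):
        # replace the zero arrays with real parsing of the files under self.path
        train_x = np.zeros((1000, 32, 32, 3), dtype=np.float32)
        train_label = np.zeros((1000, 10), dtype=np.float32)

        self.batch[BATCH_KEY_EXAMPLE_TRAIN_X] = train_x
        self.batch[BATCH_KEY_EXAMPLE_TRAIN_LABEL] = train_label

        # per-sample shapes, later used when building the tf.placeholder's
        self.input_shapes[INPUT_SHAPE_KEY_DATA_X] = list(train_x.shape[1:])
        self.input_shapes[INPUT_SHAPE_KEY_LABEL] = list(train_label.shape[1:])
        self.input_shapes[INPUT_SHAPE_KEY_LABEL_SIZE] = [train_label.shape[1]]
        return self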
Example #5
                  str(depth) + ', residual type: ' + residual_type)
            for iter in range(1):
                #---- parameter section -------------------------------
                lr = 0.01
                epoch = 1000
                weight_decay = 5e-4
                c = 0.1
                seed = iter
                dropout = 0.5
                nhid = 16
                #------------------------------------------------------

                #---- object initialization section ------------------
                print('Start')

                data_obj = DatasetLoader('', '')
                data_obj.dataset_source_folder_path = './data/' + dataset_name + '/'
                data_obj.c = c
                data_obj.method_type = 'GCN'

                method_obj = MethodDeepGCNResNet(nfeature, nhid, nclass,
                                                 dropout, seed, depth)
                method_obj.lr = lr
                method_obj.epoch = epoch
                method_obj.residual_type = residual_type

                result_obj = ResultSaving('', '')
                result_obj.result_destination_folder_path = './result/GResNet/'
                result_obj.result_destination_file_name = 'DeepGCNResNet_' + dataset_name + '_' + residual_type + '_depth_' + str(
                    depth) + '_iter_' + str(iter)
Example #6
def main():
    savePath = os.path.join("models", "SoM_GRU_1")
    datasetLoader = DatasetLoader()
    datasetLoader.dataset = Datasets().audio_features_mfcc_functionals
    datasetLoader.loadDataset()
    datasetLoader.colomnsSeeked = ["Valance", "Valence_0"]
    # the target for which the model will get trained; depends on how it is loaded from the dataset
    tarsFunc = lambda tars: tars[:, 0] - tars[:, 1]

    curriculum = False
    gru_hidden_size = 64
    gru_num_layers = 2
    mlp_hidden_size = 32
    dropOut = 0.1

    computeLossFor = len(datasetLoader.trainDataset)
    computeLossForEval = len(datasetLoader.devDataset)

    featSize = datasetLoader.trainDataset.shape()[1]
    model1 = GRU(featSize,
                 gru_hidden_size=gru_hidden_size,
                 gru_num_layers=gru_num_layers,
                 dropOut=dropOut)
    model2 = fullyConnected(gru_hidden_size, 1, dropOut=0)
    wrapper = ModelWrapper([model1, model2], device='cuda:0')

    # Curriculum learning or not
    if curriculum:
        from preprocess import filterClasses
        includes = [
            [2, 3, 9, 10], [2, 3, 4, 8, 9, 10],
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ]  # the label subsets used for curriculum learning, trained in this order
        epochs = [
            32, 64, 512
        ]  # the number of epochs to train on each subset; for the last one, early stopping can come into play
        for i, include in enumerate(includes):
            trainCSVpathDynamicEqualC = os.path.join(
                ".", "data", "trainDynamicEqualC_") + str(i) + ".csv"
            filterClasses(datasetLoader.trainCsvPath,
                          trainCSVpathDynamicEqualC,
                          colomnTarget="Valance",
                          includeList=include)
        for i in range(len(includes)):
            trainCSVpathDynamicEqualC = os.path.join(
                ".", "data", "trainDynamicEqualC_") + str(i) + ".csv"
            datasetLoader.trainCsvPath = trainCSVpathDynamicEqualC
            datasetLoader.loadDataset()
            computeLossFor = len(datasetLoader.trainDataset)
            computeLossForEval = len(datasetLoader.devDataset)
            first = 1 if i == 0 else epochs[i - 1] + 1
            wrapper.train(datasetLoader.trainDataset,
                          epochs=epochs[i],
                          firstEpoch=first,
                          savePath=savePath,
                          evalDataset=datasetLoader.devDataset,
                          csvPath=os.path.join(savePath, "trainLog.csv"),
                          computeLossFor=computeLossFor,
                          computeLossForEval=computeLossForEval,
                          earlyStopAfter=epochs[-2] + 20,
                          tolerance=25,
                          tarsFunc=tarsFunc)
    else:
        wrapper.train(datasetLoader.trainDataset,
                      epochs=500,
                      firstEpoch=1,
                      savePath=savePath,
                      evalDataset=datasetLoader.devDataset,
                      csvPath=os.path.join(savePath, "trainLog.csv"),
                      computeLossFor=computeLossFor,
                      earlyStopAfter=60,
                      computeLossForEval=computeLossForEval,
                      tolerance=25,
                      tarsFunc=tarsFunc)
Example #7
			people_path = '/GPUFS/ruc_tliu_1/fsx/SyncNet/data/demo/'+people+'/'
			mp4list = glob.glob(people_path + '*.mp4')
			if len(mp4list) == 5:
				for mp4file in mp4list:
					mp4path = mp4file
					wavpath = mp4file[:-4]+'.wav'
					if os.path.exists(wavpath):
						os.remove(wavpath)
					if not os.path.exists(wavpath):
						mp42wav(mp4path)
					total_frames = count_mp4(mp4path)
					if total_frames[0] != total_frames[1]:
						print('ERROR, frames mismatch:', total_frames[0], total_frames[1])
						continue
					fw.write("%s %s %s %d\n"%(mp4path, wavpath, '0', total_frames[0]))


if __name__ == '__main__':
	# get_meta()
	loader = DatasetLoader("train.txt", nPerEpoch=100000, nBatchSize=30, maxFrames=44, nDataLoaderThread=10)
	S = SyncNetModel(nOut=1024, stride=2).cuda()
	L = LossScale().cuda()
	for data in loader:
		data_i, data_a = data
		out_i, out_I = S.forward_vid(data_i.cuda())
		out_a, out_A = S.forward_aud(data_a.cuda())
		print(out_a.shape, out_A.shape)
		print(out_i.shape, out_I.shape)
		exit(0)

Example #8
import multiprocessing
from glob import glob
from DatasetLoader import DatasetLoader
from Model import Model
from Average import Average

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 100, 'batch size')
flags.DEFINE_string('test_files_glob', './input/test*.tfrecords', 'glob for TFRecords files containing testing data')
flags.DEFINE_string('model_file', './model.ckpt', 'path to load trained model parameters from')
flags.DEFINE_integer('read_threads', multiprocessing.cpu_count(), 'number of reading threads')
flags.DEFINE_string('summary', './tensorboard_test', 'Tensorboard output directory')

# Testing input
dataset_loader = DatasetLoader()
keep_prob_holder = tf.placeholder(tf.float32, shape = ())
image_batch, label_batch = dataset_loader.input_batch(
    glob(FLAGS.test_files_glob), FLAGS.batch_size, FLAGS.read_threads)
label_batch = tf.cast(label_batch, tf.float32)

# Model, correctness predicate, and correctness aggregator
inferred_labels = Model.create_graph(image_batch, keep_prob_holder)
correct_prediction = tf.equal(tf.argmax(inferred_labels, 1), tf.argmax(tf.cast(label_batch, tf.float32), 1))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
correct_images = tf.boolean_mask(image_batch, correct_prediction)
incorrect_images = tf.boolean_mask(image_batch, tf.logical_not(correct_prediction))
tf.summary.image('Correct Inference', correct_images, max_outputs = 20)
tf.summary.image('Incorrect Inference', incorrect_images, max_outputs = 20)

# Run graph
Example #9
import matplotlib.pyplot as plt
import numpy as np
import argparse

ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True, help='path to input dataset')
ap.add_argument('-m', '--model', required=True, help='path to output model')
args = vars(ap.parse_args())

print('[INFO] loading images...')
imagePaths = list(paths.list_images(args['dataset']))

sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

sdl = DatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype('float') / 255.0

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

print('[INFO] compiling model...')
opt = SGD(lr=0.005)
model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
Example #10
import numpy as np
from DatasetLoader import DatasetLoader
import cv2

DATASET_PATH = "../datasets/gtsrb/GTSRB/Final_Training/Images/"
LABELS_PATH = "../datasets/gtsrb/labels.txt"

# TEST 1

print("TEST 1!")

simpleInitialization = DatasetLoader(DATASET_PATH)

dataset = simpleInitialization.getDataset()

imgs, labels = dataset
imgNames = simpleInitialization.getLabels(LABELS_PATH)

for i in np.random.randint(low=0, high=len(imgs), size=10):

    cv2.imshow("test", imgs[i])
    print(imgNames[labels[i]])
    cv2.waitKey(0)

# END TEST 1

# TEST 2

print("TEST 2!")

sizeInitialization = DatasetLoader(DATASET_PATH, imgSize=(64, 64))
Example #11
    'contrastive': 2,
    'softmax': 1,
    'amsoftmax': 1,
    'aamsoftmax': 1,
    'ge2e': args.nSpeakers,
    'angleproto': args.nSpeakers
}

assert args.trainfunc in gsize_dict
assert gsize_dict[args.trainfunc] <= 100

# ==================== CHECK SPK ====================

## print data stats
trainLoader = DatasetLoader(args.train_list,
                            gSize=gsize_dict[args.trainfunc],
                            **vars(args))

## update learning rate
clr = s.updateLearningRate(1)

while (1):
    print(time.strftime("%Y-%m-%d %H:%M:%S"), it,
          "Training %s with LR %.5f..." % (args.model, max(clr)))

    loss, traineer = s.train_network(loader=trainLoader)

    print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Evaluating...")

    # ==================== EVALUATE LIST ====================
Example #12
from glob import glob
from DatasetLoader import DatasetLoader
from Model import Model

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('training_epochs', 20, 'number of times to run through training dataset')
flags.DEFINE_integer('batch_size', 50, 'batch size')
flags.DEFINE_string('train_files_glob', './input/train*.tfrecords', 'glob for TFRecords files containing training data')
flags.DEFINE_string('model_file', './model.ckpt', 'path to save trained model parameters to')
flags.DEFINE_integer('read_threads', multiprocessing.cpu_count(), 'number of reading threads')
flags.DEFINE_string('profile', None, 'a Chrome trace file will be written at the specified path for the first training batch')
flags.DEFINE_string('summary', './tensorboard_train', 'Tensorboard output directory')

# Training input
dataset_loader = DatasetLoader()
keep_prob_holder = tf.placeholder(tf.float32, shape = ())
image_batch, label_batch = dataset_loader.input_shuffle_batch(
    glob(FLAGS.train_files_glob), FLAGS.batch_size, FLAGS.read_threads, num_epochs = FLAGS.training_epochs)
label_batch = tf.cast(label_batch, tf.float32)

# Model, loss function, and training op
inferred_labels = Model.create_graph(image_batch, keep_prob_holder)
cross_entropy = -tf.reduce_sum(
    tf.cast(label_batch, tf.float32) * tf.log(tf.maximum(inferred_labels, 1e-10)),
    reduction_indices=[1])
batch_avg_cross_entropy = tf.reduce_mean(cross_entropy)
training_op = tf.train.AdamOptimizer(1e-4).minimize(batch_avg_cross_entropy)

# Add loss and training accuracy to Tensorboard output
correct_prediction = tf.equal(tf.argmax(inferred_labels, 1), tf.argmax(tf.cast(label_batch, tf.float32), 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Example #13
    'eeg:9': 'Cz',
    'eeg:10': 'C2',
    'eeg:11': 'C4',
    'eeg:12': 'CP3',
    'eeg:13': 'CP1',
    'eeg:14': 'CPz',
    'eeg:15': 'CP2',
    'eeg:16': 'CP4'
}

# Read the appropriate root from the config file.
parsing = ConfigParser()
parsing.read('config.ini')
root = parsing['DATA']['root']

dsl = DatasetLoader(root)

offline_dict = dsl.getOffline()

grand_avg_error_array = []
grand_avg_no_error_array = []
grand_var_error_array = []
grand_var_no_error_array = []


def applyFeature(triggerSplitVolume=np.ndarray,
                 feature=BCIFeature,
                 window=64,
                 overlap=48):
    grand_grand_avg_error = []
    grand_grand_avg_var = []
Example #14
def main():
    savePath = os.path.join("models", "SoM_Mix_1")
    saveDescription = "mix: 1-2"
    trainDatasets, devDatasets, testDatasets = [], [], []

    ### dataset 1 #########
    datasetLoader1 = DatasetLoader()
    datasetLoader1.dataset = Datasets().audio_features_mfcc_functionals
    datasetLoader1.loadDataset()
    trainDatasets.append(datasetLoader1.trainDataset)
    devDatasets.append(datasetLoader1.devDataset)
    testDatasets.append(datasetLoader1.testDataset)
    #######################
    ### dataset 2 #########
    datasetLoader2 = DatasetLoader()
    datasetLoader2.dataset = Datasets().visual_features_functionals
    datasetLoader2.loadDataset()
    trainDatasets.append(datasetLoader2.trainDataset)
    devDatasets.append(datasetLoader2.devDataset)
    testDatasets.append(datasetLoader2.testDataset)
    #######################

    ### models ############
    model1Path = os.path.join("models", "SoM_GRU_1", "best")
    model2Path = os.path.join("models", "SoM_GRU_2", "best")
    models = [model1Path, model2Path]
    #######################

    # the paths to where the fused features would be (or already are)
    trainPath = os.path.join(savePath, "trainData.csv")
    devPath = os.path.join(savePath, "devData.csv")
    testPath = os.path.join(savePath, "testData.csv")

    # comment out the next three lines if the fused-feature CSV files for training already exist
    modelsOutToCSVs(models, trainDatasets, trainPath)
    modelsOutToCSVs(models, devDatasets, devPath)
    modelsOutToCSVs(models, testDatasets, testPath)

    trainDataset = myDataset(address=trainPath, tars=[1, 2])
    devDataset = myDataset(address=devPath, tars=[1, 2])
    testDataset = myDataset(address=testPath, tars=[1, 2])
    # the target for which the model will get trained; depends on how it is loaded from the dataset
    tarsFunc = lambda tars: tars[:, 0] - tars[:, 1]

    featSize = trainDataset.shape()[-1]
    model = fullyConnected(featSize, 1, hiddenSize=32)
    wrapper = ModelWrapper([model], tabuList=[], device='cuda:0')
    # comment out the following training call if you only want to test
    wrapper.train(trainDataset,
                  epochs=2500,
                  firstEpoch=1,
                  savePath=savePath,
                  evalDataset=devDataset,
                  csvPath=os.path.join(savePath, "trainLog.csv"),
                  computeLossFor=len(trainDataset),
                  computeLossForEval=len(devDataset),
                  tolerance=5,
                  tarsFunc=tarsFunc,
                  plusTar=-1)

    wrapper.load_model(os.path.join(savePath, "best"))
    _, evalLoss = wrapper.testCompute(devDataset,
                                      verbose=True,
                                      computeLossFor=len(devDataset),
                                      tarsFunc=tarsFunc,
                                      plusTar=-1)
    _, evalLoss2 = wrapper.testCompute(devDataset,
                                       verbose=True,
                                       computeLossFor=len(devDataset),
                                       tarsFunc=tarsFunc,
                                       plusTar=1)
    _, testLoss = wrapper.testCompute(testDataset,
                                      verbose=True,
                                      computeLossFor=len(testDataset),
                                      tarsFunc=tarsFunc,
                                      plusTar=-1)
    _, testLoss2 = wrapper.testCompute(testDataset,
                                       verbose=True,
                                       computeLossFor=len(testDataset),
                                       tarsFunc=tarsFunc,
                                       plusTar=1)

    writeLineToCSV(os.path.join("models", "results.csv"), [
        "savePath", "saveDescription", "evalLoss", "evalLoss2", "evalCCC",
        "evalCCC2", "testLoss", "testLoss2", "testCCC", "testCCC2"
    ], [
        savePath, saveDescription, evalLoss, evalLoss2, 1 - evalLoss,
        1 - evalLoss2, testLoss, testLoss2, 1 - testLoss, 1 - testLoss2
    ])
Example #15
from ADMM import ADMM
from DatasetLoader import DatasetLoader

path = './Input/input_video.mp4'
dataloader = DatasetLoader(path, keep_color=True, down_sample=True, image_resolution=(360, 640))
a = ADMM(dataloader)
a.train()  # Train and also create output visualizations
Example #16
if args.eval:

    sc, lab = s.evaluateFromListSave(args.test_list,
                                     print_interval=100,
                                     test_path=args.test_path)
    result = tuneThresholdfromScore(sc, lab, [1, 0.1])
    print('EER %2.4f' % result[1])

    quit()

# ==================== LOAD DATA LIST ====================

print('Reading data ...')

trainLoader = DatasetLoader(args.train_list,
                            nPerEpoch=args.nTrainPerEpoch,
                            **vars(args))
valLoader = DatasetLoader(args.verify_list,
                          nPerEpoch=args.nTestPerEpoch,
                          evalmode=True,
                          **vars(args))

print('Reading done.')

# ==================== CHECK SPK ====================

clr = s.updateLearningRate(1)

while (1):
    print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Start Iteration")
Example #17
## Assertion
gsize_dict = {
    'triplet': 2,
    'contrastive': 2,
    'softmax': 1,
    'amsoftmax': 1,
    'aamsoftmax': 1
}

if args.trainfunc in gsize_dict:
    assert gsize_dict[args.trainfunc] == args.nPerSpeaker
else:
    assert 2 <= args.nPerSpeaker and args.nPerSpeaker <= 100

## Initialise data loader
trainLoader = DatasetLoader(args.train_list, **vars(args))

clr = s.updateLearningRate(1)

while (1):
    print(time.strftime("%Y-%m-%d %H:%M:%S"), it,
          "Training %s with LR %f..." % (args.model, max(clr)))

    ## Train network
    loss, traineer = s.train_network(loader=trainLoader)

    ## Validate and save
    if it % args.test_interval == 0:

        print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Evaluating...")
Example #18
from Average import Average

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 100, 'batch size')
flags.DEFINE_string('test_files_glob', './input/test*.tfrecords',
                    'glob for TFRecords files containing testing data')
flags.DEFINE_string('model_file', './model.ckpt',
                    'path to load trained model parameters from')
flags.DEFINE_integer('read_threads', multiprocessing.cpu_count(),
                     'number of reading threads')
flags.DEFINE_string('summary', './tensorboard_test',
                    'Tensorboard output directory')

# Testing input
dataset_loader = DatasetLoader()
keep_prob_holder = tf.placeholder(tf.float32, shape=())
image_batch, label_batch = dataset_loader.input_batch(
    glob(FLAGS.test_files_glob), FLAGS.batch_size, FLAGS.read_threads)
label_batch = tf.cast(label_batch, tf.float32)

# Model, correctness predicate, and correctness aggregator
inferred_labels = Model.create_graph(image_batch, keep_prob_holder)
correct_prediction = tf.equal(tf.argmax(inferred_labels, 1),
                              tf.argmax(tf.cast(label_batch, tf.float32), 1))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
correct_images = tf.boolean_mask(image_batch, correct_prediction)
incorrect_images = tf.boolean_mask(image_batch,
                                   tf.logical_not(correct_prediction))
tf.summary.image('Correct Inference', correct_images, max_outputs=20)
tf.summary.image('Incorrect Inference', incorrect_images, max_outputs=20)
Example #19
    'eeg:9': 'Cz',
    'eeg:10': 'C2',
    'eeg:11': 'C4',
    'eeg:12': 'CP3',
    'eeg:13': 'CP1',
    'eeg:14': 'CPz',
    'eeg:15': 'CP2',
    'eeg:16': 'CP4'
}

parser = ConfigParser()
parser.read('config.ini')

root = parser['DATA']['root']

dsl = DatasetLoader(root)

# Get dictionaries with filenames for each dataset
offline_dict = dsl.getTrials()[0]
S2_dict = dsl.getTrials()[1]
S3_dict = dsl.getTrials()[2]

channels_of_interest = ['FCz', 'C4', 'Cz', 'FC1', 'FC2', 'CPz', 'CP4']


# adding each trial with its own variable for error_master, no_error_master
def trials(trial_name=''):
    error_master = []
    no_error_master = []
    trial_dicts = {
        'Offline': offline_dict,
Example #20
    'proto': args.nSpeakers,
    'triplet': 2,
    'contrastive': 2,
    'softmax': 1,
    'amsoftmax': 1,
    'aamsoftmax': 1,
    'ge2e': args.nSpeakers,
    'angleproto': args.nSpeakers
}

assert args.trainfunc in gsize_dict
assert gsize_dict[args.trainfunc] <= 100

## Initialise data loader
trainLoader = DatasetLoader(train_list,
                            gSize=gsize_dict[args.trainfunc],
                            new_train_path=train_path,
                            **vars(args))

clr = s.updateLearningRate(1)

# touch the output file/dir
print(f"Creating parent dir for path={args.save_tmp_model_to}")
Path(args.save_tmp_model_to).parent.mkdir(parents=True, exist_ok=True)

while (1):
    print(time.strftime("%Y-%m-%d %H:%M:%S"), it,
          "Training %s with LR %f..." % (args.model, max(clr)))

    ## Train network
    loss, traineer = s.train_network(loader=trainLoader)
Example #21
    height_shift_range=0.2,
    brightness_range=None,
    shear_range=0.0,
    zoom_range=0.1,
    channel_shift_range=0.0,
    fill_mode="nearest",
    cval=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=1.0/255,
    preprocessing_function=None,
    data_format=None,
    validation_split=0.0,
    dtype=None
)

DATASET_PATH = "../datasets/gtsrb/GTSRB/Final_Training/Images/"
LABELS_PATH = "../datasets/gtsrb/labels.txt"


print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
tf.debugging.set_log_device_placement(True)

datasetLoader = DatasetLoader(DATASET_PATH, imgSize = (64, 64))
dataset1 = datasetLoader.getDataset()
dataset = (dataset1[0][:2], dataset1[1][:2])

#imgPreproc = KerasImagePreprocessor(dataset, **imageDataGeneratorParams)

#imgFlwr = imgPreproc.getFlwr(saveToDir = "./preprocessingNotebookTest", savePrefix = "a", saveFormat = "jpeg")
#imgFlwr.next()
Example #22
    else:
        # Wrapping an LSTMCell in an RNN layer will not use CuDNN.
        gru_layer = tf.keras.layers.RNN(
            tf.keras.layers.LSTMCell(units), input_shape=(None, input_dim)
        )

    model = tf.keras.models.Sequential(
        [
            gru_layer,
            #tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(output_size,activation='softmax'),
        ]
    )
    return model

loader = Loader()
x, y, lens, lenMax = loader.load(datasetPath="out-dataset/dataset-variable-trace-110.npz")

#x, y, lens, lenMax = loader().loadDefault()
input_dim = lenMax

model = build_model(allow_cudnn_kernel=True)

opt = tf.keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

model.compile(
    loss='categorical_crossentropy',
    optimizer=opt,
    metrics=["accuracy"],
)