def evaluate(binary_dataset, model_int, X_int_test, y_int_test, dim):
    train_str_bin = dataset_2_string(binary_dataset['train'],
                                     limit_num_sents=False,
                                     set_type='train')

    with NamedTemporaryFile() as f:
        f.write(train_str_bin.encode('utf8'))
        f.seek(0)

        # Train model for in-scope queries
        model_bin = fasttext.train_supervised(
            input=f.name,
            dim=dim,
            pretrainedVectors=f'{PRETRAINED_VECTORS_PATH}/cc.en.{dim}.vec')

    # Test
    testing = Testing(model_int,
                      X_int_test,
                      y_int_test,
                      'fasttext',
                      '__label__oos',
                      bin_model=model_bin)
    results_dct = testing.test_binary()

    return results_dct
def evaluate(binary_dataset, model_int, X_int_test, y_int_test):
    train_str_bin = dataset_2_string_rasa(binary_dataset['train'],
                                          limit_num_sents=False,
                                          set_type='train')

    with NamedTemporaryFile(suffix='.yml') as f:
        f.write(train_str_bin.encode('utf8'))
        f.seek(0)

        training_data = rasa.shared.nlu.training_data.loading.load_data(f.name)

    config = rasa.nlu.config.load('config.yml')
    trainer = rasa.nlu.model.Trainer(config)
    model_bin = trainer.train(training_data)

    # Test
    testing = Testing(model_int,
                      X_int_test,
                      y_int_test,
                      'rasa',
                      'oos',
                      bin_model=model_bin)
    results_dct = testing.test_binary()

    return results_dct
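
Both Rasa examples load a pipeline from config.yml, which is not included in this listing. Purely as an illustration (the experiments' real pipeline is unknown), a minimal Rasa 2.x NLU configuration could be written out like this before training:

# Hypothetical config.yml contents; WhitespaceTokenizer, CountVectorsFeaturizer
# and DIETClassifier are standard Rasa 2.x NLU components.
MINIMAL_RASA_CONFIG = """\
language: en
pipeline:
  - name: WhitespaceTokenizer
  - name: CountVectorsFeaturizer
  - name: DIETClassifier
    epochs: 100
"""

with open('config.yml', 'w', encoding='utf8') as cfg:
    cfg.write(MINIMAL_RASA_CONFIG)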
Example #3
def test():
    # this test only tests the model interface.  If you are testing node types then use nodes_test.py
    testing = Testing()
    test_callbacks(testing)
    test_multiple(testing)
    test_bitcode(testing)
    return testing.GetFailedTests()
Example #4
def test():
    testing = Testing()
    testModelBuilder(testing)
    if testing.DidTestFail():
        return 1
    else:
        return 0
def evaluate(dataset, limit_num_sents: bool):
    train_str = dataset_2_string_rasa(dataset['train'], limit_num_sents=limit_num_sents, set_type='train')
    X_val, y_val = get_X_y_rasa(dataset['val'] + dataset['oos_val'], limit_num_sents=limit_num_sents,
                                set_type='val')
    X_test, y_test = get_X_y_rasa(dataset['test'] + dataset['oos_test'], limit_num_sents=limit_num_sents,
                                  set_type='test')

    with NamedTemporaryFile(suffix='.yml') as f:
        f.write(train_str.encode('utf8'))
        f.seek(0)

        training_data = rasa.shared.nlu.training_data.loading.load_data(f.name)

    config = rasa.nlu.config.load('config.yml')
    trainer = rasa.nlu.model.Trainer(config)
    model = trainer.train(training_data)

    val_predictions_labels = []  # used to find threshold

    for sent, true_int_label in zip(X_val, y_val):
        pred = model.parse(sent)
        pred_label = pred['intent']['name']
        similarity = pred['intent']['confidence']

        pred = (pred_label, similarity)
        val_predictions_labels.append((pred, true_int_label))

    threshold = find_best_threshold(val_predictions_labels, 'oos')

    # Test
    testing = Testing(model, X_test, y_test, 'rasa', 'oos')
    results_dct = testing.test_threshold(threshold)

    return results_dct
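
The validation loop above collects (prediction, true_label) pairs and hands them to find_best_threshold, which is not part of this listing. A minimal sketch of what such a search could do, purely illustrative and not the project's actual helper, assuming each prediction is a (label, confidence) tuple and low-confidence predictions are re-labelled as out of scope:

def find_best_threshold(predictions_labels, oos_label, num_steps=100):
    """Hypothetical helper: sweep candidate thresholds and keep the one with
    the highest validation accuracy when predictions whose confidence falls
    below the threshold are re-labelled as OOS."""
    best_threshold, best_accuracy = 0.0, -1.0

    for step in range(num_steps + 1):
        threshold = step / num_steps
        correct = 0

        for (pred_label, confidence), true_label in predictions_labels:
            final_label = pred_label if confidence >= threshold else oos_label
            correct += int(final_label == true_label)

        accuracy = correct / len(predictions_labels)
        if accuracy > best_accuracy:
            best_threshold, best_accuracy = threshold, accuracy

    return best_threshold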
def ites(args):
	es = Elasticsearch(args.offline_dir, args.url)

	# create a running instance of Elasticsearch if needed
	if args.url is None:
		es_reset = args.es_full_reset or args.es_reset
		if es_reset:
			es_dir = os.path.abspath(es_reset)
			es.reset(es_dir, args.es_full_reset is not None)
		else:
			assert(args.root_dir)
			root_dir = os.path.abspath(args.root_dir)
			if not args.version:
				version = None
				toks = args.driver.split("-")
				for i in range(3, len(toks)):
					if re.match(r"\d+\.\d+\.\d+", toks[-i]):
						version = "-".join(toks[len(toks) - i : -2])
			else:
				version = args.version
			if not version:
				raise Exception("failed to determine Elasticsearch version to test against (params: driver: %s, "
						"version: %s)" % (args.driver, args.version))

			es.spawn(version, root_dir, args.ephemeral)
	elif not es.is_listening():
		raise Exception("no running prestaged Elasticsearch instance found.")
	else:
		print("Using pre-staged Elasticsearch.")

	# add test data into it
	if args.reindex or not (args.skip_indexing and args.skip_tests):
		if args.skip_indexing_tests:
			test_mode = TestData.MODE_NODATA
		elif args.skip_indexing:
			test_mode = TestData.MODE_NOINDEX
		elif args.reindex:
			test_mode = TestData.MODE_REINDEX
		else:
			test_mode = TestData.MODE_INDEX

		data = TestData(es, test_mode, args.offline_dir)
		data.load()

	# install the driver
	if args.driver:
		driver_path = os.path.abspath(args.driver)
		installer = Installer(driver_path)
		installer.install(args.ephemeral)

	# run the tests
	if not args.skip_tests:
		assert(data is not None)
		cluster_name = es.cluster_name()
		assert(len(cluster_name))
		if args.dsn:
			Testing(es, data, cluster_name, args.dsn).perform()
		else:
			Testing(es, data, cluster_name, "Packing=JSON;Compression=on;").perform()
			Testing(es, data, cluster_name, "Packing=CBOR;Compression=off;").perform()
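
The ites() driver only consumes attributes of args; the command-line wiring is not part of the listing. A rough argparse sketch that would produce a compatible namespace (flag names and help strings are guesses, not the project's actual CLI):

import argparse

def parse_args():
    # Hypothetical CLI wiring; only the attribute names are taken from ites() above.
    p = argparse.ArgumentParser(description="Run Elasticsearch driver integration tests")
    p.add_argument("--offline-dir", dest="offline_dir", help="directory with pre-downloaded test data")
    p.add_argument("--url", help="URL of an already running Elasticsearch instance")
    p.add_argument("--es-reset", dest="es_reset", help="reset this Elasticsearch install dir before testing")
    p.add_argument("--es-full-reset", dest="es_full_reset", help="fully reset this Elasticsearch install dir")
    p.add_argument("--root-dir", dest="root_dir", help="working directory for a spawned Elasticsearch")
    p.add_argument("--version", help="Elasticsearch version to test against")
    p.add_argument("--driver", help="path to the driver installer to test")
    p.add_argument("--ephemeral", action="store_true", help="remove installed artifacts after the run")
    p.add_argument("--reindex", action="store_true")
    p.add_argument("--skip-indexing", dest="skip_indexing", action="store_true")
    p.add_argument("--skip-indexing-tests", dest="skip_indexing_tests", action="store_true")
    p.add_argument("--skip-tests", dest="skip_tests", action="store_true")
    p.add_argument("--dsn", help="extra DSN attributes to test with")
    return p.parse_args()

if __name__ == "__main__":
    ites(parse_args())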
def Phase1():
    c0 = RemoteController('c0', ip='127.0.0.1', port=6633)
    net = Mininet(topo=MyTopo(), controller=c0)
    net.start()
    testResult = Testing(net)

    #CLI(net)
    testResult.run_tests()
    net.stop()
Example #8
def test():
    testing = Testing()
    hingeLossTest(testing)
    logLossTest(testing)
    squaredLossTest(testing)
    if testing.DidTestFail():
        return 1
    else:
        return 0
def evaluate(binary_dataset, mlp_int, X_int_test, y_int_test, split):
    X_bin_train, y_bin_train = split.get_X_y(binary_dataset['train'], fit=False, limit_num_sents=False,
                                             set_type='train')

    mlp_bin = MLPClassifier(activation='tanh').fit(X_bin_train, y_bin_train)

    # Test
    testing = Testing(mlp_int, X_int_test, y_int_test, 'mlp', split.intents_dct['oos'], bin_model=mlp_bin)
    results_dct = testing.test_binary()

    return results_dct
Example #10
def test():
    # this test only tests the model interface.  If you are testing node types then use nodes_test.py
    testing = Testing()
    test_callbacks(testing)
    test_multiple(testing)
    test_bitcode(testing)

    if testing.DidTestFail():
        return 1
    else:
        return 0
Example #11
def test():
    testing = Testing()
    test_callbacks(testing)
    test_typecast(testing)
    test_unary(testing)
    test_multiple(testing)
    #test_multiply(testing)  # bugbug: crashing on Linux...
    test_bitcode(testing)
    if testing.DidTestFail():
        return 1
    else:
        return 0
def evaluate(binary_dataset, svc_int, X_int_test, y_int_test, split):
    X_bin_train, y_bin_train = split.get_X_y(binary_dataset['train'], fit=False, limit_num_sents=False,
                                             set_type='train')
    # X_bin_test, y_bin_test = split.get_X_y(binary_dataset['test'], fit=False, limit_num_sents=False, set_type='test')

    svc_bin = svm.SVC(C=1, kernel='linear').fit(X_bin_train, y_bin_train)

    # Test
    testing = Testing(svc_int, X_int_test, y_int_test, 'svm', split.intents_dct['oos'], bin_model=svc_bin)
    results_dct = testing.test_binary()

    return results_dct
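
Several examples hand Testing both an in-scope model and a bin_model and then call test_binary(). Conceptually this is a two-stage decision; a sketch of the idea only (not the Testing class's implementation, and assuming both models expose scikit-learn's predict):

def two_stage_predict(int_model, bin_model, sent_vec, oos_label):
    # Stage 1: the binary model decides whether the query is out of scope.
    if bin_model.predict(sent_vec)[0] == oos_label:
        return oos_label
    # Stage 2: only in-scope queries reach the intent classifier.
    return int_model.predict(sent_vec)[0]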
Example #13
def ites(args):
    es = Elasticsearch(args.offline_dir)

    # create a running instance of Elasticsearch if needed
    if not args.pre_staged:
        if args.es_reset:
            es_dir = os.path.abspath(args.es_reset)
            es.reset(es_dir)
        else:
            assert (args.root_dir)
            root_dir = os.path.abspath(args.root_dir)
            if not args.version:
                version = None
                toks = args.driver.split("-")
                for i in range(3, len(toks)):
                    if re.match(r"\d+\.\d+\.\d+", toks[-i]):
                        version = "-".join(toks[len(toks) - i:-2])
            else:
                version = args.version
            if not version:
                raise Exception(
                    "failed to determine Elasticsearch version to test against (params: driver: %s, "
                    "version: %s)" % (args.driver, args.version))

            es.spawn(version, root_dir, args.ephemeral)
    elif not es.is_listening(Elasticsearch.AUTH_PASSWORD):
        raise Exception("no running prestaged Elasticsearch instance found.")
    else:
        print("Using pre-staged Elasticsearch.")

    # add test data into it
    if args.reindex or not (args.skip_indexing and args.skip_tests):
        if args.skip_indexing:
            test_mode = TestData.MODE_NOINDEX
        elif args.reindex:
            test_mode = TestData.MODE_REINDEX
        else:
            test_mode = TestData.MODE_INDEX

        data = TestData(test_mode, args.offline_dir)
        data.load()

    # install the driver
    if args.driver:
        driver_path = os.path.abspath(args.driver)
        installer = Installer(driver_path)
        installer.install(args.ephemeral)

    # run the tests
    if not args.skip_tests:
        assert (data is not None)
        tests = Testing(data, args.dsn)
        tests.perform()
Example #14
def test_double():
    testing = Testing()

    # empty vector
    e = ell.math.DoubleVector()
    np.testing.assert_equal(e.size(), 0)

    # vector from list
    l = [
        1.1,
        2.2,
        3.3,
        4.4,
    ]
    e = ell.math.DoubleVector(l)

    np.testing.assert_equal(list(e), l)

    # vector from numpy array
    a = np.array(range(10), dtype=float)
    e = ell.math.DoubleVector(a)

    np.testing.assert_equal(np.asarray(e), a)

    # convert to numpy using array
    b = np.array(e).ravel()
    np.testing.assert_equal(a, b)

    # copy_from numpy array
    e = ell.math.DoubleVector()
    e.copy_from(a)
    np.testing.assert_equal(np.asarray(e), a)

    # convert data types
    a = a.astype(np.float32)
    e = ell.math.DoubleVector(a)
    np.testing.assert_equal(np.asarray(e), a)

    # enumerating array
    for i in range(a.shape[0]):
        x = a[i]
        y = e[i]
        np.testing.assert_equal(x, y)

    # auto-ravel numpy arrays
    a = np.ones((10, 10), dtype=float)
    a *= range(10)
    e = ell.math.DoubleVector(a)
    np.testing.assert_equal(np.asarray(e), a.ravel())

    testing.ProcessTest("DoubleVector test", True)
Example #15
def test():
    testing = Testing()
    # -dd auto -sw 1 -sb 1 -sz 1 -pd 10 -l 2 -mp 5 -v --evaluationFrequency 1 -plf L2

    args = ell.ProtoNNTrainerParameters()
    args.projectedDimension = 10
    args.numPrototypesPerLabel = 5
    args.numLabels = 2
    args.sparsityW = 1
    args.sparsityB = 1
    args.sparsityZ = 1
    args.gamma = -1
    args.lossFunction = ell.ProtoNNLossFunction.L2
    args.numInnerIterations = 1
    args.numFeatures = 0
    args.verbose = True

    trainer = ell.ProtoNNTrainer(args)

    dataset = ell.AutoSupervisedDataset()
    testFile = os.path.join(find_ell.get_ell_root(),
                            "examples/data/protonnTestData.txt")
    print("Loading: " + testFile)
    dataset.Load(testFile)

    total = dataset.NumExamples()
    features = dataset.NumFeatures()
    testing.ProcessTest("Proton dataset loaded",
                        testing.IsEqual(int(total), 200))

    trainer.SetDataset(dataset)

    numIterations = 20

    print("Training...")
    for i in range(numIterations):
        trainer.Update()

    predictor = trainer.GetPredictor()

    accuracy = get_accuracy(predictor, dataset, features)
    print("Accuracy %f" % (accuracy))
    testing.ProcessTest("Proton accuracy test",
                        testing.IsEqual(int(accuracy), 1))

    map = predictor.GetMap()
    map.Save("protonnTestData.ell")
    testing.ProcessTest(
        "Saving  protonnTestData.ell",
        testing.IsEqual(os.path.isfile("protonnTestData.ell"), True))

    if testing.DidTestFail():
        raise Exception("protonn_trainer_test failed")

    return 0
Example #16
def evaluate(dataset, limit_num_sents: bool):
    # Split and tokenize dataset
    split = Split_BERT()
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    X_train, y_train = split.get_X_y(dataset['train'] + dataset['oos_train'], limit_num_sents=limit_num_sents,
                                     set_type='train')
    X_val, y_val = split.get_X_y(dataset['val'] + dataset['oos_val'], limit_num_sents=limit_num_sents, set_type='val')
    X_test, y_test = split.get_X_y(dataset['test'] + dataset['oos_test'], limit_num_sents=limit_num_sents,
                                   set_type='test')

    train_ids, train_attention_masks, train_labels = tokenize_BERT(X_train, y_train, tokenizer)
    val_ids, val_attention_masks, val_labels = tokenize_BERT(X_val, y_val, tokenizer)
    test_ids, test_attention_masks, test_labels = tokenize_BERT(X_test, y_test, tokenizer)

    num_labels = len(split.intents_dct.keys())

    # Train model
    model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased',
                                                            num_labels=num_labels)  # we have to adjust the number of labels
    print('\nBert Model', model.summary())

    log_dir = 'tensorboard_data/tb_bert'
    model_save_path = './models/bert_model.h5'

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path, save_weights_only=True, monitor='val_loss',
                                           mode='min',
                                           save_best_only=True), tf.keras.callbacks.TensorBoard(log_dir=log_dir)]

    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
    optimizer = tf.keras.optimizers.Adam(learning_rate=4e-5)

    model.compile(loss=loss, optimizer=optimizer, metrics=[metric])

    history = model.fit([train_ids, train_attention_masks],
                        train_labels,
                        batch_size=32,
                        epochs=5,
                        validation_data=([val_ids, val_attention_masks], val_labels),
                        callbacks=callbacks)

    # Test
    testing = Testing(model, {'test_ids': test_ids, 'test_attention_masks': test_attention_masks}, test_labels,
                      'bert', split.intents_dct['oos'])
    results_dct = testing.test_train()

    return results_dct
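
Since oos_train is folded into the training split here, out of scope is learned as an ordinary class and the Testing wrapper only needs argmax predictions. Pulling them from the fine-tuned model directly would look roughly like this (a sketch; the output attribute depends on the installed transformers version):

import numpy as np
import tensorflow as tf

def get_predicted_labels(model, test_ids, test_attention_masks):
    # Recent transformers releases return an object with .logits;
    # older releases return a plain tuple, hence the fallback.
    outputs = model([test_ids, test_attention_masks], training=False)
    logits = outputs.logits if hasattr(outputs, 'logits') else outputs[0]
    return np.argmax(logits, axis=-1)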
Example #17
def Phase2():
    c0 = RemoteController('c0', ip='127.0.0.1', port=6633)
    net = Mininet(topo=MyTopo(), controller=c0, switch=OVSSwitch)

    net.start()
    print "Testing the functionality of the network...\n"
    print "It will take a long time to run all the tests...\n"
    print "After the tests are finished, please check the result in directory results."

    #CLI(net)
    testResult = Testing(net)
    testResult.run_tests()

    print "Tests are finished! Please check the result file."
    net.stop()
Example #18
    def __init__(self):
        self.testing = Testing()
        self.thread_pool_size = max(1, int(cfg.getConfig('thread_pool')))
        self.test_task = queue.Queue()  # create the task queue
        self.executor = ThreadPoolExecutor(self.thread_pool_size)  # create the thread pool

        self.run()
Example #19
def test_float():
    testing = Testing()

    # empty vector
    e = ELL.FloatVector()
    np.testing.assert_equal(e.size(), 0)

    # vector from list of floats
    l = [1.1, 2.2, 3.3, 4.4]
    e = ELL.FloatVector(l)

    assert_compare_floats(e, l)

    # vector from numpy array
    a = np.array(range(10), dtype=np.float32)
    e = ELL.FloatVector(a)

    np.testing.assert_equal(np.asarray(e), a)

    # convert to numpy using array
    b = np.array(e).ravel()
    np.testing.assert_equal(a, b)

    # copy_from numpy array
    e = ELL.FloatVector()
    e.copy_from(a)
    np.testing.assert_equal(np.asarray(e), a)

    # convert data types
    a = a.astype(np.float64)
    e = ELL.FloatVector(a)
    np.testing.assert_equal(np.asarray(e), a)

    # enumerating array
    for i in range(a.shape[0]):
        x = a[i]
        y = e[i]
        np.testing.assert_equal(x, y)

    # auto-ravel numpy arrays
    a = np.ones((10, 10), dtype=np.float32)
    a *= range(10)
    e = ELL.FloatVector(a)
    np.testing.assert_equal(np.asarray(e), a.ravel())

    testing.ProcessTest("FloatVector test", True)
Example #20
def test():
    testing = Testing()
    test_voice_activity_node(testing)
    test_gru_node_with_vad_reset(testing)
    test_hamming_node(testing)
    test_mel_filterbank(testing)
    test_fftnode(testing)
    return 0
    def run(self, mode, train_restored_saved_model):
        start_time = time.time()
        print('Mode: ', mode)
        print('Model_id: ', param.model_id)
        # building dataset
        if mode in (param.mode_list[1], param.mode_list[2], param.mode_list[3], param.mode_list[6]):
            BuildDataset(mode=param.mode)
        # Training
        elif mode == param.mode_list[4]:
            Train()
        # Testing
        elif mode == param.mode_list[5]:
            Testing(testing_mode='simple')
        elif mode == param.mode_list[7]:
            Testing(testing_mode='simple_duc')
        else:
            print('Please specify the mode')
        print('\nTime: {}\t({:.3f}sec)'.format(datetime.timedelta(seconds=time.time() - start_time), time.time() - start_time))
def predict(url):
    if not url:
        return ("Empty string")

    f = Features()
    features = f.Create_features(url)
    # print(features)
    test = Testing()
    r = test.Test(rules, features)
    label = eval(r[-1])
    d = dict(label)
    # print(d.keys())
    for key in d.keys():
        if key == '0':
            return ("\nYou are safe! This website is not vulnerable to XSS attack\n")
        else:
            return ("\nAlert! This website is vulnerable to XSS attack\n")
Example #23
def test():
    testing = Testing()
    test_buffer(testing, ell.nodes.PortType.smallReal)
    test_buffer(testing, ell.nodes.PortType.real)
    test_buffer(testing, ell.nodes.PortType.integer)
    test_buffer(testing, ell.nodes.PortType.bigInt)
    test_reorder(testing)
    test_typecast(testing)
    test_unary(testing)
    # test_multiply(testing)  # bugbug: crashing on Linux...
    test_scaling_node(testing)
    test_voice_activity_node(testing)
    test_gru_node_with_vad_reset(testing)
    test_hamming_node(testing)
    test_hanning_node(testing)
    test_mel_filterbank(testing)
    test_fftnode(testing)
    test_fastgrnn_node(testing)
    return testing.GetFailedTests()
Example #24
def evaluate(dataset, limit_num_sents: bool):
    # Split dataset
    split = Split()

    X_train, y_train = split.get_X_y(
        dataset['train'] + dataset['oos_train'],
        fit=True,
        limit_num_sents=limit_num_sents,
        set_type='train')  # fit only on first dataset
    X_test, y_test = split.get_X_y(dataset['test'] + dataset['oos_test'],
                                   fit=False,
                                   limit_num_sents=limit_num_sents,
                                   set_type='test')

    svc_int = svm.SVC(C=1, kernel='linear').fit(X_train, y_train)

    # Test
    testing = Testing(svc_int, X_test, y_test, 'svm', split.intents_dct['oos'])
    results_dct = testing.test_train()

    return results_dct
def evaluate(dataset, limit_num_sents: bool):
    # Split dataset
    split = Split()

    X_train, y_train = split.get_X_y(
        dataset['train'] + dataset['oos_train'],
        fit=True,
        limit_num_sents=limit_num_sents,
        set_type='train')  # fit only on first dataset
    X_test, y_test = split.get_X_y(dataset['test'] + dataset['oos_test'],
                                   fit=False,
                                   limit_num_sents=limit_num_sents,
                                   set_type='test')

    mlp_int = MLPClassifier(activation='tanh').fit(X_train, y_train)

    # Test
    testing = Testing(mlp_int, X_test, y_test, 'mlp', split.intents_dct['oos'])
    results_dct = testing.test_train()

    return results_dct
def evaluate(dataset, limit_num_sents: bool):
    # Split dataset
    split = Split()

    X_train, y_train = split.get_X_y(
        dataset['train'],
        fit=True,
        limit_num_sents=limit_num_sents,
        set_type='train')  # fit only on first dataset
    X_val, y_val = split.get_X_y(dataset['val'] + dataset['oos_val'],
                                 fit=False,
                                 limit_num_sents=limit_num_sents,
                                 set_type='val')
    X_test, y_test = split.get_X_y(dataset['test'] + dataset['oos_test'],
                                   fit=False,
                                   limit_num_sents=limit_num_sents,
                                   set_type='test')

    svc_int = svm.SVC(C=1, kernel='linear',
                      probability=True).fit(X_train, y_train)

    val_predictions_labels = []  # used to find threshold

    for sent_vec, true_int_label in zip(X_val, y_val):
        pred_probs = svc_int.predict_proba(sent_vec)[
            0]  # intent prediction probabilities
        pred_label = argmax(pred_probs)  # intent prediction
        similarity = pred_probs[pred_label]

        pred = (pred_label, similarity)
        val_predictions_labels.append((pred, true_int_label))

    threshold = find_best_threshold(val_predictions_labels,
                                    split.intents_dct['oos'])

    # Test
    testing = Testing(svc_int, X_test, y_test, 'svm', split.intents_dct['oos'])
    results_dct = testing.test_threshold(threshold)

    return results_dct
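
At test time the chosen threshold is applied by Testing.test_threshold; the decision rule is the same one used during the search. A standalone sketch for the SVM case (illustrative only, not the Testing internals):

from numpy import argmax

def accuracy_with_threshold(svc, X_test, y_test, oos_index, threshold):
    correct = 0
    for sent_vec, true_label in zip(X_test, y_test):
        probs = svc.predict_proba(sent_vec)[0]
        pred_label = argmax(probs)
        # Fall back to OOS when the classifier is not confident enough.
        if probs[pred_label] < threshold:
            pred_label = oos_index
        correct += int(pred_label == true_label)
    return correct / len(y_test)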
def evaluate(dataset, dim: int, limit_num_sents: bool):
    train_str = dataset_2_string(dataset['train'],
                                 limit_num_sents=limit_num_sents,
                                 set_type='train')
    X_val, y_val = get_X_y_fasttext(dataset['val'] + dataset['oos_val'],
                                    limit_num_sents=limit_num_sents,
                                    set_type='val')
    X_test, y_test = get_X_y_fasttext(dataset['test'] + dataset['oos_test'],
                                      limit_num_sents=limit_num_sents,
                                      set_type='test')

    with NamedTemporaryFile() as f:
        f.write(train_str.encode('utf8'))
        f.seek(0)

        # Train model for in-scope queries
        model = fasttext.train_supervised(
            input=f.name,
            dim=dim,
            pretrainedVectors=f'{PRETRAINED_VECTORS_PATH}/cc.en.{dim}.vec')

    val_predictions_labels = []  # used to find threshold

    for sent, true_int_label in zip(X_val, y_val):
        pred = model.predict(sent)
        pred_label = pred[0][0]
        similarity = pred[1][0]

        pred = (pred_label, similarity)
        val_predictions_labels.append((pred, true_int_label))

    threshold = find_best_threshold(val_predictions_labels, '__label__oos')

    # Test
    testing = Testing(model, X_test, y_test, 'fasttext', '__label__oos')
    results_dct = testing.test_threshold(threshold)

    return results_dct
Example #28
def evaluate(dataset, dim: int, limit_num_sents: bool):
    train_str = dataset_2_string(dataset['train'] + dataset['oos_train'],
                                 limit_num_sents=limit_num_sents,
                                 set_type='train')
    X_test, y_test = get_X_y_fasttext(dataset['test'] + dataset['oos_test'],
                                      limit_num_sents=limit_num_sents,
                                      set_type='test')

    with NamedTemporaryFile() as f:
        f.write(train_str.encode('utf8'))
        f.seek(0)

        # Train model for in-scope queries
        model = fasttext.train_supervised(
            input=f.name,
            dim=dim,
            pretrainedVectors=f'{PRETRAINED_VECTORS_PATH}/cc.en.{dim}.vec')

    # Test
    testing = Testing(model, X_test, y_test, 'fasttext', '__label__oos')
    results_dct = testing.test_train()

    return results_dct
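
All of the evaluate variants expect a dataset dict with train/val/test plus the oos_* splits in the CLINC150 layout of (sentence, intent) pairs. A hedged driver for the fastText variant above (the file name and dimensionality are assumptions):

import json

if __name__ == "__main__":
    # Hypothetical driver: data_full.json is assumed to follow the CLINC150
    # layout with 'train', 'val', 'test', 'oos_train', 'oos_val', 'oos_test'.
    with open("data_full.json") as f:
        dataset = json.load(f)

    results = evaluate(dataset, dim=300, limit_num_sents=False)
    print(results)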
def evaluate(dataset, limit_num_sents: bool):
    train_str = dataset_2_string_rasa(dataset['train'] + dataset['oos_train'],
                                      limit_num_sents=limit_num_sents,
                                      set_type='train')
    X_test, y_test = get_X_y_rasa(dataset['test'] + dataset['oos_test'],
                                  limit_num_sents=limit_num_sents,
                                  set_type='test')

    with NamedTemporaryFile(suffix='.yml') as f:
        f.write(train_str.encode('utf8'))
        f.seek(0)

        training_data = rasa.shared.nlu.training_data.loading.load_data(f.name)

    config = rasa.nlu.config.load('config.yml')
    trainer = rasa.nlu.model.Trainer(config)
    model = trainer.train(training_data)

    # Test
    testing = Testing(model, X_test, y_test, 'rasa', 'oos')
    results_dct = testing.test_train()

    return results_dct
Example #30
def test():
    testing = Testing()
    dataset = ell.data.AutoSupervisedDataset()
    dataset.Load(
        os.path.join(find_ell.get_ell_root(), "examples/data/testData.txt"))
    num = dataset.NumExamples()
    print("Number of Examples:", num)
    testing.ProcessTest("Dataset NumExamples test",
                        testing.IsEqual(int(num), 200))

    features = dataset.NumFeatures()
    print("Number of Features:", features)
    testing.ProcessTest("Dataset NumFeatures test",
                        testing.IsEqual(int(features), 21))

    for i in range(num):
        exampleTest(dataset.GetExample(i))

    testing.ProcessTest("Dataset eumeration test", True)

    return testing.GetFailedTests()
    
    #ConvNets+PCA
    pproc = PreProcess('ConvNet',1,False,False,False,False,1.0,[(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)],\
        [(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)],[(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)],\
        [64, 128, 256, 512, 1024],[(3,3),(2,2),(2,2),(2,2),(2,2)],False,False,None,None,False)
    X = pproc.transform(lstFilesX)
    pca = RandomizedPCA(n_components=2)
    X = pca.fit_transform(X)
    fig = pyplot.figure()
    pyplot.plot(X[y==False,0],X[y==False,1],'ro')
    pyplot.plot(X[y==True,0],X[y==True,1],'bo')
    pyplot.title('2D Visualization, Crossmatch LivDet 2013 Testing, ConvNet 5 Layers+PCA')
    pyplot.show()
    """

    testing = Testing()
    testing.divide_by = 5
    testing.n_processes_pproc = 3
    lstFilesX,y = testing.load_dataset('Training', 'LivDet2011', 'digital')
    
    #PCA only
    pproc = PreProcess('',1,False,False,False,False,1.0,None,\
        None,None,None,None,None,None,None,None,None)
    X = pproc.transform(lstFilesX)
    pca = RandomizedPCA(n_components=2)
    X = pca.fit_transform(X)
    fig = pyplot.figure()
    pyplot.plot(X[y==False,0],X[y==False,1],'ro')
    pyplot.plot(X[y==True,0],X[y==True,1],'bo')
    pyplot.title('2D Visualization, Digital LivDet 2011 Training, PCA only')
    pyplot.show()
 #testing.params_pproc['pproc__size_percentage'] = [1.0]
 testing.params_lbp['pproc__feature_extractor__method'] = ['uniform', 'default']
 #testing.params_lbp['pproc__feature_extractor__method'] = ['uniform']
 testing.params_lbp['pproc__feature_extractor__n_tiles'] = [[1,1],[3,3],[5,5],[7,7]]
 #testing.params_lbp['pproc__feature_extractor__n_tiles'] = [[1,1],[7,7]]
 testing.params_auto['pca__n_components'] = [10, 30, 50, 100, 300, 500]
 #testing.params_auto['pca__n_components'] = [100]
 testing.params_svm['pred__C'] = [0.0001, 0.01, 0.01, 1, 100, 5000]
 #testing.params_svm['pred__C'] = [1000]
 testing.params_svm['pred__gamma'] = [0.0001, 0.001, 0.01, 0.1]
 #testing.params_svm['pred__gamma'] = [0.001]
 testing.var_sensor('LBP',datasettrain='all',sensortrain ='all',datasettest='all',sensortest ='all')
 #testing.var_sensor('LBP','all','')
 """
 
 testing = Testing()
 testing.size_percentage = 1.0
 testing.divide_by = 80
 testing.cross_validation = False
 testing.augmentation = False
 testing.aug_rotate = False
 testing.multi_column = False       
 testing.roi = False
 testing.low_pass = False
 testing.high_pass = False
 testing.n_processes_pproc = 3
 testing.n_processes_cv = 1
 testing.lbp__method = 'uniform'
 testing.lbp__n_tiles = [1,1]
 testing.predict = 'SVM'
 testing.svm__kernel='rbf'
 #testing.shape_norm = [(9, 9)]
 #testing.shape_conv = [(9, 9)]
 #testing.shape_pool = [(9, 9)]
 #testing.stride_pool = [(9, 9)]
 testing.n_filters = [128,512]
 testing.shape_norm = [(9, 9), (7, 7)]
 testing.shape_conv = [(9, 9), (7, 7)]
 testing.shape_pool = [(9, 9), (7, 7)]
 testing.stride_pool = [(6, 6), (4, 4)]
 #testing.n_filters = [128,512,1024]
 #testing.shape_norm = [(9, 9), (7, 7), (5, 5)]
 #testing.shape_conv = [(9, 9), (7, 7), (5, 5)]
 #testing.shape_pool = [(9, 9), (7, 7), (5, 5)]
 #testing.stride_pool = [(4, 4), (3, 3), (2, 2)]
 #testing.n_filters = [64, 256, 512, 1024]
 #testing.shape_norm = [(9, 9), (9, 9), (7, 7), (5, 5)]
 #testing.shape_conv = [(9, 9), (9, 9), (7, 7), (5, 5)]
 #testing.shape_pool = [(7, 7), (5, 5), (5, 5), (5, 5)]
 #testing.stride_pool = [(3, 3), (2, 2), (2, 2), (2, 2)]
 #testing.n_filters = [64,128,256,512,1024]
 #testing.shape_norm = [(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)]
 #testing.shape_conv = [(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)]
 #testing.shape_pool = [(9, 9), (9, 9), (7, 7), (5, 5), (5, 5)]
 #testing.stride_pool = [(3, 3), (2, 2), (2, 2), (2, 2), (2, 2)]
 testing.predict = 'SVM'
 testing.var_sensor('ConvNet',datasettrain='all',sensortrain ='all',datasettest='all',sensortest ='all')
 #"""
 print('starting')
 testing = Testing()
 testing.convert_dataset_to_txt("LivDet2011", "digital")
 print('done')