def main():
    """Register a new user: capture face samples from the webcam, save them
    to dataSet/, then retrain the recognizer.

    NOTE(review): this snippet is corrupted by scraping -- the capture-loop
    header, grayscale conversion, detectMultiScale call and the start of the
    imwrite call were censored (see the '******' below), so the code will not
    parse as-is. Comments describe the evident intent only.
    """
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cam = cv2.VideoCapture(0)
    # NOTE(review): 'id' shadows the builtin; kept byte-identical (doc-only pass).
    id = simpledialog.askstring("userid", "Enter the userid:")
    # Garbled line: askstring call fused with the (censored) cv2.imwrite call.
    name = simpledialog.askstring("username", "Enter the userName:"******"dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg",
                gray[y:y + h, x:x + w])
            # Draw a green box around each detected face in the preview frame.
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

            cv2.waitKey(100)
        cv2.imshow("Face", img)
        cv2.waitKey(1)
        # Stop after 21 samples have been captured.
        if (sampleNum > 20):
            break

    cam.release()
    cv2.destroyAllWindows()
    messagebox.showinfo("info", "User is Registered.Thank you for joining us")
    # Retrain the recognizer on the updated dataset.
    trainer.main()
示例#2
0
    def test_skip_tests(self, test_mock, train_mock, logging_mock):
        """--skip-test must train the model but never invoke the test phase."""
        # Input file + test percent: percent forwarded, testing skipped.
        res = main(argv=[
            'testdata/trainer/features.json', '-tp', '10', '-i',
            'testdata/trainer/trainer.data.json', '--skip-test'
        ])
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(res, DONE)
        train_mock.assert_called_with(ANY, 10, store_vect_data=False)
        self.assertFalse(test_mock.called)
        # BUG FIX: Mock.reset() is not part of the Mock API -- it merely
        # records a call on an auto-created child mock and leaves recorded
        # state intact; reset_mock() actually clears it.
        train_mock.reset_mock()
        test_mock.reset_mock()

        # Explicit test dataset supplied, but --skip-test still wins.
        res = main(argv=[
            'testdata/trainer/features.json', '--skip-test', '-t',
            'testdata/trainer/test.data.json', '-i',
            'testdata/trainer/trainer.data.json'
        ])
        self.assertEqual(res, DONE)
        train_mock.assert_called_with(ANY, 0, store_vect_data=False)
        self.assertFalse(test_mock.called)
        train_mock.reset_mock()
        test_mock.reset_mock()

        # Extraction-plan input with import-handler parameters.
        res = main(argv=[
            'testdata/trainer/features.json', '--skip-test', '-e',
            'testdata/extractorxml/train-import-handler.xml', '-I',
            'start=2012-12-03', '-I', 'end=2012-12-04', '-tp', '20'
        ])
        self.assertEqual(res, DONE)
        train_mock.assert_called_with(ANY, 20)
        self.assertFalse(test_mock.called)
        train_mock.reset_mock()
        test_mock.reset_mock()
示例#3
0
def main(output_home='../output_home', dataset='BITCOIN_ALPHA', gpu_id=0):
    """
    Start training with a stored hyperparameters on the dataset
    :param output_home: home directory for output data
    :param dataset: dataset name
    :param gpu_id: gpu id
    """
    # Pretrained hyperparameters are stored alongside each dataset.
    with open(f'../pretrained/{dataset}/param.json', 'r') as in_file:
        param = DotMap(json.load(in_file))

    hyper = param.hyper_param
    trainer.main(data_home=param.data_home,
                 output_home=output_home,
                 dataset=dataset,
                 heldout_ratio=param.heldout_ratio,
                 random_seed=param.random_seed,
                 reduction_iterations=param.reduction_iterations,
                 reduction_dimension=param.reduction_dimension,
                 gpu_id=gpu_id,
                 c=hyper.c,
                 weight_decay=hyper.weight_decay,
                 learning_rate=hyper.learning_rate,
                 num_layers=hyper.num_layers,
                 hid_dim=hyper.hid_dim,
                 num_diff_layers=hyper.num_diff_layers,
                 epochs=param.epochs)
示例#4
0
    def test_train_with_extraction_plan(self, db_mock, logging_mock):
        """Incomplete extraction plans are rejected; a complete one trains."""
        # A JSON file passed where an XML extraction plan is expected.
        res = main(argv=[
            'testdata/trainer/features.json', '-e',
            'testdata/trainer/trainer.data.json'
        ])
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(res, INVALID_EXTRACTION_PLAN)
        self.assertTrue(logging_mock.warn.called)
        logging_mock.reset_mock()

        # Valid plan but the required input parameters are missing.
        res = main(argv=[
            'testdata/trainer/features.json', '-e',
            'testdata/extractorxml/train-import-handler.xml'
        ])
        self.assertEqual(res, INVALID_EXTRACTION_PLAN)
        logging_mock.warn.assert_called_with(
            "Invalid extraction plan: Missing input parameters: start, end")
        # BUG FIX: clear recorded calls here so the no-warning check below
        # actually observes only the third invocation.
        logging_mock.reset_mock()

        # Plan plus both parameters: training completes without warnings.
        res = main(argv=[
            'testdata/trainer/features.json', '-e',
            'testdata/extractorxml/train-import-handler.xml', '-I',
            'start=2012-12-03', '-I', 'end=2012-12-04'
        ])

        self.assertEqual(res, DONE)
        # BUG FIX: assert BEFORE resetting -- the original reset the mock
        # first, which made this assertion vacuous.
        self.assertFalse(logging_mock.warn.called)
        logging_mock.reset_mock()
示例#5
0
def menu():
    """Show the banner, then repeatedly launch the chosen sub-program."""
    print('''
\u001b[38;5;9;1mdP       .d88888b    dP
\u001b[38;5;10;1m88       88.    "'   88
\u001b[38;5;11;1m88  .dP  `Y88888b. d8888P 88d888b. .d8888b. dP.  .dP
\u001b[38;5;12;1m88888"         `8b   88   88'  `88 88'  `88  `8bd8'
\u001b[38;5;13;1m88  `8b. d8'   .8P   88   88       88.  .88  .d88b.
\u001b[38;5;14;1mdP   `YP  Y88888P    dP   dP       `88888P' dP'  `dP
\u001b[0m
    ''')

    # Known programs; anything else falls through to the tester.
    launchers = {
        "Sample recorder": sample_recorder.main,
        "Trainer": trainer.main,
        "Evaluator": evaluator.main,
    }

    while True:
        selection = program_menu()
        launchers.get(selection, tester.main)()

        # Stop looping once the user declines to load another program.
        if not get_binary_validation("Do you want to load another program ?",
                                     False):
            break
示例#6
0
def local_mnist():
    """Run a local (laptop) MNIST training session with default settings."""
    # Point the shared config at the local data directory and loss.
    args['data_dir'] = '../data/'
    args['loss_func'] = F.nll_loss

    loader_train, loader_test = data.mnist(args)

    trainer.main(net.MNIST_Net(), loader_train, loader_test, args)
示例#7
0
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)

    # Seed CPU (and GPU, when available) RNGs for reproducibility.
    torch.manual_seed(args.random_seed)
    if args.gpunum > 0:
        torch.cuda.manual_seed(args.random_seed)

    # Dispatch to the RL-specific trainer when requested.
    entry_point = trainer_rl_typeloss.main if args.rl else trainer.main
    entry_point(args)
示例#8
0
 def train(self):
     """Train the recognizer for the selected branch/year/section.

     Reads the three combo-box selections, verifies the dataset directory
     exists (popping an error window otherwise), ensures the matching
     recognizer directory exists, and runs the trainer on the pair.
     """
     branch = str(self.branch.currentText())
     year = str(self.year.currentText())
     section = str(self.section.currentText())
     # os.path.join instead of manual '/' concatenation.
     path = os.path.join("dataset", branch, year, section)
     if not os.path.exists(path):
         messageWin.pop_up_window(messages.errorTitle,
                                  messages.noDirectoryError)
     else:
         path2 = os.path.join("recognizer", branch, year, section)
         # exist_ok avoids the check-then-create race of the original
         # `if not exists: makedirs` pattern.
         os.makedirs(path2, exist_ok=True)
         trainer.main(path, path2)
示例#9
0
 def test_invalid_features(self, logging_mock):
     """A malformed features JSON file is rejected with a clear warning."""
     filename = 'testdata/trainer/invalid-features.json'
     res = main(argv=[filename])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, INVALID_FEATURE_MODEL)
     logging_mock.warn.assert_called_with(
         "Invalid feature model: {0} No JSON object could"
         " be decoded ".format(filename))
示例#10
0
 def test_with_pretrained_transformers(self):
     """Training succeeds when a pretrained transformer path is supplied."""
     res = main(argv=[
         'testdata/trainer/features-with-pretrained-transformer.json', '-i',
         'testdata/trainer/trainer.data.json', '--transformer-path',
         'testdata/transformers/'
     ])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, DONE)
示例#11
0
 def test_features_not_found(self, logging_mock):
     """A missing features file is rejected with a clear warning."""
     filename = 'not-exsistant-file.json'
     res = main(argv=[filename])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, INVALID_FEATURE_MODEL)
     logging_mock.warn.assert_called_with(
         "Invalid feature model: not-exsistant-file.json [Errno 2] "
         "No such file or directory: '{0}' ".format(filename))
示例#12
0
 def test_store_trainer(self):
     """-o stores the trained model to the given path."""
     res = main(argv=[
         'testdata/trainer/features.json', '-o', 'trainer.bak', '-i',
         'testdata/trainer/trainer.data.json'
     ])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, DONE)
     self.assertTrue(os.path.isfile('trainer.bak'))
     os.remove('trainer.bak')
示例#13
0
    def test_store_vect(self, logging_mock):
        """--store-vect requires an evaluation step; adding -tp succeeds."""
        # No test/test-percent given: training happens but vectors can't be
        # evaluated, so the run reports missing parameters.
        res = main(argv=[
            'testdata/trainer/features.json', '--store-vect', 'vect.bak', '-i',
            'testdata/trainer/trainer.data.json'
        ])
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(res, PARAMETERS_REQUIRED)
        logging_mock.warn.assert_called_with(
            "Model was trained, but not evaluated. You need "
            "to add --test or --test-percent param.")
        logging_mock.reset_mock()

        # With a test percent the vectors are stored and no warning fires.
        res = main(argv=[
            'testdata/trainer/features.json', '--store-vect', 'vect.bak',
            '-tp', '50', '-i', 'testdata/trainer/trainer.data.json'
        ])
        self.assertEqual(res, DONE)
        self.assertFalse(logging_mock.warn.called)
        os.remove('vect.bak.npz')
示例#14
0
    def test_percent(self, test_mock, train_mock, logging_mock):
        """-tp validation: bad/out-of-range percents warn and fall back to 0."""
        # Non-numeric percent: warned and ignored.
        res = main(argv=[
            'testdata/trainer/features.json', '-tp', 'percent', '-i',
            'testdata/trainer/trainer.data.json'
        ])
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(res, DONE)
        logging_mock.warn.assert_called_with(
            "Percent value 'percent' would be"
            " ignored. Should be value from 0 to 100.")
        train_mock.assert_called_with(ANY, 0, store_vect_data=False)
        self.assertFalse(test_mock.called)
        # BUG FIX: .reset() is not a Mock API (it only records a child-mock
        # call); reset_mock() is what clears recorded calls.
        train_mock.reset_mock()
        test_mock.reset_mock()
        logging_mock.reset_mock()

        # Percent above 100: warned and ignored.
        res = main(argv=[
            'testdata/trainer/features.json', '-tp', '200', '-i',
            'testdata/trainer/trainer.data.json'
        ])
        self.assertEqual(res, DONE)
        logging_mock.warn.assert_called_with(
            "Percent value '200' would be"
            " ignored. Should be value from 0 to 100.")
        train_mock.assert_called_with(ANY, 0, store_vect_data=False)
        train_mock.reset_mock()
        test_mock.reset_mock()
        logging_mock.reset_mock()

        # Valid percent with an input file: forwarded to train and test.
        res = main(argv=[
            'testdata/trainer/features.json', '-tp', '40', '-i',
            'testdata/trainer/trainer.data.json'
        ])
        self.assertEqual(res, DONE)
        train_mock.assert_called_with(ANY, 40, store_vect_data=False)
        test_mock.assert_called_with(ANY, 40)

        # Valid percent with an extraction plan.
        res = main(argv=[
            'testdata/trainer/features.json', '-tp', '60', '-e',
            'testdata/extractorxml/train-import-handler.xml', '-I',
            'start=2012-12-03', '-I', 'end=2012-12-04'
        ])
        self.assertEqual(res, DONE)
        train_mock.assert_called_with(ANY, 60)
        test_mock.assert_called_with(ANY, 60)
示例#15
0
def main():
    """Advance the GA by one generation: retrain the previous generation if
    its outputs are incomplete, then clone, evolve and train the new one."""
    currentGeneration = SelectDNA()
    print("Starting Generation", int(currentGeneration) + 1)

    paths = GeneratePaths(currentGeneration)
    new_path_input, new_path_output, old_path_input, old_path_output = paths

    # Previous generation's trainer run is incomplete if output and input
    # directories hold different numbers of files -- re-run it first.
    if len(os.listdir(old_path_output)) != len(os.listdir(old_path_input)):
        print("Re-training previous generation (generation %s)" % (currentGeneration))
        trainer.main(old_path_input, old_path_output)
        print("Restarting Generation", int(currentGeneration) + 1)

    if not os.listdir(old_path_input):
        raise Exception("Warning: Generation is empty. Double check folder")

    CloneDNA(new_path_input, new_path_output, old_path_input, old_path_output)
    RunGA(new_path_input, new_path_output)
    trainer.main(new_path_input, new_path_output)
示例#16
0
 def test_store_train_vect(self, vect_data2csv_mock, train_mock,
                           logging_mock):
     """-v stores the train vectorized data via vect_data2csv."""
     res = main(argv=[
         'testdata/trainer/features.json', '-i',
         'testdata/trainer/trainer.data.json', '-v', 'vect.bak'
     ])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, DONE)
     train_mock.assert_called_with(ANY, 0, store_vect_data=True)
     self.assertTrue(vect_data2csv_mock.called)
     # BUG FIX: .reset() is not a Mock API; reset_mock() clears call state.
     train_mock.reset_mock()
示例#17
0
 def test_define_train_and_test_dataset(self, test_mock, train_mock,
                                        logging_mock):
     """-t plus -i: trains on the input set and evaluates on the test set."""
     res = main(argv=[
         'testdata/trainer/features.json', '-t',
         'testdata/trainer/test.data.json', '-i',
         'testdata/trainer/trainer.data.json'
     ])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, DONE)
     train_mock.assert_called_with(ANY, 0, store_vect_data=False)
     test_mock.assert_called_with(ANY)
示例#18
0
 def test_store_weights(self, test_mock, weights_mock, train_mock,
                        logging_mock):
     """-w stores the weights file; no test phase is triggered."""
     res = main(argv=[
         'testdata/trainer/features.json', '-w', 'weights.bak', '-i',
         'testdata/trainer/trainer.data.json'
     ])
     # assertEquals is a deprecated alias removed in Python 3.12.
     self.assertEqual(res, DONE)
     train_mock.assert_called_with(ANY, 0, store_vect_data=False)
     weights_mock.assert_called_with(ANY)
     self.assertFalse(test_mock.called)
     os.remove('weights.bak')
示例#19
0
    def test_with_test_params(self, test_mock, train_mock, logging_mock):
        """-T parameters drive the test extraction; no warnings expected."""
        res = main(argv=[
            'testdata/trainer/features.json', '-e',
            'testdata/extractorxml/train-import-handler.xml', '-I',
            'start=2012-12-03', '-I', 'end=2012-12-04', '-T',
            'start=2013-12-03', '-T', 'end=2013-12-04'
        ])

        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(res, DONE)
        # BUG FIX: check the warning flag BEFORE resetting the mock; the
        # original reset first, which made this assertion vacuous.
        self.assertFalse(logging_mock.warn.called)
        logging_mock.reset_mock()
        train_mock.assert_called_with(ANY, 0)
        test_mock.assert_called_with(ANY)
示例#20
0
def local_pretrained_mnist_lossvar():
    """Gradient-variance study on a pretrained MNIST net over a few fixed
    batches (local laptop run)."""
    # Shared-config overrides for this experiment.
    args['data_dir'] = '../data/'
    args['loss_func'] = F.nll_loss
    # args['learning_func_name'] = 'loss_var'
    args['learning_func_name'] = 'grad_var'
    args['stats_samplesize'] = 3
    args['num_eigens_hessian_approx'] = 1
    args['lr'] = 1e-3
    args['log_interval'] = 1

    train_loader, test_loader = data.mnist(args)

    # Freeze a small random sample of batches and train only on those.
    sampled_batches = list(lib.iter_sample_fast(train_loader, args['stats_samplesize']))
    subset_loader = dataloader.get_subset_batch_loader(sampled_batches, args)
    args['subset_batches'] = True
    print(f'\nTraining only on {args["stats_samplesize"]} batches of size {args["batch_size"]}!\n')

    checkpoint_path = '../data/models/mnist_model_epoch10.pt'
    model = net.load_pretrained_model(net.MNIST_Net, checkpoint_path, args)

    trainer.main(model, subset_loader, test_loader, args)
示例#21
0
def local_pretrained_cifar10_lossvar():
    """Gradient-variance study on a pretrained CIFAR-10 ResNet-18 over a few
    fixed batches (local laptop run)."""
    # Shared-config overrides for this experiment.
    args['data_dir'] = '../data/'
    args['loss_func'] = F.cross_entropy
    # args['learning_func_name'] = 'loss_var'
    args['learning_func_name'] = 'grad_var'
    args['stats_samplesize'] = 3
    args['num_eigens_hessian_approx'] = 1
    args['lr'] = 1e-9
    args['log_interval'] = 1
    args['batch_size'] = 128

    train_loader, test_loader = data.cifar10(args)

    # Freeze a small random sample of batches and train only on those.
    sampled_batches = list(lib.iter_sample_fast(train_loader, args['stats_samplesize']))
    subset_loader = dataloader.get_subset_batch_loader(sampled_batches, args)
    args['subset_batches'] = True
    print(f'\nTraining only on {args["stats_samplesize"]} batches of size {args["batch_size"]}!\n')

    # https://github.com/huyvnphan/PyTorch_CIFAR10
    model = resnet18(pretrained=True)

    trainer.main(model, subset_loader, test_loader, args)
示例#22
0
    # train without pseudo labeling
    # 5-fold cross-validated BERT sweep: one training run per combination of
    # n_last_hidden_layer (1..5) and CV fold index (0..4).
    for n_hidden in range(1, 6):
        for cv_index in range(5):
            main(model_type='bert-base-uncased',
                 input_type='name_desc',
                 use_word_input=False,
                 use_bert_input=True,
                 bert_trainable=True,
                 batch_size=32,
                 predict_batch_size=32,
                 use_pair_input=True,
                 # Use BERT hidden states (last n_hidden layers) as features.
                 use_bert_type='hidden',
                 n_last_hidden_layer=n_hidden,
                 dense_after_bert=True,
                 learning_rate=2e-5,
                 use_multi_task=True,
                 use_harl=False,
                 use_mask_for_cate2=False,
                 use_mask_for_cate3=True,
                 cate3_mask_type='cate1',
                 train_on_cv=True,
                 cv_random_state=42,
                 cv_fold=5,
                 cv_index=cv_index,
                 exchange_pair=True,
                 use_pseudo_label=False,
                 use_gpu_id=7)

    for n_hidden in range(1, 6):
        for cv_index in range(5):
            main(model_type='bert-base-uncased',
示例#23
0
def main(_):
    """Build the CNN graph, then either train (TRAIN=True) or run the test
    loop against a saved checkpoint.

    Relies on module-level configuration: BATCH_SIZE, TRAIN, LOGDIR,
    NUM_EPOCHS, EVAL_FREQUENCY, SAVE_MODEL_DIR. TF1-style (placeholders,
    Session, summaries). The `_` parameter is the unused argv passed by
    tf.app.run-style entry points.
    """
    # get data set info
    train_size = dataset.train_size
    image_size = dataset.image_size
    test_size = dataset.test_size
    #------------------------------------------------------------------
    # Data Training placeholders
    # Only dropout
    #------------------------------------------------------------------

    data_node = tf.placeholder(data_type(),
                               shape=(BATCH_SIZE, *image_size),
                               name='train')

    labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE, ))

    tf.summary.image('input', data_node, BATCH_SIZE)

    #------------------------------------------------------------------
    # Init model
    #------------------------------------------------------------------

    # model.net returns the logits followed by the trainable variables.
    [logits, *weight_and_biases] = model.net(data_node, TRAIN, data_type())

    #------------------------------------------------------------------
    # Train, validation & accuracy
    #------------------------------------------------------------------
    # NOTE(review): labels_node is passed both as the 2nd and the last
    # argument -- confirm the final argument was not meant to differ.
    optimizer, predictions, accuracy, prediction, correct_prediction = trainer.main(
        labels_node, logits, weight_and_biases, BATCH_SIZE, data_type,
        train_size, labels_node)

    #------------------------------------------------------------------
    # Logging
    #------------------------------------------------------------------
    summ = tf.summary.merge_all()  # merges all previous outputs
    start_time = time.time()
    saver = tf.train.Saver()  # instance to save finished trained algorithm

    #------------------------------------------------------------------
    # session
    #------------------------------------------------------------------

    feed_dict_gen = dataset.feed_dict_gen(BATCH_SIZE, labels_node, data_node)

    if TRAIN:
        # util to clean all prev logs
        writerTrain = utils.fresh_log_writer(LOGDIR, 'train')
        writerValid = utils.fresh_log_writer(LOGDIR, 'validate')

        with tf.Session() as sess:
            # Run all the initializers to prepare the trainable parameters.
            tf.global_variables_initializer().run()

            writerTrain.add_graph(sess.graph)
            writerValid.add_graph(sess.graph)
            print('Initialized!')
            #------------------------------------------------------------------
            # training loop
            #------------------------------------------------------------------
            for step in xrange(int(train_size * NUM_EPOCHS) // BATCH_SIZE):
                # feed_dict_gen yields (feed_dict, labels, data); dropout 0.5.
                feed_dict = feed_dict_gen(step, 0.5, 'train')[0]
                if step % EVAL_FREQUENCY == 0:  # and step!=0:
                    # Periodic progress/summary pass (no optimizer step here).
                    start_time = utils.epoch_tracker(step, start_time,
                                                     BATCH_SIZE / train_size,
                                                     EVAL_FREQUENCY)
                    feed_dict[
                        'keep_prob:0'] = 1  # when we eval or train we want the keep_prob = 1
                    ss = sess.run(summ, feed_dict=feed_dict)
                else:
                    __, _, ss, acc = sess.run(
                        [optimizer, predictions, summ, accuracy],
                        feed_dict=feed_dict)
                writerTrain.add_summary(ss, step)
            # Persist the trained model once the loop finishes.
            save_path = saver.save(sess, SAVE_MODEL_DIR)

    else:
        # --------------------------------------------------------------
        # test loop
        # --------------------------------------------------------------
        writerTest = utils.fresh_log_writer(LOGDIR, 'test')
        with tf.Session() as sess:
            saver.restore(sess, SAVE_MODEL_DIR)
            test_predictions = []
            test_misclassified = []
            for step in xrange(int(test_size) // BATCH_SIZE):
                writerTest.add_graph(sess.graph)
                feed_dict, batch_labels, batch_data = feed_dict_gen(
                    step, 1, 'test')
                if step % 10 == 0:
                    # Not properly calibrated; but just a way to know that something is happening
                    # We have 200 iterations to go
                    start_time = utils.epoch_tracker(step, start_time, 0, 10)
                ss = sess.run(summ, feed_dict=feed_dict)
                # NOTE(review): summ is evaluated twice per step (above and
                # below); the second result overwrites the first -- confirm
                # the first run is intentional.
                accuracyres, cnp, cp, ss = sess.run(
                    [accuracy, prediction, correct_prediction, summ],
                    feed_dict=feed_dict)
                misclassified = utils.get_mislabeled_cases(
                    cnp, batch_labels, batch_data, step, BATCH_SIZE)
                if (len(misclassified) > 0):
                    test_misclassified.append(misclassified)
                writerTest.add_summary(ss, step)
                test_predictions.append(accuracyres)
            utils.pprint('Missclassified images: ', test_misclassified)
            utils.pprint('Error rate:', 1 - numpy.average(test_predictions))
示例#24
0
def run_trainer():
    """Run the trainer end-to-end and verify it wrote at least one checkpoint."""
    trainer.main()
    checkpoints = glob(os.path.join(trainer.FLAGS.output_dir, '*ckpt*'))
    assert len(checkpoints) > 0, 'Model did not checkpoint'
示例#25
0
from trainer import main

# Kick off training directly with explicit hyperparameters.
main(v_learning_rate=.1, w_learning_rate=.01, epochs=10)
示例#26
0
 def test_not_all_params_filled(self, logging_mock):
     res = main(argv=['testdata/trainer/features.json'])
     self.assertEquals(res, PARAMETERS_REQUIRED)
     logging_mock.warn.assert_called_with(
         'You must define either an input file or an extraction plan')
示例#27
0

import trainer
# Run training with the module's default configuration.
trainer.main()

def get_eval_metrics_fn(tag_encoder) -> SequenceMetrics:
    """Get SequenceMetrics instance for evaluating on the evaluation data.

    All tag-level metrics are filtered through *tag_encoder*; the confusion
    matrix is computed on the raw nested sequences.
    """
    return SequenceMetrics([
        ('f1_score', filtered(f1_score, tag_encoder)),
        ('precision', filtered(precision_score, tag_encoder)),
        ('recall', filtered(recall_score, tag_encoder)),
        ('classification_report',
         filtered(classification_report, tag_encoder, digits=4)),
        ('confusion_matrix', confusion_matrix_nested),
    ])


if __name__ == "__main__":

    # Standard logging setup for the trainer entry point.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    # The same metrics factory serves both validation and final evaluation.
    main(
        load_and_cache_examples,
        get_train_metrics_fn=get_train_metrics_fn,
        get_valid_metrics_fn=get_eval_metrics_fn,  # same as evaluation
        get_eval_metrics_fn=get_eval_metrics_fn,
    )
'''
Created on Feb 25, 2013

@author: yoav
'''
import trainer
import json
import nltk
import math
import os, sys

# Sets initial data as global params: trainer.main() returns
# (teta, dictionary, genre_dictionary, genre_probability), unpacked into
# module-level globals used by the probability helpers below.
results=trainer.main()
teta=results[0]

dictionary=results[1]
genre_dictionary=results[2]
genre_probability=results[3]


def calculate_probability_in_genre_c(x,c): #logp(x|y=c)
    """Bernoulli log-likelihood of binary feature vector x under genre c,
    using the global parameter matrix `teta` and vocabulary `dictionary`."""
    return sum(
        x[i] * math.log(teta[c][i]) + (1 - x[i]) * math.log(1 - teta[c][i])
        for i in range(len(dictionary)))
#denominator of posterior
def den(x,c):
    """Unnormalized posterior for genre c: p(x|c) * p(c)."""
    log_posterior = calculate_probability_in_genre_c(x, c) \
        + math.log(genre_probability[c])
    return math.exp(log_posterior)
#nominator of posterior
def nom(x,c):
    a=0
示例#30
0
 def test_main(self):
     """End-to-end: train a model via main() and persist it."""
     model = main(args=self.args)
     save_model(model=model, out_dest=self.out_dest)