Esempio n. 1
0
def main():
    """Interactively build, train and test a neural network (Python 2).

    Prompts the user (via ``raw_input``) to choose between a multi-layer
    perceptron and a radial-basis network, collects the network
    dimensions, then runs the matching train/test routines.
    """

    print "Select Network Type"
    print "1. MLP"
    print "2. Radial Basis"
    nn_type = raw_input()
    # Fallback instance in case the user enters anything other than 1 / 2.
    nn = NeuralNetwork()

    if nn_type == "1":
        num_inputs = int(raw_input("Enter number inputs"))
        num_hidden = int(raw_input("Enter number hidden layers"))
        nn = MLPNN(num_inputs, num_hidden)

    elif nn_type == "2":
        num_inputs = int(raw_input("Enter number inputs"))
        num_centers = int(raw_input("Enter number radial basis functions"))
        nn = RBNN(num_inputs, num_centers)

    trainer = Trainer(nn)
    tester = Tester(nn)

    # num_inputs is only bound inside the branches above, but these
    # branches are guarded by the same nn_type checks, so they only run
    # when it has been set.
    if nn_type == "1":
        trainer.trainMLP(str(num_inputs))
        tester.test(str(num_inputs))
    elif nn_type == "2":
        trainer.trainRB(str(num_inputs))
        tester.testRB(str(num_inputs))
Esempio n. 2
0
def main():
    """Poll a pool of testers forever, dispatching workloads from the DB.

    Every cycle: hand each idle tester the next queued workload (when a
    matching build exists), snapshot every tester's state, push the
    snapshot to the DB, then sleep two minutes.
    """
    # NOTE: the original bound an unused ``state = WorkerState.READY``
    # here; removed as dead code.
    testers = [Tester(i) for i in range(num_of_testers)]
    while True:
        testers_state = []
        for tester in testers:
            if tester.is_ready():
                print("tester {} is ready".format(tester.get_id()))
                new_workload = db_client.pop_next_workload()
                print("new workload is", new_workload)
                if new_workload:
                    build = db_client.get_build(new_workload)
                    if build:
                        # Fire-and-forget: the callback reports completion.
                        tester.start_test(new_workload, build,
                                          test_done_callback)
                else:
                    print("Waiting for new workload")
            # One snapshot entry per tester; workload is None when idle.
            current_workload = tester.get_current_workload()
            testers_state.append({
                "state": tester.get_state(),
                "current_workload":
                    current_workload.val() if current_workload else None,
            })
        db_client.update_worker_state(num_of_testers, testers_state)
        time.sleep(120)
def main():
    """Classify custom SMS data with a trained MNB model (Python 2).

    Preprocesses the raw sms-data file into bags of words, converts it to
    feature vectors, predicts a label per message, and writes
    ``msg -> label`` lines to output/results.txt.
    """
    pp = Preprocessor()
    print 'processing custom data, computing bows...'
    tdpath = 'dataset/test/sms-data'
    pp.process_custom_data(tdpath)
    
    fm = FeatureModel()
    print 'converting custom data to fvs...'
    fm.compute_custom_fv_matrix('custom')
    
    # Feature vectors, pickled classifier, and the preprocessed messages.
    tdpath = 'bin_data/custom_fv.npy'
    cpath = 'bin_data/mnb-classifier.npy'
    data = np.load('bin_data/custom-data.npy').item()
    
    tester = Tester(tdpath,cpath)
    print 'predicting labels for custom data...'
    results = tester.predict_labels_for_custom_data(data)
    
    with open('output/results.txt','w') as textfile:
        for msg in results:
            line = '%s -> %s\n' % (msg,results[msg])
            textfile.write(line)
        
        # NOTE(review): redundant -- the with-block already closes the file.
        textfile.close()
    
    print 'Results written to results.txt'
def test_tester():
    """Run the board's test steps in order, aborting on the first failure.

    Each Tester method returns a truthy value on success; the sequence
    stops silently as soon as one step fails.
    """
    tester = Tester()
    os.system("clear")
    steps = (
        tester.setup,
        tester.reveil,
        tester.liberer_leds,
        tester.switch_18,
        tester.switch_5,
        tester.switch_12,
        tester.switch_9,
        tester.charge_rapide,
        tester.charge_normale,
        tester.batterie_faible,
        tester.batterie_min,
        tester.batterie_max,
        tester.jack_debranche,
    )
    for step in steps:
        if not step():
            return
Esempio n. 5
0
 def schedule_tester(self, cycle=TEST_CYLE):
     """Run the proxy tester forever, once every *cycle* seconds.

     :param cycle: sleep interval in seconds between test runs
         (defaults to the module-level ``TEST_CYLE`` constant).
     :return: never returns; loops indefinitely.
     """
     tester = Tester()
     while True:
         print("测试器开始运行")
         tester.run()
         time.sleep(cycle)
 def schedule_tester(self,cycle=TESTER_CYCLE):
     """Run the proxy tester forever, once every *cycle* seconds.

     :param cycle: seconds to sleep between runs (module constant
         ``TESTER_CYCLE`` by default).
     :return: never returns; loops indefinitely.
     """
     tester=Tester()
     while True:
         print('测试机器运行')
         tester.run()
         time.sleep(cycle)
Esempio n. 7
0
def main():
    """Train a sigmoid neuron on AND and plot precision vs. training size."""
    # Random initial weights/bias in [-2, 2], learning rate 0.1.
    neuron = SigmoidNeuron(
        [random.randint(-2, 2), random.randint(-2, 2)],
        random.randint(-2, 2),
        0.1,
    )
    sizes = range(1, 1000, 10)
    precisions = []
    tester = Tester()
    # Training is cumulative: the same neuron keeps learning as the
    # sample count grows.
    for size in sizes:
        for _ in range(size):
            trainAND(neuron)
        precisions.append(
            tester.test(neuron, [[0, 0], [0, 1], [1, 0], [1, 1]],
                        [0, 0, 0, 1], ifhalf))
    tester.plot(sizes, precisions, "Neurona Sigmoide entrenada con AND")
Esempio n. 8
0
def main():
    """Train a 2-input network on XOR for 5000 epochs and plot the errors."""
    neural_network = NeuralNetwork()
    # 2 inputs, one hidden layer of 3 neurons, 1 output, learning rate 0.5.
    neural_network.make(2, [3, 1], 0.5)
    nbepoch = 5000
    dataset = [[0, 0], [0, 1], [1, 0], [1, 1]]
    outputs = [[0], [1], [1], [0]]
    errors = neural_network.train(dataset, outputs, nbepoch)
    tester = Tester()
    # Use nbepoch instead of repeating the magic 5000; the original also
    # bound an unused ``train_set_sizes`` local, removed here.
    tester.plotError(range(nbepoch), errors,
                     "Red neuronal entrenada con XOR, 5000 epocas")
Esempio n. 9
0
 def reset(self):
     """Restore the VM to a pristine state: empty program, zeroed counters."""
     self.prog = bytearray()
     self.progPtr = 0x000000
     # Opcode dispatch table: 2-bit opcodes 0..3 (0b00..0b11) map to
     # their handlers in order.
     handlers = (self.add_op, self.sub_op, self.jmp_op, self.prt_op)
     self.ops = dict(enumerate(handlers))
     self.moves = ""
     self.t = Tester()
     self.numOfCycles = 0
Esempio n. 10
0
File: main.py Progetto: yarko90/GIT
def main_preparation(conn):
    """Create the schema, insert the generated test detectors, return them.

    :param conn: open database connection shared by all helpers.
    :return: tuple of (detector dict as fetched by the Tester, the Tester).
    """
    Create_tables.create_tables(conn)
    adder = Adder(conn)
    building_count = 5
    tester = Tester(conn, building_count)

    # Each test set is a "|"-separated record; the first five fields
    # describe one detector row.
    for record in tester.test_sets_list:
        fields = record.split("|")
        adder.add_detector(fields[0], fields[1], fields[2],
                           fields[3], fields[4])

    tester.all_dectors_dict = tester.get_all_detectors()
    return tester.all_dectors_dict, tester
Esempio n. 11
0
def main():
    """Train a 3-hidden/1-output network on XOR; plot precision vs. size."""
    network = NeuralNetwork()
    network.make(2, [3, 1], 0.1)
    sizes = range(1, 5000, 10)
    precisions = []
    tester = Tester()
    # Cumulative training: the same network keeps learning as the
    # sample count grows.
    for size in sizes:
        for _ in range(size):
            trainXORWithNetwork(network)
        precisions.append(
            tester.testNetwork(network,
                               [[0, 0], [0, 1], [1, 0], [1, 1]],
                               [[0], [1], [1], [0]]))

    tester.plot(sizes, precisions, "Red neuronal con 3 neuronas en c.o. y 1 salida, entrenada con XOR")
Esempio n. 12
0
def main():
    """Time a full classifier test run and print start/end/elapsed times."""
    started = datetime.now()

    # Pre-computed feature vectors and the pickled MNB classifier.
    tdpath = 'bin_data/testing_fv.npy'
    cpath = 'bin_data/mnb-classifier.npy'

    tester = Tester(tdpath, cpath)
    tester.test_classifier()

    finished = datetime.now()

    # Python 2 print statements.
    print 'Started at: ', started
    print 'Finished at: ', finished
    print 'Time taken: ', (finished - started)
Esempio n. 13
0
 def __init__(self):
     """Set up an empty VM: no program loaded, dispatch table wired up."""
     self.prog = bytearray()
     self.progPtr = 0x000000
     # 2-bit opcodes 0..3 (0b00..0b11) dispatch to add/sub/jmp/prt.
     handlers = (self.add_op, self.sub_op, self.jmp_op, self.prt_op)
     self.ops = dict(enumerate(handlers))
     self.moves = ""
     self.t = Tester()
     self.numOfCycles = 0
     self.printCount = 0
Esempio n. 14
0
 def test(self):
     """Drive the fzf-based test session described by ``self.yml['test']``.

     If the first test entry carries FZF_DEFAULT_OPTS it is *popped* from
     the list and exported into the app environment; otherwise the
     variable is cleared. Tasks then run as a chain: each non-terminal
     result spawns a clone of the last task, updated via the task_switch
     table, until ``_is_job_end`` reports a terminal result.
     """
     if 'FZF_DEFAULT_OPTS' in self.yml['test'][0]:
         # pop(0) mutates self.yml['test']: the options entry is consumed
         # so the Tester below only sees the remaining test entries.
         app_env['FZF_DEFAULT_OPTS'] = self.yml['test'].pop(
             0)['FZF_DEFAULT_OPTS']
     else:
         app_env['FZF_DEFAULT_OPTS'] = ''
     tester = Tester(self.yml['test'])
     result = self.tasks[0].execute(tester=tester)
     while not self._is_job_end(result):
         # Clone the latest task, morph it according to the result key,
         # append and execute it; the loop keys off the newest result.
         new_task = Task.clone(self.tasks[-1])
         new_task.update(self.task_switch[result.key], result)
         self.tasks.append(new_task)
         result = self.tasks[-1].execute(tester=tester)
     self.tasks[-1].output(result, tester=tester)
Esempio n. 15
0
def main():
    """Train a single perceptron on XOR and plot precision vs. sample count."""
    # Random initial weights/bias in [-2, 2], learning rate 0.1.
    perceptron = Perceptron(
        [random.randint(-2, 2), random.randint(-2, 2)],
        random.randint(-2, 2),
        0.1,
    )
    sizes = range(1, 1000, 10)
    precisions = []
    tester = Tester()
    # Cumulative training across increasing sample counts.
    for size in sizes:
        for _ in range(size):
            trainXOR(perceptron)
        truth_table = [[0, 0], [0, 1], [1, 0], [1, 1]]
        precisions.append(tester.test(perceptron, truth_table, [0, 1, 1, 0]))

    tester.plot(sizes, precisions, "Perceptron entrenado con XOR")
Esempio n. 16
0
def main():
    """Parse the AerialSeg test options and run the tester."""

    def _str2bool(value):
        # argparse-friendly bool parser; accepts true/t/yes/y/1 (any case).
        if isinstance(value, bool):
            return value
        return value.lower() in ('true', 't', 'yes', 'y', '1')

    parser = argparse.ArgumentParser(description="AerialSeg by PyTorch: test.py")
    parser.add_argument('--eval_batch_size', type=int, default=1, help='batch size for validation')
    parser.add_argument('--eval_list', type=str, default='Potsdam_val.txt', help='list file for validation')
    parser.add_argument('--img_path', type=str, default='Potsdam/2_Ortho_RGB', help='path for images of dataset')
    parser.add_argument('--gt_path', type=str, default='Potsdam/5_Labels_all', help='path for ground truth of dataset')
    parser.add_argument('--num_of_class', type=int, default=6, help='number of classes')
    parser.add_argument('--crop_size', type=int, default=512, help='crop size of input images')
    parser.add_argument('--stride', type=int, default=256, help='stride to test tiles')
    parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint to test')
    # BUG FIX: type=bool is broken with argparse -- bool('False') is True,
    # so '--cuda False' used to enable CUDA. Parse the string explicitly.
    parser.add_argument('--cuda', type=_str2bool, default=False, help='whether to use GPU')

    args = parser.parse_args()
    print(args)
    my_tester = Tester(args)
    my_tester.run()
Esempio n. 17
0
def main():
    """Generate gaussian clusters, run KMeans, and save comparison plots."""
    # Log everything to result/log.txt at INFO level.
    logging.basicConfig(filename="result/log.txt",
                        filemode='w',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-n_clusters', type=int, default=5)
    parser.add_argument('-n_points', type=int, default=100)
    args = parser.parse_args()

    tester = Tester(n_gaussian_clusters=args.n_clusters)

    # Sample n_points from each of the n_clusters 2-D gaussians.
    data, labels = tester.generate_2d_gaussian_points(
        how_many_per_each_gaussian=args.n_points)
    logging.info(" Generated {} data points from {} different 2 dimensional "
                 "multivariate gaussian distributions. ({} data points for "
                 "each cluster.)".format(args.n_clusters * args.n_points,
                                         args.n_clusters, args.n_points))

    # Raw samples, then the same samples colored by their true gaussian.
    utils.draw(data, labels, without_label_color=True, means=None,
               title="Data", save="result/raw.png", show=False)
    utils.draw(data, labels, without_label_color=False, means=tester.means,
               title="Gaussian", save="result/gaussian.png", show=False)

    # KMeans prediction, colored by predicted cluster with its centers.
    kmeans = KMeans(n_cluster=args.n_clusters)
    predicted_labels, predicted_centers = kmeans.fit(data)
    utils.draw(data, predicted_labels, without_label_color=False,
               means=predicted_centers, title="KMeans",
               save="result/kmeans.png", show=False)

    # Stitch the three figures into one summary image.
    panels = ["result/raw.png", "result/gaussian.png", "result/kmeans.png"]
    utils.concatenate_pngs(panels, "result/final.png")
Esempio n. 18
0
def main():
    """Sweep learning rates and training sizes for a line classifier.

    A SigmoidNeuron learns whether random points lie above
    SimpleLine(1.5, 10); precision on a fixed validation set is plotted
    against training-set size, one curve per learning rate.
    """
    tester = Tester()

    line = SimpleLine(1.5, 10)
    train_set = generateRandomPoints(1000)
    valid_set = generateRandomPoints(1000)
    # Ground truth for the validation points: True when above the line.
    expected = [line.isUpperLine(x, y) > 0.5 for (x, y) in valid_set]

    sizes = range(1, 1000, 10)
    learning_rates = [0.1, 0.5, 1.5]

    for lr in learning_rates:
        precisions = []
        for size in sizes:
            # Fresh neuron per size so runs are independent.
            neuron = SigmoidNeuron([2, 2], 2, lr)
            for index in range(size):
                (x, y) = train_set[index]
                neuron.trainLonely([x, y], line.isUpperLine(x, y))
            precisions.append(tester.test(neuron, valid_set, expected, ifhalf))
        tester.plot(sizes, precisions, "Presiciones por numero de muestras de entrenamiento, lr %.1f" % (lr))
Esempio n. 19
0
    def __init__(self,
                 what_to_grade: WhatToGrade = None,
                 who_to_grade: WhoToGrade = None,
                 getter: Getter = None,
                 tester: Tester = None,
                 recorder: Recorder = None):
        """Wire up the grading pipeline, defaulting any component not given.

        Components are built in dependency order: the what/who descriptors
        first, then getter/tester/recorder constructed from that pair.
        """
        what_to_grade = what_to_grade or WhatToGrade()
        who_to_grade = who_to_grade or WhoToGrade()

        self.what_to_grade = what_to_grade
        self.who_to_grade = who_to_grade
        self.getter = getter or Getter(what_to_grade, who_to_grade)
        self.tester = tester or Tester(what_to_grade, who_to_grade)
        self.recorder = recorder or Recorder(what_to_grade, who_to_grade)
    def runTest(self, trainingFilename, startIndex, endIndex):
        """Train an MLP plagiarism detector and test suspicious documents.

        Trains on features extracted from *trainingFilename*, then checks
        documents ``suspicious-document{i:05d}`` for i in
        [startIndex, endIndex); positive verdicts are printed and
        appended to wyniki.csv.
        """
        atomizer = Atomizer('learn')
        extractor = FeaturesExtractor()

        processor = InputDataProcessor(atomizer, extractor, (0.2, 0.8))
        reader = InputDataReader(processor)
        (X, y) = reader.read_features(trainingFilename)

        classifier = MLPClassifier(solver='lbfgs',
                                   alpha=1e-5,
                                   hidden_layer_sizes=(5, ),
                                   random_state=1)
        classifier.fit(X, y)

        # Fresh atomizer/extractor pair switched to test mode.
        atomizer = Atomizer('test')
        extractor = FeaturesExtractor()
        tester = Tester(atomizer, extractor, classifier, 0.99)

        for i in range(startIndex, endIndex):
            testFilename = "suspicious-document{:05d}".format(i)
            test_file = reader.get_file("dataSets/part{}/{}".format(
                1, testFilename))
            verdict = tester.is_plagiarised(test_file)
            # Keep the == comparison: verdict may be a sequence when
            # positive, and only an explicit False/0 means "not plagiarised".
            if verdict == False:
                continue
            print('odpowiedz systemu: ' + str(verdict[0]))

            print('stan rzeczywisty: ' + str(not not test_file['metadata']))
            row = [
                trainingFilename, testFilename,
                str(verdict[0]),
                str(not not test_file['metadata'])
            ]
            # BUG FIX: the file was opened on every hit and never closed;
            # 'with' guarantees flush+close. The local was also renamed so
            # it no longer shadows the builtin ``list``.
            with open("wyniki.csv", 'a') as csv_file:
                csv.writer(csv_file).writerows([row])
Esempio n. 21
0
    def __init__(self,
                 n=20,
                 chromLen=64,
                 generationStrategy=None,
                 selectionStragegy=None,
                 crossoverStrategy=None,
                 mutationStrategy=None):
        """Configure the genetic-algorithm population and seed it.

        :param n: number of chromosomes in the population.
        :param chromLen: bit-length of each chromosome.
        :param generationStrategy: accepted but currently ignored -- the
            strategies below are hard-coded. NOTE(review): presumably these
            four strategy parameters were meant to override the defaults;
            confirm the intent before wiring them in.
        :param selectionStragegy: ignored (see above; note the typo).
        :param crossoverStrategy: ignored (see above).
        :param mutationStrategy: ignored (see above).
        """

        self.numOfChrom = n
        self.chromLen = chromLen
        self.mutationChance = 7  # for now
        self.population = []

        # self.mapa = mapa
        # Tester supplies the fitness heuristic and the map dimensions.
        self.t = Tester()
        self.heur = self.t.treasureCount
        self.area = self.t.x + self.t.y

        # Hard-coded strategy choices (the constructor parameters above
        # are not consulted).
        self.generator = GenerationStrategy.Heuristic
        self.selection = SelectionStrategy.DoubleTrournamentSelection
        self.cross = CrossoverStrategy.Take2Random
        self.mutation = MutationStrategy.RandomResetting

        # Seed the initial population immediately.
        self.generate()
Esempio n. 22
0
 def setupDevices(self):
     """Build one Tester per device described on the parameter server."""
     params = self.getParameters()
     self.devices_ = [
         Tester(dev["name"], dev["topic"], dev["directory"],
                dev["type"], dev["frequency"])
         for dev in params
     ]
Esempio n. 23
0
"""
    Running the class, generating output and plotting the output.
"""
from matplotlib import pyplot as plt
from SOM import SOM
import pandas as pd
import os
from Formatter import FormatCSVFile
from pathlib import Path
import Mapper as mapper
from Tester import Tester

formatter = FormatCSVFile()
tester = Tester()

# Build the merged training CSV once; skip when it already exists.
if Path("huge_merged_csv_file.csv").is_file() is False:
    formatter.format_csv_files()
    formatter.generate_merged_huge_file()

# Same pattern for the validation split.
if Path("Validation Data/validation_file.csv").is_file() is False:
    formatter.format_validation_data()
    formatter.generate_validation_file()

# NOTE(review): this branch checks for the *test* file but re-runs the
# validation generators -- looks like a copy-paste slip; confirm whether
# test-data counterparts were intended.
if Path("Test Data/test_file.csv").is_file() is False:
    formatter.format_validation_data()
    formatter.generate_validation_file()

# Train SOM, if there is no weights.
if Path("weights.txt").is_file() is False:

    # Use pandas for loading data using dataframes.
Esempio n. 24
0
if __name__ == "__main__":
    # Benchmark the solver over a grid of per-run timeouts, first with
    # targets up to k**3 and then up to k**4, reusing the same inputs.
    n = 10
    k = 25
    t = 5
    max_target = k**3
    trials = 1000
    debug = False

    tester = Tester(n, k)
    test_cases = tester.gen_test_nums(n, k, trials)
    target_numbers = tester.gen_target_nums(max_target, trials)

    timeouts = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]

    for timeout in timeouts:
        test_suite(tester, n, k, timeout, max_target, trials, debug,
                   test_cases, target_numbers, False)

    # Second pass: harder targets, same timeout sweep.
    max_target = k**4
    for timeout in timeouts:
        test_suite(tester, n, k, timeout, max_target, trials, debug,
                   test_cases, target_numbers, False)
Esempio n. 25
0
def main():
    """Run the genetic-algorithm tester and print the target vector (Py2)."""
    # NOTE(review): genSize is passed twice -- presumably once as the gene
    # size and once as another size/count argument; confirm against Tester.
    tester = Tester(fitnessFunction, alphabeth, genSize, populationSize,
                    genSize)
    tester.test()
    print "vector original"
    print real
Esempio n. 26
0
import utils.util as util
from options import options

import os

from Tester import Tester

if __name__ == '__main__':
    # Parse options, ensure the checkpoint directory exists, attach a
    # file logger there, then run a single test pass.
    opt = options.parse()
    run_dir = os.path.join(opt.checkpoints_dir, opt.name)
    util.mkdirs(run_dir)
    logger = util.get_logger(os.path.join(run_dir, 'logger.log'))

    Tester(opt).test()
        raise Exception(
            'add continue_train flag to load pretrain embedding...')
    trainer.model.save_embeddings()
    exit(0)
if params.train_flag:
    print('[begin training]')
    trainer.run()

if params.test_flag:
    print('[begin testing]')

    # Load the best checkpoint saved during training. map_location=None
    # keeps torch.load's default device resolution when CUDA is present;
    # otherwise everything is remapped onto the CPU.
    model_load_name = trainer.save_best_name
    map_location = None if torch.cuda.is_available() else torch.device('cpu')
    model.load_state_dict(torch.load(model_load_name,
                                     map_location=map_location))

    # Only the plain 'test' split is evaluated here; the loop shape keeps
    # it easy to add relation-pattern splits (1-1, 1-N, ...) later.
    for split in ['test']:
        test_data_loader = get_data_loader(params, split)
        ent_tot, rel_tot = dataset_param(params.data)
        tester = Tester(params, ent_tot, rel_tot, model, test_data_loader)
        tester.test_run(mode='test')
Esempio n. 28
0
from __future__ import division  # Python 2 users only
import nltk, re
from nltk import word_tokenize
from Trainer import Trainer
from Tester import Tester

if __name__ == "__main__":
    # Prompt for the corpus file (Python 2 raw_input) and run every test.
    testingFile = raw_input('Please enter the testing file: ')
    test = Tester(testingFile)
    test.execute_tests()
Esempio n. 29
0
    opts, args = getopt.getopt(
        sys.argv[1:], "c:i:td",
        ["classifier-dir=", "img-dir=", "teach", "debug"])
except getopt.GetoptError:
    print help()
    sys.exit(2)

if len(sys.argv) < 3:
    print help()
    sys.exit(2)

# Map the parsed command-line switches onto the script's configuration
# variables (Python 2: print statements below).
for opt, arg in opts:
    if opt in ('-h', '--help'):
        print help()
        sys.exit()
    elif opt in ("-c", "--classifier-dir"):
        classifier_dir = arg
    elif opt in ("-i", "--img-dir"):
        img_dir = arg
    elif opt in ("-t", "--teach"):
        teach = True
    elif opt in ("-d", "--debug"):
        debug = True

# Teach mode trains a new classifier from the image directory; otherwise
# the existing classifier is evaluated against it.
if teach:
    teacher = Teacher(img_dir, classifier_dir, debug)
    teacher.teach()
else:
    tester = Tester(img_dir, classifier_dir, debug)
    tester.test()
Esempio n. 30
0
    cnn_model=RESNET_V1(args.opt , args.use_bn , args.l2_weight_decay, args.logit_type , args.datatype ,args.batch_size, args.cropped_size,\
                    args.num_epoch ,args.init_lr, args.lr_decay_step, args.model_name ,args.aug_list)
elif 'vgg' in args.model_name:
    cnn_model=VGG(args.opt , args.use_bn , args.l2_weight_decay, args.logit_type , args.datatype ,args.batch_size, args.cropped_size,\
                    args.num_epoch ,args.init_lr, args.lr_decay_step, args.model_name ,args.aug_list)
elif 'inception' in args.model_name:
    pass
elif 'densenet' in args.model_name:
    cnn_model=Densenet(args.opt , args.use_bn , args.l2_weight_decay, args.logit_type , args.datatype ,args.batch_size, args.cropped_size,\
                    args.num_epoch ,args.init_lr, args.lr_decay_step, args.model_name ,args.aug_list)
else:
    raise NotImplementedError

# Shared run artifacts: the recorder logs under the model's folder and is
# handed to both the trainer and the tester.
recorder = Recorder(folder_name=args.model_name)
trainer = Trainer(recorder, train_iter=10)
tester = Tester(recorder)

# Reconstruct Test , Validation Data
# Decode raw images/labels/filenames back out of the test TFRecord.
test_imgs, test_labs, fnames = cnn_model.dataprovider.reconstruct_tfrecord_rawdata(
    cnn_model.dataprovider.test_tfrecord_path, None)
test_labs = utils.cls2onehot(test_labs, cnn_model.n_classes)

# Same for the validation TFRecord (fnames is overwritten here).
val_imgs, val_labs, fnames = cnn_model.dataprovider.reconstruct_tfrecord_rawdata(
    cnn_model.dataprovider.val_tfrecord_path, None)
val_labs = utils.cls2onehot(val_labs, cnn_model.n_classes)

# Optional CLAHE contrast normalization of both splits (Python 2 print).
if 'aug_clahe' in args.aug_list:
    print "Clahe is applied , Validation images , Test Images "
    test_imgs = aug.apply_clahe(test_imgs)
    val_imgs = aug.apply_clahe(val_imgs)