Example #1
File: train.py Project: chankeh/FaceDemo
def train_model():
    train_loader, test_loader = get_dataset(batch_size=config.BATCH_SIZE)
    net = Net().to(config.DEVICE)
    # Use the Adam optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
    for epoch in range(config.EPOCHS):
        for step, (x, y) in enumerate(train_loader):
            x, y = x.to(config.DEVICE), y.to(config.DEVICE)
            output = net(x)
            # Negative log-likelihood loss
            loss = F.nll_loss(output, y)
            # PyTorch accumulates gradients, so zero them out first
            optimizer.zero_grad()
            # Backpropagation
            loss.backward()
            # Update the parameters with Adam
            optimizer.step()

            if (step + 1) % 3 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch + 1, step * len(x), len(train_loader.dataset),
                    100. * step / len(train_loader), loss.item()))
    # Evaluate the model on the held-out set
    test(net, test_loader)
    # Save the model weights to the config.DATA_MODEL directory
    torch.save(net.state_dict(),
               os.path.join(config.DATA_MODEL, config.DEFAULT_MODEL))
    return net
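The `test(net, test_loader)` helper invoked above is not part of this snippet; a minimal evaluation sketch of the shape that call implies (reusing the snippet's `torch`, `F`, and `config` names, and assuming a standard loss/accuracy report rather than the project's actual code) could look like:

def test(net, test_loader):
    # Hypothetical evaluation loop; the real FaceDemo test() may differ.
    net.eval()
    test_loss, correct = 0.0, 0
    with torch.no_grad():
        for x, y in test_loader:
            x, y = x.to(config.DEVICE), y.to(config.DEVICE)
            output = net(x)
            test_loss += F.nll_loss(output, y, reduction='sum').item()
            correct += (output.argmax(dim=1) == y).sum().item()
    n = len(test_loader.dataset)
    print('Test set: average loss {:.4f}, accuracy {}/{}'.format(test_loss / n, correct, n))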
def main():

    if tf.gfile.Exists(a.output_dir):
        tf.gfile.DeleteRecursively(a.output_dir)
        tf.gfile.MakeDirs(a.output_dir)

    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    if a.mode == "train":
        train()
    elif a.mode == 'test':
        test()
Example #3
 def detect(self, testFileName):
         bigtable = {}
         testfile = open(testFileName, 'r')
         number = 1
         total = 0
         resultfile = open("TestResult%s.txt" % self.ngram,'w')
         resultfile.write("Id, Label\n")
         for line in testfile.readlines():
 # extract every review to put in a new file, and compare perplexities on each model to see where it belongs
                 tempfile = open("hotel_test_clip.lm", 'w')
                 tempfile.write(line)
                 tempfile.close()
                 Ttest = test(self.lmT, self.ngram, "hotel_test_clip.lm")
                 NTtest = test(self.lmNT, self.ngram, "hotel_test_clip.lm")
 # handle blank lines: skip them
                 if Ttest.pp() == None or NTtest.pp() == None:
                         continue
 # the smaller the perplexity on one model, the bigger chance that this review is corresponded with this model
                 if Ttest.pp() >= NTtest.pp():
                         bigtable[number] = 0
                 else:
                         bigtable[number] = 1
                         total += 1
                 result = str(number) + ', '
                 result += str(bigtable[number])
                 result += '\n'
                 resultfile.write(result)
                 number += 1                    
         print "Done predicting, truthful rate=%s" % str(1.0*total/(number-1))
         return 1.0*total/(number-1)
Example #4
def main():
    if testing:
        test()

    setGlobals()
    UISetup()
    mainloop()
Example #5
def test_kwarg_wrapper():
    """Test docinstance.wrapper.kwarg_wrapper."""
    @kwarg_wrapper
    def test(func, x=1):
        """Test function."""
        func.x = x
        return func

    def f():  # pragma: no cover
        """Test function."""
        pass

    with pytest.raises(AttributeError):
        f.x
    g = test(f)
    assert g.x == 1
    g = test(f, x=2)
    assert g.x == 2

    @test
    def f():  # pragma: no cover
        """Test function."""
        pass

    assert f.x == 1

    @test(x=2)
    def f():  # pragma: no cover
        """Test function."""
        pass

    assert f.x == 2
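For context, the `kwarg_wrapper` exercised by this test is not shown in the example; a minimal sketch of a decorator-wrapper that can be applied both as `@test` and as `@test(x=2)` (an assumption about, not a copy of, docinstance's implementation) is:

from functools import wraps

def kwarg_wrapper(wrapper):
    """Let `wrapper(func, **kwargs)` be used as @wrapper or @wrapper(**kwargs)."""
    @wraps(wrapper)
    def new_wrapper(obj=None, **kwargs):
        if obj is None:
            # Called as @wrapper(x=...): return a decorator waiting for the function.
            return lambda func: wrapper(func, **kwargs)
        # Called as @wrapper or as wrapper(func, x=...).
        return wrapper(obj, **kwargs)
    return new_wrapper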
Example #6
def main(argv):
    torch.manual_seed(0)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_loader = torch.utils.data.DataLoader(DataLmdb(
        "/kaggle/working/Low_Test/Train-Low_lmdb",
        db_size=1464004,
        crop_size=128,
        flip=True,
        scale=0.00390625),
                                               batch_size=256,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(DataLmdb(
        "/kaggle/working/Low_Test/Valid-Low_lmdb",
        db_size=6831,
        crop_size=128,
        flip=False,
        scale=0.00390625,
        random=False),
                                              batch_size=256,
                                              shuffle=False)

    epochs = 18
    model = mfn_mini.MfnModelMini().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=1e-4)
    for epoch in range(epochs):
        if epoch in [int(epochs * 0.5), int(epochs * 0.75)]:  # decay the LR at 50% and 75% of training
            for param_group in optimizer.param_groups:
                param_group['lr'] *= 0.1
        print("epoch {}".format(epoch))
        train(model, device, train_loader, optimizer, False)
        test(model, device, test_loader)
def main(_):

    # Create required directories
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)

    if not os.path.exists(FLAGS.results_dir):
        os.makedirs(FLAGS.results_dir)

    if not os.path.exists(FLAGS.best_checkpoint_dir):
        os.makedirs(FLAGS.best_checkpoint_dir)

    # To configure the GPU fraction
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_frac)

    # Parameters of extracted training and testing patches
    patch_shape = (64, 64)
    extraction_step = (16, 16)
    testing_extraction_shape = (16, 16)

    if FLAGS.training:
        # For training the network
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            network = model(sess, patch_shape, extraction_step)
            network.build_model()
            network.train()

    if FLAGS.testing:
        # For testing the trained network
        test(patch_shape, testing_extraction_shape)
Example #8
def main(_):
    if FLAGS.phase == 'train':
        if FLAGS.num_gpu == 0:
            train_with_cpu(FLAGS)
        else:
            train_with_gpu(FLAGS)
    else:
        test(FLAGS)
Example #9
def main(args):

    #prepare data for training and testing.
    gene_train_and_test(args)
    gene_train_list(args)
    gene_verification_pair(args)
    #training.
    main_train(args)
    #testing
    test(args)
Example #10
File: main.py Project: CNaiko/onlineTest
def core_for_test():
    for i in xrange(len(CORES)):
        if not CORES[i].test_status:
            if not CORES[i].type:
                CORES[i].test  = test()
                return CORES[i]
    for i in xrange(len(CORES)):
        if not CORES[i].test_status:
            CORES[i].test  = test()
            return CORES[i]
Example #11
def main():
    failCount = 0

    failCount += test(name="testLbsTri",
                      cmd=lbsBin,
                      settings=lbsSettings(element=3,
                                           data_file=lbsData2D,
                                           dim=2,
                                           velmodel="D2Q9"),
                      referenceNorm=0.444607812999762)

    failCount += test(name="testLbsQuad",
                      cmd=lbsBin,
                      settings=lbsSettings(element=4,
                                           data_file=lbsData2D,
                                           dim=2,
                                           velmodel="D2Q9"),
                      referenceNorm=0.446677648467864)

    failCount += test(name="testLbsTet",
                      cmd=lbsBin,
                      settings=lbsSettings(element=6,
                                           data_file=lbsData3D,
                                           dim=3,
                                           degree=2,
                                           velmodel="D3Q15"),
                      referenceNorm=0.816131769809708)

    failCount += test(name="testLbsHex",
                      cmd=lbsBin,
                      settings=lbsSettings(element=12,
                                           data_file=lbsData3D,
                                           dim=3,
                                           degree=2,
                                           velmodel="D3Q15"),
                      referenceNorm=0.818015892660822)

    failCount += test(name="testLbsTri_MPI",
                      ranks=4,
                      cmd=lbsBin,
                      settings=lbsSettings(element=3,
                                           data_file=lbsData2D,
                                           dim=2,
                                           velmodel="D2Q9"),
                      referenceNorm=0.44497547998273)

    #clean up
    for file_name in os.listdir(testDir):
        if file_name.endswith('.vtu'):
            os.remove(testDir + "/" + file_name)

    return failCount
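The `test(...)` harness used by these CI drivers is not shown. A plausible shape, under the assumptions that `settings` is a list of command-line arguments, that the solver prints a norm to stdout, and that the harness returns 0 on success and 1 on failure, is sketched below (the `mpirun` invocation and the norm-parsing regex are illustrative, not the project's real helpers):

import re
import subprocess

def test(name, cmd, settings, referenceNorm, ranks=1, tol=1e-6):
    # Hypothetical harness: run the binary, scrape the reported norm,
    # and count a failure when it drifts from the reference value.
    argv = ["mpirun", "-np", str(ranks), cmd] + list(settings)
    out = subprocess.run(argv, capture_output=True, text=True).stdout
    match = re.search(r"norm\s*[:=]\s*([0-9.eE+-]+)", out)
    if match is None:
        print("[{0}] FAILED: no norm reported".format(name))
        return 1
    norm = float(match.group(1))
    ok = abs(norm - referenceNorm) <= tol * max(1.0, abs(referenceNorm))
    print("[{0}] {1}".format(name, "passed" if ok else "FAILED"))
    return 0 if ok else 1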
Example #12
File: train.py Project: YCL105/filters
def MAIN():
    flag = 1  # load the previously saved parameters and continue training from them

    for epoch in range(epochs):
        print(time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime()))  # time of each iteration
        train(epoch, net, flag)
        if flag == 1:
            flag = 0
        if epoch % 25 == 0:
            test(epoch, net)
        if epoch % 25 == 0:
            predict(train_path, epoch, 'predict')
            predict(test_path, epoch, 'predict')
Example #13
def main():
    '''
    args = sys.argv
    model_name = args[2]
    if args[1] == 'train':
        train('AWS', int(args[3]),  model_name = model_name)
    elif args[1] == 'train_':
        train_('AWS', int(args[3]), learning_rate = float(args[4]), model_name = model_name)
    else:
        test('AWS', model_name)
    '''
    config = configparser.ConfigParser()
    config.read('config.ini')
    conf = config['AWS']

    image_file = conf['data_folder'] + conf['image_file']
    out_folder = conf['data_folder'] + conf['cropped_folder']
    win_size = int(conf['window_size'])

    args = sys.argv
    model_name = args[2]
    if args[1] == 'train':
        automatic_crop(file_name=image_file,
                       window_size=win_size,
                       step_size=20,
                       output_folder=out_folder)
        data_augmentation(conf)
        train('AWS', int(args[3]), model_name=model_name)
    elif args[1] == 'train_':
        automatic_crop(file_name=image_file,
                       window_size=win_size,
                       step_size=20,
                       output_folder=out_folder)
        data_augmentation(conf, blur=False)
        train('AWS', int(args[3]), model_name=model_name)
    else:
        import os

        m = model_name
        '''
        for f in os.listdir('./'):
            if fnmatch.fnmatch(f, 'model_' + model_name + '.*data*'):
                m = f
        if m == model_name:
            return
        '''
        print '********************'
        print 'test will run for: ', m
        print '********************'
        test('AWS', m)
def main():
    # load yaml config file
    param = load_yaml(file_name='./config.yaml')
    # preprocess dataset
    save_pre_data_file(param,
                       n_mels=param['n_mels'],
                       frames=param['frames'],
                       n_fft=param['n_fft'],
                       hop_length=param['hop_length'],
                       power=param['power'])
    # train
    train(param)

    # test
    test(param)
Example #15
File: app.py Project: aaronchen98/ReyeR
def get_frame():
    global cla
    print()
    print('=' * 20 + 'classification' + '=' * 20)
    # Receive the uploaded image
    # print(request.data)
    upload_file = request.files['file']
    # Get the image file name
    file_name = upload_file.filename
    # Directory where the file is saved
    file_path = r'/root/CSC4001/img'
    if upload_file:
        # Build the full file path
        file_paths = os.path.join(file_path, file_name)
        print(file_paths)
        # Save the received image to that directory
        upload_file.save(file_paths)
        result = test(file_paths, 0)
        print(result)
        print('=' * 55)
        print()

        # # Open an arbitrary other image and return it as the result,
        # with open(r'/root/Project/img/1001.jpg', 'rb') as f:
        #     res = base64.b64encode(f.read())
        #     return res
        # time.sleep(30)
        cla = result
        # print(cla)
        return jsonify(result)
    else:
        return 'bad'
Example #16
def configureCustomScan(arch, name, hlt, trials):

    cpus = {'1': 'SandyBridge', '2': 'IvyBridge', '3': 'Haswell'}

    maxjobs = {'1': 32, '2': 32, '3': 48}

    print 'Configuring Custom Scan for %s machine, which has max number of threads %i.' % (
        cpus[arch], maxjobs[arch])

    print 'Enter parameters for new test in following format: njobs,ncores,nthreads,baseHLT. When you are finished adding tests type \'Done\''
    mt = multiTest(name)

    while (1):
        params = raw_input('Test parameters, \'Done\' for finished: ')
        if params == 'Done':
            return mt
        else:
            njobs = int(params.split(',')[0])
            ncores = int(params.split(',')[1])
            nthreads = int(params.split(',')[2])
            hlt = params.split(',')[3]
            current = test(njobs, ncores, nthreads, name, hlt, trials)
            #add the timing parameters now
            customizeMenuForTiming(hlt)
            mt.tests.append(current)
Example #17
def ddmin(circumstances, test):
    """Return a sublist of CIRCUMSTANCES that is a relevant configuration
       with respect to TEST."""
    n = 2
    while len(circumstances) >= 2:
        subsets = split(circumstances, n)

        some_complement_is_failing = 0
        for subset in subsets:
            complement = listminus(circumstances, subset)

            file = open('complement.xml', 'w')
            for c in complement:
                file.write(c[1])
            file.close()
            
            if test('complement.xml') == FAIL:
                circumstances = complement
                n = max(n - 1, 2)
                some_complement_is_failing = 1
                break

        if not some_complement_is_failing:
            if n == len(circumstances):
                break
            n = min(n * 2, len(circumstances))

    os.remove('complement.xml')
    return circumstances
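A hedged usage sketch for `ddmin` above: the toy `split`, `listminus`, `FAIL`/`PASS`, and `failing_test` definitions below are illustrative stand-ins for the module's real helpers, chosen only so the driver runs end to end.

import os

FAIL, PASS = 'FAIL', 'PASS'

def split(circumstances, n):
    # Partition the list into chunks of roughly len/n elements.
    size = max(1, len(circumstances) // n)
    return [circumstances[i:i + size] for i in range(0, len(circumstances), size)]

def listminus(c1, c2):
    return [c for c in c1 if c not in c2]

def failing_test(path):
    # Toy oracle: the rebuilt file "fails" whenever it still contains <bad/>.
    with open(path) as f:
        return FAIL if '<bad/>' in f.read() else PASS

circumstances = list(enumerate(['<a/>', '<b/>', '<bad/>', '<c/>']))
print(ddmin(circumstances, failing_test))   # expected: [(2, '<bad/>')]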
Example #18
File: main.py Project: yxmfs/Note
def main():
    '''
    l = ['apple','big','cat','dog']
    #f = lambda x:part(x,2)
    f = partial(part,16,8)
    f_letter = lambda x: x[0]
    print(f())
    gen = s()
    for key in gen:
        print(key,'\t')
    for letter, name in itertools.groupby(l,f_letter):
        print(letter,list(name))
    for x in map(f_letter,l):
        print(x)
    path_fr('test.txt')
    '''
    #np_fun()
    t = test('hello')
    t()
    print(t)
    f = F()
    l = [1, 2, 45, 4, 1, 3, 2, 6, 77, 5]
    print(l)
    quickSort(l, 0, len(l) - 1)
    linklist_fun()
    pd_fun()
Example #19
def is_winning():
    global current_narrative
    global dialogue
    if (current_narrative == dialogue_exam_start):
        if check_inventory_for_cheats():
            print(
                "You are lucky that you have a cheat sheet with you :). Without it who knows what the right answer would be"
            )
            score = test(True)
            #sys.exit()
        else:
            score = test(False)
        if score > 10:
            if check_inventory_for_cheats():
                print(
                    "You cheated to win. You beat the system, but did you really. The only reason why you were able to cheat was because you were smart enough to hack into kirill's computer. Cheating may be frowned upon in our modern society, but to a rational human being the social norms are nothing, but guidelines. Cheating will always remain a strategy to gain an edge, but the only question is? Was it worth the risk? Unfortunately, this game has come to an end, but you can check out other ending by playing me again without or look at the player_choices.txt file to see how others did!!!"
                )
                user_choices.write(
                    "The user chose to cheat and won the game because of it\n")
                user_choices.close()
                sys.exit()
            print(
                "YOU WIN!!! CONGRATS! Your indepth knowledge of python is very well appreciated and you are an excellent student. There comes a time in our lives where we must part ways. I am afraid that this is it for the game, unless you want to check out the other ending to the game (go to kirills room and hack his computer) or check out how other players did by opening player_choices.txt file"
            )
            user_choices.write("The user won the game without cheating\n")
            user_choices.close()
            sys.exit()
        else:
            print(
                "Poor you. You failed the test and got kicked out of school. Depressed and sad, you turned to alcohol to wash away your problems. This ended your already short lifespan. The moral of the story is that in order to survive, you must either cheat or be intelligent. Don't worry though! You can play me again and see what happens if you cheat/answer correctly. Just make sure that no one is looking because that would be embarrassing! Or you can check who else failed by looking at the player_choices.txt file"
            )
            user_choices.write("The user lost the game\n")
            user_choices.close()
            sys.exit()
    elif current_narrative == dialogue_puzzle:
        global inventory
        hangman()
        inventory.append(item_cheat_notes)
        current_narrative = dialogue_stairwell
        dialogue_stairwell["numbers"].pop("4")
    elif current_narrative == dialogue_wait:
        dialogue["dialogue_jason"]["numbers"].pop("2", None)
    elif current_narrative == dialogue_coffee:
        dialogue["dialogue_breakfast"]["numbers"].pop("3", None)
        dialogue["dialogue_kitchen"]["numbers"].pop("2", None)
    else:
        return False
Example #20
def main():
    failCount = 0

    failCount += test(name="testAdvectionTri",
                      cmd=advectionBin,
                      settings=advectionSettings(element=3,
                                                 data_file=advectionData2D,
                                                 dim=2),
                      referenceNorm=0.723924419144375)

    failCount += test(name="testAdvectionQuad",
                      cmd=advectionBin,
                      settings=advectionSettings(element=4,
                                                 data_file=advectionData2D,
                                                 dim=2),
                      referenceNorm=0.722791610885232)

    failCount += test(name="testAdvectionTet",
                      cmd=advectionBin,
                      settings=advectionSettings(element=6,
                                                 data_file=advectionData3D,
                                                 dim=3),
                      referenceNorm=0.835495461081062)

    failCount += test(name="testAdvectionHex",
                      cmd=advectionBin,
                      settings=advectionSettings(element=12,
                                                 data_file=advectionData3D,
                                                 dim=3),
                      referenceNorm=0.833820360927384)

    failCount += test(name="testAdvectionTri_MPI",
                      ranks=4,
                      cmd=advectionBin,
                      settings=advectionSettings(element=3,
                                                 data_file=advectionData2D,
                                                 dim=2,
                                                 output_to_file="TRUE"),
                      referenceNorm=0.723627520020827)

    #clean up
    for file_name in os.listdir(testDir):
        if file_name.endswith('.vtu'):
            os.remove(testDir + "/" + file_name)

    return failCount
Example #21
def run_net(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_list

    # Create model directory
    if not os.path.exists(args.save_folder):
        os.makedirs(args.save_folder)

    transform = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize((0.4850, 0.4580, 0.4077), (1.0, 1.0, 1.0))
        #transforms.Normalize((123.68, 116.78, 103.94), (1.0, 1.0, 1.0))
    ])
    block_cnt = [int(c) for c in args.blocks.split(",")]
    net = FancyNet(block_cnt)
    #print "##### net:",net
    # net = ResNet(Bottleneck, [3, 4, 6, 3])

    if torch.cuda.is_available():
        print 'USE CUDA : net'
        net = net.cuda()
    #print net.keys()
    print 'before resume'
    if args.resume_flag == 1 and args.resume_path is not None:
        print 'resume in', args.resume_path
        trained_model = torch.load(args.resume_path)
        '''
        (key, value) = trained_model.popitem()
        print key, 'is remove from trained_model'
        (key, value) = trained_model.popitem()
        print key, 'is remove from trained_model'
        '''
        # print trained_model.keys()
        net.load_state_dict(trained_model)
        print 'resume_successs'
    else:
        print 'no resume path, start from random initialization'

    if args.phase == "test":
        print 'TEST phase'
        net = net.eval()
        test(args, net, transform)
    else:
        print 'TRAIN phase'
        train(args, net, transform)
def main():
    #Fetch the arguments
    parser = argparse_setup()
    args = parser.parse_args()

    # Train and validate the model
    model_to_test = train_validate(
        args.MODEL, args.N_CLASS, args.IMG_HEIGHT, args.IMG_WIDTH,
        args.CROP_HEIGHT, args.CROP_WIDTH, args.CLS_BALANCE,
        args.CLS_BALANCE_TYPE, args.OPTIMIZER, args.LR, args.DECAY,
        args.MOMENTUM, args.EPSILON, args.BETA1, args.EPOCH, args.BUFFER_SIZE,
        args.BATCH_SIZE, args.TRAINDIR, args.VALDIR, args.TRAIN_LOGDIR,
        args.VAL_LOGDIR, args.SNAPSHOTS_DIR, args.TEXTFILE_DIR,
        args.MAX_ITERATIONS, args.MAX_TO_KEEP, args.BREAK_POINT,
        args.AUGMENTATION, args.CROP)
    # Test the best model
    test(args.MODEL, args.IMG_HEIGHT, args.IMG_WIDTH, args.BATCH_SIZE,
         args.PREDDIR, args.MODPREDDIR, args.RGBDIR, args.TARGETDIR,
         args.SNAPSHOTS_DIR, args.TESTDIR, args.TEXTFILE_DIR, args.N_CLASS,
         model_to_test)
Example #23
def main():
    # args should include the following in sequence:
    # file path for negative words
    # file path for positive words
    # directory of negative training/testing examples
    # directory of positive training/testing examples
    # partial=True means model test on testing split; otherwise on training split
    # neg_hand=True means negation handling is enabled
    # bigram_hand=True means bigram handling is enabled

    partial = False
    neg_hand = False
    bigram_hand = False

    args = ['/Users/tianshuren/Google Drive/10701/Homework 1/opinion-lexicon-English/negative-words.txt',
            '/Users/tianshuren/Google Drive/10701/Homework 1/opinion-lexicon-English/positive-words.txt',
            '/Users/tianshuren/Google Drive/10701/Homework 1/review_polarity/txt_sentoken/neg/*',
            '/Users/tianshuren/Google Drive/10701/Homework 1/review_polarity/txt_sentoken/pos/*',
            partial, neg_hand, bigram_hand]

    N_Voc = args[0]
    P_Voc = args[1]
    N_Train = args[2]
    P_Train = args[3]
    N_Test = N_Train
    P_Test = P_Train

    config = [partial, neg_hand, bigram_hand]

    bigrams = ['extremely', 'quite', 'just', 'almost', 'very', 'too', 'enough']

    print 'building vocabulary...'
    vocab = build(N_Voc, P_Voc)
    print 'training neg files at ' + N_Train + '; pos files at ' + P_Train + '...'
    (num_neg, num_pos, vocab) = train(N_Train, P_Train, vocab, bigrams, config)
    print 'testing on negative files at ' + N_Test + '...'
    (neg_correct, neg_total) = test(N_Test, 'negative', vocab, bigrams, num_neg, num_pos, config)
    print 'testing on positive files at ' + P_Test + '...'
    (pos_correct, pos_total) = test(P_Test, 'positive', vocab, bigrams, num_neg, num_pos, config)
    total_accuracy = float(neg_correct + pos_correct) / (neg_total + pos_total)
    print 'total accuracy is %f' % total_accuracy
Example #24
def check_input(argv):
    global params
    args, params = check_arguments(argv, params)
    if args['load_flag']:
        prepare_data(params)
    if args['unzip_preloaded']:
        unzip_preloaded()
    if args['test_flag']:
        test(params)
    if args['filter_flag']:
        filter_dataset(params)
    if args['resize_flag']:
        resize_patches_to_size(params)
    if args['run_cv']:
        perform_5_2_CV(params)
    if args['auc']:
        get_CV_AUC(params)
    if args['regression']:
        get_regression_res(params)
    if args['mse']:
        get_CV_MSE(params)
Example #25
def run_labeling(weights, label_data, x_tr, index_tr, x_ts, index_ts):
    class_nbr = 10
    sigma_kernel = 1.0
    for i in range(10):
        x_tr, index_tr = unison_shuffle(x_tr, index_tr)
        x_lb = np.copy(x_tr[:label_data,:])
        index_lb = np.copy(index_tr[:label_data])

        # label the network
        neuron_label = labeling(label_data, class_nbr, weights, x_lb, index_lb, sigma_kernel)

        # test the network
        accuracy = test(class_nbr, weights, x_ts, index_ts, neuron_label, sigma_kernel)
Example #26
def ppoupas(Vf):
    """Renvoie True s'il existe un pp, ie un sous ensemble contenant autant de filles que de garçons, et false sinon"""

    S = []
    i = 0
    if test(Vf) == 0:  # there are as many girls as boys => pp
        return (True)

    else:  # go through all the subsets looking for a pp
        pp = False

        while i != len(Vf) and pp == False:
            S = Vf[:]
            S.pop(i)

            if test(S) == 0 and len(S) != 0:
                pp = True
            elif test(S) != 0:
                pp = ppoupas(S)
            i = i + 1

    return (pp)
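Here `test(Vf)` is not shown; from the docstring it presumably returns 0 exactly when the subset contains as many girls as boys. A minimal stand-in under that assumption, with girls encoded as +1 and boys as -1, is:

def test(Vf):
    # Assumed helper: signed count of girls (+1) minus boys (-1);
    # returns 0 exactly when the subset is balanced.
    return sum(Vf)

print(ppoupas([1, -1, 1]))   # True: the sub-list [1, -1] is balanced
print(ppoupas([1, 1]))       # False: no balanced non-empty subset exists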
Example #27
def main():
    failCount = 0

    failCount += test(name="testGradientTri",
                      cmd=gradientBin,
                      settings=gradientSettings(element=3,
                                                data_file=gradientData2D,
                                                dim=2,
                                                output_to_file="TRUE"),
                      referenceNorm=4.44288293763069)

    failCount += test(name="testGradientQuad",
                      cmd=gradientBin,
                      settings=gradientSettings(element=4,
                                                data_file=gradientData2D,
                                                dim=2),
                      referenceNorm=4.44288293763069)

    failCount += test(name="testGradientTet",
                      cmd=gradientBin,
                      settings=gradientSettings(element=6,
                                                data_file=gradientData3D,
                                                dim=3),
                      referenceNorm=12.1674394705786)

    failCount += test(name="testGradientHex",
                      cmd=gradientBin,
                      settings=gradientSettings(element=12,
                                                data_file=gradientData3D,
                                                dim=3),
                      referenceNorm=12.1673360264757)

    #clean up
    for file_name in os.listdir(testDir):
        if file_name.endswith('.vtu'):
            os.remove(testDir + "/" + file_name)

    return failCount
Example #28
def main():
    failCount = 0

    failCount += test(name="testLinearSolver_PCG",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="PCG"),
                      referenceNorm=0.500000001211135)

    failCount += test(name="testLinearSolver_FPCG",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="FPCG"),
                      referenceNorm=0.500000001211135)

    failCount += test(name="testLinearSolver_NBPCG",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="NBPCG"),
                      referenceNorm=0.500000001211135)

    failCount += test(name="testLinearSolver_NBFPCG",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="NBFPCG"),
                      referenceNorm=0.500000001211135)

    failCount += test(name="testLinearSolver_PGMRES",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="PGMRES"),
                      referenceNorm=0.500000001211135)

    failCount += test(name="testLinearSolver_PMINRES",
                      cmd=ellipticBin,
                      settings=ellipticSettings(element=3,
                                                data_file=ellipticData2D,
                                                dim=2,
                                                precon="NONE",
                                                linear_solver="PMINRES"),
                      referenceNorm=0.500000001211135)

    return failCount
Example #29
def train_eval_test(model,lr,optimizer,criterion,train_loader,validation_loader,test_loader,sta,checkpoint_path,
                    start_epoch,args,super_train_loss,super_validation_loss,super_test_loss,best_loss):
    # todo add the eval process and add the test process
    if args.resume:
        epoches = args.epoches
    else:
        epoches = args.refine_epoches
    for epoch in range(start_epoch, epoches):
        lr = adjust_learning_rate(optimizer, epoch, args.adjust_per_epochs, lr, args.schedule,
                                  gamma=args.gamma)
        print("*" * 58)
        print("PID:[{}] | Sta:[{}] | model:[{}] | seq_len:[{}] | pred_len:[{}]".
              format(os.getpid(), sta, args.arch, args.seq_len, args.pred_len))
        print("Epoch:[%d | %d]; LR: [%f]" % (epoch, args.refine_epoches, lr))
        train_loss = train(train_loader, model, optimizer, criterion, args)
        print("train loss=[%.2f];\tsuper_train_loss=[%.2f]"%(train_loss.avg, super_train_loss.avg))
        validation_loss = test(validation_loader, model, criterion, args)
        print("validation loss=[%.2f];\tsuper_validation_loss=[%.2f]"%(validation_loss.avg, super_validation_loss.avg))
        test_loss = test(test_loader, model, criterion, args)
        print("test loss=[%.2f];\tsuper_test_loss=[%.2f]"%(test_loss.avg, super_test_loss.avg))
        # save checkpoints
        logger = Logger(os.path.join(checkpoint_path, 'log.txt'), title="{} Log".format(args.arch), resume=True)
        logger.append([epoch, lr, train_loss.avg, validation_loss.avg, test_loss.avg,super_train_loss.avg,
                       super_validation_loss.avg,super_test_loss.avg])
        is_best = test_loss.avg < best_loss
        best_loss = min(test_loss.avg, best_loss)
        save_checkpoint({
            'lr': lr,
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer_static_dict': optimizer.state_dict(),
            'train_loss': train_loss.avg,
            'test_loss': test_loss.avg,
            'best_loss': best_loss
        },
            is_best=is_best,
            checkpoint_path=checkpoint_path,
            filename=args.resume_file)
Example #30
def extractionpp(Vf):
    """Procédure permettant d'extraire les ensembles pp"""

    Ss = []  # list of subsets
    global pp

    if test(Vf) == 0:
        pp.append(Vf)

    for i in range(0, len(Vf)):
        Ss = Vf[:]
        Ss.pop(i)

        extractionpp(Ss)
Example #31
def main():
  failCount=0;

  failCount += test(name="testAcousticsTri",
                    cmd=acousticsBin,
                    settings=acousticsSettings(element=3,data_file=data2D,dim=2),
                    referenceNorm=10.1302322430996)

  failCount += test(name="testAcousticsQuad",
                    cmd=acousticsBin,
                    settings=acousticsSettings(element=4,data_file=data2D,dim=2),
                    referenceNorm=10.1299609797959)

  failCount += test(name="testAcousticsTet",
                    cmd=acousticsBin,
                    settings=acousticsSettings(element=6,data_file=data3D,dim=3,
                                               degree=2),
                    referenceNorm=31.6577046152384)

  failCount += test(name="testAcousticsHex",
                    cmd=acousticsBin,
                    settings=acousticsSettings(element=12,data_file=data3D,dim=3,
                                               degree=2),
                    referenceNorm=31.6576028812776)

  failCount += test(name="testAcousticsTri_MPI", ranks=4,
                    cmd=acousticsBin,
                    settings=acousticsSettings(element=3,data_file=data2D,dim=2,output_to_file="TRUE"),
                    referenceNorm=10.1300558638317)

  #clean up
  for file_name in os.listdir(testDir):
    if file_name.endswith('.vtu'):
      os.remove(testDir + "/" + file_name)

  return failCount
Example #32
def load_input():
    df = test()
    df.dropna(inplace=True)
    
    labels = df['result'].to_numpy()#dependent variable
    labels = labels.T #transpose
    #print(type(labels))
    
    df.drop(columns = ['result','corr'], inplace = True)
    input_set = normal_data(df.to_numpy())
    #t = pd.read_csv('c:/Users/yanzheng/Desktop/samplesinput.csv',dtype=float)
    #labels = np.array(t['result'].tolist())
    #t = t[[var_1,var_2]]
    #input_set = normal_data(t.to_numpy())
    
    print(count(labels.tolist()))
    return labels,input_set
Example #33
File: knn.py Project: tanay-s/Thesis
def get_img_features(check_label=None):
    # x_test, y_test = load_data(args.img_w, args.n_cls, args.n_ch, mode='test')
    h5f_train = h5py.File(
        '/data/hula/tanay/Codes/ChestX/image_retrieval/chest_xray/retrieved_images/realimg_cond_'
        + str(check_label) + '.h5', 'r')
    #h5f_train = h5py.File(
    #    '/data/hula/tanay/CXR8/chest256_train_801010_no_normal.h5', 'r')
    x_test = h5f_train['X_test'][:]
    y_test = h5f_train['Y_test'][:]
    # features = h5f_train['features'][:]
    h5f_train.close()
    # y_test = np.zeros((x_test.shape[0], 14))
    # y_test[:, 5] = 1
    img = x_test  #/255.   #make sure x is scaled between 0 and 1
    label = y_test
    features = test(img, label, single_img=0, all=0)
    return x_test, label, features  #, y_t
Example #34
File: main.py Project: joycez/NLP_Proj1
def main():
#  ----     part1       ----

##        lm1 = LangModel("sample.lm", 1)
##        lm2 = LangModel("sample.lm", 2)
##        print lm2.prob_bigram("God", "saw");
##
##        lm1 = LangModel("bible_train.lm", 1)
##        lm2 = LangModel("bible_train.lm", 2)
##
##        lm1 = LangModel("hotel_train.lm", 1)
##        lm2 = LangModel("hotel_train.lm", 2)
##        
##        g1 = generator(lm1,1)
##        g2 = generator(lm2,2)

##        print '------Sentences by Unigram Model------'
##        for i in range(3):
##                g1.go(25)
##        print '------Sentences by Bigram Model------'
##        for i in range(3):
##                g2.go(25)
##        print '------End------'

#  ----     part2       ----
##
        lm1 = GTLangModel("bible_train.lm", "bible_valid.lm", 1)
        lm2 = GTLangModel("bible_train.lm", "bible_valid.lm", 2)
        lm3 = GTLangModel("bible_train.lm", "bible_valid.lm", 3)        
        bible_test1 = test(lm1, 1, "bible_test.lm")
        bible_test2 = test(lm2, 2, "bible_test.lm")
        bible_test3 = test(lm3, 3, "bible_test.lm")
        print "bible Unigram perplexity:"        
        print bible_test1.pp();
        print "bible Bigram perplexity:"
        print bible_test2.pp();
        print "bible Trigram perplexity"
        print bible_test3.pp();

        lm1 = GTLangModel("hotel_train.lm", "hotel_valid.lm", 1)
        lm2 = GTLangModel("hotel_train.lm", "hotel_valid.lm", 2)
        lm3 = GTLangModel("hotel_train.lm", "hotel_valid.lm", 3)        
        hotel_test1 = test(lm1, 1, "hotel_test.lm")
        hotel_test2 = test(lm2, 2, "hotel_test.lm")
        hotel_test3 = test(lm3, 3, "hotel_test.lm")
        print "hotel Unigram perplexity:"
        print hotel_test1.pp();
        print "hotel Bigram perplexity:"
        print hotel_test2.pp();
        print "hotel Trigram perplexity"
        print hotel_test3.pp();
def configureThreadScan(arch,name,hlt,trials):

    cpus = {'1':'SandyBridge','2':'IvyBridge','3':'Haswell'}

    maxjobs = {'1':32,'2':32,'3':48}

    print 'Configuring Thread scan for %s machine, which has max number of cores %i. Would you like to run this or up to a different number?' % (cpus[arch],maxjobs[arch])
    maxThreads = int(raw_input('Enter desired maximum number of threads: '))    

    mt = multiTest(name)
    for i in range(1,maxThreads+1):
        if i <= maxjobs[arch]:
            cores = i
        else:
            cores = 999
        current = test(1,cores,i,name,hlt,trials)
        mt.tests.append(current)

    return mt
def configureCPUScan(arch,name,hlt,trials):
 
    cpus = {'1':'SandyBridge','2':'IvyBridge','3':'Haswell'}

    maxjobs = {'1':32,'2':32,'3':48}

    print 'Configuring CPU scan for %s machine, which has max number of jobs %i. Would you like to run this or up to a smaller number?' % (cpus[arch],maxjobs[arch])
    nJobs = int(raw_input('Enter desired number of jobs: '))

    #make multiTest
    mt = multiTest(name)
    
    #now add tests to multiTest
    for i in range(1,nJobs+1):
        current = test(i,i,1,name,hlt,trials)
        mt.tests.append(current)

    #now return correctly configured multiTest
    return mt
Example #37
def configureThreadScan(arch, name, hlt, trials):

    cpus = {'1': 'SandyBridge', '2': 'IvyBridge', '3': 'Haswell'}

    maxjobs = {'1': 32, '2': 32, '3': 48}

    print 'Configuring Thread scan for %s machine, which has max number of cores %i. Would you like to run this or up to a different number?' % (
        cpus[arch], maxjobs[arch])
    maxThreads = int(raw_input('Enter desired maximum number of threads: '))

    mt = multiTest(name)
    for i in range(1, maxThreads + 1):
        if i <= maxjobs[arch]:
            cores = i
        else:
            cores = 999
        current = test(1, cores, i, name, hlt, trials)
        mt.tests.append(current)

    return mt
def configureCustomScan(arch,name,hlt,trials):
 
    cpus = {'1':'SandyBridge','2':'IvyBridge','3':'Haswell'}

    maxjobs = {'1':32,'2':32,'3':48}

    print 'Configuring Custom Scan for %s machine, which has max number of threads %i.' % (cpus[arch],maxjobs[arch])

    print 'Enter parameters for new test in following format: njobs,ncores,nthreads,baseHLT. When you are finished adding tests type \'Done\''
    mt = multiTest(name)

    while(1):
        params = raw_input('Test parameters, \'Done\' for finished: ')
        if params=='Done':
            return mt
        else:
            njobs = int(params.split(',')[0])
            ncores = int(params.split(',')[1])
            nthreads = int(params.split(',')[2])
            hlt = params.split(',')[3]
            current = test(njobs,ncores,nthreads,name,hlt,trials)
        #add the timing parameters now
            customizeMenuForTiming(hlt)
            mt.tests.append(current)
Example #39
from test import *
import os
import Image

if __name__ == "__main__":
    model = loadModel("./model/model.h5")
    path = os.listdir("./Img/Demo/")
    for name in path:
        img = Image.open("./Img/Demo/" + name)
        showResult(test(model, img), name)
Example #40
from test import *
import numpy
A = numpy.array([[0,1],[2,3]])
B = numpy.array([[0,1],[2,3]])
C = numpy.array([0,1,2,3])
test(A,B,C)
Example #41
File: sdunit.py Project: elmore/sixthdev
 def run(self, test):
     result = SixthDayTestResult(self.stream)
     test(result)
     result.printErrors()
     return result
Example #42
def pam_sm_end(pamh):
  return test(pam_sm_end, pamh, None, None)
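The `pam_sm_*` hooks in these examples all delegate to a shared `test` helper whose body is not shown. Under the assumption that it simply reports which hook was called and returns success, a stand-in (not the module's real code) could look like:

def test(handler, pamh, flags, argv):
    # Hypothetical shared helper: announce the hook that fired, then succeed.
    try:
        pamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO,
                                       "called {0}".format(handler.__name__)))
    except Exception:
        pass  # no conversation function available
    return pamh.PAM_SUCCESS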
Example #43
File: softmax.py Project: hhcho/rnn
    def __init__(self, dtype):
        self.dtype = dtype

    def init(self, shape, dtype=None):
        (k,b,n) = shape
        self.y = np.random.randint(n, size=(k,b))
        return self

    def __call__(self, yh):
        return cross_entropy(yh, self.y)

    def grad(self, yh):
        return self.y

def test():
    import test
    size = 4
    layer = SoftmaxCrossEntropy()
    
    error_f = CrossEntropyError(np.int32)
    test.layer(layer, ins=size, outs=size, err_f=error_f, float_t = np.float64)
    test.layer(layer, ins=size, outs=size, err_f=error_f, float_t = np.float32)

    error_f = CrossEntropyError(np.int64)
    test.layer(layer, ins=size, outs=size, err_f=error_f, float_t=np.float64)
    test.layer(layer, ins=size, outs=size, err_f=error_f, float_t=np.float32)
    
if __name__ == '__main__':
    test()

Example #44
def cursive(n):
    if n > 0:
        cursive(n-1)

result = testfunc(cursive, [200], None)

logging.info(result)

assertTrue(result.exception != None)

def test(func, args, expect):
    try:
        logging.logCall(func, args)
        r = apply(func, args)
        assert r == expect
    except Exception as e:
        assert e == expect

def foo(parg, varg = 10):
    return parg + varg
    
test(foo, [], "ArgError,parg=1,varg=1,given=0")
test(foo, [0], 10)
test(foo, [1,2], 3)
test(foo, [1,2,3], "ArgError,parg=1,varg=1,given=3")

def add(a,b):
    return a+b
    
print("add(*[1,2])=", add(*[1,2]))
    def test_get_tags(self):

        # SETUP ----------------------------------------------------------------

        # create mock user docs in the database 
        @tornado.gen.coroutine
        def setup_users():
            result = []
            for _ in range(self.NUM_USERS):
                username = self.rand_noun()
                user_id = yield sundowner.data.users.create({
                    "facebook": {
                        "id":   None,
                        "name": username,
                        }})
                result.append(user_id)
            raise tornado.gen.Return(result)
        users = IOLoop.instance().run_sync(setup_users)

        # one of the users will act as the user issuing the request and another
        # subset will act as friends of this user
        friends = random.sample(users, self.NUM_FRIENDS+1)
        user_id = friends.pop()

        # add friends to cache
        FriendsCache.put(user_id, friends)

        # create mock tags
        @tornado.gen.coroutine
        def create_tag():

            # pick a random lng/lat inside the query radius
            # it appears that a lng/lat > (QUERY_RADIUS_RADIANS + .006) will 
            # fall outside the query radius
            qrr = sundowner.data.content._QUERY_RADIUS_RADIANS
            lng = random.uniform(self.QUERY_LNG-qrr, self.QUERY_LNG+qrr)
            lat = random.uniform(self.QUERY_LAT-qrr, self.QUERY_LAT+qrr)
           
            user_id = random.choice(users)
            text = self.rand_noun()

            content_id = yield sundowner.data.content.put(
                user_id=    user_id,
                text=       text,
                url=        None,
                accuracy=   0,
                lng=        lng,
                lat=        lat)

            raise tornado.gen.Return(content_id)

        for _ in range(self.NUM_TAGS):
            IOLoop.instance().run_sync(create_tag)


        # TEST -----------------------------------------------------------------

        def test():
            @tornado.gen.coroutine
            def query_content():
                result = yield ContentModel.get_nearby(
                    self.QUERY_LNG, self.QUERY_LAT, user_id)
                raise tornado.gen.Return(result)
            return IOLoop.instance().run_sync(query_content)
        
        """
        def test_old():
            @tornado.gen.coroutine
            def query_content():
                result = yield ContentModel.get_nearby_old(
                    self.QUERY_LNG, self.QUERY_LAT)
                raise tornado.gen.Return(result)
            result = IOLoop.instance().run_sync(query_content)
        """

        #print "OLD", timeit.timeit(test_old, number=self.NUM_TRIALS)
        #print "NEW", timeit.timeit(test, number=self.NUM_TRIALS)
        pprint.pprint(test())
        raise Exception
Example #46
 def run(self, test):
     result = unittest.TestResult()
     test(result)
     return result
Example #47
from test import *
import pickle
import matplotlib.pyplot as plt

input = [1000,[10, 20, 30, 40, 50, 60, 70, 75, 80, 85, 87, 89, 90,
91, 92, 93, 94, 95, 95, 97, 98, 99, 100]]
pickle.dump(input, open("input.p", "wb"))

test_ = test()
test_.start()

output = pickle.load(open("output.p", "rb"))

# create the plot object

plt.switch_backend('TkAgg')

# subplot: collisions with open addressing
plt.subplot(221)
plt.plot(output[0], output[1][0])
plt.plot(output[0], output[1][1])
plt.plot(output[0], output[1][2])
plt.xlabel("Percentuali")
plt.ylabel("Collisioni")
plt.title("Collisioni in Indirizzamento Aperto")
plt.legend(["Min", "Med", "Max"])

# subplot: probe sequences with open addressing
plt.subplot(222)
plt.plot(output[0], output[2][0])
plt.plot(output[0], output[2][1])
plt.plot(output[0], output[2][2])
Example #48
def dd(c_pass, c_fail, test, splitter = None):
    """Return a triple (DELTA, C_PASS', C_FAIL') such that
       - C_PASS subseteq C_PASS' subset C_FAIL' subseteq C_FAIL holds
       - DELTA = C_FAIL' - C_PASS' is a minimal difference
         between C_PASS' and C_FAIL' that is relevant with respect to TEST."""

    if splitter is None:
        splitter = split.split

    n = 2
    while 1:
        delta = listminus(c_fail, c_pass)

        if n > len(delta):
            # No further minimizing
            return (delta, c_pass, c_fail)

        deltas = splitter(delta, n)

        offset = 0
        j = 0
        while j < n:
            i = (j + offset) % n
            next_c_pass = listunion(c_pass, deltas[i])
            next_c_fail = listminus(c_fail, deltas[i])

            file = open('next_c_pass.xml', 'w')
            for c in next_c_pass:
                file.write(c[1])
            file.close()

            file = open('next_c_fail.xml', 'w')
            for c in next_c_fail:
                file.write(c[1])
            file.close()

            if test('next_c_fail.xml') == FAIL and n == 2:
                c_fail = next_c_fail
                n = 2
                offset = 0
                break
            elif test('next_c_fail.xml') == PASS:
                c_pass = next_c_fail
                n = 2
                offset = 0
                break
            elif test('next_c_pass.xml') == FAIL:
                c_fail = next_c_pass
                n = 2
                offset = 0
                break
            elif test('next_c_fail.xml') == FAIL:
                c_fail = next_c_fail
                n = max(n - 1, 2)
                offset = i
                break
            elif test('next_c_pass.xml') == PASS:
                c_pass = next_c_pass
                n = max(n - 1, 2)
                offset = i
                break
            else:
                j = j + 1

        os.remove('next_c_pass.xml')
        os.remove('next_c_fail.xml')

        if j >= n:
            if n >= len(delta):
                return (delta, c_pass, c_fail)
            else:
                n = min(len(delta), n * 2)
Example #49
def pam_sm_acct_mgmt(pamh, flags, argv):
  return test(pam_sm_acct_mgmt, pamh, flags, argv)
def test_suit(r):
    test(r.contains(Point(0, 0)))
    test(r.contains(Point(3, 3)))
    test(not r.contains(Point(3, 7)))
    test(not r.contains(Point(3, 5)))
    test(r.contains(Point(3, 4.9999999)))
    test(not r.contains(Point(-3, -3)))
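In this snippet `test` looks like the simple boolean assertion helper used in introductory Python texts; a minimal stand-in under that assumption is:

import sys

def test(did_pass):
    # Hypothetical assertion helper: report pass/fail with the caller's line number.
    linenum = sys._getframe(1).f_lineno
    print("Test at line {0} {1}.".format(linenum, "ok" if did_pass else "FAILED"))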
Example #51
def pam_sm_authenticate(pamh, flags, argv):
  return test(pam_sm_authenticate, pamh, flags, argv)
Example #52
def pam_sm_chauthtok(pamh, flags, argv):
  return test(pam_sm_chauthtok, pamh, flags, argv)
Example #53
    window.y = 200
    window.ycenter = -2.0
    group = pygame.sprite.LayeredDirty(window, layer = 0, _use_update = True)
    bgd = []
    def testlogic(event):
        if event == None:
            window.rotation += .01
            return
        if event.type == pygame.KEYDOWN:
            if event.key == 27:
                return True
            if event.key >= 256:
                return
            import random
            if event.mod & 4095 == 0:
                bar.actual -= min((random.randint(5, 500), bar.actual))
            else:
                bar.actual += min((random.randint(5, 500), bar._max - bar.actual))
    def testrender(screen):
        clock.tick()
        time = clock.get_time()
        group.update(time)
        if len(bgd) == 0:
            bgd.append(pygame.Surface((screen.get_width(), screen.get_height())))
            bgd[0].fill((0, 255, 0))
            group.clear(screen, bgd[0])
        group.draw(screen)
    test.test(testlogic, testrender)

if __name__ == '__main__': test()
Example #54
def pam_sm_close_session(pamh, flags, argv):
  return test(pam_sm_close_session, pamh, flags, argv)
Example #55
	def start_test(self):
		self.p1_var.set(0)
		voltage_set = self.voltage.final_check()
		if voltage_set == True:
			self.data_disp.configure(state=tk.NORMAL)
			self.data_disp.delete("1.0", tk.END)
			self.data_disp.configure(state=tk.DISABLED)
			# Whoever's supposed to maintain this...I am so sorry for these lines
			self.parent._nametowidget(self.parent.winfo_parent()).openmenu.entryconfig("Configuration File",state="disabled")
			self.parent._nametowidget(self.parent.winfo_parent()).openmenu.entryconfig("Current Test", state="normal")
			self.parent._nametowidget(self.parent.winfo_parent()).editmenu.entryconfig("Test Parameters",state="disabled")
			self.parent._nametowidget(self.parent.winfo_parent()).editmenu.entryconfig("Zero Balance",state="disabled")
			self.parent._nametowidget(self.parent.winfo_parent()).editmenu.entryconfig("Restore Default Config",state="disabled")
			self.parent._nametowidget(self.parent.winfo_parent()).editmenu.entryconfig("Heatflux Parameters",state="disabled")

			# close any external windows
			try:
				self.homepage.params_window.destroy()
				self.homepage.heatflux_window.destroy()
			except:{}
			self.parent.select(self)
			self.after(0,lambda:disable_button(self.homepage.start_button))
			self.after(0,lambda:disable_button(self.homepage.edit_params_button))
			self.after(0,lambda:disable_button(self.homepage.zero_balance_button))
			self.after(0,lambda:disable_button(self.start_button))
			self.after(0,lambda:enable_button(self.cancel_button))
			self.after(0,lambda:enable_button(self.pause_button))
			self.after(0,lambda:disable_button(self.homepage.refresh_button))
			self.after(0,lambda:disable_button(self.homepage.debug_page.refresh_button))
			self.after(0,lambda:disable_button(self.voltage.add_new))
			self.after(0,lambda:disable_button(self.voltage.delete_button))
			self.after(0,lambda:disable_button(self.voltage.clear_button))
			self.voltage.local.configure(state=tk.DISABLED)
			self.voltage.remote.configure(state=tk.DISABLED)
			# Checking the status (1 or 0) of each of the data input devices, which is stored in the config file
			values = [int(self.store.get_config('Top Temperature')),int(self.store.get_config('Bottom Temperature')),int(self.store.get_config('Surface Temperature')),int(self.store.get_config('Ambient Temperature')),int(self.store.get_config('Humidity')),int(self.store.get_config('Mass')),int(self.store.get_config('Voltage')),int(self.store.get_config('Current')),int(self.store.get_config('Plate Voltage')),int(self.store.get_config('Heat Transfer'))]

			# Creating the items to be written to the first two rows of the new .csv file corresponding to the current test
			write_to_file_params = ["Time (s)"]
			i = 0
			for val in values:
				if val == 1:
					write_to_file_params.append(self.parameter_list_2[i])
				i = i+1
			# Sample Rate
			self.sample_rate = self.store.get_config('Sample Rate').rstrip()
			self.sample_unit = self.store.get_config('Sample Unit').rstrip()
			if self.sample_unit == 'second(s)':
				self.sample_rate = int(self.sample_rate)
			if self.sample_unit == 'minute(s)':
				self.sample_rate = int(self.sample_rate)
			if self.sample_unit == 'hour(s)':
				self.sample_rate = int(self.sample_rate)

			self.write_to_file_info = ["Test Name:", self.store.get_config("Test Name").rstrip(),"Test Duration (HH:MM):",self.store.get_config("Test Duration").rstrip(),"Sample Rate:",str(self.sample_rate) + ' ' + self.sample_unit]

			# Setting the length of the Progressbar
			duration = (self.store.get_config("Test Duration")).split(':')
			hr = int(duration[0])
			m = int(duration[1])
			self.total_m = m + hr*60
			self.total_s = self.total_m*60
			self.test_runs_total = self.total_s/self.sample_rate
			self.counter_var.set("0/%d"%self.test_runs_total)

			self.p1.config(maximum=self.test_runs_total+1)
			# Creating the .csv file and writing the first two lines to it
			if self.voltage.mode_var.get() == 2:
				self.new_voltage_item = Voltage(self,self.voltage.schedule,self.hardwareIO,self.total_s)
				self.new_voltage_item.state("withdrawn")
			else: 
				self.new_voltage_item = None
			self.store.test_init(write_to_file_params,self.write_to_file_info)
			self.test_runs = 0
			self.current_test = test(self,self.test_runs_total,self.sample_rate,self.new_voltage_item)
			self.current_test.state("withdrawn")
			self.current_test.mainloop()
Example #56
def pam_sm_open_session(pamh, flags, argv):
  return test(pam_sm_open_session, pamh, flags, argv)
Example #57
    def run(self, classes):

        assert isinstance(classes, list), 'Classes must be provided as a list'

        suiteStart     = datetime.datetime.now()

        totalCompleted = 0
        totalPassed    = 0
        totalSkipped   = 0

        for cls in classes:

            if not issubclass(cls, TestClass):
                self.log.error('{0} does not inherit from TestClass'.format(cls))
                continue

            self.addFileHandler(cls)

            self.log.info('[ {0} ]'.format(cls.description))

            instance = cls()

            setup   = None
            cleanup = None
            tests   = []

            for name in dir(cls):

                method = getattr(cls, name)

                if not hasattr(method, 'fixture'):
                    continue

                fixture = getattr(method, 'fixture')

                if fixture == TestClass.Fixtures.SETUP:
                    setup = method

                elif fixture == TestClass.Fixtures.CLEANUP:
                    cleanup = method

                elif fixture == TestClass.Fixtures.TEST:
                    tests.append(method)

            assert tests, 'No tests found.'

            if hasattr(cls, TestClass.Fixtures.SKIP) and getattr(cls, TestClass.Fixtures.SKIP):

                totalSkipped += len(tests)
                continue

            report = Report()
            report.initialize([test.description for test in tests])

            testStart = datetime.datetime.now()

            for test in tests:

                if hasattr(test, TestClass.Fixtures.SKIP) and getattr(test, TestClass.Fixtures.SKIP):
                    continue

                self.log.info('[ {0} should {1} ]'.format(cls.description, test.description))
                status = Status.INCOMPLETE

                try:

                    if setup:
                        self.log.debug('[ Setup ]')
                        setup(instance)

                    try:
                        self.log.debug('[ Test ]')
                        test(instance)

                        status = Status.PASS

                    except AssertionError as assertion:

                        self.log.error(traceback.format_exc())
                        report.addFailure(assertion.message)

                        status = Status.FAIL
                        print 'test error: {0}'.format(assertion.message)

                    finally:

                        if cleanup:
                            self.log.debug('[ Cleanup ]')
                            cleanup(instance)

                except Exception as exception:

                    self.log.error(traceback.format_exc())
                    report.addFailure(str(exception))

                    status = Status.BLOCKED

                finally:

                    report.update(test.description, status)

            completed = report.completed
            duration  = (datetime.datetime.now() - testStart).total_seconds()
            passed    = report.passes
            skipped   = len(tests) - completed

            self.log.info('=============================================')
            self.log.info(cls.description)
            self.log.info('---------------------------------------------')

            results = report.results

            if not results:
                self.log.warning('No results')

            for result in results:
                self.log.info(result)

            self.log.info('=============================================')

            self.log.info('Passed {0}/{1}. Skipped {2}. Run time: {3} seconds.'.format(passed, completed, skipped, duration))

            totalCompleted += completed
            totalPassed    += passed
            totalSkipped   += skipped

            self.removeFileHandler()

        duration = (datetime.datetime.now() - suiteStart).total_seconds()
        self.log.info('Passed {0}/{1}. Skipped {2}. Run time: {3} seconds.'.format(totalPassed, totalCompleted, totalSkipped, duration))
Example #58
def pam_sm_setcred(pamh, flags, argv):
  return test(pam_sm_setcred, pamh, flags, argv)
Example #59
File: inject.py Project: nikhilrj/CARDS
import inspect
from test import *
import sys


#from test2 import *
g=322

def a():
	b = 3
	c = 'af'


if __name__ == '__main__':
	a()
	b = test()

	print b.__dict__

	for i in globals().keys():
		#print i, globals()[i]
		print i, globals()[i], sys.getsizeof(globals()[i])

	for i in locals().keys():
		print i, locals()[i], sys.getsizeof(locals()[i])
	#b.doStuff()
	#print dir(), locals(), globals()


	#for i in globals().keys():
	#	if inspect.isclass(globals()[i]):