Code Example #1
def main():
   if len(sys.argv) != 6:
      print('Usage: '+sys.argv[0]+' URL databasehost databaseport databaseuser databasepassword')
      print('e.g. '+sys.argv[0]+' https://gerbera.informatik.uni-stuttgart.de:8081 gerbera.informatik.uni-stuttgart.de 3306 tourenplaner toureNPlaner')
      sys.exit(1)
   
   cmdline = ['mysql', '-u', sys.argv[4], '-p'+sys.argv[5], '--verbose', '--force', '--host', sys.argv[2], '--port', sys.argv[3]]
   
   #initialize DB 
   PIPE = None
   initFileSql = io.open('initTestDB.sql', 'rt', encoding='UTF-8')
   Popen(cmdline, stdin=initFileSql, stdout=PIPE)
 
   #execute tests 
   http = httplib2.Http(disable_ssl_certificate_validation=True)
   requester = tester.Requester(http)


   t = tester.Tester(sys.argv[1], requester, GetAllTests())
   t.runTests()
   
   #finalize DB
   PIPE = None
   finalFileSql = io.open('finalTestDB.sql', 'rt', encoding='UTF-8')
   Popen(cmdline, stdin=finalFileSql, stdout=PIPE)
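
The excerpt above omits its imports. A minimal set inferred from the call sites (an assumption, not shown in the source; GetAllTests is presumably defined in the project's own test-definition module) would be:

import io
import sys
import httplib2
import tester
from subprocess import Popen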
Code Example #2
File: testsuite.py Project: yuriks/fsm_sim
def run_tests(verbose=True):
    m = fsm.ControlFSM()
    t = tester.Tester(m, [
        'RegDst', 'RegWrite', 'ALUSrcA', 'MemRead', 'MemWrite', 'MemToReg',
        'lorD', 'IRWrite', 'PCWrite', 'PCWriteCond', 'AluOP1', 'AluOP0',
        'AluSrcB1', 'AluSrcB0', 'PCSrc1', 'PCSrc0'
    ])

    out = [
        "-0010-0110000100",  # 0
        "-0000--0000011--",  # 1
        "-0100--0000010--",  # 2
        "-0-10-1000------",  # 3
        "11-000-000------",  # 4
        "-0-01-1000------",  # 5
        "-0100--0001000--",  # 6
        "11-000-000------",  # 7
        "-0100--001010001",  # 8
        "-0-00--010----10"
    ]  # 9

    t.check_output(out[0])
    t.run_test("100011", [out[1], out[2], out[3], out[4], out[0]])
    t.run_test("101011", [out[1], out[2], out[5], out[0]])
    t.run_test("000000", [out[1], out[6], out[7], out[0]])
    t.run_test("000100", [out[1], out[8], out[0]])
    t.run_test("000010", [out[1], out[9], out[0]])

    return t.status()
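
A hypothetical command-line entry point for this module, assuming t.status() returns a truthy value when every expected output matched (the excerpt does not confirm this):

import sys

if __name__ == '__main__':
    # Exit non-zero if any FSM output check failed.
    sys.exit(0 if run_tests(verbose=True) else 1)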
Code Example #3
File: test.py Project: drudru/-tomuss-code
def do_tests(client, output, server, nb):
    output.write('<td style="vertical-align:top" width="%d%%">\n' %
                 int(100. / nb))
    start = time.time()
    t = tester.Tester(client, output, server)
    m = '?BUG?'
    try:
        try:
            t.initialize()

            run('test_home', t)
            run('test_table', t)
            run('test_popup', t)
        except tester.Regtest:
            pass
        if t.errors:
            m = '***bad[' + ' '.join(t.errors) + ']***'
        else:
            m = 'ok'
    finally:
        if t:
            t.stop()

    m = t.client_name + ':' + m + '(%ds) ' % (time.time() - start)
    output.write(m + '</td>\n')

    return m
Code Example #4
 def test(self, ans, client):
     patch = os.path.join(self.__dir, "abc.ans")
     with open(patch, "wb") as f:
         f.write(ans)
     os.chmod(patch, 0o755)
     client.write("[*] SLA Test Start...")
     if not tester.Tester(patch, USER, alarm=7).run_tests(
             gen_SLA_test(self.__opt)):
         client.write("[!] SLA Test Failed!")
         return False
     client.write("[*] SLA Test Done")
     client.write("[*] Vulnerability Test Start...")
     if not tester.Tester(patch, USER, alarm=7).run_tests(
             gen_PATCH_test(self.__opt)[:self.__stage]):
         client.write("[!] Vulnerability Test Failed!")
         return False
     client.write("[*] Vulnerability Test Done")
     return True
Code Example #5
File: main.py Project: zwx230741/zubax_chibios
def main() -> int:
    argparser = argparse.ArgumentParser(
        description='Zubax Embedded Bootloader automated test script')
    argparser.add_argument(
        'iface', help='name of the CAN interface, e.g. "can0", "/dev/ttyACM0"')
    argparser.add_argument(
        'uid',
        help='unique ID of the device under test as a hex string, e.g. '
        '37ffdc05465430353344164300000000, or as a base64 encoded string, e.g. '
        '"N//cBUZUMDUzRBZDAAAAAA==". The string may contain spaces, they will be removed.'
    )
    argparser.add_argument(
        'valid_fw_dir',
        help='path to the directory with valid firmware images')
    argparser.add_argument(
        'invalid_fw_dir',
        help='path to the directory with invalid firmware images')
    argparser.add_argument('--verbose',
                           '-v',
                           action='count',
                           help='verbosity level (-v, -vv)')
    argparser.add_argument(
        '--nid',
        help='local node ID used by the script (default: 127)',
        type=int,
        default=127)
    args = argparser.parse_args()

    _configure_stderr_logging(args.verbose)

    iface = args.iface

    try:
        args.uid = args.uid.replace(' ', '')
        if len(args.uid) == 32:
            uid = binascii.unhexlify(args.uid)
        else:
            uid = binascii.a2b_base64(args.uid)
    except Exception as ex:
        logger.error('Could not parse UID: %r', ex, exc_info=True)
        return 1

    logger.info('Started; iface %s, UID %s', iface,
                binascii.hexlify(uid).decode('utf8'))

    try:
        t = tester.Tester(iface, uid, os.getcwd(), args.valid_fw_dir,
                          args.invalid_fw_dir, args.nid)
        t.run()
    except Exception as ex:
        logger.error('Test failure: %r', ex, exc_info=True)
        return 1
    else:
        return 0
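
The two example UID formats quoted in the help text above decode to the same 16 bytes, which is what the hex/base64 branch relies on. A quick standard-library check (values taken from the help text):

import binascii

hex_uid = '37ffdc05465430353344164300000000'
b64_uid = 'N//cBUZUMDUzRBZDAAAAAA=='
# A 32-character string is treated as hex, anything else as base64.
assert binascii.unhexlify(hex_uid) == binascii.a2b_base64(b64_uid)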
Code Example #6
File: main.py Project: chenjr95/SAAST-2014
def main(args):
    ''' Entry point of the microgames program '''
    pygame.init()
    try:
        surface = pygame.display.set_mode([locals.WIDTH, locals.HEIGHT])
        pygame.display.set_caption('Microgames Tester')
        clock = pygame.time.Clock()
        t = tester.Tester(surface)
        while not t.finished:
            events = pygame.event.get()
            clock.tick(locals.FPS)
            t.update(events)
            t.render()
            pygame.display.flip()
    except Exception as e:
        ty, v, tb = sys.exc_info()
        print 'An exception occurred.  Terminating the game!'
        print traceback.print_exception(ty, v, tb)
    pygame.quit()
    sys.exit()
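
This snippet is Python 2 (bare print statements). A Python 3 rendering of the exception handler might look like the sketch below; note that traceback.print_exception returns None, so the original print of its return value only adds a stray "None" line:

import traceback

try:
    raise RuntimeError('demo failure')  # stand-in for the game loop
except Exception:
    print('An exception occurred.  Terminating the game!')
    traceback.print_exc()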
Code Example #7
    def __init__(self, window, test=False):
        QThread.__init__(self)
        self.window = window

        if not test:
            import dataStream as stream

            self.window.statusBar().showMessage(
                "Opening connection to remote...", 2000)
            print("Opening connection to remote...")
            self.s = stream.DataStream()
        else:
            import tester as t

            self.window.statusBar().showMessage(
                "Opening tester data stream...", 2000)
            print("Opening tester data stream...")
            self.s = t.Tester()

        self.data_recorder = data.Data()
        return
Code Example #8
File: tic_tac_toe.py Project: adirohayonk/ticTacToe
def main():
    parser = argparse.ArgumentParser("Tic Tac Toe game")
    parser.add_argument('-t',
                        '--tester',
                        type=int,
                        metavar="[Num of tests]",
                        help="Random tester against Hard computer")
    args = parser.parse_args()

    gameObj = game.Game()

    if args.tester:
        testerObj = tester.Tester(gameObj)
        testerObj.testerRunner(args.tester)
        exit(0)

    numberOfPlayers = readInt("How many players: ")
    if numberOfPlayers == 2:
        gameObj.multiPlayerGame()
    elif numberOfPlayers == 1:
        computerLevel = readInt(
            "Level (1:Hard, 2:Medium, 3:Easy, Default:Easy): ")
        gameObj.singlePlayerGame(computerLevel)
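
readInt is not shown in the excerpt. A minimal sketch of the assumed helper (re-prompting until the user types an integer; the real implementation may differ):

def readInt(prompt):
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Please enter a whole number")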
Code Example #9
File: contester.py Project: sqrlab/ARC
def test_execution(runs):
    """Test the testsuite to ensure it can run successfully at least once.

  The testsuite will run through the tester.py test process to ensure that the
  testsuite can actually run successfully.

  Args:
    runs (int): the number of runs the testsuite will be tested for
  """

    testRunner = tester.Tester()
    try:
        testRunner.begin_testing(True, False, runs=runs)

        #logger.info("Testing Runs Results...")
        #logger.info("Successes: {}".format(testRunner.successes))
        #logger.info("Timeouts: {}".format(testRunner.timeouts))
        #logger.info("Dataraces: {}".format(testRunner.dataraces))
        #logger.info("Deadlock: {}".format(testRunner.deadlocks))
        #logger.info("Errors: {}".format(testRunner.errors))

        if (testRunner.errors >= 1):
            raise Exception('ERROR', 'testsuite')
        elif (testRunner.timeouts >= 1):
            raise Exception('ERROR', 'config._CONTEST_TIMEOUT_SEC is too low')
        elif (testRunner.dataraces >= 1):
            logger.info("Data races were encountered")
        elif (testRunner.deadlocks >= 1):
            logger.info("Deadlocks were encountered")
        elif (testRunner.successes >= 1):
            logger.info("Test suite execution successful")
        else:
            logger.warn("The test suite wasn't executed successfully")

    except Exception as message:
        print(message.args)
        sys.exit()
Code Example #10
File: contester.py Project: sqrlab/ARC
def run_contest():
    """Run the testsuite with ConTest using the approach in tester.py."""
    testRunner = tester.Tester()
    testRunner.begin_testing(True, False)
Code Example #11
File: tester_test.py Project: dmh43/entity-linking
def test_tester(monkeypatch, myMock):
    dataset = [{
        'label': 0,
        'sentence_splits': [['a', 'b', 'c'], ['c', 'd']],
        'candidate_ids': torch.tensor([0, 1]),
        'embedded_page_content': torch.tensor([[1], [-2], [2], [3], [-3], [4]]),
        'entity_page_mentions': torch.tensor([[1], [-2], [0], [3], [0], [4]]),
        'p_prior': torch.tensor([0.1, 0.9])
    }, {
        'label': 2,
        'sentence_splits': [['a', 'b', 'c'], ['c', 'd']],
        'candidate_ids': torch.tensor([2, 1]),
        'embedded_page_content': torch.tensor([[1], [-2], [2], [3], [-3], [4]]),
        'entity_page_mentions': torch.tensor([[1], [-2], [0], [3], [0], [4]]),
        'p_prior': torch.tensor([0.1, 0.9])
    }, {
        'label': 1,
        'sentence_splits': [['a', 'b', 'c'], ['c', 'd']],
        'candidate_ids': torch.tensor([3, 1]),
        'embedded_page_content': torch.tensor([[1], [-2], [2], [3], [-3], [4]]),
        'entity_page_mentions': torch.tensor([[1], [-2], [0], [3], [0], [4]]),
        'p_prior': torch.tensor([0.1, 0.9])
    }]
    num_entities = 10
    embed_len = 200
    batch_size = 3
    entity_embeds = nn.Embedding(num_entities,
                                 embed_len,
                                 _weight=torch.randn(
                                     (num_entities, embed_len)))
    embedding_dict = dict(
        zip(string.ascii_lowercase, [
            torch.tensor([i]) for i, char in enumerate(string.ascii_lowercase)
        ]))
    token_idx_lookup = dict(
        zip(embedding_dict.keys(), range(len(embedding_dict))))
    embedding = nn.Embedding.from_pretrained(
        torch.stack([embedding_dict[token] for token in token_idx_lookup]))
    vector_to_return = entity_embeds(torch.tensor([1, 1, 1]))
    model = get_mock_model(vector_to_return)
    device = None
    batch_sampler = BatchSampler(RandomSampler(dataset), batch_size, True)
    mock_experiment = create_autospec(Experiment, instance=True)
    calc_logits = Logits()
    softmax = Softmax()
    logits_and_softmax = {
        'mention':
        lambda hidden, candidate_ids_or_targets: softmax(
            calc_logits(hidden, entity_embeds(candidate_ids_or_targets)))
    }
    with monkeypatch.context() as m:
        m.setattr(nn, 'DataParallel', _.identity)
        m.setattr(u, 'tensors_to_device', lambda batch, device: batch)
        tester = t.Tester(
            dataset=dataset,
            batch_sampler=batch_sampler,
            model=model,
            logits_and_softmax=logits_and_softmax,
            embedding=embedding,
            token_idx_lookup=token_idx_lookup,
            device=device,
            experiment=mock_experiment,
            ablation=['prior', 'local_context', 'document_context'],
            use_adaptive_softmax=False)
        assert tester.test() == (1, 3)
        labels_for_batch = tester._get_labels_for_batch(
            torch.tensor([elem['label'] for elem in dataset]),
            torch.tensor([[1, 0], [4, 5], [1, 0]]))
        assert torch.equal(labels_for_batch, torch.tensor([1, -1, 0]))
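
The final assertion implies a candidate-relative labelling scheme. A sketch of that behaviour (an assumption about _get_labels_for_batch, not the project's implementation): each gold label is mapped to its index within that row's candidate ids, or -1 when it is not among the candidates.

import torch

def labels_for_batch(labels, candidate_ids):
    return torch.tensor([
        (row == label).nonzero()[0].item() if (row == label).any() else -1
        for label, row in zip(labels, candidate_ids)
    ])

assert torch.equal(
    labels_for_batch(torch.tensor([0, 2, 1]),
                     torch.tensor([[1, 0], [4, 5], [1, 0]])),
    torch.tensor([1, -1, 0]))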
Code Example #12
File: start.py Project: GustavoKatel/stackcontest
				if len(row[i])>0:
					tags += [row[i]]
			featureset += util.featuresetIt( util.featureIt(util.tokenizeIt(title)), tags ) + util.featuresetIt( util.featureIt(util.tokenizeIt(body)), tags )
			lines+=1
			if MAX_LOADED_LINES>0 and lines>MAX_LOADED_LINES:
				print "Maximum exceeded!"
				break
			print "%d lines parsed." % lines
			#break
		self.saveFeaturesetFile(featureset,filename+".featureset")
		return featureset

	def getFeatureset(self,filename):
		if os.path.exists(filename+".featureset"):
			return self.loadFeaturesetFile(filename+".featureset")
		else:
			return self.newFeatureset(filename)

if __name__=="__main__":
	classifier = stackClassifier()
	classifier.train("/media/Arquivos/g4/NLP & ML/stackoverflow/train.csv")
	#

	#TESTER
	print "\nTESTER..."
	f = open("/media/Arquivos/g4/NLP & ML/stackoverflow/train.csv","rb")
	reader = csv.reader(f)
	reader.next()
	test = tester.Tester(classifier.getNaiveObj(),reader)#,classifier.getCsvObj())
	test.test(True,1)
Code Example #13
def test_eval_seg(net,
                  model,
                  test_root,
                  origin_root,
                  gt_root,
                  test_list,
                  out_root,
                  uniform_size,
                  mean,
                  batch_size,
                  show=0,
                  save_seg=0,
                  save_img=0,
                  save_prob=0,
                  use_hyper=0,
                  hyper_downsample_rate=1,
                  hyper_centroids_name=None,
                  score_name='score',
                  start=0,
                  end=-1,
                  gpu=-1,
                  f=None):

    # init
    OUT_PROB = False
    LUT = load_color_LUT_21(
        os.path.dirname(__file__) + '/VOC_color_LUT_21.mat')
    if save_seg:
        if not os.path.isdir(out_root + '/seg/'):
            os.makedirs(out_root + '/seg/')
        if not os.path.isdir(out_root + '/vlz/'):
            os.makedirs(out_root + '/vlz/')
    if save_prob:
        if not os.path.isdir(out_root + '/prob/'):
            os.makedirs(out_root + "/prob/")
        if not os.path.isdir(out_root + '/shape/'):
            os.makedirs(out_root + "/shape/")
        OUT_PROB = True
    if save_img:
        if not os.path.isdir(out_root + '/img/'):
            os.makedirs(out_root + "/img/")
        OUT_PROB = True
    with open(test_list, 'r') as infile:
        img_list = [
            line.strip() for line in infile.readlines()
            if len(line.strip()) > 0
        ]
    if end == -1:
        img_list = img_list[start:]
    elif end <= len(img_list):
        img_list = img_list[start:end]
    else:
        raise Exception('end should not be larger than img_list length')
    base_size = [uniform_size, uniform_size]
    mean_map = np.tile(mean, [base_size[0], base_size[1], 1])  # H x W x C

    # centroids init for hyper-column
    if use_hyper:
        hyper_dsr = hyper_downsample_rate
        hyper_total = (uniform_size / hyper_dsr) * (uniform_size / hyper_dsr)
        hyper_yc = np.tile(
            np.arange(0, uniform_size, hyper_dsr).reshape(
                (uniform_size / hyper_dsr, 1)),
            [1, uniform_size / hyper_dsr]).reshape((hyper_total, 1))
        hyper_xc = np.tile(
            np.arange(0, uniform_size, hyper_dsr).reshape(
                (1, uniform_size / hyper_dsr)),
            [uniform_size / hyper_dsr, 1]).reshape((hyper_total, 1))
        hyper_centroids_blob = np.tile(np.hstack((hyper_yc, hyper_xc)),
                                       [batch_size, 1, 1])

    # caffe model init
    caffe_tester = tester.Tester(net, model, gpu)
    if use_hyper:
        if hyper_centroids_name in caffe_tester.blobs.keys():
            caffe_tester.blobs[
                hyper_centroids_name].data.flat = hyper_centroids_blob.flat
        else:
            raise Exception("Can not find the blob: %s" % hyper_centroids_name)

    # loop list
    hist = np.zeros((21, 21))
    for i in range(0, len(img_list), batch_size):
        if i % 100 == 0:
            print('Processing: %d/%d' % (i, len(img_list)))
        true_batch_size = min(batch_size, len(img_list) - i)
        batch_data = np.zeros((batch_size, 3, base_size[0], base_size[1]),
                              dtype=np.float)
        for k in range(true_batch_size):
            if not os.path.isfile(test_root + img_list[i + k]):
                raise Exception('file not exist: %s' %
                                (test_root + img_list[i + k]))
                sys.exit(1)
            img = cv2.imread(test_root + img_list[i + k])  # BGR, 0-255
            batch_data[k, ...] = (resize_uniform.resize_pad_to_fit(
                img, base_size, pad_value=mean) - mean_map).transpose(
                    (2, 0, 1))
        inputs = {}
        inputs['data'] = batch_data
        qblobs = caffe_tester.predict(inputs, {}, [score_name])
        if use_hyper:
            if len(qblobs[score_name].shape) != 2:
                raise Exception("qblobs[score_name] should have 2 axis")
        for k in range(0, true_batch_size):
            origin_img = cv2.imread(origin_root + img_list[i + k])
            origin_shape = origin_img.shape
            if gt_root is not None:
                gt_img = cv2.imread(gt_root + img_list[i + k].split('.')[0] +
                                    '.png')[:, :, 0]
            if use_hyper:
                if OUT_PROB:
                    prob_map = qblobs[score_name].reshape(
                        (batch_size, uniform_size / hyper_dsr,
                         uniform_size / hyper_dsr, -1))[k, ...].transpose(
                             (2, 0, 1))
                cls_map = qblobs[score_name].argmax(axis=1).reshape(
                    (batch_size, uniform_size / hyper_dsr,
                     uniform_size / hyper_dsr))[k, ...].astype(np.uint8)
            else:
                if OUT_PROB:
                    prob_map = qblobs[score_name][k]
                cls_map = np.array(qblobs[score_name][k].transpose(
                    1, 2, 0).argmax(axis=2),
                                   dtype=np.uint8)
            if save_prob:
                np.save(
                    out_root + "/prob/" + img_list[i + k].split('.')[0] +
                    ".npy", prob_map)
                np.save(
                    out_root + "/shape/" + img_list[i + k].split('.')[0] +
                    ".npy", origin_shape[:2])
            if save_img:
                cv2.imwrite(out_root + '/img/' + img_list[i + k], origin_img)

            # origin size
            out_map = np.uint8(LUT[cls_map] * 255)
            cls_map_origin = resize_uniform.resize_crop_to_fit(
                cls_map, origin_shape[:2], interp=cv2.INTER_NEAREST)
            out_map_origin = resize_uniform.resize_crop_to_fit(
                out_map, origin_shape[:2], interp=cv2.INTER_NEAREST)

            # mIU
            if gt_root is not None:
                hist += fast_hist(gt_img.flatten(), cls_map_origin.flatten(),
                                  21)
            if show:
                cv2.imshow("image", origin_img)
                cv2.imshow("seg result", out_map_origin)
                cv2.waitKey(0)
            if save_seg:
                cls_map_fn = out_root + '/seg/' + img_list[i + k].split(
                    '.')[0] + ".png"
                out_map_fn = out_root + "/vlz/" + img_list[i + k].split(
                    '.')[0] + ".png"
                if not os.path.isdir(os.path.dirname(cls_map_fn)):
                    os.makedirs(os.path.dirname(cls_map_fn))
                if not os.path.isdir(os.path.dirname(out_map_fn)):
                    os.makedirs(os.path.dirname(out_map_fn))
                cv2.imwrite(cls_map_fn, cls_map_origin)
                cv2.imwrite(out_map_fn, out_map_origin)
    # results
    if gt_root is not None:
        acc = np.diag(hist).sum() / hist.sum()
        print '>>>', 'overall accuracy', acc
        acc = np.diag(hist) / hist.sum(1)
        print '>>>', 'mean accuracy', np.nanmean(acc)
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        print '>>>', 'per class IU:\n', iu
        show_iu = ["{:.2f}".format(i * 100) for i in iu]
        print '>>>', show_iu
        print '>>>', 'mean IU', np.nanmean(iu)
        if f is not None:
            f.write('model: %s\n' % model)
            f.write('%s\n' % show_iu)
            f.write('Mean IU: %f\n\n' % np.nanmean(iu))
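
fast_hist is used above but not shown in the excerpt. The usual implementation from FCN-style evaluation scripts (assumed here) accumulates an n x n confusion matrix from the flattened ground-truth and prediction maps:

import numpy as np

def fast_hist(a, b, n):
    k = (a >= 0) & (a < n)  # ignore labels outside [0, n)
    return np.bincount(n * a[k].astype(int) + b[k],
                       minlength=n ** 2).reshape(n, n)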
Code Example #14
    def __init__(self, parent=None, args=None, macros=None):
        super(ComMpmTests, self).__init__(parent=parent, macros=macros)

        logging.basicConfig(filename='comMpmTests%s.log', level=logging.DEBUG)

        plutoGateway_mapping_path = path.join(
            path.dirname(path.realpath(__file__)), "mapping",
            "com_mpm_modbus_mapping.csv")
        testbox_mapping_path = path.join(path.dirname(path.realpath(__file__)),
                                         "mapping",
                                         "PLC_Certification_Chassis.xlsx")

        testBox, plutoGateway = import_mappings(plutoGateway_mapping_path,
                                                testbox_mapping_path,
                                                'COM MPM Cables')

        print('ola')

        self.mpm_tester = tester.Tester(testBox, plutoGateway)
        #self.mpm_tester.connectTestBox()
        #self.mpm_tester.connectGateway(timeout=30)

        self.mpm_tester.tests.append(
            com_mpm_tests.TestPlutoGatewayConfig(self.mpm_tester, -1))
        self.mpm_tester.tests.append(
            com_mpm_tests.TestPlutoPLCsPresent(self.mpm_tester, -1))

        self.mpm_tester.tests.append(
            com_mpm_tests.TestChannelsBootDefault(self.mpm_tester, -1))

        self.mpm_tester.tests.append(
            com_mpm_tests.TestPlutoWriteReadback(self.mpm_tester, -1))

        self.mpm_tester.tests.append(
            com_mpm_tests.TestPermitsBlock(self.mpm_tester, -1))
        self.mpm_tester.tests.append(
            com_mpm_tests.TestAcPermitCoolantValve(self.mpm_tester, -1))

        self.mpm_tester.tests.append(
            com_mpm_tests.TestVacuumToRefPermits(self.mpm_tester, -1))
        #self.mpm_tester.tests.append(com_mpm_tests.TestColdCryoPermits(self.mpm_tester, -1))

        for i, test in enumerate(self.mpm_tester.tests):
            test.id = i

        self.table = self.ui.tableWidget

        headers = ["Test", "Description", "", "Step", "Details"]

        self.table.setRowCount(len(self.mpm_tester.tests))
        self.table.setColumnCount(len(headers))

        self.table.setHorizontalHeaderLabels(headers)
        self.table.setVerticalHeaderLabels(
            [str(e) for e in list(range(1,
                                        len(self.mpm_tester.tests) + 1))])

        for i, test in enumerate(self.mpm_tester.tests):
            self.update_table_line(i)
        self.table.setCurrentCell(0, 0, QItemSelectionModel.Rows)

        self.table.itemChanged.connect(self.item_changed)

        self.table.setColumnWidth(0, 160)
        self.table.setColumnWidth(1, 300)
        self.table.setColumnWidth(2, 50)
        self.table.setColumnWidth(3, 250)

        self.mpm_tester.test_line_update.connect(self.update_table_line)
        self.mpm_tester.monitor_update.connect(self.update_monitor_menu)

        self.ui.runAllButton.clicked.connect(self.mpm_tester.run_all)
        self.ui.abortButton.clicked.connect(self.mpm_tester.abort)
Code Example #15
import websockets

import tester


async def connect(base: str) -> None:
    await websockets.connect(base)


if __name__ == '__main__':
    Tester = tester.Tester()
    Tester.addAsyncTest('connect',
                        connect('ws://localhost:2000'))  # type: ignore
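
The excerpt registers the coroutine with the Tester but does not show it being run. Outside that harness, the same connectivity check could be awaited directly (a sketch, assuming a WebSocket server is listening on localhost:2000):

import asyncio

asyncio.run(connect('ws://localhost:2000'))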
Code Example #16
def start_app():
	tester.Tester()
Code Example #17
File: main.py Project: etoce/hannah-python
import stringproblems
import stringproblems2
import stringproblems2test
import stringproblems3
import stringproblems4
import stringproblems3test
import stringproblems4test
import stringanswers
import stringanswers2
import stringanswers3
import stringanswers4
import listproblems
import listanswers
import listproblemstest
import sortingproblems
import sortingproblemstest
import sortinganswers
import yahtzeeanswers
import yahtzeeproblems
import yahtzeetest
import tester

moduleToTest = yahtzeeanswers
testModule = yahtzeetest

myTester = tester.Tester(moduleToTest, getattr(testModule, "cases"))

myTester.testMethod("sum_of_dice")
myTester.testMethod("three_of_a_kind")
myTester.testMethod("full_house")
Code Example #18
    def __init__(self, parent=None, args=None, macros=None):
        super(ColdTests, self).__init__(parent=parent, macros=macros)

        logging.basicConfig(filename='vaccumTests%s.log', level=logging.DEBUG)

        plutoGateway_mapping_path = path.join(
            path.dirname(path.realpath(__file__)), "mapping",
            "cold_modbus_mapping.csv")
        testbox_mapping_path = path.join(path.dirname(path.realpath(__file__)),
                                         "mapping",
                                         "PLC_Certification_Chassis.xlsx")

        testBox, plutoGateway = import_mappings(plutoGateway_mapping_path,
                                                testbox_mapping_path,
                                                'ColdCryo Cables')

        self.cold_tester = tester.Tester(testBox, plutoGateway)
        self.cold_tester.connectTestBox()
        self.cold_tester.connectGateway(timeout=30)

        self.cold_tester.tests.append(
            cold_tests.TestPlutoGatewayConfig(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestPlutoPLCsPresent(self.cold_tester, -1))

        self.cold_tester.tests.append(
            cold_tests.TestChannelsBootDefault(self.cold_tester, -1))

        self.cold_tester.tests.append(
            cold_tests.TestPlutoWriteReadback(self.cold_tester, -1))

        self.cold_tester.tests.append(
            cold_tests.TestDigitalInputs(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestSensorsValid(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestImmediateTrips(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestImmediatePowerTrips(self.cold_tester, -1))

        self.cold_tester.tests.append(
            cold_tests.TestOilFailureWhileRunning(self.cold_tester, -1))

        self.cold_tester.tests.append(
            cold_tests.TestCurrentValid(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestDelayPowerTrip(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestDelayDisPressTrip(self.cold_tester, -1))
        self.cold_tester.tests.append(
            cold_tests.TestDelayDisTempTrip(self.cold_tester, -1))

        for i, test in enumerate(self.cold_tester.tests):
            test.id = i

        self.table = self.ui.tableWidget

        headers = ["Test", "Description", "", "Step", "Details"]

        self.table.setRowCount(len(self.cold_tester.tests))
        self.table.setColumnCount(len(headers))

        self.table.setHorizontalHeaderLabels(headers)
        self.table.setVerticalHeaderLabels(
            [str(e) for e in list(range(1,
                                        len(self.cold_tester.tests) + 1))])

        for i, test in enumerate(self.cold_tester.tests):
            self.update_table_line(i)
        self.table.setCurrentCell(0, 0, QItemSelectionModel.Rows)

        self.table.itemChanged.connect(self.item_changed)

        self.table.setColumnWidth(0, 160)
        self.table.setColumnWidth(1, 300)
        self.table.setColumnWidth(2, 50)
        self.table.setColumnWidth(3, 250)

        self.cold_tester.test_line_update.connect(self.update_table_line)
        self.cold_tester.monitor_update.connect(self.update_monitor_menu)

        self.ui.runAllButton.clicked.connect(self.cold_tester.run_all)
        self.ui.abortButton.clicked.connect(self.cold_tester.abort)
Code Example #19
File: teste.py Project: slaclab/lsstPLCsTestBox
import mpm_tests
import cold_tests
from mapping_parser import import_mappings
import logging
import time

plutoGateway_mapping_path = path.join(path.dirname(path.realpath(__file__)),
                                      "mapping", "cold_modbus_mapping.csv")
testbox_mapping_path = path.join(path.dirname(path.realpath(__file__)),
                                 "mapping", "PLC_Certification_Chassis.xlsx")

testBox, plutoGateway = import_mappings(plutoGateway_mapping_path,
                                        testbox_mapping_path,
                                        'ColdCryo Cables')

mpm_tester = tester.Tester(testBox, plutoGateway)
mpm_tester.connectTestBox()
mpm_tester.connectGateway(timeout=30)

#asdas

#a= mpm_tests.TestPlutoConnect(mpm_tester, -1)
#a.run()
#a= mpm_tests.TestTestBoxConnect(mpm_tester, -1)
#a.run()

a = cold_tests.TestDigitalInputs(mpm_tester, 1)
a.button_run()
'''
while 1:
    start=time.time()
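
Presumed additional imports for this script, inferred from the path.join and tester.Tester calls above (they are not shown in the excerpt):

from os import path
import tester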
Code Example #20
File: main.py Project: yahya-alshammout/reservations
def start_app():
	test_up = tester.Tester()
	test_up.fill_test_data()
Code Example #21
 def test(self, audio_to_test, speaker_id_worker, outq):
     print 'in test'
     tester = te.Tester(audio_to_test, speaker_id_worker, outq,
                        self.gmm_models)
     tester.run()
Code Example #22
    def __init__(self, parent=None, args=None, macros=None):
        super(VaccumTests, self).__init__(parent=parent, macros=macros)

        #logging.basicConfig(filename=path.join(path.dirname(path.realpath(__file__)), "logs",'vaccumTests.log'), level=logging.DEBUG)
        logging.basicConfig(filename='C:\\Users\\joaoprod\\Documents\\GitHub\\lsstPLCsTestBox\\logs\\vaccumTests2.log',
                            level=logging.DEBUG)
        logging.debug('This message should go to the log file')
        logging.info('So should this')
        logging.warning('And this, too')
        print(path.join(path.dirname(path.realpath(__file__)), "logs",'vaccumTests.log'))

        plutoGateway_mapping_path = path.join(path.dirname(path.realpath(__file__)), "mapping", "vac_modbus_mapping.csv")
        testbox_mapping_path = path.join(path.dirname(path.realpath(__file__)), "mapping", "PLC_Certification_Chassis.xlsx")

        testbox, plutoGateway = import_mappings(plutoGateway_mapping_path, testbox_mapping_path, 'Vaccum cables')

        self.vac_tester = tester.Tester(testbox, plutoGateway)


        self.vac_tester.tests.append(vac_tests.TestPlutoGatewayConfig(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestPlutoPLCsPresent(self.vac_tester, -1))

        self.vac_tester.tests.append(vac_tests.TestChannelsBootDefault(self.vac_tester, -1))

        self.vac_tester.tests.append(vac_tests.TestPlutoWriteReadback(self.vac_tester, -1))

        #self.vac_tester.tests.append(vac_tests.TestAnalogScaling(self.vac_tester, -1))

        #self.vac_tester.tests.append(vac_tests.TestHvCvDifferences(self.vac_tester, -1))

        self.vac_tester.tests.append(vac_tests.TestCvValves(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestValveMonitors(self.vac_tester, -1))

        self.vac_tester.tests.append(vac_tests.TestHvStat(self.vac_tester, -1))
        #self.vac_tester.tests.append(vac_tests.TestHvTurboOnOfflogic(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestHvTurboPermitBlock(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestHvTurboPermitAuto(self.vac_tester, -1))

        self.vac_tester.tests.append(vac_tests.TestCvStat(self.vac_tester, -1))
        #self.vac_tester.tests.append(vac_tests.TestCvTurboOnOfflogic(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestCvTurboPermitBlock(self.vac_tester, -1))
        self.vac_tester.tests.append(vac_tests.TestCvTurboPermitAuto(self.vac_tester, -1))


        for i, test in enumerate(self.vac_tester.tests):
            test.id = i



        self.table = self.ui.tableWidget

        headers= ["Test","Description","","Step","Details"]

        self.table.setRowCount(len(self.vac_tester.tests))
        self.table.setColumnCount(len(headers))

        self.table.setHorizontalHeaderLabels(headers)
        self.table.setVerticalHeaderLabels([str(e) for e in list(range(1, len(self.vac_tester.tests) + 1))])

        for i, test in enumerate(self.vac_tester.tests):
            self.update_table_line(i)
        self.table.setCurrentCell(0, 0, QItemSelectionModel.Rows)

        self.table.itemChanged.connect(self.item_changed)

        self.table.setColumnWidth(0, 160)
        self.table.setColumnWidth(1, 300)
        self.table.setColumnWidth(2, 50)
        self.table.setColumnWidth(3, 250)

        self.vac_tester.test_line_update.connect(self.update_table_line)
        self.vac_tester.monitor_update.connect(self.update_monitor_menu)

        self.ui.runAllButton.clicked.connect(self.vac_tester.run_all)
        self.ui.abortButton.clicked.connect(self.vac_tester.abort)
Code Example #23
def test_eval_seg(net,
                  model,
                  img_list,
                  name_list,
                  gt_list,
                  out_root,
                  nclass,
                  crop_size,
                  target_size,
                  stride_ratio,
                  mean,
                  batch_size,
                  show=0,
                  save_seg=0,
                  save_img=0,
                  save_prob=0,
                  save_saliency=0,
                  use_hyper=0,
                  hyper_downsample_rate=1,
                  hyper_centroids_name=None,
                  score_name='score',
                  start=0,
                  end=-1,
                  gpu=-1,
                  LUT=None,
                  f=None):
    # preparing data
    if save_seg:
        if not os.path.isdir(out_root + '/seg/'):
            os.makedirs(out_root + '/seg/')
        if not os.path.isdir(out_root + '/vlz/'):
            os.makedirs(out_root + '/vlz/')
    if save_prob:
        raise NotImplementedError("save prob not implemented")

    if end == -1:
        img_list = img_list[start:]
    elif end <= len(img_list):
        img_list = img_list[start:end]
    else:
        raise Exception('end should not be larger than img_list length')

    base_size = [crop_size, crop_size]
    # grid
    hist = np.zeros((nclass, nclass))
    img0 = cv2.imread(img_list[0])
    stride = int(np.ceil(crop_size * stride_ratio))
    hgrid_num = int(np.ceil((img0.shape[0] - crop_size) / float(stride))) + 1
    assert (hgrid_num -
            1) * stride + crop_size - img0.shape[0] < crop_size * 0.05
    wgrid_num = int(np.ceil((img0.shape[1] - crop_size) / float(stride))) + 1
    assert (wgrid_num -
            1) * stride + crop_size - img0.shape[1] < crop_size * 0.05
    mean_map = np.tile(mean, [base_size[0], base_size[1], 1])  # H x W x C
    # caffe model init
    caffe_tester = tester.Tester(net, model, gpu)
    if use_hyper:
        if hyper_centroids_name in caffe_tester.blobs.keys():
            hyper_dsr = hyper_downsample_rate
            hyper_total = (crop_size / hyper_dsr) * (crop_size / hyper_dsr)
            hyper_yc = np.tile(
                np.arange(0, crop_size, hyper_dsr).reshape(
                    (crop_size / hyper_dsr, 1)),
                [1, crop_size / hyper_dsr]).reshape((hyper_total, 1))
            hyper_xc = np.tile(
                np.arange(0, crop_size, hyper_dsr).reshape(
                    (1, crop_size / hyper_dsr)),
                [crop_size / hyper_dsr, 1]).reshape((hyper_total, 1))
            hyper_centroids_blob = np.tile(np.hstack((hyper_yc, hyper_xc)),
                                           [batch_size, 1, 1])
            caffe_tester.blobs[
                hyper_centroids_name].data.flat = hyper_centroids_blob.flat
        else:
            raise Exception("Can not find the blob: %s" % hyper_centroids_name)

    # loop list
    for i in range(0, len(img_list)):
        if i % 10 == 0:
            print('Processing: %d/%d' % (i, len(img_list)))
        if not os.path.isfile(img_list[i]):
            raise Exception('file not exist: %s' % (img_list[i]))
            sys.exit(1)
        img = cv2.imread(img_list[i])  # BGR, 0-255
        H, W = img.shape[:2]
        hstart = stride * np.arange(hgrid_num)
        wstart = stride * np.arange(wgrid_num)
        batch_data = np.array([
            (crop_images.crop_padding(img,
                                      (wst, hst, crop_size, crop_size), mean) -
             mean_map).astype(np.float32).transpose(2, 0, 1) for hst in hstart
            for wst in wstart
        ])

        assert batch_data.shape[0] >= batch_size
        assert batch_data.shape[0] % batch_size == 0

        #
        if gt_list is not None:
            gt_img = cv2.imread(gt_list[i])[:, :, 0]
        ensemble_prob = np.zeros((nclass, H, W), dtype=np.float32)
        ensemble_cls = np.zeros((H, W), dtype=np.uint8)
        # loop crops by batch
        for j in range(batch_data.shape[0] / batch_size):
            inputs = {
                'data': batch_data[j * batch_size:(j + 1) * batch_size, ...]
            }
            qblobs = caffe_tester.predict(inputs, {}, [score_name])
            if use_hyper and len(qblobs[score_name].shape) != 2:
                raise Exception(
                    "for hypercolumn, qblobs[score_name] should have 2 axis")
            for k in range(0, batch_size):
                if use_hyper:
                    prob_map = qblobs[score_name].reshape(
                        (batch_size, crop_size / hyper_dsr,
                         crop_size / hyper_dsr, -1))[k, ...].transpose(
                             (2, 0, 1))[:nclass, ...]
                else:
                    prob_map = qblobs[score_name][k, nclass, ...]
                if prob_map.max() > 1 or prob_map.min() < 0:
                    raise Exception("should with softmax")
                prob_map = np.array([
                    cv2.resize(pm, (crop_size, crop_size)) for pm in prob_map
                ])
                hid, wid = (j * batch_size + k) // wgrid_num, (j * batch_size +
                                                               k) % wgrid_num
                ensemble_prob[:, hid * stride:hid * stride + crop_size,
                              wid * stride:wid * stride +
                              crop_size] += prob_map  # accumulate probability

        ensemble_cls = ensemble_prob.argmax(axis=0)
        if target_size is not None:
            ensemble_cls = cv2.resize(ensemble_cls,
                                      (target_size[1], target_size[0]),
                                      interpolation=cv2.INTER_NEAREST)
        ensemble_vlz = np.uint8(LUT[ensemble_cls])
        # mIU
        if gt_list is not None:
            ensemble_cls[ensemble_cls >= nclass] = 0
            hist += fast_hist(gt_img.flatten(), ensemble_cls.flatten(), nclass)
        if show:
            cv2.imshow("image", img)
            cv2.imshow("seg result", ensemble_vlz)
            cv2.waitKey(0)
        if save_seg:
            cls_map_fn = out_root + '/seg/' + name_list[i] + ".png"
            out_map_fn = out_root + "/vlz/" + name_list[i] + ".png"
            if not os.path.isdir(os.path.dirname(cls_map_fn)):
                os.makedirs(os.path.dirname(cls_map_fn))
            if not os.path.isdir(os.path.dirname(out_map_fn)):
                os.makedirs(os.path.dirname(out_map_fn))
            cv2.imwrite(cls_map_fn, ensemble_cls)
            cv2.imwrite(out_map_fn, ensemble_vlz)
    if gt_list is not None:
        acc = np.diag(hist).sum() / hist.sum()
        print '>>>', 'overall accuracy', acc
        acc = np.diag(hist) / hist.sum(1)
        print '>>>', 'mean accuracy', np.nanmean(acc)
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        print '>>>', 'per class IU:\n', iu
        show_iu = ["{:.2f}".format(i * 100) for i in iu]
        print '>>>', show_iu
        print '>>>', 'mean IU', np.nanmean(iu)
        if f is not None:
            f.write('model: %s\n' % model)
            f.write('%s\n' % show_iu)
            f.write('Mean IU: %f\n\n' % np.nanmean(iu))
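
A worked check of the sliding-window grid arithmetic near the top of test_eval_seg (the sizes here are illustrative, not from the source): with an image height of 1024, crop_size 512 and stride_ratio 0.5, the crops start at rows 0, 256 and 512, and the last crop ends exactly at the image border.

import numpy as np

crop_size, stride_ratio, height = 512, 0.5, 1024
stride = int(np.ceil(crop_size * stride_ratio))                     # 256
hgrid_num = int(np.ceil((height - crop_size) / float(stride))) + 1  # 3
assert (hgrid_num - 1) * stride + crop_size - height < crop_size * 0.05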