コード例 #1
0
def run_simulation(elevator_controller, floors, start_floor_index=0,
                   _sleep_seconds=0, number_of_steps=-1, visualization=True, debug=False,
                   speed=3, capacity=10,
                   print_results=True, _chromosome_controller=False):
    """Run an elevator simulation with the given controller.

    Args:
        elevator_controller: controller that decides elevator actions each step.
        floors: list of floor objects; the elevator starts on
            floors[start_floor_index].
        start_floor_index: index into `floors` for the starting floor.
        _sleep_seconds: real-time delay between simulation steps.
        number_of_steps: stop after this many steps; -1 runs until the
            visualization window is closed.
        visualization: show the pygame-style visualization while running.
        debug: attach a DebugModelListener that logs model events.
        speed / capacity: elevator parameters.
        print_results: best-effort print of the recorded statistics at the end.
        _chromosome_controller: forwarded to the visualization (GA mode).

    Returns:
        A SimulationRecorder holding the data collected during the run.
    """
    # Module-level so the visualization event callback can adjust the pace.
    global sleep_seconds
    sleep_seconds = _sleep_seconds

    simulation_recorder = SimulationRecorder()
    elevator = Elevator(floors[start_floor_index], floors, speed, capacity)
    elevator.set_recorder(simulation_recorder)
    elevator_controller.register_elevator_and_floors(elevator, floors)

    if debug:
        DebugModelListener(elevator, floors)
    if visualization:
        init_visualization(elevator, floors, elevator_controller,
                           _event_callback=event_from_visualization,
                           _chromosome_controller=_chromosome_controller)

    step = 0
    while True:
        update_model_and_controller(elevator, floors, elevator_controller)
        time.sleep(sleep_seconds)
        # draw() returns falsy when the window is closed.
        if visualization and not draw():
            break
        if number_of_steps != -1 and step >= number_of_steps:
            break
        step += 1

    simulation_recorder.notify_end_of_simulation(floors, elevator)
    if print_results:
        try:
            simulation_recorder.print_nice_data()
        except Exception:
            # BUG fix: original `except _:` raised NameError the moment any
            # exception occurred. Printing stays best-effort.
            pass

    return simulation_recorder
コード例 #2
0
ファイル: main.py プロジェクト: HoangPhung98/dss_dijkstra
import sys

import numpy as np

import core
import visualization
import myParser

if __name__ == '__main__':
    # Adjacency matrix of the test graph; sys.maxsize marks "no direct edge".
    INF = sys.maxsize
    matrix = np.array([
        [0,   2,   INF, 1,   INF],
        [2,   0,   5,   2,   2],
        [INF, 5,   0,   INF, 5],
        [1,   2,   INF, 0,   1],
        [INF, 2,   5,   1,   0],
    ])

    # Recompute edge costs with jam factors, then rebuild the matrix form.
    edges = myParser.matrixDistanceToEdgesList(matrix)
    newEdges, edgeToJamFactors = core.calculateNewDistanceCost(edges)
    jams = myParser.edgeToJamFactorArrayTojams(newEdges, edgeToJamFactors)
    newMatrix = np.round(myParser.edgesListToMatrixDistance(newEdges), 3)

    print("jams")
    print(repr(jams))
    print(newEdges)
    print("new matrix")
    print(repr(newMatrix))
    visualization.draw(newMatrix, jams, matrix)
コード例 #3
0
ファイル: main.py プロジェクト: skulitom/MATProblem
def main_algorithm(problem):
    """Wake all robots by chaining Dijkstra shortest paths between them.

    The first robot starts awake and walks to its nearest sleeping robot;
    each newly awakened robot then computes shortest paths to up to two
    still-sleeping robots. Traversed edges are accumulated into the
    solution, written to disk, and visualized in a subprocess.

    Args:
        problem: problem instance exposing `question_number` and `robots`.
    """
    logger = logging.getLogger('main_logger')

    logger.critical('Problem %i' % problem.question_number)

    graph = Graph(problem)
    graph.sort_edges()
    solution_edges = set()

    logger.info('Generating Dijkstra Routes')
    pb_robots = copy.deepcopy(problem.robots)
    sol_robots = list()
    print(len(pb_robots))

    # Special case: the first robot is awake from the start.
    r_f = pb_robots.pop(0)
    r_f.awaken = True

    # Visit the closest sleeping robot first (straight-line edge weight).
    pb_robots = sorted(
        pb_robots,
        key=lambda robot: Edge(r_f.vertices, robot.vertices, None).weight)

    edges = dijkstra_path(v_1=r_f.vertices,
                          v_2=pb_robots[0].vertices,
                          vertices=graph.vertices,
                          edges=graph.edges)
    r_f.track.extend(edges)
    sol_robots.append(r_f)

    solution_edges = solution_edges.union(set(edges))
    r_p = r_f  # Previous Robot
    pb_robots[0].awaken = True
    awake = 2
    while len(pb_robots) > 0:
        logger.info('Generating Dijkstra, remaining: %i' % len(pb_robots))
        r_i = pb_robots.pop(0)

        # Single-source shortest paths from the current robot.
        dist, prev = find_path(v_1=r_i.vertices,
                               vertices=graph.vertices,
                               edges=graph.edges)

        pb_robots = sorted(pb_robots, key=lambda robot: dist[robot.vertices])

        robots = list()
        # Select Destination robots that have not been reached. They should not be a destination.
        for i in range(0, len(pb_robots)):
            if pb_robots[i].awaken:
                continue
            robots.append(pb_robots[i])

            # BUG fix: was `len(robots) is 2` — identity comparison with an
            # int only works by CPython small-int caching; use equality.
            if len(robots) == 2:
                break

        for r in robots:
            edges = set(
                dijkstra_path(r_i.vertices, r.vertices, graph.vertices,
                              graph.edges, dist,
                              prev))  # Generated by Dijkstra

            # Reject the path if any edge is already in the solution; edges
            # are undirected, so compare both orientations.
            found = False
            for edge in edges:
                for sol_edge in solution_edges:
                    if ((edge.start == sol_edge.start and edge.end == sol_edge.end)
                            or (edge.start == sol_edge.end and edge.end == sol_edge.start)):
                        found = True
                        break
                if found:
                    break  # no need to scan the remaining edges

            if found:
                r.awaken = False
                continue

            if r_i not in sol_robots:
                sol_robots.append(r_i)
                r_i.track.extend(edges)

            else:  # Second Path
                r_p.track.extend(edges)

            solution_edges = solution_edges.union(edges)
            r.awaken = True
            awake += 1

        r_p = r_i  # Previous Robot

    for robot in sol_robots:
        robot.sort_track()

    solution = Solution(question_number=problem.question_number,
                        robots=sol_robots)
    # BUG fix: the original format string had no argument and logged a
    # literal '%i'.
    logger.critical('Finished Writing Solution for %i' % problem.question_number)

    logger.info('%i Robots Awake' % awake)
    writer.write_solution([solution])
    print(solution.list_of_coordinates)
    # Visualize is using process (non blocking).
    # BUG fix: the original called visualization.draw(...) eagerly (blocking)
    # and passed its *return value* as the Process target. Pass the callable
    # and its arguments so drawing actually runs in the subprocess.
    Process(target=visualization.draw,
            args=(problem,),
            kwargs={'mst_edges': list(solution_edges),
                    'edges': graph.edges}).start()
コード例 #4
0
ファイル: test.py プロジェクト: GuangyaoSHI/AirSimNeurIPS
    # Commented-out snapshot of a previously captured drone state, kept so a
    # specific scenario can be reproduced by hand.
    # state_read.orientation.y_val =  -0.0021525132469832897
    # state_read.orientation.z_val =  -0.17364919185638428
    # state_read.angular_velocity.x_val =  -0.030123945325613022
    # state_read.angular_velocity.y_val =  0.0011088978499174118
    # state_read.angular_velocity.z_val = 7.625947910128161e-05
    # state_read.linear_velocity.x_val =  0.0
    # state_read.linear_velocity.y_val =  0.0
    # state_read.linear_velocity.z_val =  -0.24393419921398163
    # state_read.position.z_val =  -0.07
    # state_read.angular_velocity.y_val =   1.946580171585083
    # state_read.angular_velocity.z_val =  -0.0002931684139184654

    # state_read.position.x_val = 6.373129367828369
    # state_read.position.y_val = 81.43741607666016
    # state_read.position.z_val = -42.87995529174805 #-43.689579010009766

    # Load the reference trajectory into the MPC controller, aligned with the
    # vehicle's current orientation.
    mpc_control.set_traj(path, state.orientation)
    # mpc_control.append_traj(path2)
    # NOTE(review): status 0 presumably means "on track / not yet deviated" —
    # confirm against mpc_control.tracking_status's definition.
    assert(mpc_control.tracking_status(state)==0)
    # cur = time.time()
    # for i in range(1000):
    #     u = mpc_control.getInput(state)
    # print("Spent time", (time.time()-cur)/1000)

    ref,_ = mpc_control.getReference(state)
    X, U = mpc_control.getFullMpcOutput(state)

    # print(ref[:,3:7]-X[:20,3:7])
    import visualization
    # Plot the planned path against the MPC predicted states (X) and inputs (U).
    visualization.draw(path, X, U, 10.0)
コード例 #5
0
        return img, target

    # Sanity-check the raw (untransformed) sample first.
    visualcheck(path='tmp/orig.jpg')

    # Round-trip one training sample: read image + horizontal-bounding-box
    # labels, encode them into the target tensor, then decode and draw the
    # boxes back onto the image for visual inspection.
    img = cv2.imread('hw2_train_val/train15000/images/00000.jpg')
    label_path = 'hw2_train_val/train15000/labelTxt_hbb/00000.txt'
    boxes, labels = reader(label_path)
    boxes = torch.Tensor(boxes)
    labels = torch.Tensor(labels)
    img, target = too(img, boxes)


    img = trans(img)

    img, result = regroup(img, target)
    draw(img, result, 'tmp/0.jpg')

    # Same round-trip, but with a horizontal flip applied before encoding.
    img = cv2.imread('hw2_train_val/train15000/images/00000.jpg')
    label_path = 'hw2_train_val/train15000/labelTxt_hbb/00000.txt'
    boxes, labels = reader(label_path)
    boxes = torch.Tensor(boxes)
    labels = torch.Tensor(labels)
    img, boxes = RandomFlipHV(img, boxes, mode='h')
    img, target = too(img, boxes)
    img = trans(img)
    img, result = regroup(img, target)
    draw(img, result, 'tmp/1.jpg')

    # Third sample read begins here; the rest is outside this view.
    img = cv2.imread('hw2_train_val/train15000/images/00000.jpg')
    label_path = 'hw2_train_val/train15000/labelTxt_hbb/00000.txt'
    boxes, labels = reader(label_path)
コード例 #6
0
def runTSPGA(kwargs):
	"""
		Run a genetic algorithm on a TSP instance until the target score is
		reached or 'maxGens' generations have elapsed. Returns (best, g)
		where best is an (individual, score) pair and g the generation count.

		pre:
			isinstance(kwargs, dict)
			'maxGens' in kwargs
			kwargs['maxGens'] > 0
		
		post[kwargs]:
			__old__.kwargs == kwargs
			__return__[0][1] >= kwargs['targetscore'] or __return__[1] == kwargs['maxGens']
			isinstance(__return__[0][0], Individual)
	"""
	
	# # # # # # PARAMETERS # # # # # #
	
	testmode = kwargs['testmode']
	
	maxGens = kwargs['maxGens']
	targetscore = kwargs['targetscore']
	genfunc = kwargs['genfunc']
	genparams = kwargs['genparams']

	scorefunc = kwargs['scorefunc']
	scoreparams = kwargs['scoreparams']

	selectfunc = kwargs['selectfunc']
	selectparams = kwargs['selectparams']
	
	numcross = kwargs['numcross']
	crossfunc = kwargs['crossfunc']
	crossfuncs = kwargs['crossfuncs']
	crossprob = kwargs['crossprob']
	crossparams = kwargs['crossparams']

	mutfunc = kwargs['mutfunc']
	mutprob = kwargs['mutprob']
	mutparams = kwargs['mutparams']
	
	SCORES = kwargs['SCORES']	# shared fitness cache: individual -> score
	visualize = kwargs['visualize']
	getWheel = kwargs['getWheel']
	
	if visualize:
		makeScreenParams = kwargs['makeScreenParams']
		drawParams = kwargs['drawParams']
		font = kwargs['font']
		fontParams = kwargs['fontParams']
		labelParams = kwargs['labelParams']

	# # # # # # /PARAMETERS # # # # # #
	
	# Generate and score the initial population.
	pop = genfunc(*genparams)
	for p in pop:
		if p not in SCORES:
			SCORES[p] = scorefunc(p, *scoreparams)
	
	best = max(SCORES, key=SCORES.__getitem__)
	best = best, SCORES[best]	# indiv, score
	
	if visualize:
		screen = vis.makeScreen(*makeScreenParams)
		label = font.render("%d / %d" %(best[1], targetscore), *fontParams)
		screen.blit(label, *labelParams)
		pg.display.init()
		vis.draw(best[0], screen, *drawParams)
	
	g = 0
	while g < maxGens:
		if testmode:
			assert g < maxGens
			assert best[1] < targetscore
			
		if getWheel:
			wheel = selection.getRouletteWheel(pop, SCORES)
		
		# Selection + crossover produce the offspring pool.
		newpop = []
		for _ in xrange(numcross):
			if getWheel:
				p1 = selectfunc(wheel, *selectparams)
				p2 = selectfunc(wheel, *selectparams)
			else:
				p1, p2 = selectfunc(pop, *selectparams)
			if rand() <= crossprob:
				c1 = crossfunc(p1, p2, crossfuncs, crossparams)
				c2 = crossfunc(p2, p1, crossfuncs, crossparams)
				newpop.extend([c1,c2])
		
		# Mutate and (re)score the offspring.
		for i,p in enumerate(newpop):
			if rand() <= mutprob:
				newpop[i] = mutfunc(p, *mutparams)
				p = newpop[i]
			SCORES[p] = scorefunc(p, *scoreparams)
		
		# Elitist survivor selection: keep the fittest len(pop) individuals.
		pop = sorted(pop+newpop, key=SCORES.__getitem__, reverse=True)[:len(pop)]
		
		fittest = max(pop, key=SCORES.__getitem__)
		fittest = fittest, SCORES[fittest]
		log.info("Generation %03d | highest fitness: %s | fittest indiv: %r" %(g, fittest[1], fittest[0].chromosomes[0]) )
		
		if fittest[1] > best[1]:
			best = fittest
			if visualize:
				screen = vis.makeScreen(*makeScreenParams)
				label = font.render("%d / %d" %(best[1], targetscore), *fontParams)
				screen.blit(label, *labelParams)
				pg.display.init()
				vis.draw(fittest[0], screen, *drawParams)
		
			if best[1] >= targetscore:
				# BUG fix: was `if vis:` — vis is the visualization *module*
				# and is always truthy, so the prompt fired even with
				# visualization disabled.
				if visualize:
					raw_input("Hit <ENTER> to kill visualization: ")
					vis.killscreen()
				
				# BUG fix: was `return best[0], g`, which drops the score and
				# violates the documented postcondition (__return__[0] must be
				# the (indiv, score) pair, as the final return below provides).
				return best, g
		g += 1
	# BUG fix: was `if vis:` (always truthy), see above.
	if visualize:
		raw_input("Hit <ENTER> to kill visualization: ")
		vis.killscreen()
	
	if testmode:
		assert (g == maxGens) or best[1] >= targetscore

	return best, g
コード例 #7
0
    # expect to call: python datafunc.py
    # 448x448 is the network's expected input resolution.
    trans = torchvision.transforms.Compose([
        torchvision.transforms.Resize((448, 448)),
        torchvision.transforms.ToTensor(),
        ])

    trainset = YoloDataset(os.path.join(data_dir_root,'train15000'), augment=True, transform=trans)
    validset = YoloDataset(os.path.join(data_dir_root,'val1500'), augment=False, transform=trans)

    # Arbitrary sample indices used to eyeball the encode/decode pipeline:
    # draw the decoded boxes next to the original annotations.
    check_index = [1,3,5,12,13,111,1114,14443]

    # trainset
    for i in check_index:
        image, label = trainset[i]
        image, result = regroup(image, label)
        draw(image, result, 'tmp/trainset/%d.jpg'%i)
        visualcheck(index=i, path='tmp/trainorig/%d.jpg'%i)

    # NOTE(review): trainset used 14443 above while validset uses 1443 here —
    # confirm whether the extra digit is intentional (val1500 only has 1500
    # samples, so 14443 would be out of range for validset).
    check_index = [1,3,5,12,13,111,1114,1443]

    for i in check_index:
        image, label = validset[i]
        image, result = regroup(image, label)
        draw(image, result, 'tmp/validset/%d.jpg'%i)
        visualcheck(index=i, train=False, path='tmp/validorig/%d.jpg'%i)

    trainloader, validloader = make_dataloader(data_dir_root)

    # just check 1 batch
    for images, labels in trainloader:
        for i, (image, label) in enumerate(zip(images, labels)):
コード例 #8
0
def interactive(N, width, output=None, ask=False):
    """Interactively collect N polygonal obstacles plus start/end points,
    then compute and display the visibility graph and shortest path.

    Args:
        N: number of obstacle polygons the user will draw.
        width: grid width of the plot; height is fixed at 3/4 * width.
        output: suffix for the record files written under ./input and
            ./output; None disables saving.
        ask: if True and the record files already exist, prompt before
            overwriting them.
    """
    height = int(3/4*width)

    # Fixed-scale integer grid canvas for point picking.
    plt.clf()
    plt.tight_layout()
    plt.setp(plt.gca(), autoscale_on=False)
    plt.xlim(xmax=width)
    plt.ylim(ymax=height)
    plt.xticks(np.arange(0,width+1,1), labels=[])
    plt.yticks(np.arange(0,height+1,1), labels=[])
    plt.grid(alpha=0.3)
    plt.draw()

    # Snap each click to integer grid coordinates.
    getpoint = lambda: tuple(np.asarray(plt.ginput(1, timeout=-1)).reshape(2).round().astype(np.int32))
    to_px = lambda p: plt.gca().transData.transform_point(p)
    # NOTE(review): d2 presumably returns *squared* screen distance — confirm
    # the 15 threshold's units against d2's definition.
    near = lambda p, q: d2(to_px(p), to_px(q)) < 15

    tmp_patches = []
    objs = []
    for i in range(N):
        plt.title("Objects remaining: %d" % (N-i))
        plt.draw()

        # Collect vertices until the user clicks back on the first one.
        obj = []
        while len(obj) < 3 or obj[0] != obj[-1]:
            pt = getpoint()
            if len(obj) >= 3 and near(obj[0], pt):
                pt = obj[0]  # Force match
            if obj:
                tmp_patches.append(plt.gca().add_patch(mpl.patches.PathPatch(mpl.path.Path([obj[-1], pt]), fc='none', ec='r')))
            else:
                tmp_patches.append(plt.scatter(pt[0], pt[1], c='r'))
            plt.draw()
            obj.append(pt)
        # Shoelace-style signed area to detect winding direction.
        # (The original's unused `from itertools import cycle` was removed.)
        orientation = sum((x1-x0) * (y1+y0) for (x0,y0), (x1,y1) in zip(obj, obj[1:] + [obj[0]]))
        if orientation > 0:
            obj.reverse()  # ensure ccw
        objs.append(obj[:-1])  # Don't include duplicated last vertex

    plt.title("Select starting point")
    start = getpoint()
    plt.scatter(start[0], start[1], c='k')
    plt.draw()

    plt.title("Select end point")
    end = getpoint()
    plt.scatter(end[0], end[1], c='k')
    plt.draw()

    plt.title("Calculating visibility graph...")
    plt.draw()

    now = time()
    adj = visgraph(objs, start, end)
    path = astar(adj, start, end)
    duration = time() - now

    # Replace the construction scaffolding with the final drawing.
    for patch in tmp_patches:
        patch.remove()
    plt.title("Visibility graph (%.3fs)" % duration)
    draw(objs, path, adj)
    plt.show()

    if output is not None:
        os.makedirs("input", exist_ok=True)
        os.makedirs("output", exist_ok=True)
        iname = os.path.join("input", "input%s.txt" % output)
        oname = os.path.join("output", "output%s.txt" % output)
        # Write unless a record already exists, ask=True, and the user declines.
        if not ((os.path.exists(iname) or os.path.exists(oname))
                and ask and not input("Overwrite? [y/N] ").upper().startswith("Y")):
            with open(iname, "w+") as ifile:
                print("%d %d" % start, file=ifile)
                print("%d %d" % end, file=ifile)
                print(len(objs), file=ifile)
                for obj in objs:
                    print(len(obj), file=ifile)
                    for x, y in obj:
                        print(x, y, file=ifile)
            with open(oname, "w+") as ofile:
                print(pathlength(path), file=ofile)
        else:
            print("Skipping")
コード例 #9
0
ファイル: integrate_pss.py プロジェクト: jakpra/ucca
def main(args):
    """Integrate STREUSLE preposition supersenses (PSS) into UCCA passages.

    `args` is the raw argument list: optional flags followed by the three
    positionals STREUSLE_JSON UCCA_PATH OUT_DIR. Depending on the flags this
    writes (possibly PSS-refined) sentence passages as XML into OUT_DIR,
    prints integration statistics, and/or renders passages as SVG graphs.
    """
    try:
        # Default configuration; the hand-rolled flag parsing below mutates
        # these. Short options are first rewritten to their long form, then
        # both are consumed from `args`.
        integrate_full = True
        integrate_term = False
        concatenate = False
        pss_feature = False
        annotate = True
        object = False  # NOTE(review): shadows the `object` builtin
        v2_only = True
        draw = False
        output = True
        inp_ucca = False
        if '-I' in args:
            args.remove('-I')
            args.append('--no-integrate')
        if '--no-integrate' in args:
            integrate_full = False
            args.remove('--no-integrate')

        if '-c' in args:
            args.remove('-c')
            args.append('--concatenate')
        if '--concatenate' in args:
            concatenate = True
            args.remove('--concatenate')

        if '-A' in args:
            args.remove('-A')
            args.append('--no-annotate')
        if '--no-annotate' in args:
            integrate_full = False
            annotate = False
            args.remove('--no-annotate')

        if '-s' in args:
            args.remove('-s')
            args.append('--pss-feature')
        if '--pss-feature' in args:
            pss_feature = True
            args.remove('--pss-feature')

        if '--term' in args:
            integrate_term = True
            integrate_full = False
            args.remove('--term')

        if '--inp_ucca' in args:
            inp_ucca = True
            args.remove('--inp_ucca')

        if '-o' in args:
            args.remove('-o')
            args.append('--object')
        if '--object' in args:
            object = True
            args.remove('--object')

        if '-n' in args:
            args.remove('-n')
            args.append('--no-output')
        if '--no-output' in args:
            output = False
            args.remove('--no-output')

        if '--all' in args:
            v2_only = False
            args.remove('--all')

        # Drawing pulls in heavy deps, so import them only when requested.
        if '--draw' in args:
            draw = True
            args.remove('--draw')
            import visualization as uviz
            import matplotlib.pyplot as plt

        streusle_file = args[
            0]  #'../../streusle/streusle.govobj.json' #args[0] #'streusle.govobj.json'  # sys.argv[1]
        ucca_path = args[
            1]  #'../../UCCA_English-EWT' #args[1] # '/home/jakob/nert/corpora/UCCA_English-EWT/xml'  # sys.argv[2]
        out_dir = args[2]

    # NOTE(review): the bare except exists to catch IndexError when the
    # positional arguments are missing, but it also hides any other error
    # raised during flag handling above.
    except:
        print(f'usage: python3 {sys.argv[0]} STREUSLE_JSON UCCA_PATH OUT_DIR',
              file=sys.stderr)
        exit(1)

    with open(streusle_file) as f:
        streusle = json.load(f)

    print()

    # Statistics accumulated over all PSS-bearing units.
    global_error = Counter()

    unit_counter = 0
    successful_units = 0
    unsuccessful_units = 0
    deductible_multiple_successes = 0
    deductible_multiple_fails = 0
    deductible_fail_and_success = 0
    units_with_remote = 0

    doc_error = 0

    primary_edges = 0
    remote_edges = 0

    _doc_id = None

    # Restrict processing to UCCA v2 documents unless --all was given.
    v2_docids = set()
    if v2_only:
        with open(ucca_path + '/v2.txt') as f:
            for line in f:
                v2_docids.add(line.strip())

    ignore = []
    #"""020851
    #            020992
    #            059005
    #            059416
    #            200957
    #            210066
    #            211797
    #            216456
    #            217359
    #            360937
    #            399348""".split()

    unit_times = []

    # print('usnacs.get_passages(streusle_file, ucca_path, annotate=(integrate or annotate), ignore=ignore, docids=v2_docids)')

    tag_refinements = Counter()

    for doc, passage, term2tok in get_passages(
            streusle_file,
            ucca_path,
            annotate=(integrate_term or integrate_full or annotate),
            target='obj' if object else 'prep',
            ignore=ignore,
            docids=v2_docids):

        # Pure annotation/output mode: split the document passage into
        # sentence passages and write them out without refinement.
        if output and (not integrate_full and not integrate_term):
            for p in uconv.split_passage(
                    passage, doc['ends'],
                    map(lambda x: ''.join(x['sent_id'].split('-')[-2:]),
                        doc['sents'])):
                uconv.passage2file(p, out_dir + '/' + p.ID + '.xml')
            continue

        l1 = passage.layer('1')

        if not output:
            primary_edges += len(
                uconstr.extract_candidates(
                    passage, constructions=(uconstr.PRIMARY, ))['primary'])
            remote_edges += len(
                uconstr.extract_candidates(passage,
                                           constructions=uconstr.get_by_names(
                                               ['remote']))['remote'])

        for terminal in passage.layer('0').words:

            # In --term --concatenate mode, re-hang every terminal under a
            # fresh 'Preterminal' node so the PSS tag can later be
            # concatenated onto the preterminal's incoming edge.
            if integrate_term and concatenate:  # and not terminal.incoming[0].parent.tag.startswith('Preterminal'):
                old_term_edge = terminal.incoming[0]
                preterminal = old_term_edge.parent
                preterminal._outgoing.remove(old_term_edge)
                terminal._incoming.remove(old_term_edge)
                passage._remove_edge(old_term_edge)
                #                old_preterm_edge = preterminal._fedge()
                #                preterminal.fparent._outgoing.remove(old_preterm_edge)
                new_preterminal = l1.add_fnode(
                    preterminal, 'Preterminal'
                )  #[[c.tag, '', c.layer, ''] for c in old_preterm_edge.categories])
                #                passage._add_node(new_preterminal)
                #for outg in preterminal.outgoing:
                #if inc.parent != preterminal.fparent and ul1.EdgeTags.Terminal not in inc.tags:
                #                new_preterminal.add(ul1.EdgeTags.Terminal, terminal)
                #                passage._add_node(new_preterminal)
                #preterminal._incoming = []
                #                new_preterminal.add('Preterminal', preterminal)
                #                passage._remove_edge(old_term_edge)
                new_preterminal.add_multiple(
                    [[c.tag, '', c.layer, '']
                     for c in old_term_edge.categories], terminal)
#                assert preterminal.outgoing
#                assert new_preterminal.outgoing
#                print(preterminal)
#                print(new_preterminal)
#                print(terminal)

            # Only terminals carrying an adposition supersense ('p.*') are
            # integration targets; everything else is skipped.
            pss_label = ''
            if 'ss' in terminal.extra:
                pss_label = terminal.extra['ss']
            if not pss_label.startswith('p'):
                # print(terminal.extra)
                continue

            # print('ok')

            start_time = time.time()
            unit_counter += 1

            if integrate_term:
                if concatenate:
                    #                    old_term_edge = terminal.incoming[0]
                    #                    preterminal = old_term_edge.parent
                    #                    new_preterminal = l1.add_fnode(preterminal, 'Preterminal')
                    #                    passage._add_node(new_preterminal)
                    #                    old_term_edge.parent._outgoing.remove(old_term_edge)
                    #                    old_term_edge.child._incoming.remove(old_term_edge)
                    #                    passage._remove_edge(old_term_edge)
                    #                    new_term_edge = new_preterminal.add(ul1.EdgeTags.Terminal, terminal)
                    #                    passage._add_edge(new_term_edge)
                    #                    refined = new_preterminal.incoming
                    refined = terminal.incoming[0].parent.incoming
                else:
                    refined = terminal.incoming
            else:
                # Full integration: locate the UCCA edges to refine for this
                # unit and fold the per-unit tallies into the global counters.
                refined, error = find_refined(
                    terminal, dict(passage.layer(ul0.LAYER_ID).pairs))

                global_error += Counter(
                    {k: v
                     for k, v in error.items() if isinstance(v, int)})

                if error['successes_for_unit'] >= 1:
                    successful_units += 1
                    deductible_multiple_successes += error[
                        'successes_for_unit'] - 1
                    if error['fails_for_unit'] >= 1:
                        deductible_fail_and_success += 1
                else:
                    unsuccessful_units += 1

                if error['fails_for_unit'] >= 1:
                    deductible_multiple_fails += error['fails_for_unit'] - 1

                if error['remotes'] >= 1:
                    units_with_remote += 1

                if not output:
                    if 'larger_UNA_warn' in error['failed_heuristics']:
                        print(terminal, terminal.incoming[0].parent)

                    if 'PP_idiom_not_UNA' in error['failed_heuristics']:
                        print('PP_idiom:', terminal.extra['lexlemma'],
                              terminal, terminal.incoming[0].parent)

                    if 'MWP_not_UNA' in error['failed_heuristics']:
                        print('MWP:', terminal.extra['lexlemma'], terminal,
                              terminal.incoming[0].parent)

            # Apply the PSS label: either concatenated onto the edge tag
            # (':'-joined) or stored on the edge's `refinement` attribute.
            for r in refined:
                # TODO: deal with doubly refined edges
                if (not concatenate and r.refinement) or (concatenate
                                                          and ':' in r.tag):
                    pass
                else:
                    if concatenate:
                        cats, r.categories = r.categories, []
                        for c in cats:
                            composit_tag = f'{c.tag}:{pss_label}'
                            r.add(composit_tag)
                            tag_refinements[composit_tag] += 1
                    else:
                        r.refinement = pss_label
#                print('FAIL', doc['id'], terminal.extra['toknums'], terminal.extra['lexlemma'])

            unit_times.append(time.time() - start_time)

            if not pss_feature:
                terminal.extra.pop('ss')  # ensuring pss is not also a feature

#            if integrate_term:
#                terminal.extra['identified_for_pss'] = str(True)

        # Optionally render each sentence passage as an SVG graph.
        if draw:
            for sent, psg in zip(doc['sents'],
                                 uconv.split_passage(passage, doc['ends'])):
                uviz.draw(psg)
                plt.savefig(f'../graphs/{sent["sent_id"]}.svg')
                plt.clf()


#        print(passage)
        if output:
            for p in uconv.split_passage(
                    passage, doc['ends'],
                    map(lambda x: ''.join(x['sent_id'].split('-')[-2:]),
                        doc['sents'])):
                #                print(p)
                #            augmented = uconv.join_passages([p, ucore.Passage('0')])
                #            for root_edge in augmented.layer(ul1.LAYER_ID)._head_fnode.outgoing:
                #                if len(root_edge.tag.split('-')) > 1:
                #                    assert False, augmented
                #                root_edge.tag = root_edge.tag.split('-')[0]
                uconv.passage2file(p, out_dir + '/' + p.ID + '.xml')

    for x, y in tag_refinements.most_common(len(tag_refinements)):
        print(x, y, sep='\t')

    #print(f'successful units\t{successful_units}\t{100*successful_units/(unit_counter-doc_error)}%')
    #print(f'unsuccessful units\t{unsuccessful_units}\t{100-(100*successful_units/(unit_counter-doc_error))}%') #={unit_counter - doc_error - successful_units}={mwe_una_fail+abgh_fail+c_fail+d_fail+e_fail+f_fail+g_fail+no_match}

    # Statistics report (only meaningful for a full integration run).
    if integrate_full and not output:

        print('\n\n')
        print(f'total units\t{unit_counter}')
        #   print(f'gov and obj present\t{gov_and_obj_counter}')
        print(f'document error\t{doc_error}\t{100*doc_error/unit_counter}%')
        print(
            f'document success\t{unit_counter - doc_error}\t{100-(100 * doc_error / unit_counter)}%'
        )
        print(f'total primary edges\t{primary_edges}')
        print(f'total remote edges\t{remote_edges}')
        print('----------------------------------------------------')
        print(
            f'successful units\t{successful_units}\t{100*successful_units/(unit_counter-doc_error)}%'
        )
        print(
            f'unsuccessful units\t{unsuccessful_units}\t{100-(100*successful_units/(unit_counter-doc_error))}%'
        )  #={unit_counter - doc_error - successful_units}={mwe_una_fail+abgh_fail+c_fail+d_fail+e_fail+f_fail+g_fail+no_match}
        print(f'warnings\t{global_error["warnings"]}')
        print('---------------------------------')
        #    for ftype, count in fail_counts.most_common():
        #        print(f'{ftype}\t{count}')
        print(
            f'syntactic and semantic obj match\t{global_error["synt_sem_obj_match"]}'
        )
        print('---------------------------------')
        print(f'\tMWE but not UNA\t{global_error["mwe_una_fail"]}')
        print(f'\tPP idiom\t{global_error["idiom"]}')
        print(
            f'\tR, N, F ({global_error["abgh"]}) but A and B miss\t{global_error["abgh_fail"]}'
        )
        print(f'\tA (scene mod)\t{global_error["a"]}')
        print(f'\tB (non-scene mod) \t{global_error["b"]}')

        print(f'\tG (inh purpose) \t{global_error["g"]}')
        print(f'\t  scn \t{global_error["g_scn_mod"]}')
        print(f'\t  non scn \t{global_error["g"] - global_error["g_scn_mod"]}')

        print(f'\tH (approximator) \t{global_error["h"]}')
        print(f'\t  scn \t{global_error["h_scn_mod"]}')
        print(f'\t  non scn \t{global_error["h"] - global_error["h_scn_mod"]}')

        print(
            f'\tP, S ({global_error["c"]}) but C miss\t{global_error["c_fail"]}'
        )
        print(
            f'\tL ({global_error["d"]}) but D miss\t{global_error["d_fail"]}')

        print(
            f'\tA, D, E, T ({global_error["ef"]}) but E miss\t{global_error["ef_fail"]}'
        )

        print(f'\tE (intr adp) \t{global_error["e"]}')
        print(f'\t  scn \t{global_error["e_scn_mod"]}')
        print(f'\t  non scn \t{global_error["e"] - global_error["e_scn_mod"]}')

        print(f'\tF (poss pron) \t{global_error["f"]}')
        print(f'\t  scn \t{global_error["f_scn_mod"]}')
        print(f'\t  non scn \t{global_error["f"] - global_error["f_scn_mod"]}')

        #print(f'\tA ({f}) but F miss\t{f_fail}')
        #print(f'\tF ({g}) but G miss\t{g_fail}')
        print(
            f'\tno match\t{global_error["no_match"]}')  #\t{ucca_categories}')
        print(f'\tnon-semantic role\t{global_error["non_semrole"]}')
        print(
            f'\tmultiple preterminals\t{global_error["multiple_preterminals"]}'
        )
        print(
            f'\tunits with remote\t{units_with_remote} (total {global_error["remotes"]})'
        )
        #
        #
        print('---------------------------------')
        print(
            f'\tdeductible (multiple successes for single unit)\t{deductible_multiple_successes}'
        )
        print(
            f'\tdeductible (multiple fails for single unit)\t{deductible_multiple_fails}'
        )
        print(
            f'\tdeductible (fail and success for single unit)\t{deductible_fail_and_success}'
        )
コード例 #10
0
        # NOTE(review): `token.isnumeric` is a bound method object, not a
        # call, so this condition is always truthy and the error() branch
        # below is unreachable. Presumably `token.isnumeric()` was intended —
        # but confirm how "null" tokens are consumed elsewhere in the parser
        # before fixing, since they must not become TreeNodes.
        if token.isnumeric:
            # Recursively build left and right subtrees in pre-order.
            self.depth += 1
            self.cur_depth += 1
            return TreeNode(val=token,
                            left=self.subtree(),
                            right=self.subtree())

        else:
            self.error()

    def get_depth(self):
        """Return the tree depth accumulated while parsing."""
        depth_value = self.depth
        return depth_value


def string2Tree(string):
    """Parse a whitespace-separated pre-order token string into a tree.

    Returns a (root, depth) pair as produced by the Parser.
    """
    tokens = string.split()
    tree_parser = Parser(tokens)
    return tree_parser.get_root(), tree_parser.get_depth()


if __name__ == "__main__":
    # Pre-order serialization of a binary tree; "null" marks a missing child.
    tree_string = "1 2 4 null null 5 null null 3 6 7 null null null null"
    # tree_string = input()
    root, depth = string2Tree(tree_string)

    visualization.draw(root, depth)