Example #1
0
    def test_ac1(self):
        """Train an actor-critic agent on the 2-room problem and visualize results."""
        problem = prob.setup_2room_rect()
        title = 'ac-10step_penalty100k'
        args = base_args('-')
        # -----------------------------
        viz = visdom.Visdom()
        viz.text(args.print())
        print(args)

        base = 10
        goal = [base, base]

        # Grid size is derived from the total target area.
        size = math.ceil(sum(goal) ** 0.5)
        act = UDLRStep(always_draw=False)
        obj = DiscreteSpaceObjective(goal)
        env = DiscreteEnv(problem, obj, size, act, random_init=True)
        trainer = gr.ACTrainer(env, title=args.title, log_every=args.log_every)

        # NOTE(review): `loss_args` is not defined in this scope — presumably a
        # module-level constant; verify before running.
        trainer.train(episodes=100000, steps=base, lr=0.0005, loss_args=loss_args)
        insts = trainer._instances
        num_insts = len(insts)
        print(num_insts)
        gr.save_model(trainer.model, './data/{}.pkl'.format(title))
        # Fix: `insts[-i]` with i == 0 indexed the FIRST instance instead of the
        # last; -(i + 1) walks the five most recent instances as intended.
        for i in range(5):
            utils.layout_disc_to_viz(insts[-(i + 1)][-1])
        print('num_solutions', len(trainer._solutions))
Example #2
0
 def test4(self):
     """Sanity-check a random room drawn from a fresh PointsInBound layout."""
     problem = prob.setup_2room_rect()
     layout = PointsInBound(problem, None)()
     room = layout.random()
     print(room)
     assert len(room[0]) == 4
     assert len(room[4]) == 2
     # Fix: a list comprehension used purely for its print side effects
     # allocated a throwaway list; a plain loop is the idiomatic form.
     for x in [room[1], room[2], room[3], room[4]]:
         print(x)
     move_action = MoveWall()
     # Exercises the action; the resulting layout is intentionally unused here.
     new_layout = move_action(layout)
Example #3
0
    def test3(self):
        """Run the annealer on a fresh layout and return the final state."""
        problem = prob.setup_2room_rect()
        layout = PointsInBound(problem, None)()
        objective = objectives.ConstraintsHeur(problem, AreaConstraint=3)

        print(layout.is_valid, objective(layout))
        print(layout.to_vec4())

        annealer = gr.Annealer()
        wall_move = MoveWall()
        return annealer(layout, objective, wall_move, num_iter=10000)
Example #4
0
    def test_run1(self):
        """Build a discrete env plus matching policy/target LSTM-DQN networks."""
        problem = prob.setup_2room_rect()
        goal = [10, 10]
        size = math.ceil(sum(goal) ** 0.5)
        action_set = UDLRStep(False)
        objective = DiscreteSpaceObjective(goal)
        env = DiscreteEnv(problem, True, objective, size, action_set)

        def build_net():
            # Policy and target networks share an identical architecture.
            return nns.LSTMDQN(size, size, action_set.num_actions,
                               in_size=2, feats_in=2, feats_size=10)

        policy = build_net()
        target = build_net()
Example #5
0
    def test5(self):
        """Carve the overlap of two weirdly-shaped rooms and plot the layout."""
        # Two overlapping L-shaped / rectangular footprints.
        pts_a = [(1, 1), (1, 10), (10, 10), (10, 5), (15, 5), (15, 1)]
        pts_b = [(7, 5), (7, 10), (17, 10), (17, 5), (7, 5)]
        p1 = Room(pts_a, name='r1')
        p2 = Room(pts_b, name='r2')
        # print(Polygon(pts_a).intersection(Polygon(pts_b)))
        print(p2.intersection(p1))
        overlap = p2.intersection(p1)
        p2 = p2.difference(overlap)

        problem = prob.setup_2room_rect()
        lyt = BuildingLayout(problem, rooms=[p1, p2])
        utils.plotpoly([lyt])
Example #6
0
    def test_disclyt(self):
        """Step a StackedRooms state twice, printing reward/metrics after each step."""
        problem = prob.setup_2room_rect()
        state = StackedRooms(problem, size=(10, 10))

        objective = DiscProbDim(None)
        print(objective.keys)
        # action = DrawBox()
        # Two scripted draw steps; reward and metrics are printed after each.
        for step_args in ([0, 0, 0, 4, 4], [1, 5, 5, 8, 8]):
            state.add_step(step_args)
            reward, metrics = objective.reward(state)
            print(reward)
            print(metrics)
        print(state.state)
Example #7
0
    def test3(self):
        """Drive a CellComplexLayout through a scripted UDLR action sequence."""
        problem = prob.setup_2room_rect()
        goal = [10, 10]
        size = math.ceil(sum(goal) ** 0.5)
        lyt = CellComplexLayout(problem, size=(size, size))
        obj = DiscreteSpaceObjective(goal)

        stepper = UDLRStep(False)
        # Scripted sequence: three of action 6, one of 4, three of 7.
        for code in (6, 6, 6, 4, 7, 7, 7):
            lyt, success = stepper.forward(lyt, action=code)
        print(obj.reward(lyt, True))

        utils.layout_disc_to_viz(lyt)
Example #8
0
    def test2(self):
        """Apply actions 2 and 6, then four of action 4, and report the reward."""
        problem = prob.setup_2room_rect()
        goal = [10, 10]
        size = math.ceil(sum(goal) ** 0.5)
        layout = CellComplexLayout(problem, size=(size, size))
        objective = DiscreteSpaceObjective(goal)

        stepper = UDLRStep(False)
        layout, ok = stepper.forward(layout, action=2)
        layout, ok = stepper.forward(layout, action=6)
        print(objective.reward(layout, True))
        for _ in range(4):
            layout, ok = stepper.forward(layout, action=4)

        utils.layout_disc_to_viz(layout)
        reward, metrics = objective.reward(layout, True)
        print(reward, metrics)
Example #9
0
def make_env():
    """Construct a MockEnv around the standard 2-room rectangle problem."""
    two_room = prob.setup_2room_rect()
    return MockEnv(two_room, PointsInBound(two_room, None))