Example #1
def play_live():
    # Fetch the live NBA TV stream; if one is available, record what is
    # playing in the shared state and start playback.
    live = TV.get_live()
    if live is not None:
        shared_data = SharedData()
        shared_data.set('playing', {
            'what': 'nba_tv_live',
        })
        common.play(live)
Example #2
def play_serieepisode():
    # Fetch the current NBA TV series episode and play it if one exists.
    episode = TV.get_serie_episode()
    if episode is not None:
        shared_data = SharedData()
        shared_data.set('playing', {
            'what': 'episode_nba_tv',
        })
        common.play(episode)
Example #3
def play_episode():
    # Look up the requested episode by start timestamp and duration, record
    # the request details in the shared state, then start playback.
    start_timestamp = vars.params.get('start_timestamp')
    duration = vars.params.get('duration')
    episode = TV.get_episode(start_timestamp, duration)
    if episode is not None:
        shared_data = SharedData()
        shared_data.set('playing', {
            'what': 'nba_tv_episode',
            'data': {
                'start_timestamp': start_timestamp,
                'duration': duration,
            },
        })
        common.play(episode)
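Examples 1-3 follow the same pattern: fetch a playable item, record what is being played in SharedData, then hand the item to common.play. A hypothetical call site for play_episode, assuming vars.params is a dict of string parameters parsed from the plugin URL (the values below are made up for illustration):

# Hypothetical invocation; vars.params would normally be populated by the
# plugin's URL handling, and these values are illustrative only.
vars.params = {
    'start_timestamp': '1514764800',  # epoch seconds, as a string
    'duration': '7200',               # seconds, as a string
}
play_episode()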
Example #4
def play_game():
    if not common.authenticate():
        return

    currentvideo_id = vars.params.get("video_id")
    currentvideo_type = vars.params.get("video_type")
    start_time = vars.params.get("start_time")
    duration = vars.params.get("duration")
    # "video_ishomefeed" arrives as a string parameter; default to the home
    # feed ("1") and convert it to a boolean.
    currentvideo_ishomefeed = vars.params.get("video_ishomefeed", "1") == "1"

    # Authentication is required beyond this point.
    game = get_game(currentvideo_id, currentvideo_type, currentvideo_ishomefeed, start_time, duration)
    if game is not None:
        common.play(game)
Example #5
import random

from pymongo import MongoClient

from common import play


def play_one_game():
    client = MongoClient()
    db = client["sandbox"]
    solutions = list(db.solutions.find())
    if len(solutions) < 2:
        print("Not enough solutions to play")
        return

    # Pick two distinct solutions at random and play their binaries
    # against each other.
    solutions = random.sample(solutions, 2)
    binaries = [s["binary"] for s in solutions]
    print("Playing {}".format(binaries))
    winner = play(*binaries)
    print("Winner: {}".format(winner))
    if winner == 0:
        # A draw increments slot 0 for both solutions.
        for s in solutions:
            db.results.update_one({"solution": s["solution"]},
                                  {"$inc": {"wins.0": 1}})
    else:
        assert winner in (1, 2)
        # Record the outcome from each solution's perspective: slot 1 is a
        # win and slot 2 a loss, with 3 - winner flipping the index for the
        # second solution.
        db.results.update_one({"solution": solutions[0]["solution"]},
                              {"$inc": {"wins.{}".format(winner): 1}})
        db.results.update_one({"solution": solutions[1]["solution"]},
                              {"$inc": {"wins.{}".format(3 - winner): 1}})
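Under the slot convention the updates imply (0 = draw, 1 = win, 2 = loss, from each solution's own perspective), a results document accumulated by this function would look roughly like this (an assumption inferred from the $inc updates, with illustrative values):

# Hypothetical shape of a db.results document:
{
    "solution": "<solution id>",
    "wins": {
        "0": 3,  # draws
        "1": 5,  # games won
        "2": 2,  # games lost
    },
}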
Example #6

# This file is part of dynamic-graph.
# dynamic-graph is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# dynamic-graph is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.  You should have
# received a copy of the GNU Lesser General Public License along with
# dynamic-graph. If not, see <http://www.gnu.org/licenses/>.

from dynamic_graph.sot.dynamics.tools import *
from dynamic_graph.sot.motion_planner. \
    test.feet_follower_from_file_walk_on_a_thread import *
from common import play

play(f)

# Expected final robot configuration, compared against the device state below.
finalPosition = (
    -0.015361, -0.0049075500000000001, -0.00047065200000000001, -0.0172946,
     -0.020661800000000001, 0.0374547, -0.037641599999999997,
     0.025434399999999999, -0.45398100000000002, 0.86741800000000002,
     -0.39213799999999999, -0.0089269499999999995, -0.037646100000000002,
     0.025648199999999999, -0.46715499999999999, 0.87717599999999996,
     -0.38872200000000001, -0.0091408199999999992, 0.080488199999999996,
     -0.18355399999999999, -0.00036695100000000002, -0.0056776600000000002,
     -0.12173299999999999, -0.23972599999999999, -0.00637303,
     -0.56908000000000003, 0.00296262, 0.19108900000000001, 0.100088,
     0.23896800000000001, 0.21485599999999999, -0.18973400000000001,
     -0.49457699999999999, 0.040646799999999997, 0.16970299999999999, 0.100067)
checkFinalConfiguration(robot.device.state.value, finalPosition)
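Here play(f) replays the feet-follower trajectory to completion, and checkFinalConfiguration presumably asserts that the robot's resulting joint configuration matches the finalPosition reference within some tolerance; both helpers come from the imported modules, so this is a reading of the test's intent rather than a documented contract.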
Example #7

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
#
# This file is part of sot-motion-planner.
# sot-motion-planner is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# sot-motion-planner is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.  You should have
# received a copy of the GNU Lesser General Public License along with
# sot-motion-planner. If not, see <http://www.gnu.org/licenses/>.

from dynamic_graph.sot.dynamics.tools import *
from dynamic_graph.sot.motion_planner. \
    test.feet_follower_analytical_pg_correction import *

from common import play

play(f, maxIter=1000 * 12)
f.trace.dump()
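f.trace.dump() then flushes the signal traces recorded during the run, presumably to the tracer's output files (an assumption; the tracer is configured in the imported test module).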
Example #8

"""
@author: mingrui
"""

import sys
import torch
from common import get_env, play
from dqn_agent import Agent

if __name__ == '__main__':
    if len(sys.argv) == 1:
        dqn_fname = None
    else:
        dqn_fname = sys.argv[1]

    # Build the environment and query its state/action dimensions.
    env, state_size, action_size = get_env()

    # load and play with trained agent
    agent = None
    if dqn_fname is not None:
        agent = Agent(state_size, action_size, seed=0)
        agent.dqn_local.load_state_dict(torch.load(dqn_fname))

        print('Playing with agent {}...'.format(dqn_fname))
        play(env, agent)
    else:
        # No checkpoint given: play randomly.
        print('Playing randomly...')
        play(env)

    env.close()
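Invocation is driven by sys.argv: with no argument the environment is played with random actions, and with one argument that file is loaded as a DQN checkpoint. A hypothetical command line (the script and checkpoint names are not from the source):

# Hypothetical usage:
#   python play_dqn.py                 # random policy
#   python play_dqn.py checkpoint.pth  # trained agent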