Example #1
# prompt, on_linux and on_windows come from the project's own console helpers.
from os import getcwd
from subprocess import Popen


def open_unity_prompt():
    print(
        "\n\n=========================== CONNECTION ==========================="
    )
    opened = False
    while not opened:
        opened = True
        openUnityEnv = prompt('\nOpen Unity environment (y/n): ', 'y')
        if openUnityEnv == 'y':
            cwd = getcwd()
            if on_linux():
                foldersToFile = cwd[0:len(cwd) -
                                    len(r'/Python/Python')] + r'/Unity/Build/'
                fileName = prompt('Name of the built executable: ', '')
                unityEnvPath = foldersToFile + fileName
            elif on_windows():
                foldersToFile = cwd[0:len(cwd) -
                                    len(r'\Python\Python')] + r'\Unity\Build\\'
                fileName = "build.exe"
                fileName = prompt('Name of the built executable: ', fileName)
                unityEnvPath = foldersToFile + fileName
            try:
                man = prompt("Use manual position (y/n): ", 'n')
                Popen([unityEnvPath])
                return man
            except OSError:
                # Executable missing or not runnable: ask again.
                opened = False
Example #2
def progress(pawns):
    new_fences = build()
    player_turn = 1
    state = State.RUNNING
    console.clear()
    console.prompt(
        "\n *** Welcome to PyQuoridor ***\n  Press Enter to start ...")
    new_pawns = deepcopy(pawns)
    while state == State.RUNNING:
        display(new_pawns, new_fences)
        action = console.prompt_action(player_turn)
        if action == Action.EXIT:
            state = State.QUIT
        else:
            try:
                new_pawns = deepcopy(new_pawns)
                new_pawn = act(action, new_pawns[player_turn - 1], new_fences)
                new_pawns[player_turn - 1] = new_pawn
                if is_a_victory(new_pawn):
                    state = State.VICTORY
                else:
                    player_turn = get_next_player(player_turn, new_pawns)
            except QuoridorException:
                # Illegal move or fence placement: the same player tries again.
                pass
    if state == State.VICTORY:
        display(new_pawns, new_fences)
        console.display("\n *** Player # " + str(player_turn) + " won ***\n")
Example #3
def position(camera, actual_position, target_position, base, lnk1, lnk2):
    print('Displaying camera.')
    ImageShow.show(camera)
    time = float(prompt('time ->'))
    base = float(prompt('base ->', '%.1f' % base))
    lnk1 = float(prompt('lnk1 ->', '%.1f' % lnk1))
    lnk2 = float(prompt('lnk2 ->', '%.1f' % lnk2))
    reset = False

    return time, base, lnk1, lnk2, reset
Example #4
File: main.py Project: dtokos/mosaiq
def main():
    imagesPath = console.prompt('Enter images path', './images/')
    sourceName = console.prompt('Enter source', 'source.png')
    outputName = console.prompt('Enter output', 'output.png')

    source = Image.open(sourceName).convert('RGBA')
    generator = makeGenerator(getTileImages(imagesPath))

    console.header('Generating')
    generatedImage = generator.generate(source)
    console.header('Saving')
    generatedImage.save(outputName, compress_level=1)
Example #5
def main():
    goal = prompt('Target number? [100] ', 100)
    population_size = prompt('Population size per generation? (x > 1) [10] ', 10, lambda x: x > 1)
    mutation_rate = prompt('Mutation rate? (0 < x < 1) [0.01] ', 0.01, lambda x: 0 < x < 1)
    crossover_rate = prompt('Crossover rate? (0 < x < 1) [0.90] ', 0.90, lambda x: 0 < x < 1)

    env = Environment(Numeral, goal, population_size, crossover_rate, mutation_rate)

    if confirm('Run one generation at a time? [Y/n] '):
        env.step()
    else:
        max_generation = prompt('Maximum generations? [100] ', 100, lambda x: x > 1)
        env.run(max_generation)
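Many of these snippets lean on a small prompt helper that returns a default when the user just presses Enter, coerces the reply to the default's type, and re-asks until an optional validator accepts it. Each project ships its own version; a minimal sketch of the pattern (names and behavior assumed, not any project's actual helper) could look like this:

def prompt(message, default=None, validate=None):
    # Minimal sketch (assumed API): empty input returns the default,
    # the reply is coerced to the default's type, and a callable
    # validate re-asks until it accepts the value.
    while True:
        raw = input(message)
        if raw == '' and default is not None:
            return default
        try:
            value = type(default)(raw) if default is not None else raw
        except ValueError:
            continue
        if validate is None or validate(value):
            return value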
Example #6
File: configuration.py Project: hans/pyggs
def update(self, section, option, prompt, validate=None):
    """Update option via user input."""
    default = self.get(section, option)
    value = console.prompt(prompt, padding=2, default=default, validate=validate)
    self.set(section, option, value)
    return self.get(section, option)
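A hypothetical usage example (the instance and the section/option names are invented for illustration):

# Hypothetical call; "config" is an assumed Configuration instance.
username = config.update("general", "username", "Geocaching.com username")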
Example #7
def train(self, train, epochs, validation_data=None, test_file=None):
    losses = []
    cumulative_epochs = 0
    while epochs > 0:
        for epoch in range(epochs):
            console.info("epoch", cumulative_epochs)
            loss = self.train_epoch(train, validation_data, test_file)
            losses.append(loss)
            cumulative_epochs += 1
        while True:
            try:
                epochs = int(console.prompt("How many more epochs should we train for? "))
                break
            except ValueError:
                console.warn("Oops, number parse failed. Try again, I guess?")
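The inner while True loop is the usual re-prompt-until-parse idiom. Pulled out into a standalone helper, it reads as below (a sketch with assumed names, using plain input rather than the console module above):

def prompt_int(message):
    # Re-ask until the reply parses as an integer.
    while True:
        try:
            return int(input(message))
        except ValueError:
            print("Not a number, try again.")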
Example #8
File: pyggs.py Project: xificurk/pyggs
    workDir = os.path.expanduser(opts.workdir)
    parserDir = os.path.join(workDir, "parser")
    pyggsDir = os.path.join(workDir, "pyggs")
    profilesDir = os.path.join(pyggsDir, "profiles")

    # Check if the upgrade script is needed
    if os.path.isdir(pyggsDir):
        if os.path.isfile(os.path.join(pyggsDir, "version")):
            with open(os.path.join(pyggsDir, "version")) as fp:
                version = VersionInfo(fp.read())
        else:
            version = VersionInfo("0.1")
        if version < __version__:
            if version < "0.2":
                rootlog.error(_("Detected incompatible version of working directory {0}. Please, delete the directory and set up your profiles from start.").format(workDir))
                delete = console.prompt(_("Do you want to delete the working directory and enter the setup script now ({CHOICES})?"), validate=["y", "n"], default="y")
                if delete == "n":
                    raise SystemExit
                else:
                    rootlog.warn(_("Deleting content of working directory {0}.").format(workDir))
                    rmtree(workDir)
                    setup = "full"
            else:
                if version < "0.2.7":
                    if os.path.isfile(os.path.join(pyggsDir, "storage.sqlite")):
                        globalStorage = Storage(os.path.join(pyggsDir, "storage.sqlite"))
                        globalStorage.query("UPDATE environment SET variable = REPLACE(variable, '.db.', '.') WHERE variable LIKE 'plug.%.db.%'")
                        rootlog.info(_("Updating environment variables in global storage."))
            with open(os.path.join(pyggsDir, "version"), "w") as fp:
                fp.write(__version__)
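This fragment compares a VersionInfo instance against bare strings such as "0.2". A minimal sketch of a class that supports those comparisons (an assumption for illustration, not pyggs's actual implementation):

class VersionInfo:
    # Minimal sketch: parse "major.minor.patch" and compare numerically.
    def __init__(self, version):
        self.parts = tuple(int(p) for p in version.strip().split("."))

    def __lt__(self, other):
        if isinstance(other, str):
            other = VersionInfo(other)
        return self.parts < other.parts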
Example #9
File: test.py Project: Kroisse/jep
def console(quit=True):
    import console
    console.prompt(testo.getJep())
    if quit:
        raise RuntimeError('blah')
Example #10
def train():

    #Simulation parameters
    total_episodes = 1000
    total_step = 30
    best_episode_reward = 0.0
    best_reward = -1000
    worst_reward = 1000
    avg_reward = 0.0
    e = 0.1
    pixel_done = False
    reset_crash = False
    nb_pixel_crash = 0

    #Robot environment creation
    env = EnvRobot(total_step)
    print(
        "\n\n=========================== RL PROGRAM ==========================="
    )

    #TensorFlow graph
    tf.reset_default_graph()
    cnn = ConvolutionalNeuralNetwork()  #Load the convolutional neural network
    init = tf.global_variables_initializer()

    #To save the Tensorflow graph
    saver = tf.train.Saver()
    restore_destination = "save/checkpoint/restore/"
    saver_destination = find_folder("save/checkpoint/trained/")

    #Plot parameters
    plot_reward = np.empty([total_episodes])
    plot_avg_reward = np.empty([total_episodes])
    print(saver_destination.split("/")[-2])
    fig = PlotReward(saver_destination.split("/")[-2])

    #Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        #Restore a model
        restore = prompt('\nRestore session (y/n): ', 'y')
        if restore == 'y':
            try:
                saver.restore(sess, restore_destination + "model.ckpt")
                print("The model was successfully restored !")
            except Exception:
                print("Sorry, couldn't find a model to restore !")
        else:
            print("No model has been restored !")

        time_start = time()
        #Episode loop
        for x in range(total_episodes):

            #Reset the environment
            reset_cube = pixel_done or reset_crash
            state = env.res(reset_cube=reset_cube)
            reset_crash = False
            done = False
            running_reward = 0

            #Step loop
            for i in range(total_step):

                print(
                    "\n--------------------------- Step %d ---------------------------"
                    % (i + 1))

                #Choose action from camera with the convnet
                #input_image    : camera
                #keep_prob      : dropout (if 0.75, 75% chance to keep the neuron)
                #Q              : output of the last fully connected layer of the convnet
                #soft           : softmax function from Q values
                #action         : chosen action
                Q, soft, action = sess.run(
                    [cnn.Q, cnn.soft, cnn.chosen_action],
                    feed_dict={
                        cnn.input_image: [state[0]],
                        cnn.sensors: [state[1]],
                        cnn.keep_prob: [cnn.dropout]
                    })
                """print("\nPrediction Q     : %0.3f  %0.3f  %0.3f  %0.3f  %0.3f  %0.3f" % (Q[0][0], Q[0][1], Q[0][2], Q[0][3], Q[0][4], Q[0][5]))
                print("Softmax          : %d %d %d %d %d %d" % (soft[0][0], soft[0][1], soft[0][2], soft[0][3], soft[0][4], soft[0][5]))"""

                #Action choice (with e chance of random action)
                rdm = np.random.rand(1)
                if rdm < e:
                    action = random.randint(0, cnn.n_classes - 1)
                    print("Random action    : %d (%s)" %
                          (action, action_name(action)))
                else:
                    action = action[0]
                    print("Chosen action    : %d (%s)" %
                          (action, action_name(action)))

                #Step with the action + informations recovery
                #action     : chosen action
                #state1     : next state
                #reward     : reward value
                #done       : true if the simulation is finished
                #pixel_done : true if the camera is composed of only red pixels
                state1, reward, done, pixel_done = env.step(action)
                """Q1 = sess.run([cnn.Q], feed_dict={cnn.input_image:[state1], cnn.keep_prob:[cnn.dropout]})
                maxQ1 = np.max(Q1)
                targetQ = Q
                targetQ[0,action] = reward + 0.99*maxQ1
                _, loss = sess.run([cnn.updateModel, cnn.loss], feed_dict={cnn.input_image:[state], cnn.keep_prob:[cnn.dropout], cnn.Q1:[targetQ]})"""

                #Update the convnet
                #input_image    : camera
                #keep_prob      : dropout (if 0.75, 75% chance to keep the neuron)
                #reward_holder  : obtained reward
                #action_holder  : achieved action
                #resp           : responsible weight
                #log            : log(resp)
                #loss           : calculated loss => -(log(resp) * reward_holder)
                _, resp, log, loss = sess.run(
                    [
                        cnn.updateModel, cnn.responsible_weight, cnn.log,
                        cnn.loss
                    ],
                    feed_dict={
                        cnn.input_image: [state[0]],
                        cnn.sensors: [state[1]],
                        cnn.keep_prob: [cnn.dropout],
                        cnn.reward_holder: [reward],
                        cnn.action_holder: [action]
                    })
                """print("\nCalulation")
                print("Resp     : ", resp)
                print("Log      : ", log)
                print("A        : ", reward)
                print("Loss     : ", loss)"""

                #Update the state
                state = state1

                #Display the current reward
                print("\nCurrent reward : %.2f" % reward)

                #Best reward calculation
                running_reward += reward
                if running_reward > best_episode_reward:
                    best_episode_reward = running_reward

                if done:
                    #Episode finished

                    #Change red cube position after n fails
                    if pixel_done:
                        nb_pixel_crash = 0
                    else:
                        nb_pixel_crash += 1

                    if nb_pixel_crash == 10:
                        reset_crash = True
                    break

            time_end = time()
            #Display episode information
            print("\n\n                           Time = %.2fs" %
                  (time_end - time_start))
            print(
                "=========================== EPISODE : %d ==========================="
                % (x + 1))
            print("REWARD       = %.2f" % running_reward)
            print("BEST REWARD  = %.2f" % best_episode_reward)

            #Training reward calculation
            avg_reward += running_reward
            if running_reward > best_reward:
                best_reward = running_reward
            if running_reward < worst_reward:
                worst_reward = running_reward

            #Plot rewards and save the graph
            plot_reward[x] = running_reward
            plot_avg_reward[x] = avg_reward / (x + 1)
            fig.update(plot_reward, x, best_reward, worst_reward,
                       plot_avg_reward, done, pixel_done)

            # Save the variables to disk.
            if x % 100 == 0 and x != 0:
                save_path = saver.save(sess, saver_destination + "model.ckpt")
                print("Model saved in file: %s" % save_path)

        #Display training information
        print(
            "\n======================== TRAINING FINISHED ========================"
        )
        print("AVG REWARD           = %.2f" % (avg_reward / total_episodes))
        print("BEST EPISODE REWARD  = %.2f" % best_reward)