Example No. 1
def run(self):
    # Export the selected revision and open the resulting file.
    fn = backup.export_revision(gui.uuid, gui.host, gui.path,
                                rev, target_dir)
    util.open_file(fn)
    # Update the GTK task list from this worker thread under the GDK lock.
    gtk.gdk.threads_enter()
    running_tasks_model.remove(i)
    gtk.gdk.threads_leave()
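The util.open_file helper itself is not shown in any of these examples. From the way it is called (handing it exported files, directories, and, in a later webkit example, a URI to open "in system"), it presumably delegates to the desktop's default opener. A minimal sketch of such a helper, offered as an assumption rather than the project's actual code:

import os
import subprocess
import sys

def open_file(path):
    # Hypothetical stand-in for util.open_file: hand the path (file, folder,
    # or URI) to the platform's default opener.
    if sys.platform.startswith('win'):
        os.startfile(path)                   # Windows
    elif sys.platform == 'darwin':
        subprocess.call(['open', path])      # macOS
    else:
        subprocess.call(['xdg-open', path])  # most Linux desktops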
Example No. 2
def run(self):
    # Export the revision, unpack the tarball into target_dir, then open it.
    fn = backup.export_revision(gui.uuid, gui.host, gui.path, rev, target_dir)
    os.chdir(target_dir)
    os.system('tar -zxvf "%s"' % fn)
    os.remove(fn)
    os.chdir(util.RUN_FROM_DIR)
    util.open_file(target_dir)
    # Update the GTK task list from this worker thread under the GDK lock.
    gtk.gdk.threads_enter()
    running_tasks_model.remove(i)
    gtk.gdk.threads_leave()
Example No. 3
def _link_clicked(browser, frame, request,
                  action, decision, *args, **kwargs):
    if action.get_reason() == WEBKIT_WEB_NAVIGATION_REASON_OTHER:
        # let this load
        pass
    else:
        # open file in system
        util.open_file(action.get_original_uri())
        # ignore webkit request
        decision.ignore()
Example No. 4
def run(self):
    # Export the revision, unpack the tarball into target_dir, then open it.
    fn = backup.export_revision(gui.uuid, gui.host, gui.path, rev,
                                target_dir)
    os.chdir(target_dir)
    os.system('tar -zxvf "%s"' % fn)
    os.remove(fn)
    os.chdir(util.RUN_FROM_DIR)
    util.open_file(target_dir)
    # Update the GTK task list from this worker thread under the GDK lock.
    gtk.gdk.threads_enter()
    running_tasks_model.remove(i)
    gtk.gdk.threads_leave()
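Examples 2 and 4 shell out to tar for the extraction step. A minimal sketch of the same unpack step done with the standard library's tarfile module, shown only as an alternative (fn and target_dir are the names used in the example; this is not the project's code):

import os
import tarfile

def unpack_revision(fn, target_dir):
    # Extract the exported .tar.gz archive into target_dir without invoking a
    # shell, then delete the archive -- same effect as the os.system() call.
    with tarfile.open(fn, 'r:gz') as tar:
        tar.extractall(target_dir)
    os.remove(fn)
    return target_dir  # the caller can pass this to util.open_file()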
Example No. 5
def load(filename):
    global datafile

    datafile = util.open_file(os.path.join(workingdir, filename))
    core.clean_all(datafile)

    return "loaded " + filename
Example No. 6
def extract_features(assignment_id, buckets):
    file_path = '/'.join(['./data/files/1222', assignment_id, 'Breakout.java'])
    file = util.open_file(file_path)
    features = []

    for bucket in buckets:
        features += get_features(file, bucket)

    return features
Example No. 7
def extract_features(assignment_id, bucket, report=None):
    file_path = '/'.join(['./data/files', assignment_id, 'Breakout.java'])
    file = util.open_file(file_path)
    file_lines = [l for l in file]

    if bucket == 'Decomposition':
        return decomposition_features(file_lines, report)
    elif bucket == 'Commenting':
        return commenting_features(file_lines, report)
    elif bucket == 'Naming and Spacing':
        return naming_and_spacing_features(file_lines, report)
    elif bucket == 'Instance Variables and Parameters and Constants':
        return variable_features(file_lines, report)
    elif bucket == 'Logic and Redundancy':
        return logic_redundancy_features(file_lines, report)
    else:
        # TODO: implement feature extraction for other buckets.
        print('Can\'t read that bucket yet :/')

    return []
Example No. 8
    sess = tf.Session(config=config)
    init = tf.initialize_all_variables()
    sess.run(init)
    saver = tf.train.Saver()  # save all variables
    checkpoint_dir = model_dir
    checkpoint_file = 'segm.ckpt'

    if validation_path:
        validation_data = util.get_validation_data(validation_path, char_dic,
                                                   vocab_size, n_steps, padd)

    seq = 0
    while seq < training_iters:
        c_istate = np.zeros((batch_size, 2 * n_hidden))
        i = 0
        fid = util.open_file(train_path, 'r')
        for line in fid:
            line = line.strip()
            if line == "": continue
            line = line.decode('utf-8')
            sentence = util.snorm(line)
            pos = 0
            while pos != -1:
                batch_xs, batch_ys, next_pos, count = util.next_batch(
                    sentence, pos, char_dic, vocab_size, n_steps, padd)
                '''
                print 'window : ' + sentence[pos:pos+n_steps].encode('utf-8')
                print 'count : ' + str(count)
                print 'next_pos : ' + str(next_pos)
                print batch_ys
                print batch_xs
                '''
Example No. 9
def run(self):
    # Export the selected revision and open the resulting file.
    fn = backup.export_revision(gui.uuid, gui.host, gui.path, rev, target_dir)
    util.open_file(fn)
    # Update the GTK task list from this worker thread under the GDK lock.
    gtk.gdk.threads_enter()
    running_tasks_model.remove(i)
    gtk.gdk.threads_leave()
Example No. 10
def run_games(first_agent,
              second_agent,
              first_agent_turn,
              num_games,
              update_param=0,
              quiet=False,
              first_file_name="./data/first_save",
              second_file_name="./data/second_save",
              first_weights_file_name="./data/first_weights",
              second_weights_file_name="./data/second_weights",
              first_result_file_name="./data/first_results",
              second_result_file_name="./data/second_results",
              first_m_result_file_name="./data/first_m_results",
              second_m_result_file_name="./data/second_m_results",
              play_against_self=False):
    """
    first_agent: instance of Agent which reflects first agent
    second_agent: instance of Agent which reflects second agent
    first_agent_turn: True if turn is of the first agent
    num_games: total number of games to run without training
    num_training: total number of training games to run
    """

    try:
        write_str = "num_moves,win,reward,max_q_value\n"
        if first_agent.is_learning_agent:
            first_f = open_file(first_file_name, header=write_str)

            first_w_deq = deque()

            first_f_res = open_file(first_result_file_name)
            first_writer_res = csv.writer(first_f_res, lineterminator='\n')

            first_f_m_res = open_file(first_m_result_file_name)
            first_writer_m_res = csv.writer(first_f_m_res, lineterminator='\n')

            first_f_str = ""
            first_writer_w_list = []

        if second_agent.is_learning_agent:
            second_f = open_file(second_file_name, header=write_str)

            second_w_deq = deque()

            second_f_res = open_file(second_result_file_name)
            second_writer_res = csv.writer(second_f_res, lineterminator='\n')

            second_f_m_res = open_file(second_m_result_file_name)
            second_writer_m_res = csv.writer(second_f_m_res,
                                             lineterminator='\n')

            second_f_str = ""
            second_writer_w_list = []

        # learn weights
        # save weights
        # test using weights
        # change agent

        print('starting game', 0)
        for i in range(num_games):

            if (i + 1) % NOTIFY_FREQ == 0:
                print('Starting game', (i + 1))

            rules = ClassicGameRules()

            if first_agent.has_been_learning_agent:
                first_agent.start_learning()

            if second_agent.has_been_learning_agent:
                second_agent.start_learning()

            game = rules.new_game(first_agent,
                                  second_agent,
                                  first_agent_turn,
                                  quiet=quiet)

            num_moves, game_state = game.run()

            if first_agent.is_learning_agent:
                reward = first_agent.episode_rewards
                win = 1 if game_state.is_first_agent_win() else 0

                init_state = GameState(the_player_turn=first_agent_turn)
                max_q_value = first_agent.compute_value_from_q_values(
                    init_state)

                w_str = str(num_moves) + "," + str(win) + "," + str(
                    reward) + "," + str(max_q_value) + "\n"
                first_f_str += w_str

                if (i + 1) % WEIGHTS_SAVE_FREQ == 0:
                    if len(first_w_deq) != 0 and len(first_w_deq) % NUM_WEIGHTS_REM == 0:
                        first_w_deq.popleft()
                    first_w_deq.append(np.array(first_agent.weights))

                if (i + 1) % WRITE_FREQ == 0:
                    first_f.write(first_f_str)
                    first_f_str = ""

            if second_agent.is_learning_agent:
                reward = second_agent.episode_rewards
                win = 1 if game_state.is_second_agent_win() else 0

                init_state = GameState(the_player_turn=first_agent_turn)
                max_q_value = second_agent.compute_value_from_q_values(
                    init_state)

                w_str = str(num_moves) + "," + str(win) + "," + str(
                    reward) + "," + str(max_q_value) + "\n"
                second_f_str += w_str

                if (i + 1) % WEIGHTS_SAVE_FREQ == 0:
                    if len(second_w_deq) != 0 and len(second_w_deq) % NUM_WEIGHTS_REM == 0:
                        second_w_deq.popleft()
                    second_w_deq.append(np.array(second_agent.weights))

                if (i + 1) % WRITE_FREQ == 0:
                    second_f.write(second_f_str)
                    second_f_str = ""

            if (i + 1) % TEST_FREQ == 0:
                if first_agent.is_learning_agent:
                    first_agent.stop_learning()

                if second_agent.is_learning_agent:
                    second_agent.stop_learning()

                result_f = []
                result_s = []
                print('starting', TEST_GAMES, 'tests')

                result_f, result_s = multiprocess(
                    rules, first_agent, second_agent, first_agent_turn,
                    quiet=True)

                if first_agent.has_been_learning_agent:
                    first_writer_res.writerow(result_f[0])
                    first_writer_m_res.writerow(result_f[1])

                if second_agent.has_been_learning_agent:
                    second_writer_res.writerow(result_s[0])
                    second_writer_m_res.writerow(result_s[1])

            if first_agent.has_been_learning_agent and play_against_self:
                if (i + 1) % CHANGE_AGENT_FREQ == 0:
                    weights = first_w_deq[-1]
                    second_agent = QLearningAgent(weights=weights,
                                                  is_learning_agent=False)

            if first_agent.has_been_learning_agent and update_param:
                first_agent.update_parameters(update_param, (i + 1))

            if second_agent.has_been_learning_agent and update_param:
                second_agent.update_parameters(update_param, (i + 1))
            # -------------------------------
            # Printing result
            if game_state.is_first_agent_win():
                print(type(first_agent).__name__)
            elif game_state.is_second_agent_win():
                print(type(second_agent).__name__)

    except Exception as e:
        print(sys.exc_info()[0])
        traceback.print_tb(e.__traceback__)

    finally:
        if first_agent.has_been_learning_agent:
            first_f.close()
            first_f_res.close()
            first_f_m_res.close()

            first_f_w = open_file(first_weights_file_name)
            first_writer_w = csv.writer(first_f_w, lineterminator='\n')
            first_writer_w.writerows(first_w_deq)
            first_f_w.close()

        if second_agent.has_been_learning_agent:
            second_f.close()
            second_f_res.close()
            second_f_m_res.close()

            second_f_w = open_file(second_weights_file_name)
            second_writer_w = csv.writer(second_f_w, lineterminator='\n')
            second_writer_w.writerows(second_w_deq)
            second_f_w.close()
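The open_file helper used throughout this example is not included in the excerpt; judging from the call sites above, it opens a results file for writing and, when given header=, writes a CSV header line first. A minimal sketch of such a helper, inferred from usage and labeled as an assumption rather than the project's actual implementation:

import os

def open_file(file_name, header=None):
    # Hypothetical helper, inferred from the call sites above: make sure the
    # parent directory exists, open the file for writing, emit an optional
    # header line, and return the open handle for csv.writer / .write().
    dir_name = os.path.dirname(file_name)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)
    f = open(file_name, 'w', newline='')
    if header:
        f.write(header)
    return f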
Example No. 11
def open_folder(self, widget, file=''):
    f = pathjoin(self._hub.config_manager.get_watchlist(), file)
    util.open_file(f)
Example No. 12
            inter_op_parallelism_threads=NUM_THREADS,
            log_device_placement=False)
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver() # save all variables
    checkpoint_dir = model_dir
    checkpoint_file = 'segm.ckpt'

    validation_data = util.get_validation_data_emb(validation_path, char_dic,
                                                   id2emb, n_steps, padd)

    seq = 0
    while seq < training_iters:
        c_istate = np.zeros((batch_size, 2 * n_hidden))
        i = 0
        fid = util.open_file(train_path, 'r')
        for line in fid:
            line = line.strip()
            if line == "": continue
            line = line.decode('utf-8')
            sentence = util.snorm(line)
            pos = 0
            while pos != -1:
                batch_xs, batch_ys, next_pos, count = util.next_batch_emb(
                    sentence, pos, char_dic, id2emb, n_steps, padd)
                '''
                print 'window : ' + sentence[pos:pos+n_steps].encode('utf-8')
                print 'count : ' + str(count)
                print 'next_pos : ' + str(next_pos)
                print batch_ys
                print batch_xs
                '''
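Both training fragments (this one and Example No. 8) create a tf.train.Saver and set checkpoint_dir / checkpoint_file, but the actual saving step falls outside the excerpts. For reference, a typical TF1-style sketch of how such a checkpoint is usually written and restored; this is a generic pattern under that assumption, not the omitted project code:

import os
import tensorflow as tf  # TF1-style API, matching the fragments above

def save_checkpoint(sess, saver, checkpoint_dir, checkpoint_file, step):
    # Persist all variables; TF appends the step number to the file name.
    path = os.path.join(checkpoint_dir, checkpoint_file)
    return saver.save(sess, path, global_step=step)

def restore_latest(sess, saver, checkpoint_dir):
    # Restore the most recent checkpoint in the directory, if one exists.
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        return True
    return False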