Example #1
def main():
    """The main function."""

    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--new', action='store_true')
    args = parser.parse_args()

    # tell pandas the data type of each column in the .csv file
    dtypes = {
        'match_id': int,
        'win': bool,
        'estimate': int,
        'score': float
    }

    # use new data
    if args.new:
        print('Using new OpenDota api data')
        print('Gathering data...')
        # from preprocessing.py
        data = get_data(True)
    
    # defaults to old data
    else:
        print('Using openDota.csv data')
        print('Gathering data...')
        # get data from csv file
        data = read_csv('data/openDota.csv', dtypes, True)

    print('Processing data...')
    # process data into required shapes (N, 20) and (N, )
    data, labels = process(data)

    print("Number of complete matches: " + str(data.shape[0]))
    while data.shape[0] < 1000:
        print("Not enough matches, trying again")
        data = get_data(True)
        data, labels = process(data)

    best_model = train(data, labels)

    print("Beginning testing on new data...")
    test(best_model, args.new, dtypes)

    # save the model to the current working directory
    path = os.path.join(os.getcwd(), 'best_model.pkl')
    joblib.dump(best_model, path)
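The snippet above persists the best scikit-learn model with joblib; a minimal sketch for loading it back later, assuming the same best_model.pkl path and features shaped (N, 20) as produced by process:

import joblib

# reload the persisted model and score fresh samples
best_model = joblib.load('best_model.pkl')
predictions = best_model.predict(features)  # features: placeholder array of shape (N, 20)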
Example #2
def filter_data(news_df):
    """过滤数据"""
    df = preprocessing.data_filter(news_df)
    now_time = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
    # now_time = '2018-04-06 23:59'
    df = preprocessing.get_data(df, last_time=now_time, delta=5)
    return df
Example #3
def main():
    print(tf.__version__)
    print("GPU Available: ", tf.test.is_gpu_available())

    results_dict = {'eval_rewards': []}

    args = create_argument_parser().parse_args()

    segrot, states, markpos = get_data(file=args.expert_file)
    actions = get_actions_from_segrot(segrot)
    action_dim = actions.shape[1]
    state_dim = states.shape[1]
    args.action_dim = action_dim
    args.state_dim = state_dim

    if args.curtail_length:
        states = states[0:args.curtail_length + 1]
        actions = actions[0:args.curtail_length + 1]

    num_states = states.shape[0]
    num_train = int(0.9 * num_states)
    num_test = num_states - num_train
    train_states = states[1:num_train]
    train_actions = actions[1:num_train]
    test_states = states[-num_test:]
    test_actions = actions[-num_test:]

    base_dir = os.getcwd() + '/models/IDPAgent/'
    run_number = 0
    while os.path.exists(base_dir + str(run_number)):
        run_number += 1
    base_dir = base_dir + str(run_number)
    os.makedirs(base_dir)

    idp_agent = IDPAgent(**args.__dict__)
    for epoch in trange(args.epochs):
        train(idp_agent, train_states, train_actions, args.batch_size)
        eval_rewards = evaluate_policy(idp_agent, test_states, test_actions,
                                       args.eval_episodes, args.batch_size)
        eval_reward = sum(eval_rewards) / args.eval_episodes
        eval_variance = float(np.var(eval_rewards))
        results_dict['eval_rewards'].append({
            'total_steps': epoch * states.shape[0],
            'epoch': epoch,
            'average_eval_reward': eval_reward,
            'eval_reward_variance': eval_variance
        })
        with open(args.results_file, 'w') as file:
            file.write(json.dumps(results_dict['eval_rewards']))

    utils.save_model(idp_agent.actor, base_dir)
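Each epoch rewrites args.results_file with the list of evaluation records; a minimal follow-up sketch, assuming that same file and the keys written above, for picking out the best epoch:

import json

# load the evaluation records written during training
with open('results.json') as file:  # whatever path was passed as args.results_file
    eval_records = json.load(file)

best = max(eval_records, key=lambda record: record['average_eval_reward'])
print(best['epoch'], best['average_eval_reward'])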
Example #4
def data_filter():
    if filter_df.shape[0] == 0:
        messagebox.showinfo('Message', 'No news data selected!')
        return
    date_f = Entry_Date.get()
    day_f = Entry_Day.get()
    if date_f == '' or day_f == '':
        messagebox.showinfo('Message', 'Please fill in the filter date and number of days first!')
        return
    global filter_df0
    filter_df0 = preprocessing.get_data(filter_df, last_time=date_f + ' 23:59', delta=int(day_f))
    news_pandas.save_news(filter_df0, os.path.join(temp_news_path, 'filter_news_by_time.csv'))
    news_num = filter_df0.shape[0]
    filter_n.set(news_num)
Example #5
def test(model, new, dtypes):
    '''
    Author: Jordan Patterson

    Function to test live data on a pretrained model

    Parameters
    ----------
    model: Object
        Any scikit learn generated model

    new: boolean
        Whether we trained on new data from OpenDota or not

    dtypes: dictionary
        Key / value pair specifying type of each column/header

    '''

    print("Getting test data...")
    # don't test on same data we trained on
    if new:
        data = read_csv('data/openDota.csv', dtypes, True)
    else:
        data = get_data(True)
    
    print("Processing test data...")
    data, labels = process(data)

    print("Number of complete matches: " + str(data.shape[0]))

    # create a random permutation of row indices
    s = np.arange(data.shape[0])
    np.random.shuffle(s)

    # shuffle data
    test_labels = labels[s]
    test_data = data[s]

    # get test accuracy
    acc = model.score(test_data, test_labels) * 100
    print("Test accuracy: " + str(acc))
Example #6
def main():
    print(tf.__version__)
    print("GPU Available: ", tf.test.is_gpu_available())

    args = create_argument_parser().parse_args()

    segrot, states, markpos = get_data(file=args.expert_file)
    actions = get_actions_from_segrot(segrot)

    if args.curtail_length:
        states = states[0:args.curtail_length + 1]
        actions = actions[0:args.curtail_length + 1]

    action_dim = actions.shape[1]
    state_dim = states.shape[1]
    args.action_dim = action_dim
    args.state_dim = state_dim + action_dim
    """
    Create environment
    """
    env = IDPEnvironment(states[1:], actions[1:], args.max_steps,
                         args.rand_init)
    eval_env = IDPEnvironment(states[1:], actions[1:], args.max_steps,
                              args.rand_init)

    if args.create_testset:
        num_states = states.shape[0]
        num_train = int(0.9 * num_states)
        num_test = num_states - num_train
        train_states = states[1:num_train]
        train_actions = actions[1:num_train]
        test_states = states[-num_test:]
        test_actions = actions[-num_test:]
        env = IDPEnvironment(train_states, train_actions, args.max_steps,
                             args.rand_init)
        eval_env = IDPEnvironment(test_states, test_actions, args.max_steps,
                                  args.rand_init)

    if args.noise == 'ou':
        noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(args.action_dim),
                                             sigma=float(args.noise_scale) *
                                             np.ones(args.action_dim))
    elif args.noise == 'normal':
        noise = NormalActionNoise(mu=np.zeros(args.action_dim),
                                  sigma=float(args.noise_scale) *
                                  np.ones(args.action_dim))
    else:
        noise = None

    if args.noise == 'param':
        param_noise = AdaptiveParamNoiseSpec(
            initial_stddev=args.noise_scale,
            desired_action_stddev=args.noise_scale)
    else:
        param_noise = None

    base_dir = os.getcwd() + '/models/GACAgent/'
    run_number = 0
    while os.path.exists(base_dir + str(run_number)):
        run_number += 1
    base_dir = base_dir + str(run_number)
    os.makedirs(base_dir)

    gac = GACAgent(**args.__dict__)
    state = env.reset()
    results_dict = {
        'train_rewards': [],
        'eval_rewards': [],
        'actor_losses': [],
        'value_losses': [],
        'critic_losses': []
    }
    episode_steps, episode_rewards = 0, 0  # total steps and rewards for each episode

    num_steps = args.num_steps
    if num_steps is not None:
        nb_epochs = int(num_steps) // (args.epoch_cycles * args.rollout_steps)
    else:
        nb_epochs = 500

    _reset_noise(gac, noise, param_noise)
    """
    training loop
    """
    average_rewards = 0
    count = 0
    total_steps = 0
    train_steps = 0
    for epoch in trange(nb_epochs):
        for cycle in range(args.epoch_cycles):
            for rollout in range(args.rollout_steps):
                """
                Get an action from neural network and run it in the environment
                """
                # print('t:', t)
                if total_steps < args.start_timesteps:
                    action = env.sample_action()
                else:
                    action = gac.select_perturbed_action(
                        tf.convert_to_tensor([state], dtype=tf.float32), noise,
                        param_noise)
                # remove the batch_size dimension if batch_size == 1
                action = tf.squeeze(action, [0]).numpy()
                # modify action from [-1, 1] to [-180, 180]
                next_state, reward, is_terminal = env.step(action)
                next_state, reward = np.float32(next_state), np.float32(reward)
                gac.store_transition(state, action, reward, next_state,
                                     is_terminal)
                episode_rewards += reward
                # print('average_rewards:', average_rewards)

                # check if game is terminated to decide how to update state, episode_steps,
                # episode_rewards
                if is_terminal:
                    state = np.float32(env.reset())
                    results_dict['train_rewards'].append(
                        (total_steps, episode_rewards))
                    episode_steps = 0
                    episode_rewards = 0
                    _reset_noise(gac, noise, param_noise)
                else:
                    state = next_state
                    episode_steps += 1

                # evaluate
                if total_steps % args.eval_freq == 0:
                    eval_rewards = evaluate_policy(gac, eval_env,
                                                   args.eval_episodes)
                    eval_reward = sum(eval_rewards) / args.eval_episodes
                    eval_variance = float(np.var(eval_rewards))
                    if args.verbose:
                        results_dict['eval_rewards'].append({
                            'total_steps': total_steps,
                            'train_steps': train_steps,
                            'average_eval_reward': eval_reward,
                            'eval_reward_variance': eval_variance,
                            'eval_rewards_list': eval_rewards
                        })
                    else:
                        results_dict['eval_rewards'].append({
                            'total_steps': total_steps,
                            'train_steps': train_steps,
                            'average_eval_reward': eval_reward,
                            'eval_reward_variance': eval_variance
                        })
                    with open(args.results_file, 'w') as file:
                        file.write(json.dumps(results_dict['eval_rewards']))

                total_steps += 1
            # train
            if gac.replay.size >= args.batch_size:
                for _ in range(args.training_steps):
                    if train_steps % args.param_noise_interval == 0 and param_noise is not None:
                        episode_transitions = gac.replay.sample_batch(
                            args.batch_size)
                        states = episode_transitions.s
                        unperturbed_actions = gac.get_action(states)
                        perturbed_actions = episode_transitions.a
                        ddpg_dist = ddpg_distance_metric(
                            perturbed_actions.numpy(),
                            unperturbed_actions.numpy())
                        param_noise.adapt(ddpg_dist)

                    gac.train_one_step()
                    train_steps += 1

    with open('results.txt', 'w') as file:
        file.write(json.dumps(results_dict))

    utils.save_model(gac.actor, base_dir)
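Examples #3 and #6 both pick their output directory by probing models/<AgentName>/<run_number> until an unused index is found; a small standalone sketch of that same pattern (the helper name next_run_dir is hypothetical):

import os

def next_run_dir(base_dir):
    """Return and create the first base_dir/<n> that does not exist yet."""
    run_number = 0
    while os.path.exists(os.path.join(base_dir, str(run_number))):
        run_number += 1
    run_dir = os.path.join(base_dir, str(run_number))
    os.makedirs(run_dir)
    return run_dir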
Example #7
from utils.plotting import plot_dataframe_attribute
from utils.preprocessing import get_data, impute_data

if __name__ == "__main__":

    data = get_data("../data/titanic3.xls")
    imputed_data = impute_data(data)
    plot_dataframe_attribute(imputed_data, "age", ["mean", "std"])
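A minimal follow-up check, assuming get_data returns a pandas DataFrame and impute_data fills the missing values before plotting:

# inspect the attribute that gets plotted (imputed_data from the snippet above)
print(imputed_data["age"].isna().sum())    # 0 if imputation filled every gap
print(imputed_data["age"].describe())      # includes the mean and std used in the plot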