Example #1
    # Infer input/output dimensionality from the expert dataset.
    D_in, D_out = expert_data["X"].shape[-1], expert_data["y"].shape[-1]

    env = gym.make(args["env"])

    model_fn = getattr(models, args["model_fn"])
    model_dir = args["model_dir"]
    model = model_fn(D_in, D_out, model_dir=model_dir)

    expert_policy_file = args['expert_policy_file']
    expert_policy = load_policy(expert_policy_file)

    N = args["dagger_N"]
    num_rollouts = args["num_rollouts"]

    max_timesteps = args["max_timesteps"]

    render = args["render"]

    # Run DAgger: roll out the learned policy, query the expert for
    # corrective action labels, and retrain, for N iterations.
    returns = dagger(env, model, expert_policy, num_rollouts, max_timesteps, N,
                     render)

    if args.get('results_file') is not None:
        results = args.copy()
        results["returns"] = returns
        results["timestamp"] = datetime.now().isoformat()
        dump_results(args['results_file'], results)
    else:
        print('returns', returns)
        # map() returns a lazy iterator in Python 3, so materialize it
        # before printing.
        print('mean returns', list(map(np.mean, returns)))
        print('std of returns', list(map(np.std, returns)))
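All four examples call a project-specific dump_results helper that is not shown on this page. In Examples #1 and #2 it receives a file path and a results dict; a minimal sketch of that variant, assuming JSON-lines output and NumPy-aware conversion, could look like this:

import json

import numpy as np


def dump_results(results_file, results):
    # Hypothetical sketch: append each results dict as one JSON line.
    # The real project helper may serialize differently.
    def to_serializable(value):
        # NumPy arrays and scalars are not JSON-serializable by default.
        if isinstance(value, np.ndarray):
            return value.tolist()
        if isinstance(value, (np.floating, np.integer)):
            return value.item()
        return value

    with open(results_file, "a") as f:
        json.dump({k: to_serializable(v) for k, v in results.items()}, f)
        f.write("\n")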
Example #2
# Module-level dependencies for this example; dump_results is assumed to
# come from the surrounding project's helpers.
import pickle
from datetime import datetime

import numpy as np
import tensorflow as tf

import load_policy
import tf_util


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--render', action='store_true')
    parser.add_argument("--max_timesteps", type=int)
    parser.add_argument('--num_rollouts',
                        type=int,
                        default=20,
                        help='Number of expert roll outs')
    parser.add_argument('--results_file',
                        type=str,
                        help='File path for dumping the results')
    args = parser.parse_args()

    print('loading and building expert policy')
    policy_fn = load_policy.load_policy(args.expert_policy_file)
    print('loaded and built')

    with tf.Session():
        tf_util.initialize()

        import gym
        env = gym.make(args.envname)
        max_steps = args.max_timesteps or env.spec.timestep_limit

        returns = []
        observations = []
        actions = []
        # Roll out the expert policy, recording every (observation, action)
        # pair for later imitation learning.
        for i in range(args.num_rollouts):
            print('iter', i)
            obs = env.reset()
            done = False
            totalr = 0.
            steps = 0
            while not done:
                action = policy_fn(obs[None, :])
                observations.append(obs)
                actions.append(action)
                obs, r, done, _ = env.step(action)
                totalr += r
                steps += 1
                if args.render:
                    env.render()
                if steps % 100 == 0:
                    print("%i/%i" % (steps, max_steps))
                if steps >= max_steps:
                    break
            returns.append(totalr)

        print('returns', returns)
        print('mean return', np.mean(returns))
        print('std of return', np.std(returns))

        if args.results_file is not None:
            results = vars(args).copy()
            results['returns'] = returns
            results["timestamp"] = datetime.now().isoformat()
            dump_results(args.results_file, results)

        expert_data = {
            'observations': np.array(observations),
            'actions': np.array(actions)
        }

        dump_file = ("./expert_data/{}-{}.pkl"
                     "".format(args.envname, args.num_rollouts))

        with open(dump_file, "wb") as f:
            pickle.dump(expert_data, f)
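The pickle written above can later be reloaded as the expert dataset for behavioral cloning or DAgger. A minimal usage sketch, assuming envname "Hopper-v1" and the default 20 rollouts for the file name:

import pickle

with open("./expert_data/Hopper-v1-20.pkl", "rb") as f:
    expert_data = pickle.load(f)

# observations has shape (total_steps, obs_dim); actions has shape
# (total_steps, 1, act_dim), since each policy_fn call above returns a
# batch of one action.
print(expert_data["observations"].shape)
print(expert_data["actions"].shape)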
Example #3
# recursively collect all CSV files in the input directory
reg_x = re.compile(r'\.csv$')
csv_input_files = []
for path, dnames, fnames in os.walk(csv_input_directory):
    csv_input_files.extend(
        [os.path.join(path, f) for f in fnames if reg_x.search(f)])

csv_input_files.sort()
modelFactoryInstance = modelFactory()
for m in models:
    # Start each results file with a CSV header row.
    output_files = ["file,mse,parameters"]
    nan_output_files = ["file"]
    helpers.dump_results(output_files, csv_output_directory, m)

    print("##### [" + m + "]" + str(len(csv_input_files)) +
          " CSV input files to process #####")
    count = 1
    for f in csv_input_files:
        print("Processing [" + m + "]" + f)
        if f.split("/")[-1] == "electricity_utilization_specific_user":
            parameters["training_ratio"] = 0.58979536887
        else:
            parameters["training_ratio"] = defualt_training_ratio
        # fetching input file data
        dataframe_expect = pandas.read_csv(f)
        value = np.array(dataframe_expect['value'])
        timestamp = np.array(dataframe_expect['timestamp'])
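Examples #3 and #4 call helpers.dump_results with a different signature: a list of CSV lines, an output directory, and a model name. A minimal sketch, assuming it writes those lines to <directory>/<model>.csv (the real path layout may differ):

import os


def dump_results(output_lines, output_directory, model_name):
    # Hypothetical sketch: persist one CSV line per list entry.
    os.makedirs(output_directory, exist_ok=True)
    out_path = os.path.join(output_directory, model_name + ".csv")
    with open(out_path, "w") as f:
        f.write("\n".join(output_lines) + "\n")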
Example #4
model = "sherlock-framework-lstmcnn"
testing_start = 362

results_folder = "../results/"
model_results_folder = results_folder + "data/" + model + "/"

reg_x = re.compile(r'\.csv$')
csv_input_files = []
for path, dnames, fnames in os.walk(csv_input_directory):
    csv_input_files.extend([os.path.join(path, f) for f in fnames if reg_x.search(f)])

csv_input_files.sort()

output_files = ["file,mse,parameters"]
helpers.dump_results(output_files, results_folder, model)
for f in csv_input_files:
    try:
        # fetching input file data
        dataframe_expect = pandas.read_csv(f)
        value = np.array(dataframe_expect['value'])
        timestamp = np.array(dataframe_expect['timestamp'])
        try:
            label = np.array(dataframe_expect['label'])
        except KeyError:
            print("Warning: [" + f + "] no 'label' column found; "
                  "assuming the data set contains no anomalies")
            label = np.zeros(len(value))

        dataframe_prediction = pandas.read_csv(
            helpers.get_result_file_name(f, results_folder, model))
        prediction = np.array(dataframe_prediction['prediction'])
        params = "training_size=" + str(testing_start)
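helpers.get_result_file_name is likewise project-specific. Judging from model_results_folder above, it presumably maps an input CSV path to the matching prediction file under the model's results folder; a hypothetical sketch:

import os


def get_result_file_name(input_file, results_folder, model_name):
    # Hypothetical sketch: predictions assumed to live under
    # <results_folder>/data/<model_name>/ with the input file's basename.
    return os.path.join(results_folder, "data", model_name,
                        os.path.basename(input_file))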