Example #1
def load_experiment_data(experiment_path):
    d = Namespace()

    # Read experiment CFG
    d.cfg = read_cfg(os.path.join(experiment_path, CFG_FILE))
    d.collect = d.cfg.collect
    d.record_timestamp = d.cfg.recorded_min_max_tp
    d.common_min_max_tp = d.cfg.common_min_max_tp

    # Read Phone data
    d.phone_log_path = os.path.join(experiment_path, PHONE_FILE)
    d.phone = pd.read_pickle(d.phone_log_path) if os.path.isfile(
        d.phone_log_path) else None

    # Read Can data
    d.steer_log_path = os.path.join(experiment_path, STEER_FILE)
    d.steer = pd.read_csv(d.steer_log_path) if os.path.isfile(
        d.steer_log_path) else None

    d.speed_log_path = os.path.join(experiment_path, SPEED_FILE)
    d.speed = pd.read_csv(d.speed_log_path) if os.path.isfile(
        d.speed_log_path) else None

    d.cfg_extra = read_cfg(os.path.join(experiment_path, CFG_EXTRA_FILE))

    # Read camera stuff
    d.cameras = []
    for camera in ["camera_{}".format(x) for x in d.cfg.camera.ids]:
        c = Namespace()
        c.name = camera
        c.video_path = os.path.join(experiment_path, "{}.mkv".format(camera))
        c.cfg = getattr(d.cfg.camera, camera)

        cfg_extras = [k for k in d.cfg_extra.__dict__.keys() if camera in k]
        c.cfg_extra = [getattr(d.cfg_extra, x) for x in cfg_extras]
        with open(os.path.join(experiment_path, f"{camera}_timestamp"),
                  "r") as f:
            c.start_timestamp = float(f.read())

        c.pts = pd.read_csv(os.path.join(experiment_path,
                                         "{}_pts.log".format(camera)),
                            header=None)
        c.pts.sort_index(inplace=True, ascending=True)
        c.pts = c.pts[0].values.tolist()
        d.cameras.append(c)
        print(d.cameras[-1].name)

        # TODO Add camera start move frame

    return d
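Note that Example #1 is a single function lifted from a larger module. To run it on its own you would need roughly the imports and constants below; the concrete file names are assumptions made for illustration, not values from the original project, and read_cfg is assumed to parse the experiment's config file into a Namespace-like object.

import os
import pandas as pd
from argparse import Namespace

# Hypothetical file-name constants; the original project defines its own values.
CFG_FILE = "cfg.yaml"
CFG_EXTRA_FILE = "cfg_extra.yaml"
PHONE_FILE = "phone.log.pkl"
STEER_FILE = "steer.csv"
SPEED_FILE = "speed.csv"

# Hypothetical usage:
# data = load_experiment_data("/path/to/experiment")
# print(data.collect, len(data.cameras))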
Example #2
def main():

	subscriptions = read_cfg("conf//subscriptions.yaml")

	r = Rest()

	rest_clients = {
		'coinbase': r.coinbase,
		'binance': r.binance,
		'kraken': r.kraken,
		'poloniex': r.poloniex
	}
	
	for exch, data in subscriptions.items():
		for pair in data['pairs']:
			print(f'Getting trade data for {exch} {pair}')
			trades = rest_clients[exch].trades(pair)
			for trade in trades:
				for t in trade:
					hwt = str(datetime.utcnow().isoformat()).replace("T","D").replace("-",".")
					ts = str(datetime.fromtimestamp(t['timestamp']).isoformat()).replace("T","D").replace("-",".")
					qStr = f"`trades insert (`timestamp${hwt};`timestamp${ts};" \
							f"`{t['feed']};`$\"{t['pair']}\";`{t['side']};`float${t['amount']};" \
							f"`float${t['price']};`int${t['id']})"
					try:
						q.sendSync(qStr, param=None)    
					except QException as e:
						print(f"Error executing query {qStr} against server. {e}")
Example #3
    def qlearning(self):
        train_scores = []
        eval_scores = []

        cfg = read_cfg(self.config_file)

        all_scores = []

        for train_ep in range(self.epochs):
            score = 0
            env = FallingObjects(cfg)
            obs = env.reset()
            state, _ = self.get_state(obs)

            for i in range(self.moves_per_epoch):
                actions = self.actions
                action = self.epsilon_greedy(self.Q, state, actions,
                                             self.epsilon)

                obs, r, done, _ = env.step(action)
                statep, r = self.get_state(obs)
                if train_ep > 1:
                    print(statep, r)
                    cv2.imshow('hehe', obs)
                    cv2.waitKey(0)
                score += r

                maximum = -float('inf')
                actionsp = self.actions
                for actionp in actionsp:
                    if self.Q.get((statep, actionp), 0) > maximum:
                        maximum = self.Q.get((statep, actionp), 0)

                self.Q[(state, action)] = self.Q.get(
                    (state, action), 0) + self.learning_rate * (
                        r + self.discount * maximum - self.Q.get(
                            (state, action), 0))

                state = statep

                if self.epsilon > self.epsilon_min:
                    self.epsilon *= 0.99999

            print("Epoch: {}; Score: {}; Epsilon: {}".format(
                train_ep, score, self.epsilon))
            all_scores.append(score)
            if train_ep % 200 == 0 and train_ep > 0:
                self.save_q()
                print("Mean score for the last 200 epochs: {}".format(
                    np.average(all_scores[:-200])))
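Example #3 (and, with a slightly different signature, Examples #4 and #5) calls an epsilon_greedy helper that is not shown. Below is a minimal sketch of the usual tabular variant matching Example #3's call, assuming actions is a list; the implementation is an assumption, not taken from the original code.

import random

def epsilon_greedy(Q, state, actions, epsilon):
    # Explore with probability epsilon, otherwise exploit the action with the
    # highest estimated Q-value for this state (unseen pairs default to 0).
    if random.random() < epsilon:
        return random.choice(actions)
    return max(actions, key=lambda a: Q.get((state, a), 0))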
Example #4
    def qlearning(self):
        train_scores = []
        eval_scores = []

        cfg = read_cfg(self.config_file)

        all_scores = []

        for train_ep in range(self.epochs):
            if train_ep <= 10:
                self.epsilon = 1
            elif self.done_pre == False:
                self.done_pre = True
                self.epsilon = 0.25
            score = 0
            env = FallingObjects(cfg)
            obs = env.reset()
            obs, _ = self.get_state(obs)
            stack_frame = deque([obs for _ in range(self.frame_size)],
                                maxlen=self.frame_size)
            state, stack_frame = self.get_frame(stack_frame, obs)
            state = np.reshape(state, [1, 1, self.frame_size, 86, 86])

            for i in range(self.moves_per_epoch):
                actions = self.actions
                action = self.epsilon_greedy(state, actions, self.epsilon)

                obs, r, done, _ = env.step(actions[action])
                obs, r = self.get_state(obs)
                print("Move: {}; action: {}; reward: {}; epsilon: {}".format(
                    i, actions[action], r, self.epsilon))

                statep, stack_frame = self.get_frame(stack_frame, obs)
                statep = np.reshape(statep, [1, 1, self.frame_size, 86, 86])
                score += r

                self.memory.append((state, action, r, statep))

                state = statep

                if train_ep > 10:
                    self.replay()

            print("Episode: {}; score: {}".format(train_ep, score))
            all_scores.append(score)
            if train_ep % 20 == 0 and train_ep > 0:
                print("Mean score for the last 200 epochs: {}".format(
                    np.average(all_scores[:-200])))
                torch.save(self.model, 'model.pt')
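Example #4 also relies on a get_frame helper to maintain a stack of the last frame_size preprocessed frames. A minimal sketch under the assumption that each preprocessed observation is an 86x86 array, as the reshape above suggests; the real helper may differ.

import numpy as np

def get_frame(stack_frame, obs):
    # Push the newest frame into the deque (the oldest is dropped automatically)
    # and return the stacked state together with the updated deque.
    stack_frame.append(obs)
    state = np.stack(stack_frame, axis=0)
    return state, stack_frame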
Example #5
    def qlearning(self):
        cfg = read_cfg(self.config_file)
        all_scores = []
        for train_ep in range(self.epochs):
            if train_ep <= 10:
                self.epsilon = 0.02
            elif self.done_pre == False:
                self.done_pre = True
                self.epsilon = 0.6
            score = 0
            env = FallingObjects(cfg)
            obs = env.reset()
            obs, _ = self.get_state(obs)
            #stack_frame = deque([obs for _ in range(self.frames)], maxlen=self.frames)
            #state, stack_frame = self.get_frame(stack_frame, obs)
            #state = np.reshape(state, [1, self.frames, 86, 86, 1])
            state = obs

            for i in range(self.moves_per_epoch):
                actions = self.actions
                action = self.epsilon_greedy(state, actions, self.epsilon)
                #print("Move: {}; action: {}".format(i, actions[action]))

                obs, r, done, _ = env.step(actions[action])
                if train_ep > 10000:
                    print(statep, r)
                    cv2.imshow('hehe', obs)
                    cv2.waitKey(0)
                obs, r = self.get_state(obs)
                #statep, stack_frame = self.get_frame(stack_frame, obs)
                #statep = np.reshape(statep, [1, self.frames, 86, 86, 1])
                statep = obs
                score += r

                self.memory.append((state, action, r, statep))

                state = statep

                if train_ep > 0:
                    self.replay()

            print("Epoch: {}; Score: {}; Epsilon: {}".format(
                train_ep, score, self.epsilon))
            all_scores.append(score)
            if train_ep % 200 == 0:
                self.model.save('configs/model.h5')
                print("Mean score for the last 200 epochs: {}".format(
                    np.average(all_scores[:-200])))
Example #6
def main():
    args = parse_args()
    cfg = read_cfg(args.config)
    if args.reloads is not None:
        if 'all' in args.reloads:
            tags = cfg.keys()
        else:
            tags = args.reloads
        tags = tuple(
            (to_unicode(tag) for tag in tags if to_unicode(tag) in cfg))
        reload_cfg(cfg, tags)
    elif args.failovers is not None:
        if 'all' in args.failovers:
            tags = cfg.keys()
        else:
            tags = args.failovers
        tags = tuple(
            (to_unicode(tag) for tag in tags if to_unicode(tag) in cfg))
        do_failover(cfg, tags)
    else:
        start_scheduler(cfg, args.seconds)
Example #7
                            '--config-file',
                            default='configs/default.yaml',
                            type=str,
                            dest='config_file',
                            help='Default configuration file')
    arg_parser.add_argument(
        '-a',
        '--agent',
        default='demo_agent+DemoAgent',
        type=str,
        dest='agent',
        help='The agent to test in format <module_name>+<class_name>')

    args = arg_parser.parse_args()
    config_file = args.config_file
    cfg = read_cfg(config_file)
    test_agent_name = args.agent.split("+")
    test_steps = cfg.test_steps
    test_agent = getattr(importlib.import_module(test_agent_name[0]),
                         test_agent_name[1])

    print(f"Testing agent {test_agent_name[1]}")

    env = FallingObjects(cfg)

    #agent = test_agent(max(ACTIONS.keys()))

    # Dueling Deep Q-Learning Agent
    agent = DDQNAgent()
    all_r = 0
    obs = env.reset()
Example #8
    arg_parser.add_argument('--camera-view-size',
                            default=400,
                            type=int,
                            dest="camera_view_size")
    arg_parser.add_argument('--start-tp',
                            default=0,
                            type=float,
                            dest="start_tp")

    arg_parser = arg_parser.parse_args()

    experiment_path = arg_parser.experiment_path
    camera_view_size = arg_parser.camera_view_size
    start_tp = arg_parser.start_tp

    cfg = read_cfg(os.path.join(experiment_path, CFG_FILE))
    cfg_extra = read_cfg(os.path.join(experiment_path, CFG_EXTRA_FILE))

    collect = cfg.collect
    record_timestamp = cfg.recorded_min_max_tp
    common_min_max_tp = cfg.common_min_max_tp

    video_loders = []
    obd_loader = None
    can_plot = None
    phone_plot = None
    plot_stuff = False
    live_plot = True

    processes = []
    recv_queue = Queue()
Example #9
        # Generate the Presenter
        out_path_pres = '%s/impl/%sPresenter.java' % (path_pres,
                                                      phd['ApiName'])
        fp = open(out_path_pres, 'w+')
        fp.write(replace_all(cont_pres, phd))
        fp.close()

        # 2. Read the IPresenter interface template
        f_ipres = open(template_ipres)
        cont_ipres = f_ipres.read()

        # Generate the IPresenter
        out_path_ipres = '%s/I%sPresenter.java' % (path_pres, phd['ApiName'])
        fip = open(out_path_ipres, 'w+')
        fip.write(replace_all(cont_ipres, phd))
        fip.close()

        # 2. Read the IView template
        f_iview = open(template_view)
        cont_iview = f_iview.read()

        # Generate the IView
        out_path_iview = '%s/I%sView.java' % (path_view, phd['ApiName'])
        fv = open(out_path_iview, 'w+')
        fv.write(replace_all(cont_iview, phd))
        fv.close()


if __name__ == '__main__':

    dic_list = read_cfg(api_cfg)
    create_presenter(dic_list)
Example #10
# import sys and add the directory of our helper functions
import sys
sys.path.append('/Users/dwhitehead/Documents/github/open_source_data_science_masters/')
import utils
import oauth2 as oauth
import urllib2 as urllib

twitter_creds = utils.read_cfg(utils.find_pass_cfg(), 'twitter')

# See assignment1.html instructions or README for how to get these credentials

api_key = twitter_creds['api_key']
api_secret = twitter_creds['api_secret']
access_token_key = twitter_creds['access_token_key']
access_token_secret = twitter_creds['access_token_secret']

_debug = 0

oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)

oauth_consumer = oauth.Consumer(key=api_key, secret=api_secret)

signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()

http_method = "GET"

http_handler = urllib.HTTPHandler(debuglevel=_debug)

https_handler = urllib.HTTPSHandler(debuglevel=_debug)
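The snippet above stops after configuring the OAuth objects and the urllib2 handlers. A hedged sketch of how they are typically combined into a signed GET request with the oauth2 library follows; the function name and URL handling are illustrative, not taken from the original file.

def twitterreq(url, method, parameters):
    # Build an OAuth 1.0a request, sign it, and send it through the
    # handlers configured above.
    req = oauth.Request.from_consumer_and_token(oauth_consumer,
                                                token=oauth_token,
                                                http_method=method,
                                                http_url=url,
                                                parameters=parameters)
    req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)

    opener = urllib.OpenerDirector()
    opener.add_handler(http_handler)
    opener.add_handler(https_handler)

    return opener.open(req.to_url())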

Example #11
def train_dqn_model(args):
    action_size = max(ACTIONS.keys()) + 1
    env = FallingObjects(read_cfg(args.config_file))
    obs = env.reset()

    tf.reset_default_graph()

    with tf.Session() as sess:
        # Create and initialize the agent.
        agent = DQNAgent(action_size, training=True)
        agent.do_setup(args, obs, sess)

        # Tensorboard setup.
        writer = tf.summary.FileWriter("./logs")
        saver = tf.train.Saver()
        tf.summary.scalar("Loss", agent.dqn.loss)
        write_op = tf.summary.merge_all()

        # Now start learning.
        obs = env.reset()
        all_rewards = []

        # We first play a bit in order to explore the environment
        # and populate the experience buffer.
        for i in range(num_exploration_steps):
            action = agent.get_random_action()
            obs, reward, _, _ = env.step(action)
            all_rewards.append(reward)
            total_reward = sum(all_rewards[-args.stack_size:])
            # total_reward = reward
            agent.remember(obs, action, total_reward)

        all_rewards = []
        for step in range(args.num_train_steps):
            # Predict an action using an e-greedy policy, where the
            # probability of exploration is decaying in time.
            action, explore_prob = agent.predict_action(
                explore_prob_begin, explore_prob_min, decay_rate, step)

            # Apply the action and get the observation and reward from
            # the environment.
            obs, reward, _, _ = env.step(action)
            all_rewards.append(reward)

            # Save the current observation to see how the agent behaves.
            cv2.imwrite(str(step) + '.png', obs)

            # And make this part of the agent's experience.
            total_reward = sum(all_rewards[-args.stack_size:])
            agent.remember(obs, action, total_reward)
            print('Step %7d, total reward = %2d' % (step, total_reward))

            # Get a mini-batch from memory and train the net.
            mini_batch = agent.mem.sample(batch_size)
            states, actions, rewards, next_states = (list(elem)
                                                     for elem in zip(
                                                         *mini_batch))

            # Compute one-hot encodings for the actions.
            actions_one_hot = np.zeros((len(actions), action_size))
            actions_one_hot[np.arange(len(actions)), actions] = 1

            target_Qs = []

            # Q values for the next states, computed with the current network.
            next_Qs = agent.sess.run(
                agent.dqn.output, feed_dict={agent.dqn.inputs_: next_states})

            # Q target should be reward + gamma * maxQ(s', a')
            target_Qs = np.array([
                rewards[i] + args.discount_factor * np.max(next_Qs[i])
                for i in range(batch_size)
            ])

            loss, _ = agent.sess.run(
                [agent.dqn.loss, agent.dqn.optimizer],
                feed_dict={
                    agent.dqn.inputs_: states,
                    agent.dqn.target_Q: target_Qs,
                    agent.dqn.actions_: actions_one_hot
                })

            summary = sess.run(write_op,
                               feed_dict={
                                   agent.dqn.inputs_: states,
                                   agent.dqn.target_Q: target_Qs,
                                   agent.dqn.actions_: actions_one_hot
                               })

            writer.add_summary(summary, step)
            writer.flush()

            # Save the model every 10 steps.
            if step % 10 == 0:
                saver.save(sess, './models/' + args.model_name + '.ckpt')