Example #1
 def test_get_item_not_full(self):
     buf = RingBuffer(3)
     buf.append("test1")
     buf.append("test2")
     buf.append("test3")
     self.assertEqual(buf[0], "test1")
     self.assertEqual(buf[1], "test2")
     self.assertEqual(buf[2], "test3")
Example #2
 def test_len(self):
     buf = RingBuffer(3)
     buf.append("test1")
     self.assertEqual(len(buf), 1)
     buf.append("test2")
     self.assertEqual(len(buf), 2)
     buf.append("test3")
     self.assertEqual(len(buf), 3)
     buf.append("test4")
     self.assertEqual(len(buf), 3)
Example #3
    def __init__(self, buffer_size, resolution, oauth, channel):
        self.oauth = oauth
        self.resolution = resolution
        self.channel = channel

        self.buffer_size = buffer_size
        self.buffer = RingBuffer(buffer_size=buffer_size)

        self.initialized = False
        self.stream = None
Example #4
File: agent.py  Project: adrianbella/OpenAI
 def __init__(self, env, action_size, config):
     self.memory = RingBuffer(int(
         config.config_section_map()['memorysize']))
     self.gamma = float(
         config.config_section_map()['gamma'])  # discount rate
     self.epsilon = float(
         config.config_section_map()['epsilon'])  # exploration rate
     self.epsilon_min = float(config.config_section_map()['epsilonmin'])
     self.epsilon_decay = float(config.config_section_map()['epsilondecay'])
     self.learning_rate = float(config.config_section_map()['learningrate'])
     self.action_size = action_size
     self.env = env
     self.dqn_model = DQNModel(self.learning_rate, action_size)
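
Example #4 reads every hyperparameter out of one config section via
config.config_section_map(). That helper is not shown in the snippet; a
plausible sketch, assuming a standard configparser-backed INI file (the
path, section name, and free-function form here are hypothetical, since in
the example it is a method on a config object):

import configparser

def config_section_map(path='config.ini', section='dqn'):
    # Hypothetical helper: flatten one INI section into a plain dict so
    # lookups such as config_section_map()['gamma'] work as in Example #4.
    # configparser lowercases option names, matching keys like 'epsilonmin'.
    parser = configparser.ConfigParser()
    parser.read(path)
    return dict(parser.items(section))
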
Example #5
    def test_threading(self):
        buf = RingBuffer(3)
        buf.append("test1")
        buf.append("test2")
        buf.append("test3")

        thread1 = self.__class__.TestIterThreading(buf)
        thread2 = self.__class__.TestIterThreading(buf)
        thread1.start()
        thread2.start()
        thread1.join(3)
        thread2.join(3)
        self.assertListEqual(thread1.actual, ["test1", "test2", "test3"])
        self.assertListEqual(thread2.actual, ["test1", "test2", "test3"])
Example #6
    def test_iter(self):
        buf = RingBuffer(3)
        buf.append("test1")
        buf.append("test2")
        buf.append("test3")
        buf.append("test4")
        buf.append("test5")
        actual1 = []
        for i in buf:
            actual1.append(i)
        self.assertListEqual(actual1, ["test3", "test4", "test5"])

        actual2 = []
        for i in buf:
            actual2.append(i)
        self.assertListEqual(actual2, ["test3", "test4", "test5"])
Example #7
 def test_get_item_out_of_index(self):
     buf = RingBuffer(1)
     # assertRaises also fails the test when no exception is raised at
     # all, unlike the try/except-and-assert pattern, which passed silently.
     with self.assertRaises(IndexError):
         buf[1]
Example #8
 def test_get_item_key_is_not_int(self):
     buf = RingBuffer(1)
     # A non-int key must raise TypeError; assertRaises fails the test
     # if nothing is raised.
     with self.assertRaises(TypeError):
         buf["0"]
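
Taken together, Examples #1-#8 pin down the contract these tests expect:
append() drops the oldest item once the buffer is full, __getitem__
rejects non-int keys with TypeError and raises IndexError for out-of-range
ints, and iteration yields items oldest-to-newest and is safe to run from
several threads at once. A minimal sketch consistent with those tests (the
project's actual class may well differ):

import threading

class RingBuffer:
    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self._data = []
        self._lock = threading.Lock()

    def append(self, item):
        with self._lock:
            self._data.append(item)
            if len(self._data) > self.buffer_size:
                self._data.pop(0)  # drop the oldest item once full

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        if not isinstance(key, int):
            raise TypeError('index must be an int')
        return self._data[key]  # out-of-range ints raise IndexError

    def __iter__(self):
        # Each caller iterates over its own stable snapshot, which keeps
        # concurrent iteration (Example #5) safe.
        with self._lock:
            return iter(list(self._data))
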
Example #9
def main(argv):
    del argv
    config = FLAGS

    tf.set_random_seed(config.seed)
    np_state = np.random.RandomState(config.seed)

    global_step = tf.train.get_or_create_global_step()
    global_step_update = tf.assign(global_step, global_step + 1)

    real_ds = tf.data.TFRecordDataset(config.input_path)
    real_ds = real_ds.map(lambda x: _parse_record(x, config.image_size))
    real_ds = real_ds.shuffle(buffer_size=1000)
    real_ds = real_ds.batch(config.batch_size // 2)  # Half will be generated
    real_ds = real_ds.repeat()
    real_ds_iterator = real_ds.make_one_shot_iterator()
    real_ds_example = real_ds_iterator.get_next()

    discriminator = Discriminator('discriminator')
    generator = Generator('generator')

    z = tf.placeholder(dtype=tf.float32, shape=[None, 100])

    G_sample = generator.create_main_graph(z)

    D_logit_real = discriminator.create_main_graph(real_ds_example)
    D_logit_fake = discriminator.create_main_graph(G_sample)

    # Note the flipped label convention: real images are labeled 0 and
    # generated images 1, so G_loss below targets zeros to push fakes
    # toward the "real" side.
    D_expected_real = tf.zeros_like(D_logit_real)
    D_expected_fake = tf.ones_like(D_logit_fake)

    D_loss_real = tf.losses.sigmoid_cross_entropy(D_expected_real,
                                                  D_logit_real,
                                                  label_smoothing=0.2)
    D_loss_fake = tf.losses.sigmoid_cross_entropy(D_expected_fake,
                                                  D_logit_fake,
                                                  label_smoothing=0.00)

    D_loss = 0.5 * (D_loss_real + D_loss_fake)

    G_loss = tf.losses.sigmoid_cross_entropy(tf.zeros_like(D_logit_fake),
                                             D_logit_fake,
                                             label_smoothing=0.00)

    with tf.variable_scope('metrics'):
        D_prediction_real = tf.round(tf.nn.sigmoid(D_logit_real))
        D_prediction_fake = tf.round(tf.nn.sigmoid(D_logit_fake))

        D_accuracy_real = accuracy(D_prediction_real, D_expected_real)
        D_accuracy_fake = accuracy(D_prediction_fake, D_expected_fake)

        real_size = tf.to_float(tf.shape(D_prediction_real)[0])
        fake_size = tf.to_float(tf.shape(D_prediction_fake)[0])
        D_accuracy = (real_size * D_accuracy_real +
                      fake_size * D_accuracy_fake) / (real_size + fake_size)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                   scope='discriminator')
    with tf.control_dependencies(update_ops):
        D_optimizer = tf.train.AdamOptimizer(
            config.discriminator_learning_rate).minimize(
                D_loss, var_list=discriminator.get_variables())

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')
    with tf.control_dependencies(update_ops):
        G_optimizer = tf.train.AdamOptimizer(
            config.generator_learning_rate).minimize(
                G_loss, var_list=generator.get_variables())

    with tf.variable_scope('summaries'):
        D_loss_summary = tf.summary.scalar('loss',
                                           D_loss,
                                           family='discriminator')
        D_accuracy_real_summary = tf.summary.scalar('real_accuracy',
                                                    D_accuracy_real,
                                                    family='discriminator')
        D_accuracy_fake_summary = tf.summary.scalar('fake_accuracy',
                                                    D_accuracy_fake,
                                                    family='discriminator')
        D_accuracy_summary = tf.summary.scalar('accuracy',
                                               D_accuracy,
                                               family='discriminator')
        G_loss_summary = tf.summary.scalar('loss', G_loss, family='generator')
        G_image_summary = tf.summary.image('generation',
                                           G_sample,
                                           max_outputs=1,
                                           family='generator')
        Real_image_summary = tf.summary.image('real',
                                              real_ds_example,
                                              max_outputs=1)

        summary_op = tf.summary.merge_all()

    # Session
    hooks = []
    hooks.append(tf.train.StopAtStepHook(num_steps=config.iterations))
    if config.save_checkpoints:
        hooks.append(
            tf.train.CheckpointSaverHook(
                checkpoint_dir=config.checkpoint_directory,
                save_secs=config.checkpoint_save_secs,
                save_steps=config.checkpoint_save_steps))

    if config.save_summaries:
        hooks.append(
            tf.train.SummarySaverHook(output_dir=config.summary_directory,
                                      save_secs=config.summary_save_secs,
                                      save_steps=config.summary_save_steps,
                                      summary_op=summary_op))

    if config.restore:
        sess = tf.train.MonitoredTrainingSession(
            checkpoint_dir=config.checkpoint_directory,
            save_checkpoint_steps=None,
            save_checkpoint_secs=None,
            save_summaries_steps=None,
            save_summaries_secs=None,
            log_step_count_steps=None,
            hooks=hooks)
    else:
        sess = tf.train.MonitoredTrainingSession(save_checkpoint_steps=None,
                                                 save_checkpoint_secs=None,
                                                 save_summaries_steps=None,
                                                 save_summaries_secs=None,
                                                 log_step_count_steps=None,
                                                 hooks=hooks)

    def step_generator(step_context, accuracy_buffer):
        np_global_step = step_context.session.run(global_step)
        step_context.session.run(global_step_update)

        random_noise = np_state.normal(size=[config.batch_size, 100])
        _, np_loss, np_accuracy = step_context.run_with_hooks(
            [G_optimizer, G_loss, D_accuracy], feed_dict={z: random_noise})

        accuracy_buffer.add(np_accuracy)
        if np_global_step % config.log_step == 0:
            logging.debug(
                'Training Generator: Step: {}   Loss: {:.3e}   Accuracy: {:.2f}'
                .format(np_global_step, np_loss,
                        accuracy_buffer.mean() * 100))

    def step_discriminator(step_context, accuracy_buffer):
        np_global_step = step_context.session.run(global_step)
        step_context.session.run(global_step_update)

        random_noise = np_state.normal(size=[config.batch_size // 2, 100])
        _, np_loss, np_accuracy = step_context.run_with_hooks(
            [D_optimizer, D_loss, D_accuracy], feed_dict={z: random_noise})

        accuracy_buffer.add(np_accuracy)
        if np_global_step % config.log_step == 0:
            logging.debug(
                'Training Discriminator: Step: {}   Loss Mean: {:.3e}   Accuracy: {:.2f}'
                .format(np_global_step, np_loss,
                        accuracy_buffer.mean() * 100))

    accuracy_buffer = RingBuffer(config.buffer_size)
    accuracy_buffer.clear()
    while not sess.should_stop():
        for _ in range(config.D_steps):
            sess.run_step_fn(lambda step_context: step_discriminator(
                step_context, accuracy_buffer))
        for _ in range(config.G_steps):
            sess.run_step_fn(lambda step_context: step_generator(
                step_context, accuracy_buffer))

    sess.close()
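
Example #9 relies on a different flavor of RingBuffer than the tests
above: a numeric accumulator with add(), mean(), and clear(), used to
smooth the accuracy figure printed in the log lines. A minimal sketch
consistent with those calls, assuming the buffer simply averages its most
recent entries:

from collections import deque

class RingBuffer:
    def __init__(self, size):
        # deque(maxlen=...) discards the oldest value automatically.
        self._values = deque(maxlen=size)

    def add(self, value):
        self._values.append(value)

    def mean(self):
        if not self._values:
            return 0.0
        return sum(self._values) / len(self._values)

    def clear(self):
        self._values.clear()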