Example #1
from livestreamer import Livestreamer

# RingBuffer and StreamBufferIsEmptyException are assumed to be project-local
# helpers; a minimal sketch of both follows the class.


class Twitch(object):
    RING_BUFFER_SIZE_KEY = 'ringbuffer-size'
    OAUTH_TOKEN_KEY = 'oauth_token'
    LIVESTREAMER_PLUGIN_TWITCH = 'twitch'

    def __init__(self, buffer_size, resolution, oauth, channel):
        self.oauth = oauth
        self.resolution = resolution
        self.channel = channel

        self.buffer_size = buffer_size
        self.buffer = RingBuffer(buffer_size=buffer_size)

        self.initialized = False
        self.stream = None

    def __del__(self):
        # Best-effort cleanup; __del__ is not guaranteed to run promptly.
        if self.initialized:
            self.stream.close()

    def initialize(self):
        self.buffer.clear()
        stream = self._init_stream(self.oauth, self.channel)
        if stream:
            # Mark as initialized only after the stream opens successfully.
            self.stream = stream.open()
            self.initialized = True

    def get_stream_data(self):
        if not self.initialized:
            # Not connected yet: try to (re)initialize and tell the caller
            # to retry once data has been buffered.
            print('Read: Try to initialize')
            self.initialize()
            raise StreamBufferIsEmptyException

        return self.buffer.read_all()

    def update_stream_data(self):
        if not self.initialized:
            print('Update: Try to initialize')
            self.initialize()
            return

        data = self.stream.read(self.buffer_size)
        print('Update: {length}'.format(length=len(data)))

        if data:
            self.buffer.write(data)
        else:
            # An empty read usually means the stream ended; reconnect.
            print('Update: Try to initialize')
            self.initialize()

    def stream_initialized(self):
        return self.stream is not None

    def _init_stream(self, oauth, channel):
        session = Livestreamer()

        session.set_plugin_option(self.LIVESTREAMER_PLUGIN_TWITCH,
                                  self.OAUTH_TOKEN_KEY, oauth)
        session.set_option(self.RING_BUFFER_SIZE_KEY, self.buffer_size)

        streams = session.streams(self._generate_stream_url(channel))
        return streams.get(self.resolution)

    @staticmethod
    def _generate_stream_url(channel):
        return 'https://www.twitch.tv/{channel}'.format(channel=channel)
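Example #1 depends on RingBuffer and StreamBufferIsEmptyException, which are not shown. The sketch below is one possible minimal implementation, inferred purely from how the class calls them (clear, write, and read_all on a byte buffer); it is an assumption, not the original helper code. (The accuracy buffer in Example #2 uses a different add/mean interface, so it is presumably a separate numeric variant.) The Twitch(...) arguments in the usage snippet are placeholders.

import collections


class StreamBufferIsEmptyException(Exception):
    """Raised when no stream data is buffered yet; callers should retry."""


class RingBuffer(object):
    """Byte buffer that keeps only the most recent buffer_size bytes."""

    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self._chunks = collections.deque()
        self._length = 0

    def clear(self):
        self._chunks.clear()
        self._length = 0

    def write(self, data):
        self._chunks.append(data)
        self._length += len(data)
        # Drop the oldest chunks once capacity is exceeded.
        while self._length > self.buffer_size and len(self._chunks) > 1:
            self._length -= len(self._chunks.popleft())

    def read_all(self):
        data = b''.join(self._chunks)
        self.clear()
        return data


# Hypothetical usage; the token and channel are placeholders.
twitch = Twitch(buffer_size=8192, resolution='best',
                oauth='<oauth-token>', channel='some_channel')
twitch.initialize()
twitch.update_stream_data()
try:
    chunk = twitch.get_stream_data()
except StreamBufferIsEmptyException:
    pass  # Nothing buffered yet; call update_stream_data() and retry.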
Example #2
import numpy as np
import tensorflow as tf
# app/flags/logging are assumed to come from absl; TF1-era scripts
# sometimes use tf.app.flags instead.
from absl import app, flags, logging

# Discriminator, Generator, RingBuffer, _parse_record, and accuracy are
# assumed to be project-local helpers not shown here.

FLAGS = flags.FLAGS


def main(argv):
    del argv  # Unused; app.run passes argv to main.
    config = FLAGS

    tf.set_random_seed(config.seed)
    np_state = np.random.RandomState(config.seed)

    global_step = tf.train.get_or_create_global_step()
    global_step_update = tf.assign(global_step, global_step + 1)

    real_ds = tf.data.TFRecordDataset(config.input_path)
    real_ds = real_ds.map(lambda x: _parse_record(x, config.image_size))
    real_ds = real_ds.shuffle(buffer_size=1000)
    real_ds = real_ds.batch(config.batch_size // 2)  # Half will be generated
    real_ds = real_ds.repeat()
    real_ds_iterator = real_ds.make_one_shot_iterator()
    real_ds_example = real_ds_iterator.get_next()

    discriminator = Discriminator('discriminator')
    generator = Generator('generator')

    z = tf.placeholder(dtype=tf.float32, shape=[None, 100])

    G_sample = generator.create_main_graph(z)

    # Both calls are assumed to share variables inside the Discriminator class.
    D_logit_real = discriminator.create_main_graph(real_ds_example)
    D_logit_fake = discriminator.create_main_graph(G_sample)

    # Inverted label convention: real images are labeled 0 and generated ones
    # 1, so the generator loss below targets 0 for its samples.
    D_expected_real = tf.zeros_like(D_logit_real)
    D_expected_fake = tf.ones_like(D_logit_fake)

    D_loss_real = tf.losses.sigmoid_cross_entropy(D_expected_real,
                                                  D_logit_real,
                                                  label_smoothing=0.2)
    D_loss_fake = tf.losses.sigmoid_cross_entropy(D_expected_fake,
                                                  D_logit_fake,
                                                  label_smoothing=0.00)

    D_loss = 0.5 * (D_loss_real + D_loss_fake)

    G_loss = tf.losses.sigmoid_cross_entropy(tf.zeros_like(D_logit_fake),
                                             D_logit_fake,
                                             label_smoothing=0.00)

    with tf.variable_scope('metrics'):
        D_prediction_real = tf.round(tf.nn.sigmoid(D_logit_real))
        D_prediction_fake = tf.round(tf.nn.sigmoid(D_logit_fake))

        D_accuracy_real = accuracy(D_prediction_real, D_expected_real)
        D_accuracy_fake = accuracy(D_prediction_fake, D_expected_fake)

        real_size = tf.to_float(tf.shape(D_prediction_real)[0])
        fake_size = tf.to_float(tf.shape(D_prediction_fake)[0])
        D_accuracy = (real_size * D_accuracy_real +
                      fake_size * D_accuracy_fake) / (real_size + fake_size)

    # Run any queued update ops (e.g. batch-norm statistics) with each step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                   scope='discriminator')
    with tf.control_dependencies(update_ops):
        D_optimizer = tf.train.AdamOptimizer(
            config.discriminator_learning_rate).minimize(
                D_loss, var_list=discriminator.get_variables())

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')
    with tf.control_dependencies(update_ops):
        G_optimizer = tf.train.AdamOptimizer(
            config.generator_learning_rate).minimize(
                G_loss, var_list=generator.get_variables())

    with tf.variable_scope('summaries'):
        D_loss_summary = tf.summary.scalar('loss',
                                           D_loss,
                                           family='discriminator')
        D_accuracy_real_summary = tf.summary.scalar('real_accuracy',
                                                    D_accuracy_real,
                                                    family='discriminator')
        D_accuracy_fake_summary = tf.summary.scalar('fake_accuracy',
                                                    D_accuracy_fake,
                                                    family='discriminator')
        D_accuracy_summary = tf.summary.scalar('accuracy',
                                               D_accuracy,
                                               family='discriminator')
        G_loss_summary = tf.summary.scalar('loss', G_loss, family='generator')
        G_image_summary = tf.summary.image('generation',
                                           G_sample,
                                           max_outputs=1,
                                           family='generator')
        real_image_summary = tf.summary.image('real',
                                              real_ds_example,
                                              max_outputs=1)

        summary_op = tf.summary.merge_all()

    # Session
    hooks = []
    hooks.append(tf.train.StopAtStepHook(num_steps=config.iterations))
    if config.save_checkpoints:
        hooks.append(
            tf.train.CheckpointSaverHook(
                checkpoint_dir=config.checkpoint_directory,
                save_secs=config.checkpoint_save_secs,
                save_steps=config.checkpoint_save_steps))

    if config.save_summaries:
        hooks.append(
            tf.train.SummarySaverHook(output_dir=config.summary_directory,
                                      save_secs=config.summary_save_secs,
                                      save_steps=config.summary_save_steps,
                                      summary_op=summary_op))

    # Only point the session at the checkpoint directory when resuming;
    # checkpointing and summaries are otherwise handled by the hooks above.
    checkpoint_dir = config.checkpoint_directory if config.restore else None
    sess = tf.train.MonitoredTrainingSession(
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_steps=None,
        save_checkpoint_secs=None,
        save_summaries_steps=None,
        save_summaries_secs=None,
        log_step_count_steps=None,
        hooks=hooks)

    def step_generator(step_context, accuracy_buffer):
        # Advance the global step manually; neither optimizer was given it.
        np_global_step = step_context.session.run(global_step)
        step_context.session.run(global_step_update)

        random_noise = np_state.normal(size=[config.batch_size, 100])
        _, np_loss, np_accuracy = step_context.run_with_hooks(
            [G_optimizer, G_loss, D_accuracy], feed_dict={z: random_noise})

        accuracy_buffer.add(np_accuracy)
        if np_global_step % config.log_step == 0:
            logging.debug(
                'Training Generator: Step: {}   Loss: {:.3e}   Accuracy: {:.2f}'
                .format(np_global_step, np_loss,
                        accuracy_buffer.mean() * 100))

    def step_discriminator(step_context, accuracy_buffer):
        np_global_step = step_context.session.run(global_step)
        step_context.session.run(global_step_update)

        # Only half a batch of noise; the real-data pipeline supplies the rest.
        random_noise = np_state.normal(size=[config.batch_size // 2, 100])
        _, np_loss, np_accuracy = step_context.run_with_hooks(
            [D_optimizer, D_loss, D_accuracy], feed_dict={z: random_noise})

        accuracy_buffer.add(np_accuracy)
        if np_global_step % config.log_step == 0:
            logging.debug(
                'Training Discriminator: Step: {}   Loss: {:.3e}   Accuracy: {:.2f}'
                .format(np_global_step, np_loss,
                        accuracy_buffer.mean() * 100))

    accuracy_buffer = RingBuffer(config.buffer_size)
    accuracy_buffer.clear()
    while not sess.should_stop():
        for _ in range(config.D_steps):
            sess.run_step_fn(lambda step_context: step_discriminator(
                step_context, accuracy_buffer))
        for _ in range(config.G_steps):
            sess.run_step_fn(lambda step_context: step_generator(
                step_context, accuracy_buffer))

    sess.close()
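Example #2 never shows the flag definitions or the entry point. Assuming the absl imports added above, the missing scaffolding might look like the sketch below; every flag name is inferred from a config.* access in main, and all defaults are placeholders rather than the original values.

flags.DEFINE_integer('seed', 42, 'Random seed for TF and NumPy.')
flags.DEFINE_string('input_path', '', 'Path to the TFRecord training data.')
flags.DEFINE_integer('image_size', 64, 'Width/height of the input images.')
flags.DEFINE_integer('batch_size', 64, 'Total batch size (half real, half generated).')
flags.DEFINE_float('discriminator_learning_rate', 2e-4, 'Adam LR for D.')
flags.DEFINE_float('generator_learning_rate', 2e-4, 'Adam LR for G.')
flags.DEFINE_integer('iterations', 100000, 'Total number of training steps.')
flags.DEFINE_integer('D_steps', 1, 'Discriminator updates per cycle.')
flags.DEFINE_integer('G_steps', 1, 'Generator updates per cycle.')
flags.DEFINE_integer('log_step', 100, 'Log every N global steps.')
flags.DEFINE_integer('buffer_size', 100, 'Size of the accuracy ring buffer.')
flags.DEFINE_bool('save_checkpoints', False, 'Whether to save checkpoints.')
flags.DEFINE_string('checkpoint_directory', '/tmp/gan', 'Checkpoint dir.')
flags.DEFINE_integer('checkpoint_save_secs', None, 'Checkpoint every N secs.')
flags.DEFINE_integer('checkpoint_save_steps', None, 'Checkpoint every N steps.')
flags.DEFINE_bool('save_summaries', False, 'Whether to save summaries.')
flags.DEFINE_string('summary_directory', '/tmp/gan', 'Summary dir.')
flags.DEFINE_integer('summary_save_secs', None, 'Summaries every N secs.')
flags.DEFINE_integer('summary_save_steps', None, 'Summaries every N steps.')
flags.DEFINE_bool('restore', False, 'Resume from the checkpoint directory.')


if __name__ == '__main__':
    app.run(main)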