Example #1
import tweepy
import tensorflow as tf

import config
import data_processer
import predict
import train

# consumer_key, consumer_secret, access_token, and access_token_secret are
# assumed to be Twitter API credentials defined at module level.


def twitter_bot():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=tf_config) as sess:
        train.show_progress("Creating model...")
        model = train.create_or_restore_model(sess,
                                              train.buckets,
                                              forward_only=True)
        model.batch_size = 1
        train.show_progress("done\n")

        enc_vocab, _ = data_processer.initialize_vocabulary(
            config.VOCAB_ENC_TXT)
        _, rev_dec_vocab = data_processer.initialize_vocabulary(
            config.VOCAB_DEC_TXT)

        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)

        while True:
            try:
                stream = tweepy.Stream(auth=api.auth,
                                       listener=StreamListener(
                                           api, sess, model, enc_vocab,
                                           rev_dec_vocab))
                stream.userstream()
            except Exception as e:
                # Log the failure, then loop to reconnect the stream.
                # (e.message does not exist in Python 3.)
                print(e.__doc__)
                print(str(e))
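The listener handed to tweepy.Stream is not shown in this excerpt. Below is a minimal sketch of what it might look like, assuming it subclasses tweepy.StreamListener (tweepy 3.x API) and replies with predict.get_predition as in Example #3; the method bodies are assumptions, not the project's exact code.

class StreamListener(tweepy.StreamListener):
    def __init__(self, api, sess, model, enc_vocab, rev_dec_vocab):
        super(StreamListener, self).__init__()
        self.api = api
        self.sess = sess
        self.model = model
        self.enc_vocab = enc_vocab
        self.rev_dec_vocab = rev_dec_vocab

    def on_status(self, status):
        # Predict a reply for the incoming tweet and post it as a response.
        reply_body = predict.get_predition(
            self.sess, self.model, self.enc_vocab, self.rev_dec_vocab,
            status.text.encode('utf-8'))
        if reply_body is not None:
            reply_text = "@" + status.author.screen_name + " " + reply_body
            self.api.update_status(status=reply_text,
                                   in_reply_to_status_id=status.id)

    def on_error(self, status_code):
        # Returning False disconnects the stream (e.g. on rate limiting, 420).
        return False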
Example #2
def __init__(self, session):
    self.session = session
    train.show_progress("Creating model...")
    self.model = train.create_or_restore_model(self.session,
                                               config.buckets,
                                               forward_only=True,
                                               beam_search=config.beam_search,
                                               beam_size=config.beam_size)
    self.model.batch_size = 1
    train.show_progress("done\n")
    self.enc_vocab, _ = data_processer.initialize_vocabulary(
        config.VOCAB_ENC_TXT)
    _, self.rev_dec_vocab = data_processer.initialize_vocabulary(
        config.VOCAB_DEC_TXT)
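The only difference from the first example is that decoding here is driven by beam-search settings read from config. A minimal sketch of the config values this constructor assumes (the attribute names come from the calls above; the concrete values and file paths are illustrative only):

# config.py -- illustrative values; only the names are taken from the calls above
beam_search = True   # decode with beam search instead of greedy argmax
beam_size = 10       # number of hypotheses kept per decoding step
buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]  # (encoder, decoder) lengths
VOCAB_ENC_TXT = "data/vocab_enc.txt"
VOCAB_DEC_TXT = "data/vocab_dec.txt"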
Example #3
import tweepy
import tensorflow as tf

import config
import data_processer
import predict
import train

# consumer_key, consumer_secret, access_token, and access_token_secret are
# assumed to be Twitter API credentials defined at module level.


def twitter_bot():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    with tf.Session(config=tf_config) as sess:
        train.show_progress("Creating model...")
        model = train.create_or_restore_model(sess,
                                              train.buckets,
                                              forward_only=True)
        model.batch_size = 1
        train.show_progress("done\n")

        enc_vocab, _ = data_processer.initialize_vocabulary(
            config.VOCAB_ENC_TXT)
        _, rev_dec_vocab = data_processer.initialize_vocabulary(
            config.VOCAB_DEC_TXT)

        for tweet in tweets():
            status_id, status = tweet
            print("Processing {0}...".format(status.text))
            screen_name = status.author.screen_name
            reply_body = predict.get_predition(sess, model, enc_vocab,
                                               rev_dec_vocab,
                                               status.text.encode('utf-8'))
            if reply_body is None:
                print("No reply predicted")
            else:
                reply_body = reply_body.replace('_UNK', '💩')
                reply_text = "@" + screen_name + " " + reply_body
                print("Reply:{0}".format(reply_text))
                api.update_status(status=reply_text,
                                  in_reply_to_status_id=status_id)
            mark_tweet_processed(status_id)
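The helpers tweets() and mark_tweet_processed() are not shown in the excerpt. A possible sketch of the interface they imply, assuming replied-to status IDs are persisted in a local text file and the same module-level credentials as above (the file name, the storage choice, and the use of mentions_timeline are all assumptions):

PROCESSED_IDS_FILE = "processed_ids.txt"  # assumed storage location


def _processed_ids():
    # Load the set of status IDs that already received a reply.
    try:
        with open(PROCESSED_IDS_FILE) as f:
            return set(line.strip() for line in f)
    except IOError:
        return set()


def tweets():
    # Yield (status_id, status) pairs for mentions not yet replied to.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    seen = _processed_ids()
    for status in api.mentions_timeline():
        if str(status.id) not in seen:
            yield status.id, status


def mark_tweet_processed(status_id):
    # Record the ID so the same tweet is never answered twice.
    with open(PROCESSED_IDS_FILE, "a") as f:
        f.write(str(status_id) + "\n")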
import sys

import tensorflow as tf

import config
import data_processer
import train

# get_predition is assumed to be defined in this same module; the Twitter
# examples above call it as predict.get_predition.


def predict():
    # Only allocate part of the GPU memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=tf_config) as sess:
        train.show_progress("Creating model...")
        model = train.create_or_restore_model(sess, train.buckets,
                                              forward_only=True)
        model.batch_size = 1
        train.show_progress("done\n")

        enc_vocab, _ = data_processer.initialize_vocabulary(
            config.VOCAB_ENC_TXT)
        _, rev_dec_vocab = data_processer.initialize_vocabulary(
            config.VOCAB_DEC_TXT)

        # Read lines from stdin and print the model's reply for each one.
        sys.stdout.write("> ")
        sys.stdout.flush()
        line = sys.stdin.readline()
        while line:
            predicted = get_predition(sess, model, enc_vocab, rev_dec_vocab,
                                      line.encode('utf-8'))
            print(predicted)
            print("> ", end="")
            sys.stdout.flush()
            line = sys.stdin.readline()
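A short usage note: running the module directly starts the interactive prompt (the __main__ guard is an assumption; the excerpt does not show how the function is invoked).

if __name__ == "__main__":
    # Start the read-predict-print loop; Ctrl-D (EOF) exits.
    predict()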