Example #1
def twitter_bot():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=tf_config) as sess:
        train.show_progress("Creating model...")
        model = train.create_or_restore_model(sess,
                                              train.buckets,
                                              forward_only=True)
        model.batch_size = 1
        train.show_progress("done\n")

        enc_vocab, _ = data_processer.initialize_vocabulary(
            config.VOCAB_ENC_TXT)
        _, rev_dec_vocab = data_processer.initialize_vocabulary(
            config.VOCAB_DEC_TXT)

        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)

        while True:
            try:
                stream = tweepy.Stream(auth=api.auth,
                                       listener=StreamListener(
                                           api, sess, model, enc_vocab,
                                           rev_dec_vocab))
                stream.userstream()
            except Exception as e:
                print(e.__doc__)
                print(e)  # Exception objects have no .message attribute in Python 3
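Example #1 hands the stream a StreamListener built from the session, model, and vocabularies, but the class itself is not shown. A minimal sketch of what it might look like with the tweepy 3.x API (the reply logic mirrors Example #4 below and is an assumption; get_predition comes from the project's predict module):

import tweepy

class StreamListener(tweepy.StreamListener):
    def __init__(self, api, sess, model, enc_vocab, rev_dec_vocab):
        super(StreamListener, self).__init__()
        self.api = api
        self.sess = sess
        self.model = model
        self.enc_vocab = enc_vocab
        self.rev_dec_vocab = rev_dec_vocab

    def on_status(self, status):
        # Predict a reply for the incoming tweet and post it (assumed behavior).
        reply_body = predict.get_predition(self.sess, self.model,
                                           self.enc_vocab, self.rev_dec_vocab,
                                           status.text.encode('utf-8'))
        if reply_body is not None:
            reply_text = "@" + status.author.screen_name + " " + reply_body
            self.api.update_status(status=reply_text,
                                   in_reply_to_status_id=status.id)

    def on_error(self, status_code):
        # Returning False disconnects the stream (e.g. on rate limiting).
        return False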
Example #2
def __init__(self, session):
    self.session = session
    train.show_progress("Creating model...")
    self.model = train.create_or_restore_model(self.session,
                                               config.buckets,
                                               forward_only=True,
                                               beam_search=config.beam_search,
                                               beam_size=config.beam_size)
    self.model.batch_size = 1
    train.show_progress("done\n")
    self.enc_vocab, _ = data_processer.initialize_vocabulary(
        config.VOCAB_ENC_TXT)
    _, self.rev_dec_vocab = data_processer.initialize_vocabulary(
        config.VOCAB_DEC_TXT)
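This constructor evidently belongs to a wrapper class around the chatbot model. A minimal usage sketch, reusing the session setup from Example #1 (the class name TwitterBot is a hypothetical placeholder, not from the original code):

import tensorflow as tf

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
with tf.Session(config=tf_config) as sess:
    bot = TwitterBot(sess)  # hypothetical class; __init__ above restores the model and vocabularies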
Example #3
def run():
    '''
    Training function to run the training process after specifying parameters
    '''

    preprocessing.config.paths = ['./training_data/depressive1.json',
                                  './training_data/depressive2.json',
                                  './training_data/depressive3.json',
                                  './training_data/depressive4.json',
                                  './training_data/depressive5.json',
                                  './training_data/depressive6.json',
                                  './training_data/non-depressive1.json',
                                  './training_data/non-depressive2.json',
                                  './training_data/non-depressive3.json',
                                  './training_data/non-depressive4.json',
                                  './training_data/non-depressive5.json',
                                  './training_data/non-depressive6.json']

    preprocessing.config.save_path = './training_data/all_training_data.csv'

    preprocessing.config.labels = ['depressive', 'depressive', 'depressive', 'depressive', 'depressive', 'depressive',
                                   'not-depressive', 'not-depressive', 'not-depressive', 'not-depressive',
                                   'not-depressive', 'not-depressive']

    preprocessing.config.keywords = ['depressed', 'lonely', 'sad', 'depression', 'tired', 'anxious',
                                     'happy', 'joy', 'thankful', 'health', 'hopeful', 'glad']

    preprocessing.config.nr_of_tweets = [1000, 1000, 1000, 1000, 1000, 1000,
                                         1000, 1000, 1000, 1000, 1000, 1000]

    history, early_stop_check = train.train_rnn(save_path='./weights/lstm_model_2.pth',
                                                collect=True)  # collect=False if the data was already collected

    train.show_progress(history=history, save_name='./plots/training_progress.png')

    train.animate_progress(history=history, save_path='./plots/training_animation_progress_REAL.gif',
                           early_stop_check=early_stop_check)
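run() takes no arguments and reads everything from preprocessing.config, so a minimal entry point might look like this (the __main__ guard is an assumption, not shown in the original module):

if __name__ == '__main__':
    run()  # collect tweets, train the LSTM, and write the progress plots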
Example #4
def twitter_bot():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)

    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    with tf.Session(config=tf_config) as sess:
        train.show_progress("Creating model...")
        model = train.create_or_restore_model(sess,
                                              train.buckets,
                                              forward_only=True)
        model.batch_size = 1
        train.show_progress("done\n")

        enc_vocab, _ = data_processer.initialize_vocabulary(
            config.VOCAB_ENC_TXT)
        _, rev_dec_vocab = data_processer.initialize_vocabulary(
            config.VOCAB_DEC_TXT)

        for tweet in tweets():
            status_id, status = tweet
            print("Processing {0}...".format(status.text))
            screen_name = status.author.screen_name
            reply_body = predict.get_predition(sess, model, enc_vocab,
                                               rev_dec_vocab,
                                               status.text.encode('utf-8'))
            if reply_body is None:
                print("No reply predicted")
            else:
                reply_body = reply_body.replace('_UNK', '💩')
                reply_text = "@" + screen_name + " " + reply_body
                print("Reply:{0}".format(reply_text))
                api.update_status(status=reply_text,
                                  in_reply_to_status_id=status_id)
            mark_tweet_processed(status_id)
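This variant polls instead of streaming and relies on two helpers, tweets() and mark_tweet_processed(), that are not shown here. One plausible sketch, assuming a module-level tweepy API handle and a plain text file for bookkeeping (the file name and the mentions_timeline approach are assumptions):

PROCESSED_IDS_FILE = 'processed_ids.txt'  # hypothetical bookkeeping file

def tweets():
    # Yield (status_id, status) pairs for mentions not yet replied to.
    try:
        with open(PROCESSED_IDS_FILE) as f:
            processed = set(int(line) for line in f if line.strip())
    except IOError:
        processed = set()
    for status in api.mentions_timeline(count=200):  # assumes a module-level `api`
        if status.id not in processed:
            yield status.id, status

def mark_tweet_processed(status_id):
    # Record the ID so the tweet is skipped on the next pass.
    with open(PROCESSED_IDS_FILE, 'a') as f:
        f.write(str(status_id) + '\n')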
Example #5
def predict():
  # Only allocate part of the gpu memory when predicting.
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
  tf_config = tf.ConfigProto(gpu_options=gpu_options)

  with tf.Session(config=tf_config) as sess:
    train.show_progress("Creating model...")
    model = train.create_or_restore_model(sess, train.buckets, forward_only=True)
    model.batch_size = 1
    train.show_progress("done\n")

    enc_vocab, _ = data_processer.initialize_vocabulary(config.VOCAB_ENC_TXT)
    _, rev_dec_vocab = data_processer.initialize_vocabulary(config.VOCAB_DEC_TXT)

    sys.stdout.write("> ")
    sys.stdout.flush()
    line = sys.stdin.readline()
    while line:
      line = line.encode('utf-8')
      predicted = get_predition(sess, model, enc_vocab, rev_dec_vocab, line)
      print(predicted)
      print("> ", end="")
      sys.stdout.flush()
      line = sys.stdin.readline()
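predict() keeps prompting until standard input is exhausted, so it doubles as a small REPL; a hypothetical invocation (Ctrl-D ends the loop by making readline() return an empty string):

if __name__ == '__main__':
    predict()  # type a sentence at the "> " prompt to get the model's reply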