def extract_tweets(datapath, writer):
    with open(datapath) as tuning_file:
        for line in tuning_file:
            ids = line.strip().split('\t')  # tab-separated tweet IDs; strip the trailing newline
            try:
                statuses = api.statuses_lookup(ids)
            except tweepy.RateLimitError:
                print("Rate limit exceeded")
                time.sleep(15 * 60)  # wait out Twitter's 15-minute rate-limit window
                print("Resuming")
                statuses = api.statuses_lookup(ids)
            texts = [status.text.replace('\n', '') for status in statuses]
            if len(texts) > 1:  # keep only rows where more than one tweet could still be retrieved
                writer.writerow(texts)


if __name__ == '__main__':
    """ https://www.microsoft.com/en-us/download/details.aspx?id=52375&from=http%3A%2F%2Fresearch.microsoft.com%2Fen-us%2Fdownloads%2F6096d3da-0c3b-42fa-a480-646929aa06f1%2F """
    twitter_config = parse_config('twitter')
    auth = tweepy.OAuthHandler(twitter_config['ConsumerToken'],
                               twitter_config['ConsumerSecret'])
    auth.secure = True
    auth.set_access_token(twitter_config['AccessToken'],
                          twitter_config['AccessSecret'])
    api = tweepy.API(auth)

    with open(OUTPUT_FILE, 'w') as output_file:
        writer = csv.writer(output_file, quoting=csv.QUOTE_MINIMAL)
        extract_tweets(MICROSOFT_TUNING_DATA, writer)
        extract_tweets(MICROSOFT_VALIDATION_DATA, writer)
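Every snippet on this page imports parse_config from the project's own utilities, and each repository defines it slightly differently (Example #4 below even feeds it a JSON string). The following is a minimal sketch of the section-name variant used above, assuming an INI-style file read with the standard configparser module; the file path is a placeholder.

# Hypothetical sketch, not the project's actual implementation.
import configparser

def parse_config(section, path='config.cfg'):
    """Return one section of an INI-style config file."""
    parser = configparser.ConfigParser()
    parser.read(path)
    # A section proxy supports config['Key'], config.getint('Key'),
    # config.getfloat('Key') and dict(config.items()), which matches
    # how the returned object is used in Examples #1 and #7.
    return parser[section]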
Example #2
            dirpath='./', 
            filename=f"{config['model']}-{config['backbone']}-{{val_iou:.4f}}",
            save_top_k=1, 
            monitor='val_iou', 
            mode='max'
        )
        lr_monitor = LearningRateMonitor(logging_interval='step')
        cbs = [checkpoint, lr_monitor]
    return cbs

def train(config):
    pl.seed_everything(42, workers=True)
    dm = DataModule(**config)
    # Resume from a checkpoint when 'load_from' is set, otherwise build a fresh model.
    model = SMP.load_from_checkpoint(config['load_from']) if config['load_from'] else SMP(config)
    wandb_logger = WandbLogger(project="MnMs2", config=config)
    trainer = pl.Trainer(
        gpus=config['gpus'],
        precision=config['precision'],
        logger=wandb_logger if config['log'] else None,
        max_epochs=config['max_epochs'],
        callbacks=get_cbs(config),
        limit_train_batches=config['train_batches'],
        limit_val_batches=config['val_batches'],
        deterministic=True
    )
    trainer.fit(model, dm)

if __name__ == '__main__':
    config_file = sys.argv[1]
    config = parse_config(config_file)
    train(config)
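train() above reads only a handful of keys from config and forwards the whole dict to DataModule. Below is a hypothetical minimal config with placeholder values, shown purely to make the expected keys explicit; the DataModule-specific entries depend on that class and are not reproduced here.

# Placeholder values for illustration; the real project builds this via parse_config.
config = {
    'model': 'unet',         # used in the checkpoint filename
    'backbone': 'resnet18',  # used in the checkpoint filename
    'load_from': None,       # path to a .ckpt to resume from, or None
    'log': True,             # enables the WandbLogger
    'gpus': 1,
    'precision': 16,
    'max_epochs': 50,
    'train_batches': 1.0,    # passed to limit_train_batches
    'val_batches': 1.0,      # passed to limit_val_batches
    # ...plus whatever DataModule(**config) expects (data paths, batch size, ...)
}
train(config)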
Example #3
                           checkpoint_every=save_every,
                           print_every=print_every,
                           expt_dir=save_dir)

        seq2seq = t.train(seq2seq,
                          dataset,
                          num_epochs=epochs,
                          dev_data=None,
                          optimizer=optimizer,
                          resume=resume,
                          teacher_forcing_ratio=teacher_forcing,
                          early_stopping_patience=early_stopping_patience)

    # Wrap the trained model for beam-search decoding at inference time.
    beam_search = EmotionSeq2seq(
        seq2seq.encoder, EmotionTopKDecoder(seq2seq.decoder, beam_size))
    predictor = Predictor(beam_search,
                          dataset.vocabulary,
                          emotion_vocabulary=dataset.emotion_vocabulary)

    seq = "how are you".split()
    logger.info("Happy: " + " ".join(predictor.predict(seq, 'happiness')))
    logger.info("Angry: " + " ".join(predictor.predict(seq, 'anger')))
    logger.info("Sad: " + " ".join(predictor.predict(seq, 'sadness')))
    logger.info("Neutral: " + " ".join(predictor.predict(seq, 'neutral')))
    logger.info("Love: " + " ".join(predictor.predict(seq, 'love')))


if __name__ == '__main__':
    config = parse_config('dialogue')
    run(config)
Example #4
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--no-coverage", default=False, action='store_true')
parser.add_argument("--masked_loss", type=str, default="False")
parser.add_argument("--scheduler_on", type=str, default="True")
parser.add_argument("--last_layer_activation", type=str, default="True")

parser.add_argument("--show_progress", default=False, action='store_true')
args = parser.parse_args()
# -

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

api = wandb.Api()
sweep = api.sweep("{}/{}/{}".format(entity, project, args.sweepid))
config = parse_config(sweep.best_run().json_config)

epochs = config['epochs']
data_name = config['dataname']
data_pack = config['datapack']
batch_size = config['batch_size']
if "activation" in config.keys():
    activation = config['activation']
else:
    activation = "no"
hidden_dim_factor = config['hidden_dim_factor']
loss = config['loss']
masked_loss = (config['masked_loss'] == "True")
num_encoders = config['num_encoders']
model = config['model']
embedding_dim = config['embedding_dim']
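The flags above are kept as the strings "True"/"False" and compared against "True" later on (for example masked_loss). One alternative, shown only as a sketch and not as this script's approach, is to normalize such values once with a small helper that also works as an argparse type.

# Sketch of an alternative pattern, not the script's actual code.
import argparse

def str2bool(value):
    """Map common true/false spellings onto real booleans."""
    if isinstance(value, bool):
        return value
    if str(value).lower() in ('true', '1', 'yes'):
        return True
    if str(value).lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")

# As an argparse type:  parser.add_argument("--masked_loss", type=str2bool, default=False)
# On a config value:    masked_loss = str2bool(config['masked_loss'])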
Example #5
File: run.py Project: s-omranpour/ErfBot
import argparse

from src.bot import TelegramBot
from src.utils import parse_config

if __name__ == '__main__':
    # Script arguments can include path of the config
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default="chatbot.cfg")
    args = arg_parser.parse_args()
    config_path = args.config

    config = parse_config(config_path)
    telegram_bot = TelegramBot(**config)
    telegram_bot.run_bot()
Example #6
from collections import Counter
from src.settings import *
from src.utils import read_data, parse_config
from src.functions import create_distance_matrix, create_vertices, teitz_bart_algorithm

if __name__ == '__main__':
    config = parse_config(CONFIG_FILE)
    count = int(config['DEFAULT']['count'])
    p = int(config['DEFAULT']['p'])
    data = read_data(DATA_FILE)
    vertices = create_vertices(data)
    distance_matrix = create_distance_matrix(data)
    # Run the heuristic count times and tally how often each set of medians appears.
    medians = [
        tuple(teitz_bart_algorithm(distance_matrix, vertices, p))
        for _ in range(count)
    ]
    counter = Counter(medians).items()
    for key, val in sorted(counter, key=lambda item: item[1], reverse=True):
        print(f"median {key}, amount {val}")
Example #7
import json
import logging

import bottle
from bottle import request, response

from src.app.bot import EmoCourseChat
from src.models.conversational.utils import APP_NAME
from src.utils import parse_config, LOG_FORMAT

config = parse_config('app')
logger = logging.getLogger(APP_NAME)
logger.setLevel(config['LogLevel'])
handler = logging.FileHandler(config['LogPath'])
handler.setLevel(config['LogLevel'])
formatter = logging.Formatter(LOG_FORMAT)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info(dict(config.items()))
app = application = bottle.default_app()
bot = EmoCourseChat(config["Checkpoint"], config["Vocabulary"],
                    config["EmotionVocabulary"], config["Word2Vec"],
                    config.getint("BeamSize"), config.getfloat("Threshold"))


def cors(func):
    def wrapper(*args, **kwargs):
        bottle.response.set_header("Access-Control-Allow-Origin", "*")
        bottle.response.set_header("Access-Control-Allow-Methods",
                                   "GET, POST, OPTIONS")
        bottle.response.set_header("Access-Control-Allow-Headers",