Example #1
def terminate_bot(channel):
    log.info("Terminating bot for channel: {}".format(channel))
    process = processes[channel]
    if process.is_alive():
        processes[channel].terminate()
    del processes[channel]
    del bot_lifetimes[channel]
Example #2
    def on_pubmsg(self, connection, event):
        text = event.arguments[0]
        text_split = text.split(' ', 1)

        username = ''
        for tag in event.tags:  # Iterating through to find the key because this gives us a list instead of a dict...
            if tag['key'] == 'display-name':
                username = tag['value']

        if self.log_chats_in_x >= 0:
            self.log_chats_in_x -= 1

        message_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.last_chats.append("{} {} - {}: {}".format(message_time, self.channel, username, text))
        if len(self.last_chats) > 11:
            self.last_chats.popleft()
        if self.log_chats_in_x == 0:
            self.log_chat_lines()

        if text.startswith(('!prismata', '!unit', '@PrismataBot')):
            log.info('Answering "{}" in channel {} from user {}'.format(text, self.channel, username))
            if self.log_chats_in_x > 0:  # A chat dump was already scheduled; flush it now before resetting the countdown
                self.log_chat_lines()
            self.log_chats_in_x = 5

        if text_split[0] == '!prismata':
            if len(text_split) == 2:
                self.answer_prismata_command(text_split[1])
        elif text_split[0] == '!unit':
            if len(text_split) == 2:
                self.answer_unit_command(text_split[1])
            else:
                self.chat('You need to type a unit name FailFish')
        elif text_split[0] == '@PrismataBot':
            self.answer_hello_command(username)
Example #3
    def on_pubmsg(self, connection, event):
        text = event.arguments[0].lower()
        username = ''
        for tag in event.tags:  # Iterating through to find the key because this gives us a list instead of a dict...
            if tag['key'] == 'display-name':
                username = tag['value']
        chat_log.debug('{}: {}'.format(username, text))

        valid_command = False
        if text.startswith('click'):
            valid_command = self.command_click(text)
        elif text.startswith('emote'):  # elif so a 'click' message is not re-checked against the branches below
            valid_command = self.command_emote(text)
        else:
            command_non_repeatable_match = re.fullmatch(VALID_NON_REPEATABLE_COMMAND, text)
            if command_non_repeatable_match is not None:
                command_text = command_non_repeatable_match.group(0)
                self.queue_command('press', command_text)
                valid_command = True
            else:
                command_repeatable_match = re.fullmatch(VALID_COMMAND_REGEX, text)
                if command_repeatable_match is not None:
                    commands_text = command_repeatable_match.group(0)
                    commands_text = re.findall(VALID_COMMAND_INDIVIDUAL_REGEX, commands_text)
                    log.info("New combo command: {}".format(text))
                    for command_text in commands_text:
                        self.queue_command('press', command_text)
                    valid_command = True
                else:
                    command_hotkey_match = re.fullmatch(VALID_HOTKEY_COMMAND_REGEX, text)
                    if command_hotkey_match is not None:
                        command_text = command_hotkey_match.group(0)
                        command_text = command_text.replace('+', '-')
                        hotkey, key = command_text.split('-')
                        self.queue_command('hotkey', [hotkey, key])
                        valid_command = True  # mark it valid so it is shown on stream like the other commands

        if valid_command:  # Add command to the stream screen
            with open(config['Files']['stream_display_usernames'], 'a') as file:
                truncated_username = username[0:DISPLAY_USERNAME_MAX_LENGTH]
                file.write(truncated_username + '\n')
            with open(config['Files']['stream_display_commands'], 'a') as file:
                truncated_command = text[0:DISPLAY_COMMAND_MAX_LENGTH]
                file.write(truncated_command + '\n')
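
The handler above references several regex constants (VALID_NON_REPEATABLE_COMMAND, VALID_COMMAND_REGEX, VALID_COMMAND_INDIVIDUAL_REGEX, VALID_HOTKEY_COMMAND_REGEX) that are defined elsewhere in the module. The patterns below are a hypothetical sketch of their shape, inferred only from how the handler consumes them (single non-repeatable presses, repeatable combos split into individual presses, and "modifier+key" hotkeys); the real project may define them differently.

# Hypothetical patterns, inferred from usage in on_pubmsg; not the project's actual definitions.
VALID_NON_REPEATABLE_COMMAND = r'(space|enter|esc)'        # one non-repeatable key word
VALID_COMMAND_INDIVIDUAL_REGEX = r'[a-z][0-9]?'            # a single press inside a combo
VALID_COMMAND_REGEX = r'(?:[a-z][0-9]? ?){1,5}'            # up to five presses chained together
VALID_HOTKEY_COMMAND_REGEX = r'(ctrl|alt|shift)[+-][a-z]'  # e.g. "ctrl+a" -> hotkey='ctrl', key='a'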
Example #4
def get_srcs():

    sources = {}
    pos_size = 0
    neg_size = 0
    dat_files = glob.glob('./datasets/*.dat')
    log.info('Found ' + str(len(dat_files)) + ' data files: ' + str(dat_files))

    for dat_file in dat_files:
        if "neg_" in dat_file:
            sources[dat_file] = "NEG"
            neg_size += len(
                open(dat_file).readlines())  # A necessary evil for now
        elif "pos_" in dat_file:
            sources[dat_file] = "POS"
            pos_size += len(
                open(dat_file).readlines())  # A necessary evil for now

    log.info('Sample Size:' + str(neg_size + pos_size) + ' -ve:' +
             str(neg_size) + ' +ve:' + str(pos_size))

    return {"sources": sources, "neg_size": neg_size, "pos_size": pos_size}
Example #5
def score(model, neg_size, pos_size):
    #def score():
    log.info('Scoring with LogisticRegression...')

    # we'll use 80/20 for train/test
    ntrain_size = int(neg_size * 0.8)
    ptrain_size = int(pos_size * 0.8)

    ntest_size = neg_size - ntrain_size
    ptest_size = pos_size - ptrain_size

    # initialize the arrays
    train_docvecs = numpy.zeros((ntrain_size + ptrain_size, DIM_SIZE))
    train_labels = numpy.zeros(ntrain_size + ptrain_size)

    test_docvecs = numpy.zeros((ntest_size + ptest_size, DIM_SIZE))
    test_labels = numpy.zeros(ntest_size + ptest_size)

    for count in range(ntrain_size + ntest_size):
        if count < ntrain_size:
            train_docvecs[count] = model.docvecs['NEG_' + str(count)]
            train_labels[count] = 0
        else:
            test_docvecs[count - ntrain_size] = model.docvecs['NEG_' +
                                                              str(count)]
            test_labels[count - ntrain_size] = 0

    for count in range(ptrain_size + ptest_size):
        if count < ptrain_size:
            train_docvecs[ntrain_size + count] = model.docvecs['POS_' +
                                                               str(count)]
            train_labels[ntrain_size + count] = 1
        else:
            test_docvecs[ntest_size + count -
                         ptrain_size] = model.docvecs['POS_' + str(count)]
            test_labels[ntest_size + count - ptrain_size] = 1

    log.info('Fitting classifier...')
    clf = LogisticRegression()
    #clf = MLPClassifier()
    clf.fit(train_docvecs, train_labels)

    log.info('Score: ' + str(clf.score(test_docvecs, test_labels)))
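
score depends on the 'NEG_i'/'POS_i' tags assigned during training and always uses the first 80% of each class for fitting. If a random, stratified split is preferred instead, the vectors can be assembled once and handed to scikit-learn; the sketch below is an alternative, not the project's approach, and assumes the same model, neg_size, pos_size, DIM_SIZE, numpy, and LogisticRegression as the surrounding file.

from sklearn.model_selection import train_test_split

# Build the full matrices once, then let scikit-learn do a stratified 80/20 split.
docvecs = numpy.zeros((neg_size + pos_size, DIM_SIZE))
labels = numpy.zeros(neg_size + pos_size)
for i in range(neg_size):
    docvecs[i] = model.docvecs['NEG_' + str(i)]
for i in range(pos_size):
    docvecs[neg_size + i] = model.docvecs['POS_' + str(i)]
    labels[neg_size + i] = 1

train_x, test_x, train_y, test_y = train_test_split(
    docvecs, labels, test_size=0.2, stratify=labels, random_state=0)
clf = LogisticRegression()
clf.fit(train_x, train_y)
log.info('Score: ' + str(clf.score(test_x, test_y)))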
Example #6
def train():

    model = Doc2Vec(dm=0,
                    min_count=0,
                    window=10,
                    vector_size=DIM_SIZE,
                    hs=1,
                    epochs=20,
                    sample=0.0,
                    negative=5,
                    workers=5)

    # Convert the sources into d2v TaggedDocument
    log.info('Compiling data sources...')
    exDict = get_srcs()
    sources = exDict["sources"]
    tot_size = exDict["neg_size"] + exDict["pos_size"]

    sentences = []
    neg_count, pos_count, prefix_count = 0, 0, 0
    log.info('Processing data sources...')
    for source, prefix in sources.items():
        if prefix == 'NEG': prefix_count = neg_count
        elif prefix == 'POS': prefix_count = pos_count
        else:
            log.error('Unknown prefix found: ' + prefix + '. Exiting...')
            sys.exit()
        with utils.smart_open(source) as fin:
            for line_no, line in enumerate(fin):
                words = transform(utils.to_unicode(line))
                sentences.append(
                    TaggedDocument(words, [prefix + '_%s' % prefix_count]))
                prefix_count += 1
                update_progress(
                    float((neg_count + pos_count + line_no + 1) / tot_size))
        if prefix == 'NEG': neg_count = prefix_count
        elif prefix == 'POS': pos_count = prefix_count

    log.info('Building vocabulary...')
    model.build_vocab(sentences)

    alpha = 0.025
    min_alpha = 0.001
    num_epochs = 20
    alpha_delta = (alpha - min_alpha) / num_epochs

    log.info('Training doc2vec model...')
    log.setLevel(logging.WARNING)
    for epoch in range(num_epochs):
        update_progress(float((epoch + 1) / num_epochs))
        model.alpha = alpha
        model.min_alpha = alpha
        shuffle(sentences)
        model.train(sentences,
                    total_examples=model.corpus_count,
                    epochs=1)  # one pass per outer iteration; the loop itself runs num_epochs passes with a decayed alpha
        alpha -= alpha_delta

    log.setLevel(logging.INFO)

    # score
    score(model, exDict["neg_size"], exDict["pos_size"])

    log.info('Saving to model file...')
    try:
        model.save(MODEL_FILE)
    except Exception as err:  # a bare except here would also swallow KeyboardInterrupt
        log.error('Error saving model: {}'.format(err))
        sys.exit()
    try:
        log.info('Saving model metadata...')
        with open(META_FILE, 'wb') as meta_file:
            pickle.dump(exDict, meta_file)
        log.info('Saved ' + META_FILE)
    except Exception as err:
        log.error('Error saving model metadata: {}'.format(err))
        sys.exit()
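
The training loop above shuffles the corpus and decays the learning rate by hand, one pass per iteration. gensim's train() can also run the whole schedule in a single call via its start_alpha/end_alpha parameters; the sketch below assumes the same sentences and hyperparameter values and, unlike the loop, does not reshuffle between epochs.

# Alternative to the manual epoch loop: let gensim handle the passes and the linear alpha decay.
model.build_vocab(sentences)
model.train(sentences,
            total_examples=model.corpus_count,
            epochs=num_epochs,
            start_alpha=0.025,
            end_alpha=0.001)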
Example #7
def create_new_bot(channel):
    log.info("Starting new bot for channel: {}".format(channel))
    p = Process(target=start_bot, args=(channel, ))
    processes[channel] = p
    p.start()
Example #8
        if channel in bot_lifetimes:
            log.debug(
                "Bot for channel {} was on lifetime {} but was saved".format(
                    channel, bot_lifetimes[channel]))
            del bot_lifetimes[channel]


def decrement_bot_lifetime(channel):
    if channel not in bot_lifetimes:
        bot_lifetimes[channel] = 5
    else:
        bot_lifetimes[channel] -= 1
    log.debug("Bot for channel {} is on lifetime {}".format(
        channel, bot_lifetimes[channel]))
    if bot_lifetimes[channel] <= 0:
        terminate_bot(channel)


def terminate_bot(channel):
    log.info("Terminating bot for channel: {}".format(channel))
    process = processes[channel]
    if process.is_alive():
        processes[channel].terminate()
    del processes[channel]
    del bot_lifetimes[channel]


if __name__ == '__main__':
    log.info('=== Starting BotManager ===')
    bot_manager_loop()
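
create_new_bot, decrement_bot_lifetime, and terminate_bot all operate on two module-level dicts that this excerpt does not show (bot_manager_loop and start_bot are likewise defined elsewhere). A minimal sketch of that shared state, inferred from how the functions use it:

from multiprocessing import Process  # the import create_new_bot relies on

# Shared module-level state, inferred from the usage above; keyed by channel name.
processes = {}      # channel -> Process running that channel's bot
bot_lifetimes = {}  # channel -> remaining checks before terminate_bot is called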
Example #9
def infer():

    model = None
    exDict = None
    try:
        log.info('Loading model file...')
        model = Doc2Vec.load(MODEL_FILE)
    except Exception:
        log.error('Error loading ' + MODEL_FILE + '. Try running train.py')
        sys.exit()
    try:
        log.info('Loading meta file...')
        with open(META_FILE, 'rb') as meta_file:
            exDict = pickle.load(meta_file)
    except Exception:
        log.error('Error loading ' + META_FILE + '. Try running train.py')
        sys.exit()

    log.info('Preparing training data...')
    neg_size = exDict["neg_size"]
    pos_size = exDict["pos_size"]
    tot_size = neg_size + pos_size
    log.info('Sample Size:' + str(tot_size) + ' -ve:' + str(neg_size) +
             ' +ve:' + str(pos_size))

    # initialize the arrays
    docvecs = numpy.zeros((tot_size, DIM_SIZE))
    labels = numpy.zeros(tot_size)

    for count in range(neg_size):
        docvecs[count] = model.docvecs['NEG_' + str(count)]
        labels[count] = 0

    for count in range(pos_size):
        docvecs[neg_size + count] = model.docvecs['POS_' + str(count)]
        labels[neg_size + count] = 1

    log.info('Fitting classifier...')
    clf = LogisticRegression()
    clf.fit(docvecs, labels)

    # Checking inference with one sample
    filename = 'infer.txt'
    with open(filename, 'rt') as file:
        text = file.read()

    pred_sam = transform(text)
    log.info('Predicting on: %s' % pred_sam)
    pred_lbl = clf.predict_proba(model.infer_vector(pred_sam).reshape(1, -1))
    percent_neg = str('%.2f' % (pred_lbl[0, 0] * 100))
    percent_pos = str('%.2f' % (pred_lbl[0, 1] * 100))

    log.info(pred_lbl)
    log.info(clf.classes_)
    if pred_lbl[0, 0] > pred_lbl[0, 1]:  # compare the probabilities themselves, not their string forms
        log.info('Sentiment: Negative ' + percent_neg + '%')
    else:
        log.info('Sentiment: Positive ' + percent_pos + '%')
Example #10
    def on_disconnect(self, connection, event):
        log.info('Disconnected (channel {})'.format(self.channel))
        log.debug(event)
Example #11
    def queue_command(self, command_method, args):
        command = Command(command_method, args)
        log.info("New command: {} {}".format(command_method, args))
        self.command_queue.put(command)
Example #12
        elif method == 'hotkey':
            modifier, key = command.arguments
            PrismataController.hotkey(modifier, key)
        elif method == 'click':
            column, horizontal = command.arguments
            PrismataController.click(column, horizontal)
        elif method == 'emote':
            PrismataController.emote(command.arguments)

def check_and_restart_match():
    while True:
        PrismataController.check_and_restart_match()
        sleep(2)

if __name__ == '__main__':
    log.info('=== Starting TwitchPlaysPrismata ===')
    # os.system(config['Files']['prismata_exe_path'])

    # Clear display files
    open(config['Files']['stream_display_usernames'], 'w').close()
    open(config['Files']['stream_display_commands'], 'w').close()

    bot_process = Process(target=start_twitch_bot, args=(command_queue,))
    bot_process.start()

    PrismataController.post_startup()
    PrismataController.vs_computer()
    sleep(1)
    PrismataController.split_unit_tab()

    check_and_restart = Process(target=check_and_restart_match)
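
The elif chain at the top of this example is a fragment of the worker that drains command_queue; the Command objects it consumes are the ones built by queue_command in example #11. The sketch below fills in the pieces the fragment assumes; the field name 'method', the run_commands name, and PrismataController.press are guesses inferred from usage, not taken from the project.

from collections import namedtuple
from multiprocessing import Queue

# Hypothetical definitions: Command pairs a method name with its arguments,
# matching Command(command_method, args) in example #11 and command.arguments above.
Command = namedtuple('Command', ['method', 'arguments'])
command_queue = Queue()


def run_commands():
    # Assumed consumer loop that the elif chain above would sit inside.
    while True:
        command = command_queue.get()
        method = command.method
        if method == 'press':
            PrismataController.press(command.arguments)  # press() assumed from the 'press' commands queued in example #3
        elif method == 'hotkey':
            modifier, key = command.arguments
            PrismataController.hotkey(modifier, key)
        # 'click' and 'emote' dispatch as in the fragment at the top of this example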