Example #1
def classify_recipe(made_recipe):
    """
    grabs a recipe and classifies it as italian, mexican, asian, or neutral
    :param made_recipe:
    :return:
    """
    italian_titles = kb.read_txt_lines_into_list(
        util.relative_path("kb_data/italian_titles.txt"))
    mexican_titles = kb.read_txt_lines_into_list(
        util.relative_path("kb_data/mexican_titles.txt"))
    east_asian_titles = kb.read_txt_lines_into_list(
        util.relative_path("kb_data/east_asian_titles.txt"))
    recipe_title = (made_recipe.title()).lower()

    recipe_type = "neutral"
    for potential_title in italian_titles:
        if potential_title in recipe_title:
            recipe_type = "italian"
    for potential_title in mexican_titles:
        if potential_title in recipe_title:
            recipe_type = "mexican"
    for potential_title in east_asian_titles:
        if potential_title in recipe_title:
            recipe_type = "asian"
    return recipe_type
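
A minimal usage sketch for the classifier above (the kb_data keyword files are
not shown here; the expected results assume "taco" appears in
mexican_titles.txt and "pasta" in italian_titles.txt):

# Hypothetical calls; results depend on the contents of the kb_data lists.
classify_recipe("Slow-Cooker Chicken Tacos")   # -> "mexican"
classify_recipe("Creamy Pesto Pasta Bake")     # -> "italian"
classify_recipe("Banana Bread")                # -> "neutral"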
Example #2
def main():
    try:
        exercise_file = open(util.relative_path("exercise_reports"), 'r+')
        ex_reports = json.loads(exercise_file.read())
    except IOError:
        exercise_file = open(util.relative_path("exercise_reports"), 'w')
        ex_reports = {"elapsed_time": 1,  # Filler value
                      "max_id": -1,
                      "last_time": 0}

    new_reports = get_errors(copy.deepcopy(ex_reports))

    period_len = new_reports["time_this_period"]

    for ex in new_reports:
        if ex in SPECIAL_VALUES:
            continue

        if ex in ex_reports and ex_reports[ex]["num_errors"] > 0:
            errors_this_period = new_reports[ex]["this_period"]

            mean, probability = util.probability(ex_reports[ex]["num_errors"],
                                                 ex_reports["elapsed_time"],
                                                 errors_this_period,
                                                 period_len)

            print ("%s] TOTAL %s/%ss; %s-: %s/%ss; m=%.3f p=%.3f"
                   % (time.strftime("%Y-%m-%d %H:%M:%S %Z"),
                      ex_reports[ex]["num_errors"], ex_reports["elapsed_time"],
                      ex_reports["last_time"],
                      errors_this_period, period_len,
                      mean, probability))

            if (probability > 0.997 and errors_this_period > 1):
                util.send_to_slack(
                    "*Elevated exercise bug report rate in exercise `%s`\n"
                    "Reports: %s.  We saw %s in the last %s minutes,"
                    " while the mean indicates we should see around %s."
                    " *Probability that this is abnormally elevated: %.4f.*"
                    % (ex,
                       generate_slack_links(new_reports[ex]["href"]),
                       util.thousand_commas(errors_this_period),
                       util.thousand_commas(int(period_len / 60)),
                       util.thousand_commas(round(mean, 2)),
                       probability),
                    channel="#support")
        if "href" in new_reports[ex].keys():
            del new_reports[ex]["href"]  # don't need to keep the links around

    del new_reports["time_this_period"]
    # Overwrite with new contents
    exercise_file.seek(0)
    exercise_file.truncate()
    exercise_file.write(json.dumps(new_reports))

    exercise_file.close()
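
util.probability itself is not part of these excerpts. Judging from the call
sites (historical count and elapsed time, plus this period's count and length)
and from the alert text, it appears to return the expected count for the
period and the probability that the observed count is abnormally high. A
rough, illustrative sketch under a Poisson assumption (the function name and
the exact math below are not the project's actual implementation):

import math


def poisson_elevation(total_count, total_time, period_count, period_time):
    """Illustrative stand-in for util.probability.

    Returns (expected count for this period, P(X <= period_count)) for
    X ~ Poisson(expected count); values close to 1.0 indicate the observed
    count is unusually high for a period of this length.
    """
    rate = float(total_count) / total_time   # historical events per second
    mean = rate * period_time                # expected events this period
    cdf = sum(math.exp(-mean) * mean ** k / math.factorial(k)
              for k in range(int(period_count) + 1))
    return mean, cdf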
Example #3
def main():
    try:
        exercise_file = open(util.relative_path("exercise_reports"), 'r+')
        ex_reports = json.loads(exercise_file.read())
    except IOError:
        exercise_file = open(util.relative_path("exercise_reports"), 'w')
        ex_reports = {
            "elapsed_time": 1,  # Filler value
            "max_id": -1,
            "last_time": 0
        }

    new_reports = get_errors(copy.deepcopy(ex_reports))

    period_len = new_reports["time_this_period"]

    for ex in new_reports:
        if ex in SPECIAL_VALUES:
            continue

        if ex in ex_reports and ex_reports[ex]["num_errors"] > 0:
            errors_this_period = new_reports[ex]["this_period"]

            mean, probability = util.probability(ex_reports[ex]["num_errors"],
                                                 ex_reports["elapsed_time"],
                                                 errors_this_period,
                                                 period_len)

            print("%s] TOTAL %s/%ss; %s-: %s/%ss; m=%.3f p=%.3f" %
                  (time.strftime("%Y-%m-%d %H:%M:%S %Z"),
                   ex_reports[ex]["num_errors"], ex_reports["elapsed_time"],
                   ex_reports["last_time"], errors_this_period, period_len,
                   mean, probability))

            if (probability > 0.997 and errors_this_period > 1):
                util.send_to_slack(
                    "*Elevated exercise bug report rate in exercise `%s`\n"
                    "Reports: %s.  We saw %s in the last %s minutes,"
                    " while the mean indicates we should see around %s."
                    " *Probability that this is abnormally elevated: %.4f.*" %
                    (ex, generate_slack_links(new_reports[ex]["href"]),
                     util.thousand_commas(errors_this_period),
                     util.thousand_commas(int(period_len / 60)),
                     util.thousand_commas(round(mean, 2)), probability),
                    channel="#support")
        if "href" in new_reports[ex].keys():
            del new_reports[ex]["href"]  # don't need to keep the links around

    del new_reports["time_this_period"]
    # Overwrite with new contents
    exercise_file.seek(0)
    exercise_file.truncate()
    exercise_file.write(json.dumps(new_reports))

    exercise_file.close()
Example #4
def onFindDir(self, dir_path):
    global msg_index
    msg_index += 1
    # 'cd: ' is an assumed label for the directory progress message.
    print(
        'cd: ' + os.path.basename(self.rootpath) + dir_divider() +
        relative_path(self.rootpath, dir_path))
    self.filename = dir_path
    self.filesize = -1
    self.commandThread.send_fileinfo(
        COMMANE_FILE_INFO + divider_arg + os.path.basename(self.rootpath) +
        dir_divider() + relative_path(self.rootpath, dir_path) +
        divider_arg + str(-1) + divider_arg + str(msg_index))
Example #5
def main():
    try:
        exercise_file = open(util.relative_path("exercise_reports"), 'r+')
        ex_reports = json.loads(exercise_file.read())
    except IOError:
        exercise_file = open(util.relative_path("exercise_reports"), 'w')
        ex_reports = {"elapsed_time": 1,  # Filler value
                      "max_id": -1,
                      "last_time": 0}

    new_reports = get_errors(copy.deepcopy(ex_reports))

    period_len = new_reports["time_this_period"]

    for ex in new_reports:
        if ex in SPECIAL_VALUES:
            continue

        if ex in ex_reports and ex_reports[ex]["num_errors"] > 0:
            errors_this_period = new_reports[ex]["this_period"]

            mean, probability = util.probability(ex_reports[ex]["num_errors"],
                                                 ex_reports["elapsed_time"],
                                                 errors_this_period,
                                                 period_len)

            if (probability > 0.997 and errors_this_period > 1):
                # Too many errors!
                hipchat_message.send_message(
                    "Elevated exercise bug report rate in exercise %s!"
                    " Reports: %s.  We saw %s in the last %s minutes,"
                    " while the mean indicates we should see around %s."
                    " Probability that this is abnormally elevated: %.4f."
                        % (ex,
                           generate_links(new_reports[ex]["href"]),
                           util.thousand_commas(errors_this_period),
                           util.thousand_commas(int(period_len / 60)),
                           util.thousand_commas(round(mean, 2)),
                           probability),
                    room_id="Exercises")
        if "href" in new_reports[ex].keys():
            del new_reports[ex]["href"]  # don't need to keep the links around

    del new_reports["time_this_period"]
    # Overwrite with new contents
    exercise_file.seek(0)
    exercise_file.truncate()
    exercise_file.write(json.dumps(new_reports))

    exercise_file.close()
Example #6
def _load_foods(self):
    util.vprint('Loading nutrient data')
    nutritional_data = self._load_nutritional_data()
    util.vprint('Loading food data')
    with open(util.relative_path(
            'kb_data/sr27asc/FOOD_DES.txt')) as food_des_txt:
        food_des_lines = food_des_txt.readlines()
        for food_des_line in food_des_lines:
            parsed_line = parse_usda_line(food_des_line)
            new_food = Food(parsed_line[0],
                            parsed_line[1],
                            parsed_line[2],
                            common_name=parsed_line[4])
            if new_food.food_group in food_group_blacklist:
                continue
            if new_food.food_id in food_id_blacklist:
                continue
            bad_food_name = False
            for keyword_group in food_keyword_blacklist:
                for keyword in keyword_group:
                    if keyword in new_food.name:
                        bad_food_name = True
            if bad_food_name:
                continue
            if new_food.food_id in nutritional_data:
                new_food.nutritional_data = nutritional_data[
                    new_food.food_id]
            self.foods.append(new_food)
Example #7
def main():
    try:
        google_code_file = open(util.relative_path("google_code"), 'r+')
        old_reports = json.loads(google_code_file.read())
    except IOError:
        google_code_file = open(util.relative_path("google_code"), 'w')
        # elapsed_time is filler value: doesn't matter what it is
        # since issue_count is 0.
        old_reports = {"elapsed_time": 1,
                       "last_id": -1,
                       "issue_count": 0,
                       "last_time": 0}

    new_reports = get_errors(copy.deepcopy(old_reports))

    time_this_period = new_reports["time_this_period"]

    mean, probability = util.probability(old_reports["issue_count"],
                                         old_reports["elapsed_time"],
                                         new_reports["issues_this_period"],
                                         time_this_period)

    if (mean != 0 and probability > 0.99):
        # Too many errors!
        hipchat_message.send_message(
            "Elevated bug report rate on"
            " <a href='http://khanacademy.org/r/bugs'>Google"
            " code!</a>"
            " We saw %s in the last %s minutes,"
            " while the mean indicates we should see around %s."
            " Probability that this is abnormally elevated: %.4f."
            % (util.thousand_commas(new_reports["issues_this_period"]),
               util.thousand_commas(int(time_this_period / 60)),
               util.thousand_commas(round(mean, 2)),
               probability))

    # Delete fields we don't need anymore
    del new_reports["issues_this_period"]
    del new_reports["time_this_period"]

    google_code_file.seek(0)
    google_code_file.truncate()
    google_code_file.write(json.dumps(new_reports))

    google_code_file.close()
Example #8
def onFindFile(self, file_path, size):
    global msg_index
    msg_index += 1
    # '文件' means "file" in the printed progress message.
    print('文件 :' + os.path.basename(self.rootpath) + dir_divider() +
          relative_path(self.rootpath, file_path) + ' ' + formated_size(size))
    self.filename = file_path
    self.filesize = size
    if (os.path.isfile(file_path) and
            relative_path(self.rootpath, file_path) == ''):
        self.commandThread.send_fileinfo(COMMANE_FILE_INFO + divider_arg +
                                         os.path.basename(file_path) +
                                         divider_arg + str(size) +
                                         divider_arg + getFileMd5(file_path) +
                                         divider_arg + str(msg_index))
    else:
        self.commandThread.send_fileinfo(COMMANE_FILE_INFO + divider_arg +
                                         os.path.basename(self.rootpath) +
                                         dir_divider() +
                                         relative_path(self.rootpath,
                                                       file_path) +
                                         divider_arg + str(size) +
                                         divider_arg + getFileMd5(file_path) +
                                         divider_arg + str(msg_index))
Example #9
def splash_state(self):
    """
    Loads splash image
    """
    self.current_window = 'splash'
    self.init_main_window()
    splash_image_file = Tkinter.PhotoImage(
        file=util.relative_path('img/cheese_wiz_splash.gif'))
    splash_image_widget = Tkinter.Label(self.main_window,
                                        image=splash_image_file)
    splash_image_widget.photo = splash_image_file
    splash_image_widget.pack()
    self.center_on_screen()
Example #10
def load_knowledge_base():
    """
    Loads and returns knowledge base
    :return: KnowledgeBase object
    """
    kb_object_path = util.relative_path('kb_data/kb_object.p')
    if os.path.isfile(kb_object_path):
        with open(kb_object_path, 'rb') as kb_file:
            knowledge_base = pickle.load(kb_file)
    else:
        knowledge_base = kb.KnowledgeBase()
        knowledge_base.load()
        with open(kb_object_path, 'wb') as kb_file:
            pickle.dump(knowledge_base, kb_file)
    return knowledge_base
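
Since kb_data/kb_object.p acts as a cache of the parsed knowledge base, a
stale pickle will mask edits to the underlying kb_data text files. A small,
hypothetical maintenance snippet to force a rebuild:

import os

# Remove the cached pickle so the next load_knowledge_base() call rebuilds
# the KnowledgeBase from the raw kb_data files.
cache_path = util.relative_path('kb_data/kb_object.p')
if os.path.isfile(cache_path):
    os.remove(cache_path)
knowledge_base = load_knowledge_base()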
Example #11
def _load_nutritional_data():
    result = {}
    with open(util.relative_path(
            'kb_data/sr27asc/NUT_DATA.txt')) as nut_data_txt:
        nut_data_lines = nut_data_txt.readlines()
        for nut_data_line in nut_data_lines:
            parsed_line = parse_usda_line(nut_data_line)
            food_id = parsed_line[0]
            nut_id = parsed_line[1]
            nut_data = parsed_line[2:]
            if nut_id not in nutritional_data_whitelist:
                continue
            if food_id not in result:
                result[food_id] = {}
            result[food_id][nut_id] = nut_data
    return result
Example #12
def read_txt_lines_into_list(file_name):
    """
    Given a filename, returns a list with each cell being a line from the file
    Lines that have no content or begin with a '#' (comments) are skipped
    Converts to lowercase
    :param file_name: filename of source
    :return: list of file lines
    """
    result = []
    with open(util.relative_path(file_name)) as source_file:
        source_lines = source_file.readlines()
        for line in source_lines:
            if len(line) and line[-1] == '\n':
                line = line[:-1]
            if len(line) and line[0] != '#':
                result.append(line.lower())
    return result
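
For example, if a hypothetical kb_data/italian_titles.txt contained a comment
line, a blank line, and the entries "Pasta" and "Risotto", the helper above
would skip the comment and the blank line and return ['pasta', 'risotto']
(everything lowercased):

# Hypothetical call; the file contents are assumed for illustration.
italian_titles = read_txt_lines_into_list("kb_data/italian_titles.txt")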
Example #13
def read_specific_lines(file_name, start, end):
    """
    Given a filename, returns a list with each cell being a line from the file
    starting with start tag and ending with end tag
    Converts to lowercase
    :param file_name: filename of source
    :return: list of file lines
    """
    result = []
    read = False
    with open(util.relative_path(file_name)) as source_file:
        source_lines = source_file.readlines()
        for line in source_lines:
            if len(line) and line[-1] == '\n':
                line = line[:-1]
            if line == start:
                read = True
            if line == end:
                read = False
                break
            if len(line) and line[0] != '#' and read:
                result.append(line.lower())
    return result
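
Each section in the source file is delimited by a start tag and an end tag on
their own lines; because the tags begin with '#', they are filtered out along
with other comment lines. A hypothetical excerpt and call:

# If kb_data/style_substitutions.txt contained (hypothetical excerpt):
#
#     #mexican_spices
#     Cumin
#     Chili powder
#     #end_mexican_spices
#
# then the call below would return ['cumin', 'chili powder']:
spices = read_specific_lines("kb_data/style_substitutions.txt",
                             "#mexican_spices", "#end_mexican_spices")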
Example #14
def main():
    try:
        zendesk_status_file = util.relative_path("zendesk")
        with open(zendesk_status_file) as f:
            old_data = cPickle.load(f)
    except (IOError, EOFError):
        old_data = {"elapsed_time_weekday": 0.0001,   # avoid a divide-by-0
                    "elapsed_time_weekend": 0.0001,   # avoid a divide-by-0
                    "ticket_count_weekday": 0,
                    "ticket_count_weekend": 0,
                    "last_time_t": None,
                    "last_time_t_weekday": None,
                    "last_time_t_weekend": None,
                    }

    # We compare the number of tickets in the last few minutes against
    # the historical average for all time.  But we don't start "all
    # time" at AD 1, we start it a week ago.  Longer than that and it
    # takes forever due to quota issues.  That's still plenty of
    # historical data. :-)
    #
    # Zendesk seems to wait 5 minutes to update API data :-(, so we
    # ask for data that's a bit time-lagged
    end_time = int(time.time()) - 300
    start_time = old_data['last_time_t']

    # Set flag to track whether the current period counts as "off hours".
    # Separate ticket_count/elapsed_time stats are kept for off-hours vs.
    # on-hours to improve sensitivity to increases during low-traffic periods.
    is_off_hours = _is_off_hours(datetime.datetime.fromtimestamp(end_time))

    (new_tickets, oldest_ticket_time_t) = get_tickets_between(
        start_time or (end_time - 86400 * 7), end_time)
    num_new_tickets = len(new_tickets)

    # The first time we run this, we take the starting time to be the
    # time of the first bug report.

    if start_time is None:
        start_time = oldest_ticket_time_t

    time_this_period = end_time - start_time

    if is_off_hours:
        # To simplify backcompat we still use "weekend" and "weekday" in the
        # saved data; really they mean "on hours" and "off hours" now.
        ticket_count = old_data['ticket_count_weekend']
        elapsed_time = old_data['elapsed_time_weekend']
    else:
        ticket_count = old_data['ticket_count_weekday']
        elapsed_time = old_data['elapsed_time_weekday']

    (mean, probability) = util.probability(ticket_count,
                                           elapsed_time,
                                           num_new_tickets,
                                           time_this_period)

    print ("%s] TOTAL %s/%ss; %s-: %s/%ss; m=%.3f p=%.3f"
           % (time.strftime("%Y-%m-%d %H:%M:%S %Z"),
              ticket_count, int(elapsed_time),
              start_time,
              num_new_tickets, time_this_period,
              mean, probability))

    handle_alerts(new_tickets, time_this_period, mean, probability,
                  start_time, end_time)

    if is_off_hours:
        new_data = {"elapsed_time_weekend": (
                        old_data["elapsed_time_weekend"] + time_this_period),
                    "ticket_count_weekend": (
                        old_data["ticket_count_weekend"] + num_new_tickets),
                    "elapsed_time_weekday": old_data["elapsed_time_weekday"],
                    "ticket_count_weekday": old_data["ticket_count_weekday"],
                    }
    else:
        new_data = {"elapsed_time_weekend": old_data["elapsed_time_weekend"],
                    "ticket_count_weekend": old_data["ticket_count_weekend"],
                    "elapsed_time_weekday": (
                        old_data["elapsed_time_weekday"] + time_this_period),
                    "ticket_count_weekday": (
                        old_data["ticket_count_weekday"] + num_new_tickets),
                    }

    new_data['last_time_t'] = end_time

    with open(zendesk_status_file, 'w') as f:
        cPickle.dump(new_data, f)
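
The _is_off_hours helper is not part of this excerpt. Given the comment about
reusing the old "weekend"/"weekday" buckets as "off hours"/"on hours", a
plausible sketch is below; the 6am-6pm cutoff is purely an assumption for
illustration:

def _is_off_hours(dt):
    """Illustrative sketch: treat weekends and nights as off hours.

    The real helper may use different cutoffs or a support-schedule lookup.
    """
    if dt.weekday() >= 5:            # Saturday (5) or Sunday (6)
        return True
    return not (6 <= dt.hour < 18)   # outside rough business hours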
Example #15
import datetime
import json
import httplib
import logging
import re
import socket
import time
import urllib2

import util

# In theory, you can use an API key to access zendesk data, but I
# couldn't get it to work in my tests (I got 'access denied'), so we
# use the real password instead. :-(
ZENDESK_USER = '******'
ZENDESK_PASSWORD_FILE = util.relative_path("zendesk.cfg")
ZENDESK_PASSWORD = None     # set lazily

# This is the currently defined boundary for what is considered
# 'significant' in number of new tickets. Used as threshold to determine
# when to send alerts.
SIGNIFICANT_TICKET_COUNT = 5

# We have a higher ticket boundary for paging someone.
MIN_TICKET_COUNT_TO_PAGE_SOMEONE = 7


def _parse_time(s):
    """Convert a string of the form "YYYY-MM-DD HH:MM:SS -0700" to time_t.

    We ignore the -0700; it looks like all times (and time_t's!)
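
The excerpt above is cut off mid-docstring. For reference, a conversion along
the lines it describes (drop the trailing UTC offset and treat the timestamp
as local time) could look like the hedged sketch below; this is an
illustration, not the project's actual _parse_time body:

import time


def parse_zendesk_timestamp(s):
    """Illustrative sketch: "YYYY-MM-DD HH:MM:SS -0700" -> time_t,
    ignoring the trailing offset and treating the time as local."""
    return int(time.mktime(time.strptime(s[:19], "%Y-%m-%d %H:%M:%S")))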
Example #16
import base64
import cPickle
import json
import httplib
import logging
import socket
import time
import urllib2

import util

# In theory, you can use an API key to access zendesk data, but I
# couldn't get it to work in my tests (I got 'access denied'), so we
# use the real password instead. :-(
ZENDESK_USER = '******'
ZENDESK_PASSWORD_FILE = util.relative_path("zendesk.cfg")
ZENDESK_PASSWORD = None     # set lazily

# This is the currently defined boundary for what is considered
# 'significant' in number of new tickets. Used as threshold to determine
# when to send alerts.
SIGNIFICANT_TICKET_COUNT = 5


def _parse_time(s):
    """Convert a string of the form "YYYY-MM-DD HH:MM:SS -0700" to time_t.

    We ignore the -0700; it looks like all times (and time_t's!)
    reported by the API are given as PDT times, so I'm assuming
    they'll change appropriately when daylight savings time ends.
    """
Example #17
def spice_classify(made_recipe, knowledge_base):
    sauce_words = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"), "#sauce_words",
        "#end_sauce_words")
    classification = classify_recipe(made_recipe.title)
    print "classification of recipe: ", classification
    recipe_steps = made_recipe.steps
    print "Recipe Steps: ", recipe_steps
    sauce_status = False
    result = ""

    for step in recipe_steps:
        if "soy sauce" in step:
            step = step.replace("soy sauce", '')
        if "taco sauce" in step:
            step = step.replace("taco sauce", '')
        if " sauce " in step:
            sauce_status = True
            break
        for sauce in sauce_words:
            if sauce in step:
                print sauce
                sauce_status = True
                break
            elif sauce in recipe_steps[0]:
                print sauce
                sauce_status = True
                break

    ingredient_list = []
    for item in made_recipe.ingredients:
        print item.name, item.quantity.amount, item.quantity.unit
        ingredient_list.append(item.name)

    if sauce_status == True and classification == "italian":
        # print "italian with sauce"
        italian_red = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#italian_red", "#end_italian_red")
        italian_white = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#italian_white", "#end_italian_white")
        italian_green = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#italian_pesto", "#end_italian_pesto")

        italian_red_result = SequenceMatcher(None, ingredient_list,
                                             italian_red).ratio()
        print "italian red:"
        print italian_red_result

        italian_white_result = SequenceMatcher(None, ingredient_list,
                                               italian_white).ratio()
        print "italian white:"
        print italian_white_result

        italian_green_result = SequenceMatcher(None, ingredient_list,
                                               italian_green).ratio()
        print "italian green:"
        print italian_green_result

        if italian_white_result > italian_red_result:
            result = "italian_white"
        else:
            result = "italian_red"

    if sauce_status == True and classification == "asian":
        # print "asian with sauce"
        asian_orange = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#asian_orange", "#asian_orange_end")
        asian_brown = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#asian_brown", "#asian_brown_end")
        asian_sesame = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#asian_sesame", "#asian_sesame_end")
        # asian_teriyaki = kb.read_specific_lines(util.relative_path("kb_data/italian_red_sauce.txt"), "#asian_teriyaki", "#asian_teriyaki_end")

        orange_result = SequenceMatcher(None, ingredient_list,
                                        asian_orange).ratio()
        print "asian_orange: "
        print orange_result

        brown_result = SequenceMatcher(None, ingredient_list,
                                       asian_brown).ratio()
        print "asian_brown: "
        print brown_result

        sesame_result = SequenceMatcher(None, ingredient_list,
                                        asian_sesame).ratio()
        print "asian_sesame: "
        print sesame_result

        # teriyaki_result = SequenceMatcher(None, ingredient_list, asian_teriyaki).ratio()
        # print "asian_teriyaki: "
        # print teriyaki_result

        if (orange_result > brown_result) and (orange_result > sesame_result):
            result = "asian_orange"
        elif (sesame_result > orange_result) and (sesame_result >
                                                  brown_result):
            result = "asian_sesame"
        else:
            result = "asian_brown"

    if sauce_status == False and classification == "asian":
        result = "asian_no_sauce"

    if sauce_status == True and classification == "mexican":
        # print "mexican with sauce"
        mexican_red = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#mexican_red", "#end_mexican_red")
        mexican_white = kb.read_specific_lines(
            util.relative_path("kb_data/italian_red_sauce.txt"),
            "#mexican_white", "#end_mexican_white")

        red_result = SequenceMatcher(None, ingredient_list,
                                     mexican_red).ratio()
        print "mexican_red_result: "
        print red_result
        white_result = SequenceMatcher(None, ingredient_list,
                                       mexican_white).ratio()
        print "mexican_white_result: "
        print white_result

        if white_result > red_result:
            result = "mexican_white"
        else:
            result = "mexican_red"

    if (sauce_status == False) and (classification == "mexican"):
        result = "mexican_no_sauce"
    else:
        result = "neutral"
    # recipe_fusion(made_recipe, result, fusion_style, knowledge_base)
    return result
Example #18
def recipe_fusion(made_recipe, fusion_style, knowledge_base):
    sauce_type = spice_classify(made_recipe, knowledge_base)
    print "sauce type: ", sauce_type

    mexican_spices = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#mexican_spices", "#end_mexican_spices")
    mexican_ingredients = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#mexican_ingredients", "#end_mexican_ingredients")

    italian_spices = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#italian_spices", "#end_italian_spices")
    italian_ingredients = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#italian_ingredients", "#end_italian_ingredients")

    asian_spices = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"), "#asian_spices",
        "#end_asian_spices")
    asian_ingredients = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#asian_ingredients", "#end_asian_ingredients")

    master_spices = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#master_spices", "#end_master_spices")

    neutral_to_asian = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#neutral_to_asian", "#end_neutral_to_asian")
    neutral_to_mexican = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#neutral_to_mexican", "#end_neutral_to_mexican")
    neutral_to_italian = kb.read_specific_lines(
        util.relative_path("kb_data/style_substitutions.txt"),
        "#neutral_to_italian", "#end_neutral_to_italian")

    italian_red = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#italian_red",
        "#end_italian_red")
    italian_white = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#italian_white",
        "#end_italian_white")
    italian_green = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#italian_pesto",
        "#end_italian_pesto")
    mexican_red = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#mexican_red",
        "#end_mexican_red")
    mexican_white = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#mexican_white",
        "#end_mexican_white")
    asian_orange = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#asian_orange",
        "#asian_orange_end")
    asian_brown = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#asian_brown",
        "#asian_brown_end")
    asian_sesame = kb.read_specific_lines(
        util.relative_path("kb_data/italian_red_sauce.txt"), "#asian_sesame",
        "#asian_sesame_end")

    #mexican to italian
    if "mexican" in sauce_type and "italian" in fusion_style:
        for spice in mexican_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in italian_spices:
                    print "not in italian:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.italian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.italian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.italian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        italian_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.mexican_to_italian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Italian " + made_recipe.title)

    #mexican to asian
    if "mexican" in sauce_type and "asian" in fusion_style:
        for spice in mexican_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in asian_spices:
                    print "not in mexican:", made_recipe.ingredients[e].name
                    spicesub = randrange(1,
                                         len(knowledge_base.asian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.asian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.asian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        asian_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.mexican_to_asian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Asian " + made_recipe.title)

    #asian to italian
    if "asian" in sauce_type and "italian" in fusion_style:
        for spice in asian_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, " with ", made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in italian_spices:
                    print "not in italian:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.italian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.italian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.italian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        italian_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.asian_to_italian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Italian " + made_recipe.title)

    #asian to mexican
    if "asian" in sauce_type and "mexican" in fusion_style:
        for spice in asian_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, " with ", made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in mexican_spices:
                    print "not in italian:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.mexican_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        mexican_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.asian_to_mexican_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Mexican " + made_recipe.title)

    #italian to asian
    if "italian" in sauce_type and "asian" in fusion_style:
        for spice in italian_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in asian_spices:
                    print "not in italian:", made_recipe.ingredients[e].name
                    spicesub = randrange(1,
                                         len(knowledge_base.asian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.asian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.asian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        asian_spices_subs[spicesub].food_out[0].name)

        # italian_to_asian_list is assumed here, mirroring the knowledge
        # base's other <style>_to_<style>_list fields.
        for sub in knowledge_base.italian_to_asian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Asian " + made_recipe.title)

    #italian to mexican
    if "italian" in sauce_type and "mexican" in fusion_style:
        for spice in italian_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in mexican_spices:
                    print "not in mexican:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.mexican_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        mexican_spices_subs[spicesub].food_out[0].name)

        # italian_to_mexican_list is assumed here, mirroring the knowledge
        # base's other <style>_to_<style>_list fields.
        for sub in knowledge_base.italian_to_mexican_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Italian " + made_recipe.title)

    if "neutral" in sauce_type and "mexican" in fusion_style:
        for spice in master_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in mexican_spices:
                    print "not in mexican:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.mexican_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.mexican_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        mexican_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.neutral_to_mexican_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Mexican " + made_recipe.title)

    if "neutral" in sauce_type and "italian" in fusion_style:
        for spice in master_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in italian_spices:
                    print "not in italian:", made_recipe.ingredients[e].name
                    spicesub = randrange(
                        1, len(knowledge_base.italian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.italian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.italian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        italian_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.neutral_to_italian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Italian " + made_recipe.title)

    if "neutral" in sauce_type and "asian" in fusion_style:
        for spice in master_spices:
            for e in range(len(made_recipe.ingredients)):
                if spice in made_recipe.ingredients[e].name:
                    print "matched spice: ", spice, made_recipe.ingredients[
                        e].name
                    print "amount: ", made_recipe.ingredients[
                        e].quantity.amount, made_recipe.ingredients[
                            e].quantity.unit
                if spice in made_recipe.ingredients[
                        e].name and spice not in asian_spices:
                    print "not in asian:", made_recipe.ingredients[e].name
                    spicesub = randrange(1,
                                         len(knowledge_base.asian_spices_subs))
                    print "replaced: ", made_recipe.ingredients[
                        e].name, " with ", knowledge_base.asian_spices_subs[
                            spicesub].food_out[0].name
                    replaced_ingredient = made_recipe.ingredients[e].name
                    made_recipe.ingredients[
                        e] = knowledge_base.asian_spices_subs[
                            spicesub].food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        replaced_ingredient, knowledge_base.
                        asian_spices_subs[spicesub].food_out[0].name)

        for sub in knowledge_base.neutral_to_asian_list:
            for e in range(len(made_recipe.ingredients)):
                if sub.food_in.name in made_recipe.ingredients[e].name.lower():
                    subbed_ingredient = made_recipe.ingredients[e].name.lower()
                    print "item to sub: " + subbed_ingredient
                    print "replace with: " + sub.food_out[0].name
                    made_recipe.ingredients[e] = sub.food_out[0]
                    made_recipe.replace_ingredient_in_steps(
                        subbed_ingredient, sub.food_out[0].name)

        for e in range(len(made_recipe.steps)):
            print made_recipe.steps[e]
        made_recipe.change_title("Asian " + made_recipe.title)

    return made_recipe
Example #19
def main():
    try:
        zendesk_status_file = util.relative_path("zendesk")
        with open(zendesk_status_file) as f:
            old_data = cPickle.load(f)
    except IOError:
        old_data = {"elapsed_time": 0.0001,   # avoid a divide-by-0
                    "ticket_count": 0,
                    "last_time_t": None,
                    }

    # We compare the number of tickets in the last few minutes against
    # the historical average for all time.  But we don't start "all
    # time" at AD 1, we start it a week ago.  Longer than that and it
    # takes forever due to quota issues.  That's still plenty of
    # historical data. :-)
    #
    # Zendesk seems to wait 5 minutes to update API data :-(, so we
    # ask for data that's a bit time-lagged
    end_time = int(time.time()) - 300
    start_time = old_data['last_time_t']
    
    # Set flag to track if current time period is a weekend. Separate
    # ticket_count/elapsed_time stats are kept for weekend vs. weekday
    # to improve sensitivity to increases during low-traffic periods
    is_weekend = time.localtime().tm_wday in [5, 6]

    (num_new_tickets, oldest_ticket_time_t) = num_tickets_between(
        start_time or (end_time - 86400 * 7), end_time)

    # The first time we run this, we take the starting time to be the
    # time of the first bug report.

    if start_time is None:
        start_time = oldest_ticket_time_t

    time_this_period = end_time - start_time

    # To handle transition from unsegmented to segmented data, below sets
    # the weekend data to mirror the stats from the past 4 months of logs
    # to calculate a mean, and shifts all historical data to the weekday
    # data points. This will result in some inaccuracy, but the weekend
    # data should skew the weekday data only negligably. May cause some
    # skewed alerting during the transition period.
    # TODO(jacqueline): Remove this transition code after August 2017
    if 'elapsed_time' in old_data:
        old_data['ticket_count_weekday'] = old_data['ticket_count']
        old_data['ticket_count_weekend'] = 555
        old_data['elapsed_time_weekday'] = old_data['elapsed_time']
        old_data['elapsed_time_weekend'] = 2921756.0001

    if is_weekend is True:
        ticket_count = old_data['ticket_count_weekend']
        elapsed_time = old_data['elapsed_time_weekend']
    else:
        ticket_count = old_data['ticket_count_weekday']
        elapsed_time = old_data['elapsed_time_weekday']

    (mean, probability) = util.probability(ticket_count,
                                           elapsed_time,
                                           num_new_tickets,
                                           time_this_period)

    print ("%s] TOTAL %s/%ss; %s-: %s/%ss; m=%.3f p=%.3f"
           % (time.strftime("%Y-%m-%d %H:%M:%S %Z"),
              ticket_count, int(elapsed_time),
              start_time,
              num_new_tickets, time_this_period,
              mean, probability))

    handle_alerts(num_new_tickets, time_this_period, mean, probability,
                  start_time, end_time)

    if is_weekend is True:
        new_data = {"elapsed_time_weekend": (
                        old_data["elapsed_time_weekend"] + time_this_period),
                    "ticket_count_weekend": (
                        old_data["ticket_count_weekend"] + num_new_tickets),
                    "elapsed_time_weekday": old_data["elapsed_time_weekday"],
                    "ticket_count_weekday": old_data["ticket_count_weekday"],
                    }
    else:
        new_data = {"elapsed_time_weekend": old_data["elapsed_time_weekend"],
                    "ticket_count_weekend": old_data["ticket_count_weekend"],
                    "elapsed_time_weekday": (
                        old_data["elapsed_time_weekday"] + time_this_period),
                    "ticket_count_weekday": (
                        old_data["ticket_count_weekday"] + num_new_tickets),
                    }

    new_data['last_time_t'] = end_time

    with open(zendesk_status_file, 'w') as f:
        cPickle.dump(new_data, f)
Example #20
import httplib
import json
import socket
import time
import urllib
import urllib2

import util

# Only report if issue count exceeds threshold
THRESHOLD = 3


# Easier to use basic authentication than setup OAuth
JIRA_USER = '******'
JIRA_PASSWORD_FILE = util.relative_path('jira.cfg')
JIRA_PASSWORD = None     # set lazily


# Custom fields: JIRA API doesn't respect labels we give it in the UI
CREATED_FIELD = 'created'
EXERCISE_FIELD = 'customfield_10024'


def _parse_time(s):
    """Convert a string of the form "YYYY-MM-DDTHH:MM:SS.000ZZZZZ" to time_t.
    """
    # We could use strptime, but this is just as easy.
    (yyyy, mm, dd, HH, MM, T) = (int(s[0:4]), int(s[5:7]), int(s[8:10]),
                                 int(s[11:13]), int(s[14:16]), int(s[23:26]))
Example #21
    def _load_style_substitutions(self):
        """
        Loads Italian, Mexican, South Asian, vegan, AND vegetarian text files into fields
        """
        # TODO: I feel really bad about the use of copied code, so a helper function could be good to write sometime.
        mexican_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_to_italian", "#end_mexican_to_italian")
        mexican_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_to_asian", "#end_mexican_to_asian")
        asian_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_to_italian", "#end_asian_to_italian")
        asian_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_to_mexican", "#end_asian_to_mexican")
        italian_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_to_mexican", "#end_italian_to_mexican")
        italian_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_to_asian", "#end_italian_to_asian")
        italian_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_spices_subs", "#end_italian_spices_subs")
        italian_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_spices", "#end_italian_spices")
        asian_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_spices", "#end_asian_spices")
        asian_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_spices_subs", "#end_asian_spices_subs")
        mexican_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_spices", "#end_mexican_spices")
        mexican_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_spices_subs", "#end_mexican_spices_subs")
        neutral_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_asian", "#end_neutral_to_asian")
        neutral_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_mexican", "#end_neutral_to_mexican")
        neutral_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_italian", "#end_neutral_to_italian")

        vegan_sub_list = read_txt_lines_into_list(
            util.relative_path('kb_data/vegan_substitutions.txt'))
        vegetarian_sub_list = read_txt_lines_into_list(
            util.relative_path('kb_data/vegetarian_substitutions.txt'))

        for spice in italian_spices:
            self.italian_spices_list.append(self.lookup_single_food(spice))

        for raw_sub in italian_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian'))

        for raw_sub in asian_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian'))

        for raw_sub in mexican_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican'))

        for spice in mexican_spices:
            self.mexican_spices_list.append(self.lookup_single_food(spice))

        for spice in asian_spices:
            self.asian_spices_list.append(self.lookup_single_food(spice))

        for raw_sub in mexican_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican_to_italian'))

        for raw_sub in mexican_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican_to_asian'))

        for raw_sub in asian_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian_to_italian'))

        for raw_sub in asian_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian_to_mexican'))

        for raw_sub in italian_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian_to_asian'))

        for raw_sub in italian_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian_to_mexican'))

        for raw_sub in vegan_sub_list:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.vegan_substitutions.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'vegan'))

        for raw_sub in vegetarian_sub_list:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.vegetarian_substitutions.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'vegetarian'))

        for raw_sub in neutral_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_italian'))

        for raw_sub in neutral_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_asian'))

        for raw_sub in neutral_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_mexican'))
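
As the TODO at the top of this example notes, every per-cuisine block above
repeats the same read-and-parse pattern.  Here is a sketch of the helper it
could be collapsed into; the helper name and structure are hypothetical,
while read_specific_lines, _format_raw_sub, and util.warning are the
functions the example already uses.

    def _load_sub_list(self, section, label):
        """Read one "#section" ... "#end_section" block and parse its
        "input = output" lines into formatted substitutions."""
        raw_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#" + section, "#end_" + section)
        subs = []
        for raw_sub in raw_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            subs.append(self._format_raw_sub(parsed_in_out[0],
                                             parsed_in_out[1], label))
        return subs

With that helper, each block above reduces to a single assignment, e.g.
self.mexican_to_italian_list = self._load_sub_list('mexican_to_italian',
'mexican_to_italian').
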
Example #24
import contextlib
import datetime
import re
import sys
import urllib2

import util

with open(util.relative_path("hipchat.cfg")) as cfg_file:
    contents = cfg_file.read()
    # This assumes that there is a token defined in hipchat.cfg; if there isn't
    # we can't send anyway, so it's better to fail loudly.
    token_re = re.compile("token ?= ?([0-9A-Fa-f]+)")
    TOKEN = token_re.match(contents).group(1)
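
# A hipchat.cfg that satisfies the regex above would start with a line like
# the following (the hex value here is made up):
#
#   token = 0123456789abcdef
#
# token_re.match() only inspects the start of the file, so the token line
# must come first.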


def send_message(msg, room_id="1s and 0s"):
    msg_params = ("room_id=%s" % room_id +
                  "&from=beep-boop" +
                  "&message=%s" % msg +
                  "&color=red")

    try:
        url = (
            "http://api.hipchat.com/v1/rooms/message?format=json&auth_token=%s"
            % TOKEN)
        with contextlib.closing(urllib2.urlopen(url, msg_params)) as req:
            result = req.read()
    except urllib2.HTTPError, err:
        result = err
Example #25
def main():
    try:
        jira_status_file = util.relative_path('jira')
        with open(jira_status_file) as f:
            old_data = cPickle.load(f)
    except IOError:
        old_data = {'elapsed_times': {},
                    'ticket_counts': collections.defaultdict(int),
                    'last_time_t': None,
                    }

    # We compare the number of tickets in the last few minutes against
    # the historical average for all time. But we don't start "all
    # time" at AD 1, we start it 100 days ago.
    # Note: this is a way wider window than we use for Zendesk, but we're
    # making exercise-specific recommendations, so we need more data.
    now = int(time.time())
    num_days_in_past = 100
    (num_new_tickets, oldest_ticket_time_t) = num_tickets_between(
        old_data['last_time_t'] or (now - 86400 * num_days_in_past), now)

    # Elapsed time is computed per-exercise, so store values as we go.
    # We use a copy so that exercises that don't appear as new tickets still
    # have their old elapsed times preserved.
    elapsed_times = copy.copy(old_data['elapsed_times'])
    for exercise in num_new_tickets:
        # If this is the first time we're running, we don't have a last_time_t,
        # so we take the oldest ticket for each exercise as its last_time_t
        last_time_t = old_data['last_time_t'] or oldest_ticket_time_t[exercise]
        time_this_period = now - last_time_t
        # Avoid divide-by-0 if this is the first time we've seen an exercise
        time_last_period = old_data['elapsed_times'].get(exercise, 0.0001)

        num_old_tickets_for_exercise = old_data['ticket_counts'][exercise]
        num_new_tickets_for_exercise = num_new_tickets[exercise]
        (mean, probability) = util.probability(num_old_tickets_for_exercise,
                                               time_last_period,
                                               num_new_tickets_for_exercise,
                                               time_this_period)

        print('%s] %s TOTAL %s/%ss; %s-: %s/%ss; m=%.3f p=%.3f'
              % (time.strftime('%Y-%m-%d %H:%M:%S %Z'),
                  exercise,
                  num_old_tickets_for_exercise, int(time_last_period),
                  last_time_t,
                  num_new_tickets_for_exercise, time_this_period,
                  mean, probability))

        if (mean != 0 and probability > 0.9995 and
                num_new_tickets_for_exercise > THRESHOLD):
            quoted = urllib.quote(exercise.encode("utf-8"))
            ka_url = "https://khanacademy.org/e/%s" % quoted
            jira_url = "https://khanacademy.atlassian.net/browse/AI-941528?jql=Exercise%%20%%3D%%20%s" % quoted
            util.send_to_slack(
                "*Elevated bug report rate on exercise `%s`*\n"
                "We saw %s in the last %s minutes,"
                " while the mean indicates we should see around %s."
                " *Probability that this is abnormally elevated: %.4f.*\n"
                " Links: <%s|exercise on Khan Academy>, <%s|JIRA tickets>."
                % (exercise,
                   util.thousand_commas(num_new_tickets_for_exercise),
                   util.thousand_commas(int(time_this_period / 60)),
                   util.thousand_commas(round(mean, 2)),
                   probability,
                   ka_url,
                   jira_url),
                channel='#content-beep-boop')
        elapsed_times[exercise] = time_last_period + time_this_period

    new_ticket_counts = util.merge_int_dicts(old_data['ticket_counts'],
                                             num_new_tickets)
    new_data = {'elapsed_times': elapsed_times,
                'ticket_counts': new_ticket_counts,
                'last_time_t': now,
                }
    with open(jira_status_file, 'w') as f:
        cPickle.dump(new_data, f)
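
To make the alert condition concrete, here is a rough worked example with
made-up numbers, using the Poisson reading of util.probability sketched
earlier:

# 200 historical tickets over 100 days, then 4 new tickets in a 15-minute run:
mean = 200 * (900.0 / (100 * 86400))   # ~0.021 expected tickets this period
# P(X <= 3) under Poisson(0.021) is ~1.0, so probability > 0.9995, and with
# 4 > THRESHOLD this run would post the Slack alert above.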