def test_my_guess_honors_max_length_by_truncating_longest():
    y = [
        deploy.Prediction(2, '567890', 0.024),
        deploy.Prediction(7, '012', 0.046),
    ]
    # before truncation:
    # '\n2. 567890 2.40%\n7. 012 4.60%'
    max_length = 26
    reply = messages.Messages.my_guess(y, preface='', max_length=max_length)
    assert reply == '\n2. 567 2.40%\n7. 012 4.60%'
    assert len(reply) <= max_length

def test_my_guess_honors_max_length_by_truncating_all():
    y = [
        deploy.Prediction(2, '567', 0.091),
        deploy.Prediction(4, '789', 0.012),
    ]
    # before truncation:
    # '\n2. 567 9.10%\n4. 789 1.20%'
    max_length = 22
    reply = messages.Messages.my_guess(y, preface='', max_length=max_length)
    assert reply == '\n2. 5 9.10%\n4. 7 1.20%'
    assert len(reply) <= max_length
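
# Companion sketch (not part of the original test suite): when the rendered
# reply already fits within max_length, no category should be truncated. It
# reuses the same `deploy` and `messages` imports the tests above rely on.
def test_my_guess_leaves_categories_alone_when_reply_fits():
    y = [
        deploy.Prediction(1, 'abc', 0.5),
    ]
    reply = messages.Messages.my_guess(y, preface='', max_length=80)
    assert reply == '\n1. abc 50.00%'
    assert len(reply) <= 80
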
    @classmethod
    def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
        if not len(y):
            return cls.unknown_image()

        # format each prediction as ("<rank>.", category, "<probability>%")
        # and remember which category is longest so it can be truncated first
        pred_lines = []
        max_category_length = 0
        max_category_length_index = 0

        for i, pred in enumerate(y[:top_n]):
            pred_lines.append(
                deploy.Prediction(
                    "{}.".format(pred.rank),
                    pred.category,
                    "{:.2%}".format(pred.probability),
                ))
            if max_category_length < len(pred.category):
                max_category_length_index = i
                max_category_length = len(pred.category)

        # each line is the three fields joined by single spaces and preceded
        # by a newline, so the reply length is preface + one newline per line
        # + (field lengths + the two separating spaces per line)
        newline_count = len(pred_lines)
        pred_length = sum(
            sum(map(len, pred)) + len(pred) - 1 for pred in pred_lines)
        current_length = len(preface) + newline_count + pred_length

        # truncate category name(s) if needed
        if max_length is not None and current_length > max_length:
            lengthy_pred = pred_lines[max_category_length_index]
            excess_length = current_length - max_length
            # don't penalize the longest category if it's going to be truncated too much
            if len(lengthy_pred.category) * 0.5 < excess_length:
                subtract_from_everyone_length = int(
                    math.ceil(excess_length / len(pred_lines)))
                pred_lines = [
                    deploy.Prediction(
                        pred.rank,
                        pred.category[:-subtract_from_everyone_length],
                        pred.probability) for pred in pred_lines
                ]
            else:
                shortened_pred = deploy.Prediction(
                    lengthy_pred.rank, lengthy_pred.category[:-excess_length],
                    lengthy_pred.probability)
                pred_lines[max_category_length_index] = shortened_pred

        reply = "{}\n{}".format(
            preface, "\n".join(" ".join(pred) for pred in pred_lines))
        return reply[:max_length] if max_length is not None else reply
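
    # Usage sketch (illustrative; the category names below are made up): with
    # Prediction tuples like those in the tests, my_guess renders one
    # "rank. category probability" line per prediction, keeps at most top_n
    # lines, and only truncates categories when max_length is given.
    #
    #   >>> y = [deploy.Prediction(1, 'madoka', 0.62),
    #   ...      deploy.Prediction(2, 'homura', 0.21)]
    #   >>> messages.Messages.my_guess(y)
    #   'Probable Anime:\n1. madoka 62.00%\n2. homura 21.00%'
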
    def classify(self, **params):
        try:
            r = requests.get(self._base_url, params=params, timeout=60).json()
            if 'error' in r:
                raise exc.RemoteError(r['error'])
            # build a real list so callers can call len() on the result
            return [deploy.Prediction(**guess) for guess in r['y']]
        except requests.exceptions.Timeout:
            raise exc.TimeoutError
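
    # Usage sketch (illustrative): the keyword arguments are passed straight
    # through as query-string parameters on the GET request, so the parameter
    # name 'url' and the `client` instance below are assumptions, not part of
    # this snippet.
    #
    #   >>> y = client.classify(url='https://example.com/image.jpg')
    #   >>> messages.Messages.my_guess(y, max_length=140)
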
    def classify(self, *args, **kwargs):
        message = at_random(
            "I hope a mock message like this won't get caught by Twitter's spam filter",
            "But I must explain to you how all this mistaken idea was born",
            "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis",
            "Excepteur sint occaecat cupidatat non proident",
        )
        # probability is a fraction (1.0 == 100%), matching the real classifier
        return [deploy.Prediction(1, message, 1.0)]
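
# `at_random` is used by the mock above but not defined in this excerpt; a
# minimal sketch of the behaviour it appears to provide (pick one of its
# arguments uniformly at random), assuming nothing beyond the standard library:
import random


def at_random(*choices):
    # Return one of the given arguments, chosen uniformly at random.
    return random.choice(choices)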