def main(_):
    model = ShowAndTellModel(FLAGS.model_path)
    vocab = Vocabulary(FLAGS.vocab_file)
    filenames = _load_filenames()
    can1 = "a table with different kinds of food"
    candidate = can1.split()
    generator = CaptionGenerator(model, vocab)
    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
        captions = generator.beam_search(image)
        print("Captions: ")
        for i, caption in enumerate(captions):
            sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
            sentence = " ".join(sentence)
            temp = "  %d) %s (p=%f)" % (i + 1, sentence,
                                        math.exp(caption.logprob))
            print(temp)
            # sentence_bleu expects a list of reference token lists.
            comp = [sentence.split()]
            # BLEU scores (cumulative 1-gram and 2-gram).
            print('BLEU cumulative 1-gram: %f' %
                  sentence_bleu(comp, candidate, weights=(1, 0, 0, 0)))
            print('BLEU cumulative 2-gram: %f' %
                  sentence_bleu(comp, candidate, weights=(0.5, 0.5, 0, 0)))
            # GLEU score over 1- and 2-grams.
            G = gleu.sentence_gleu(comp, candidate, min_len=1, max_len=2)
            print("GLEU score for this sentence: {}".format(G))
def main(_):
    model = ShowAndTellModel(FLAGS.model_path)
    vocab = Vocabulary(FLAGS.vocab_file)
    filenames = _load_filenames()

    generator = CaptionGenerator(model, vocab)

    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
        captions = generator.beam_search(image)
        print("Captions for image %s:" % os.path.basename(filename))
        for i, caption in enumerate(captions):
            # Ignore begin and end tokens <S> and </S>.
            sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
            sentence = " ".join(sentence)
            print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
def main(_):
    model = ShowAndTellModel(FLAGS.model_path)
    vocab = Vocabulary(FLAGS.vocab_file)
    filenames = _load_filenames()

    generator = CaptionGenerator(model, vocab)

    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
        captions = generator.beam_search(image)
        print("Captions for image %s:" % os.path.basename(filename))
        for i, caption in enumerate(captions):
            # Ignore begin and end tokens <S> and </S>.
            sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
            sentence = " ".join(sentence)
            if i == 0:
                # f1.write("%s \n" % sentence)
                # Strip the extension to get the image id.
                image_id = os.path.basename(filename)
                image_id = image_id[:image_id.find(".")]
                print("this is---", image_id, sentence)
                idcol.append(image_id)
                predol1.append(sentence)
            if i == 1:
                predol2.append(sentence)
            if i == 2:
                predol3.append(sentence)
            if i == 3:
                predol4.append(sentence)
            print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))

    print(idcol, predol1)

    predDF = pd.DataFrame()
    predDF['id'] = idcol
    predDF['pred_caption1'] = predol1
    predDF['pred_caption2'] = predol2
    predDF['pred_caption3'] = predol3
    predDF['pred_caption4'] = predol4

    predDF.to_csv('MpredCapt.csv')
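The column assignments above assume every image produces at least four beam candidates; if any of the predol lists ends up shorter, pandas raises a length-mismatch error. A minimal sketch of a more defensive way to collect the same table, padding missing captions with empty strings (collect_predictions and rows are illustrative names, not from the original):

def collect_predictions(filenames, generator, vocab, num_captions=4):
    rows = []
    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
        captions = generator.beam_search(image)
        sentences = [" ".join(vocab.id_to_token(w) for w in c.sentence[1:-1])
                     for c in captions]
        # Pad so every row has the same number of caption columns.
        sentences += [""] * max(0, num_captions - len(sentences))
        image_id = os.path.splitext(os.path.basename(filename))[0]
        row = {"id": image_id}
        for k in range(num_captions):
            row["pred_caption%d" % (k + 1)] = sentences[k]
        rows.append(row)
    return pd.DataFrame(rows)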
Example #4
def main(input_files_):
    input_files = input_files_
    model = ShowAndTellModel(model_path_)  # tf.app.flags.model_path
    # vocab = Vocabulary(vocab_file_)  # tf.app.flags.vocab_file
    filenames = _load_filenames(input_files_)

    generator = CaptionGenerator(model)  # vocab argument omitted in this variant
    encodings = []
    first = True
    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
            encoding = generator.beam_search(image)
            # Accumulate the per-image encodings into a single array along axis 0.
            if first:
                encodings = encoding
                first = False
            else:
                encodings = np.concatenate((encodings, encoding), axis=0)
    return encodings
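Concatenating inside the loop copies the growing array on every iteration. A minimal sketch of the same accumulation done with a list and a single concatenate at the end; encode_images is an illustrative name, and the generator is assumed to behave as above.

import numpy as np

def encode_images(filenames, generator):
    # Collect each image's encoding, then stack them once at the end.
    per_image = []
    for filename in filenames:
        with tf.gfile.GFile(filename, "rb") as f:
            image = f.read()
        per_image.append(generator.beam_search(image))
    return np.concatenate(per_image, axis=0) if per_image else np.empty((0,))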
def main(_):
    model = ShowAndTellModel(FLAGS.model_path)
    vocab = Vocabulary(FLAGS.vocab_file)
    filenames = _load_filenames()

    generator = CaptionGenerator(model, vocab)
    # Use a raw string so the backslashes in the Windows path are not treated as escape sequences.
    with open(r'C:\BigDataAnalyticsAppns\Tutorial 6 Source Code\medium-show-and-tell-caption-generator-master\etc\pred.txt', 'a') as f1:
        for filename in filenames:
            with tf.gfile.GFile(filename, "rb") as f:
                image = f.read()
            captions = generator.beam_search(image)
            print("Captions for image %s:" % os.path.basename(filename))
            for i, caption in enumerate(captions):
                # Ignore begin and end tokens <S> and </S>.
                sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
                sentence = " ".join(sentence)
                if i == 1:
                    f1.write("%s \n" % sentence)
                    # print("this is---", sentence)
                print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
Example #6
                    file = f.read()
    if file:
        captions = generator.beam_search(file)
        results = []
        for i, caption in enumerate(captions):
            # Ignore begin and end tokens <S> and </S>.
            sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
            sentence = " ".join(sentence)
            # print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
            results.append({
                'i': i,
                'sentence': sentence,
                # Note: the value stored here is the probability, i.e. exp(logprob).
                'logprob': math.exp(caption.logprob)
            })
        return jsonify(results)

    else:
        return 'No file provided', 400


if __name__ == "__main__":
    model_path = os.path.join(current_dir,
                              "../etc/show-and-tell.pb")  # FLAGS.model_path
    vocab_file = os.path.join(current_dir,
                              "../etc/word_counts.txt")  # FLAGS.vocab_file
    model = ShowAndTellModel(model_path)
    vocab = Vocabulary(vocab_file)

    generator = CaptionGenerator(model, vocab)
    # tf.app.run()
    app.run(host='0.0.0.0')
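A minimal client sketch for the server above, assuming the truncated handler is registered at a hypothetical /caption route that accepts a multipart upload under the field name "file"; neither the route nor the field name appears in the snippet.

import requests

def request_captions(image_path, host="http://localhost:5000"):
    # POST the image as a multipart upload and return the JSON caption list.
    with open(image_path, "rb") as f:
        resp = requests.post(host + "/caption", files={"file": f})
    resp.raise_for_status()
    return resp.json()  # list of {"i": ..., "sentence": ..., "logprob": ...}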