    def eval(self, sess, eval_gt_coco, eval_data, vocabulary):
        """ Evaluate the model using the COCO val2014 data. """
        print("Evaluating the model ...")
        config = self.config

        results = []
        if not os.path.exists(config.eval_result_dir):
            os.mkdir(config.eval_result_dir)

        # Generate the captions for the images
        idx = 0
        for k in tqdm(list(range(eval_data.num_batches)), desc='batch'):
            batch = eval_data.next_batch()
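            # beam_search returns, for each image, a list of candidate
            # captions ranked by score; [l][0] below is the best candidate.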
            caption_data = self.beam_search(sess, batch, vocabulary)

            # The last batch is padded with fake_count placeholder entries;
            # skip them when collecting results.
            fake_cnt = 0 if k < eval_data.num_batches - 1 \
                         else eval_data.fake_count
            for l in range(eval_data.batch_size - fake_cnt):
                word_idxs = caption_data[l][0].sentence
                score = caption_data[l][0].score
                caption = vocabulary.get_sentence(word_idxs)
                results.append({
                    'image_id': eval_data.image_ids[idx],
                    'caption': caption
                })
                idx += 1

                # Save the result in an image file, if requested
                if config.save_eval_result_as_image:
                    image_file = batch[l]
                    image_name = image_file.split(os.sep)[-1]
                    image_name = os.path.splitext(image_name)[0]
                    img = plt.imread(image_file)
                    plt.imshow(img)
                    plt.axis('off')
                    plt.title(caption)
                    plt.savefig(
                        os.path.join(config.eval_result_dir,
                                     image_name + '_result.jpg'))

        # COCO's loadRes expects a plain list of {image_id, caption} dicts,
        # so write the results without any wrapper object.
        str_results = []
        for res in results:
            str_results.append({
                'image_id': int(res['image_id']),
                'caption': res['caption']
            })
        with open(config.eval_result_file, 'w') as fp:
            json.dump(str_results, fp)

        # Evaluate these captions
        eval_result_coco = eval_gt_coco.loadRes(config.eval_result_file)
        scorer = COCOEvalCap(eval_gt_coco, eval_result_coco)
        scorer.evaluate()
        print("Evaluation complete.")
Example #2
    def eval(self, sess, eval_gt_coco, eval_data, vocabulary):
        """ Evaluate the model using the COCO val2014 data. """
        print("Evaluating the model ...")
        config = self.config

        results = []
        if not os.path.exists(config.eval_result_dir):
            os.mkdir(config.eval_result_dir)

        # Generate the captions for the images
        idx = 0
        for k in tqdm(list(range(eval_data.num_batches)), desc='batch'):
            batch = eval_data.next_batch()
            images = self.image_loader.load_images(batch)
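            # One decoder run per batch: predictions holds word-index
            # sequences, probs their scores (scores are unused below).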
            caption_data, scores = sess.run([self.predictions, self.probs],
                                            feed_dict={self.images: images})
            fake_cnt = 0 if k < eval_data.num_batches - 1 \
                         else eval_data.fake_count
            for l in range(eval_data.batch_size - fake_cnt):
                # self.predictions yields word indices; map them back to words.
                word_idxs = caption_data[l]
                # get_sentence joins the words up to the end delimiter '.'.
                caption = str(vocabulary.get_sentence(word_idxs))
                results.append({
                    'image_id': int(eval_data.image_ids[idx]),
                    'caption': caption
                })
                idx += 1

                # Save the result in an image file, if requested
                if config.save_eval_result_as_image:
                    image_file = batch[l]
                    image_name = image_file.split(os.sep)[-1]
                    image_name = os.path.splitext(image_name)[0]
                    img = mpimg.imread(image_file)
                    plt.imshow(img)
                    plt.axis('off')
                    plt.title(caption)
                    plt.savefig(
                        os.path.join(config.eval_result_dir,
                                     image_name + '_result.jpg'))

        with open(config.eval_result_file, 'w') as fp:
            json.dump(results, fp)

        # Evaluate these captions
        eval_result_coco = eval_gt_coco.loadRes(config.eval_result_file)
        scorer = COCOEvalCap(eval_gt_coco, eval_result_coco)
        scorer.evaluate()
        print("Evaluation complete.")
Example #3
    def eval(self, sess, eval_data):
        """ Evaluate the model using the COCO val2014 data. """
        print("Evaluating the model ...")
        config = self.config

        results = []
        if not os.path.exists(config.eval_result_dir):
            os.mkdir(config.eval_result_dir)

        # Load the trained weights before decoding.
        self.restore_model(sess)

        idx = 0
        eval_epochs = 500  # cap on the number of batches to evaluate
        for k in tqdm(list(range(min(eval_epochs, eval_data.num_batches))),
                      desc='batch'):
            image_files = eval_data.next_batch()
            # Extract conv features for this batch; this helper is referenced
            # (commented out) in Example #4 below.
            conv_features = self.get_imagefeatures(image_files,
                                                   config.batch_size)

            caption_data = self.generator.eval(sess, conv_features)
            caption_data = np.squeeze(caption_data)

            fake_cnt = 0 if k < eval_data.num_batches - 1 \
                         else eval_data.fake_count
            for l in range(eval_data.batch_size - fake_cnt):
                # caption_data holds word indices; map them back to words.
                word_idxs = caption_data[l]
                # get_sentence joins the words up to the end delimiter '.'.
                caption = str(eval_data.vocabulary.get_sentence(word_idxs))
                print(caption)
                results.append({
                    'image_id': int(eval_data.image_ids[idx]),
                    'caption': caption
                })
                idx += 1

                # Save the result in an image file, if requested
                if config.save_eval_result_as_image:
                    image_file = image_files[l]
                    image_name = image_file.split(os.sep)[-1]
                    image_name = os.path.splitext(image_name)[0]
                    img = mpimg.imread(image_file)
                    plt.imshow(img)
                    plt.axis('off')
                    plt.title(caption)
                    plt.savefig(
                        os.path.join(config.eval_result_dir,
                                     image_name + '_result.jpg'))

        with open(config.eval_result_file, 'w') as fp:
            json.dump(results, fp)

        # Evaluate these captions
        eval_result_coco = eval_data.coco.loadRes(config.eval_result_file)
        scorer = COCOEvalCap(eval_data.coco, eval_result_coco)
        scorer.evaluate()
        print("Evaluation complete.")
Example #4
    def eval_old(self, sess, eval_data):
        """ Evaluate the model using the COCO val2014 data. """
        print("Evaluating the model ...")
        config = self.config

        results = []
        if not os.path.exists(config.eval_result_dir):
            os.mkdir(config.eval_result_dir)

        # Load the trained weights before decoding.
        self.restore_model(sess)

        # Generate the captions for the images
        vgg_dir = 'D:/download/art_desc/train/images_vgg/'
        # Each inner list below holds one precomputed VGG feature array
        # (batch size 1).
        input_batches = [[np.load(vgg_dir + 'art_desc1.npy')],
                         [np.load(vgg_dir + 'art_desc2.npy')],
                         [np.load(vgg_dir + 'art_desc3.npy')],
                         [np.load(vgg_dir + 'art_desc4.npy')],
                         [np.load(vgg_dir + 'art_desc6.npy')],
                         [np.load(vgg_dir + 'art_desc7.npy')],
                         [np.load(vgg_dir + 'art_desc8.npy')],
                         [np.load(vgg_dir + 'art_desc9.npy')]]

        conv_features = np.squeeze(input_batches)
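        # input_batches is a list of eight single-image feature arrays;
        # squeeze drops the singleton dimension, giving one (8, ...) array.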

        target_batches = [
            [
                'The close range of this photograph of peeling paint precludes the viewer from gaining any foothold into the space of the picture, emphasizing its ultimate flatness. Siskind was especially drawn to surfaces that resembled the canvases of the Abstract Expressionist painters, with whom he was friends.'
            ],
            [
                'Metal Hook is one of Siskind\'s first photographs that truly focuses on the abstract visual language of ordinary objects.  The flatness of the image as a whole also serves to assert the graphic quality of the metal hook itself as a sign/symbol for male and female, thus suggesting a level of content in addition to that of form.'
            ],
            [
                'One of Siskind\'s later works, Recife (Olinda) 8 was taken during his travels in Northeastern Brazil.  The result is that we are forced to remain as viewers attached to the abstract surface - noting with pleasure the additional details of age, texture, misaligned lines, and accidental drips.'
            ],
            [
                'Siskind\'s first pictures show a decidedly more straightforward approach to picture making than the later work for which he became known. Although the male figure is a specific individual and technically the focal point, he is flattened in his own reflection against the back wall, pressed into the service of the overall design of the photograph.'
            ],
            [
                'The Blue Series followed the Red Series of paintings and this is one of its most successful examples. The rectangular shapes of various shades of blue and green are suspended within a resplendent azure surface.'
            ],
            [
                'This is one of the paintings belonging to the Red Series. Here the artist immersed himself completely into the exploration of the color red, one of the most expressive among the primary colors.'
            ],
            [
                'In this famous cartoon of 1946 Ad Reinhardt tried to encapsulate the essence of the artistic modernism with its history and inherent conflicts within the American context. The tree of modern art has its roots deep in history - the Greeks are here, and so are Persian miniatures and Japanese prints.'
            ],
            [
                'This early composition by Ad Reinhardt exhibits the artist\'s profound interest and understanding of the Cubist art of Pablo Picasso and George Braque. The palette is typical of the style and is comprised of four colors essential for a Cubist painting: black, white, brown, and gray.'
            ]
        ]
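
        # Reference descriptions for the eight feature files above; they are
        # kept for comparison and not consumed by the code below.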

        idx = 0
        eval_epochs = 500  # cap on the number of batches to evaluate
        for k in tqdm(list(range(min(eval_epochs, eval_data.num_batches))),
                      desc='batch'):
            image_files = eval_data.next_batch()
            # Alternative: recompute features per batch instead of reusing the
            # fixed conv_features loaded above:
            #conv_features = self.get_imagefeatures(image_files, config.batch_size)
            caption_data = self.generator.eval(sess, conv_features)
            caption_data = np.squeeze(caption_data)

            fake_cnt = 0 if k < eval_data.num_batches - 1 \
                         else eval_data.fake_count
            for l in range(eval_data.batch_size - fake_cnt):
                # caption_data holds word indices; map them back to words.
                word_idxs = caption_data[l]
                # get_sentence joins the words up to the end delimiter '.'.
                caption = str(eval_data.vocabulary.get_sentence(word_idxs))
                print(caption)
                results.append({
                    'image_id': int(eval_data.image_ids[idx]),
                    'caption': caption
                })
                idx += 1

                # Save the result in an image file, if requested
                if config.save_eval_result_as_image:
                    image_file = image_files[l]
                    image_name = image_file.split(os.sep)[-1]
                    image_name = os.path.splitext(image_name)[0]
                    img = mpimg.imread(image_file)
                    plt.imshow(img)
                    plt.axis('off')
                    plt.title(caption)
                    plt.savefig(
                        os.path.join(config.eval_result_dir,
                                     image_name + '_result.jpg'))

        with open(config.eval_result_file, 'w') as fp:
            json.dump(results, fp)

        # Evaluate these captions
        eval_result_coco = eval_data.coco.loadRes(config.eval_result_file)
        scorer = COCOEvalCap(eval_data.coco, eval_result_coco)
        scorer.evaluate()
        print("Evaluation complete.")