def batch_predict(self, images):
    """Recognize the text in a batch of images.

    param: images : list of ndarray
    Returns a list of decoded strings, one per input image.
    """
    batch = self.batch_process(images)
    batch = batch.to(self.config['device'])

    if self.config['predictor']['beamsearch']:
        # BUG FIX: the original assigned `sent`/`s` here but then called
        # batch_decode(sents) with `sents` undefined -> NameError whenever
        # beam search was enabled. Wrap the beam-search output so that
        # batch_decode always receives a list of token sequences.
        # NOTE(review): assumes translate_beam_search returns a single
        # token-id sequence — confirm against its definition.
        sent = translate_beam_search(batch, self.model)
        sents = [sent]
    else:
        sents = translate(batch, self.model).tolist()

    sequences = self.vocab.batch_decode(sents)
    return sequences
def predict(self, img):
    """Recognize the text in a single image and return the decoded string."""
    cfg = self.config
    # Resize/normalize to the model's expected geometry, then move to device.
    tensor = process_input(
        img,
        cfg['dataset']['image_height'],
        cfg['dataset']['image_min_width'],
        cfg['dataset']['image_max_width'],
    )
    tensor = tensor.to(cfg['device'])

    # Beam search yields a token sequence directly; greedy translate
    # returns a batch, from which we take the first (only) sample.
    if cfg['predictor']['beamsearch']:
        token_ids = translate_beam_search(tensor, self.model)
    else:
        token_ids = translate(tensor, self.model)[0].tolist()

    return self.vocab.decode(token_ids)
def predict(self, img):
    """Recognize the text in a single image and return the decoded string."""
    # Preprocess, add a batch dimension, and convert to a float tensor.
    arr = self.preprocess_input(img)
    arr = np.expand_dims(arr, axis=0)
    tensor = torch.FloatTensor(arr).to(self.config['device'])

    # Beam search yields a token sequence directly; greedy translate
    # returns a batch, from which we take the first (only) sample.
    if self.config['predictor']['beamsearch']:
        token_ids = translate_beam_search(tensor, self.model)
    else:
        token_ids = translate(tensor, self.model)[0].tolist()

    return self.vocab.decode(token_ids)
def predict(self, img, return_prob=False):
    """Recognize the text in a single image.

    Returns the decoded string, or ``(string, prob)`` when
    ``return_prob`` is true. ``prob`` is ``None`` under beam search,
    which does not report a confidence score.
    """
    cfg = self.config
    tensor = process_input(
        img,
        cfg['dataset']['image_height'],
        cfg['dataset']['image_min_width'],
        cfg['dataset']['image_max_width'],
    )
    tensor = tensor.to(cfg['device'])

    if cfg['predictor']['beamsearch']:
        token_ids = translate_beam_search(tensor, self.model)
        prob = None
    else:
        batch_ids, batch_probs = translate(tensor, self.model)
        token_ids = batch_ids[0].tolist()
        prob = batch_probs[0]

    text = self.vocab.decode(token_ids)
    return (text, prob) if return_prob else text
def predict(self, img, return_prob=False):
    """Recognize the text in a single image.

    Returns the decoded string, or ``(string, prob)`` when
    ``return_prob`` is true. ``prob`` is ``None`` under beam search,
    which does not report a confidence score.
    """
    # Preprocess, add a batch dimension, and convert to a float tensor.
    arr = self.preprocess_input(img)
    arr = np.expand_dims(arr, axis=0)
    tensor = torch.FloatTensor(arr).to(self.config['device'])

    if self.config['predictor']['beamsearch']:
        token_ids = translate_beam_search(tensor, self.model)
        prob = None
    else:
        batch_ids, batch_probs = translate(tensor, self.model)
        token_ids = batch_ids[0].tolist()
        prob = batch_probs[0]

    text = self.vocab.decode(token_ids)
    return (text, prob) if return_prob else text