def next_batch(self, batch_size=64, pad=0, raw=False,
               tokenizer=('spacy', 'split', 'split'), one_hot=False):
    # tokenizer: one tokenizer name per field (sentences, pos, ner).
    # A tuple is used rather than a list to avoid a mutable default argument.
    if self._index_in_epoch + batch_size > len(self.data):
        # Epoch boundary: take the remaining tail, reshuffle, and wrap
        # around to fill the batch from the start of the new epoch.
        samples = self.data[self._index_in_epoch:len(self.data)]
        random.shuffle(self.data)
        missing_samples = batch_size - (len(self.data) - self._index_in_epoch)
        self._epochs_completed += 1
        samples.extend(self.data[0:missing_samples])
        self._index_in_epoch = missing_samples
    else:
        samples = self.data[self._index_in_epoch:
                            self._index_in_epoch + batch_size]
        self._index_in_epoch += batch_size

    data = list(zip(*samples))
    sentences, pos, ner = data[0], data[1], data[2]

    # Generate sequences
    sentences = self.generate_sequences(sentences, tokenizer[0])
    pos = self.generate_sequences(pos, tokenizer[1])
    ner = self.generate_sequences(ner, tokenizer[2])
    lengths = [len(s) if pad == 0 else min(pad, len(s)) for s in sentences]

    if raw:
        return self.Batch(sentences=sentences, pos=pos, ner=ner,
                          lengths=lengths)

    sentences = datasets.padseq(
        datasets.seq2id(sentences, self.vocab_w2i[0]), pad)
    pos = datasets.padseq(datasets.seq2id(pos, self.vocab_w2i[1]), pad)
    ner = datasets.padseq(datasets.seq2id(ner, self.vocab_w2i[2]), pad)

    if one_hot:
        ner = [to_categorical(n, nb_classes=len(self.vocab_w2i[2]))
               for n in ner]

    return self.Batch(sentences=sentences, pos=pos, ner=ner, lengths=lengths)
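# Usage sketch (illustrative, not part of the API): how a training loop might
# consume the POS/NER batches above. `ds` stands for an instance of the
# enclosing dataset class, whose name is not shown in this module.
def _demo_pos_ner_batches(ds, steps=10):
    for _ in range(steps):
        batch = ds.next_batch(batch_size=32, pad=50, one_hot=True)
        # sentences/pos are id sequences padded to length 50; each ner row
        # is a sequence of one-hot vectors over the NER tag vocabulary.
        print(batch.lengths[:3])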
def get_similarity_siamese(s1_text, s2_text, model_siam, sess):
    # Encode both sentences to vocabulary ids and pad them to the fixed
    # sequence length the siamese model expects.
    s1_encoded, s2_encoded = get_sents_encoded(s1_text, s2_text)
    s1 = datasets.padseq([s1_encoded], pad=30)
    s2 = datasets.padseq([s2_encoded], pad=30)
    feed_dict = {
        model_siam.input_s1: s1,
        model_siam.input_s2: s2,
        # Dummy gold similarity: only the distance op is evaluated, but the
        # placeholder still has to be fed.
        model_siam.input_sim: [0.0],
    }
    ops = [model_siam.distance]
    sim = sess.run(ops, feed_dict)
    return sim
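# Usage sketch (illustrative): scoring one sentence pair with a trained
# siamese model. Building the model and session is assumed to happen
# elsewhere; only the call pattern is shown here.
def _demo_similarity(sess, model_siam):
    sim = get_similarity_siamese('a man is playing a guitar',
                                 'someone plays an instrument',
                                 model_siam, sess)
    # sess.run returns one result per requested op, so the distance is sim[0].
    print('distance:', sim[0])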
def next_batch(self, batch_size=64, seq_begin=False, seq_end=False,
               rescale=(0.0, 1.0), pad=0, raw=False, keep_entities=False):
    if not self.datafile:
        raise Exception('The dataset needs to be open before being used. '
                        'Please call dataset.open() before calling '
                        'dataset.next_batch()')
    datasets.validate_rescale(rescale)

    s1s, s2s, sims = [], [], []
    while len(s1s) < batch_size:
        row = self.datafile.readline()
        if row == '':
            # End of file: count the epoch and start over from the top.
            self._epochs_completed += 1
            self.datafile.seek(0)
            continue
        cols = row.strip().split('\t')
        s1, s2, sim = cols[0], cols[1], float(cols[2])
        # TODO: convert to dependency tree
        s1, s2 = s1.split(' '), s2.split(' ')
        s1s.append(s1)
        s2s.append(s2)
        sims.append(sim)

    if not keep_entities:
        s1s = self.remove_entities(s1s)
        s2s = self.remove_entities(s2s)

    if not raw:
        s1s = datasets.seq2id(s1s[:batch_size], self.vocab_w2i, seq_begin,
                              seq_end)
        s2s = datasets.seq2id(s2s[:batch_size], self.vocab_w2i, seq_begin,
                              seq_end)
    else:
        s1s = datasets.append_seq_markers(s1s[:batch_size], seq_begin,
                                          seq_end)
        s2s = datasets.append_seq_markers(s2s[:batch_size], seq_begin,
                                          seq_end)

    if pad != 0:
        s1s = datasets.padseq(s1s, pad, raw)
        s2s = datasets.padseq(s2s, pad, raw)

    return self.Batch(
        s1=s1s, s2=s2s,
        sim=datasets.rescale(sims[:batch_size], rescale, (0.0, 1.0)))
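# Usage sketch (illustrative): pulling sentence-pair batches for a similarity
# model. `ds` is an instance of the enclosing dataset class; the default
# (0.0, 1.0) rescale keeps the gold scores in their original range.
def _demo_pair_batches(ds):
    ds.open()
    batch = ds.next_batch(batch_size=16, pad=30)
    print(len(batch.s1), len(batch.s2), batch.sim[:3])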
def next_batch(self, batch_size=64, seq_begin=False, seq_end=False, pad=0,
               raw=False, mark_entities=False, tokenizer='spacy',
               one_hot=False):
    if not self.datafile:
        raise Exception('The dataset needs to be open before being used. '
                        'Please call dataset.open() before calling '
                        'dataset.next_batch()')

    text, emotion = [], []
    while len(text) < batch_size:
        row = self.datafile.readline()
        if row == '':
            # End of file: count the epoch and start over from the top.
            self._epochs_completed += 1
            self.datafile.seek(0)
            continue
        cols = row.strip().split('\t')
        try:
            tweet, emo = cols[0], int(cols[1])
        except (IndexError, ValueError):
            print('Invalid data instance. Skipping line.')
            continue
        text.append(datasets.tokenize(tweet, tokenizer))
        emotion.append(emo)

    if one_hot:
        emotion = to_categorical(emotion, nb_classes=self.n_classes)
    if mark_entities:
        text = datasets.mark_entities(text, lang='en')

    if not raw:
        text = datasets.seq2id(text[:batch_size], self.vocab_w2i, seq_begin,
                               seq_end)
    else:
        text = datasets.append_seq_markers(text[:batch_size], seq_begin,
                                           seq_end)
    if pad != 0:
        text = datasets.padseq(text[:batch_size], pad, raw)

    return self.Batch(text=text, emotion=emotion)
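# Usage sketch (illustrative): one-hot emotion batches for a classifier.
# `ds` is an instance of the enclosing dataset class.
def _demo_emotion_batches(ds):
    ds.open()
    batch = ds.next_batch(batch_size=32, pad=40, one_hot=True)
    # batch.emotion is a (32, n_classes) one-hot matrix here.
    print(len(batch.text), len(batch.emotion))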
def next_batch(self, batch_size=64, format='one_hot', rescale=None, pad=0,
               raw=False, tokenizer='spacy'):
    # format: either 'one_hot' or 'numerical'
    # rescale: only valid for 'numerical' labels; a (min, max) tuple giving
    # the target range.
    if format == 'one_hot' and rescale is not None:
        raise ValueError('rescale cannot be used with one_hot labels')

    if self._index_in_epoch + batch_size > len(self.data):
        # Epoch boundary: take the remaining tail, reshuffle, and wrap
        # around to fill the batch from the start of the new epoch.
        samples = self.data[self._index_in_epoch:len(self.data)]
        random.shuffle(self.data)
        missing_samples = batch_size - (len(self.data) - self._index_in_epoch)
        self._epochs_completed += 1
        samples.extend(self.data[0:missing_samples])
        self._index_in_epoch = missing_samples
    else:
        samples = self.data[self._index_in_epoch:
                            self._index_in_epoch + batch_size]
        self._index_in_epoch += batch_size

    x, y = zip(*samples)

    # Generate sequences
    x = self.generate_sequences(x, tokenizer)
    lens = [len(s) if pad == 0 else min(pad, len(s)) for s in x]

    if raw:
        return self.Batch(x=x, y=y, lengths=lens)

    if format == 'one_hot':
        y = to_categorical(y, nb_classes=3)
    if rescale is not None:
        datasets.validate_rescale(rescale)
        y = datasets.rescale(y, rescale, (0.0, 2.0))

    return self.Batch(x=datasets.padseq(datasets.seq2id(x, self.vocab_w2i),
                                        pad),
                      y=y, lengths=lens)
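# Usage sketch (illustrative): the two label formats supported above.
# `ds` is an instance of the enclosing dataset class.
def _demo_label_formats(ds):
    # 3-class one-hot labels (the default).
    one_hot_batch = ds.next_batch(batch_size=32, pad=50)
    # Numerical labels rescaled from their native (0.0, 2.0) range to (0, 1).
    scaled_batch = ds.next_batch(batch_size=32, pad=50, format='numerical',
                                 rescale=(0.0, 1.0))
    print(one_hot_batch.y[0], scaled_batch.y[0])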
def next_batch(self, batch_size=64, seq_begin=False, seq_end=False,
               rescale=None, pad=0, raw=False, mark_entities=False,
               tokenizer='spacy', sentence_pad=0, one_hot=False):
    if not self.datafile:
        raise Exception('The dataset needs to be open before being used. '
                        'Please call dataset.open() before calling '
                        'dataset.next_batch()')

    text, sentences, ratings, titles, lengths = [], [], [], [], []
    while len(text) < batch_size:
        row = self.datafile.readline()
        if row == '':
            # End of file: count the epoch and move on to the next shard.
            self._epochs_completed += 1
            self.close()
            self.datafile = open(self.path_list[self._epochs_completed %
                                                len(self.path_list)])
            continue
        json_obj = json.loads(row.strip())
        text.append(datasets.tokenize(json_obj["review_text"], tokenizer))
        lengths.append(len(text[-1]))
        sentences.append(datasets.sentence_tokenizer(json_obj["review_text"]))
        ratings.append(int(json_obj["review_rating"]))
        titles.append(datasets.tokenize(json_obj["review_header"], tokenizer))

    if rescale is not None and not one_hot:
        ratings = datasets.rescale(ratings, rescale, [1.0, 5.0])
    elif rescale is None and one_hot:
        # Ratings are 1-5; shift to 0-4 before one-hot encoding.
        ratings = to_categorical([x - 1 for x in ratings], nb_classes=5)
    elif rescale is not None and one_hot:
        raise ValueError('rescale and one_hot cannot be set together')

    if mark_entities:
        text = datasets.mark_entities(text, lang='de')
        titles = datasets.mark_entities(titles, lang='de')
        sentences = [datasets.mark_entities(sentence, lang='de')
                     for sentence in sentences]

    if not raw:
        text = datasets.seq2id(text[:batch_size], self.vocab_w2i, seq_begin,
                               seq_end)
        titles = datasets.seq2id(titles[:batch_size], self.vocab_w2i,
                                 seq_begin, seq_end)
        sentences = [datasets.seq2id(sentence, self.vocab_w2i, seq_begin,
                                     seq_end)
                     for sentence in sentences[:batch_size]]
    else:
        text = datasets.append_seq_markers(text[:batch_size], seq_begin,
                                           seq_end)
        titles = datasets.append_seq_markers(titles[:batch_size], seq_begin,
                                             seq_end)
        sentences = [datasets.append_seq_markers(sentence, seq_begin, seq_end)
                     for sentence in sentences[:batch_size]]

    if pad != 0:
        text = datasets.padseq(text[:batch_size], pad, raw)
        titles = datasets.padseq(titles[:batch_size], pad, raw)
        sentences = [datasets.padseq(sentence, pad, raw)
                     for sentence in sentences[:batch_size]]
    if sentence_pad != 0:
        sentences = [datasets.pad_sentences(sentence, sentence_pad, raw)
                     for sentence in sentences[:batch_size]]

    return self.Batch(text=text, sentences=sentences, ratings=ratings,
                      titles=titles, lengths=lengths)
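# Usage sketch (illustrative): review batches padded at both the word and the
# sentence level. `ds` is an instance of the enclosing dataset class.
def _demo_review_batches(ds):
    ds.open()
    batch = ds.next_batch(batch_size=16, pad=100, sentence_pad=10,
                          one_hot=True)
    print(len(batch.text), len(batch.sentences), batch.ratings[0])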
def next_batch(self, batch_size=64, seq_begin=False, seq_end=False,
               rescale=None, pad=0, raw=False, mark_entities=False,
               tokenizer='spacy', sentence_pad=0, one_hot=False):
    if not self.datafile:
        raise Exception('The dataset needs to be open before being used. '
                        'Please call dataset.open() before calling '
                        'dataset.next_batch()')

    text, sentences, titles, helpful_votes, lengths = [], [], [], [], []
    rating_keys = ['service', 'cleanliness', 'overall', 'value',
                   'sleep_quality', 'rooms']
    ratings = {key: [] for key in rating_keys}
    while len(text) < batch_size:
        row = self.datafile.readline()
        if row == '':
            # End of file: count the epoch and move on to the next shard.
            self._epochs_completed += 1
            self.close()
            self.datafile = open(self.path_list[self._epochs_completed %
                                                len(self.path_list)])
            continue
        json_obj = json.loads(row.strip())
        text.append(datasets.tokenize(json_obj["text"], tokenizer))
        lengths.append(len(text[-1]))
        sentences.append(datasets.sentence_tokenizer(json_obj["text"]))
        # Missing aspect ratings fall back to the overall rating.
        for key in rating_keys:
            ratings[key].append(int(json_obj["ratings"].get(
                key, json_obj["ratings"]["overall"])))
        helpful_votes.append(json_obj["num_helpful_votes"])
        titles.append(datasets.tokenize(json_obj["title"], tokenizer))

    if rescale is not None and not one_hot:
        ratings = {key: datasets.rescale(r, rescale, [1.0, 5.0])
                   for key, r in ratings.items()}
    elif rescale is None and one_hot:
        # Ratings are 1-5; shift to 0-4 before one-hot encoding.
        ratings = {key: to_categorical([x - 1 for x in r], nb_classes=5)
                   for key, r in ratings.items()}
    elif rescale is not None and one_hot:
        raise ValueError('rescale and one_hot cannot be set together')

    if mark_entities:
        text = datasets.mark_entities(text)
        titles = datasets.mark_entities(titles)
        sentences = [datasets.mark_entities(sentence)
                     for sentence in sentences]

    if not raw:
        text = datasets.seq2id(text[:batch_size], self.vocab_w2i, seq_begin,
                               seq_end)
        titles = datasets.seq2id(titles[:batch_size], self.vocab_w2i,
                                 seq_begin, seq_end)
        sentences = [datasets.seq2id(sentence, self.vocab_w2i, seq_begin,
                                     seq_end)
                     for sentence in sentences[:batch_size]]
    else:
        text = datasets.append_seq_markers(text[:batch_size], seq_begin,
                                           seq_end)
        titles = datasets.append_seq_markers(titles[:batch_size], seq_begin,
                                             seq_end)
        sentences = [datasets.append_seq_markers(sentence, seq_begin, seq_end)
                     for sentence in sentences[:batch_size]]

    if pad != 0:
        text = datasets.padseq(text[:batch_size], pad, raw)
        titles = datasets.padseq(titles[:batch_size], pad, raw)
        sentences = [datasets.padseq(sentence, pad, raw)
                     for sentence in sentences[:batch_size]]
    if sentence_pad != 0:
        sentences = [datasets.pad_sentences(sentence, sentence_pad, raw)
                     for sentence in sentences[:batch_size]]

    return self.Batch(text=text, sentences=sentences,
                      ratings_service=ratings['service'],
                      ratings_cleanliness=ratings['cleanliness'],
                      ratings=ratings['overall'],
                      ratings_value=ratings['value'],
                      ratings_sleep_quality=ratings['sleep_quality'],
                      ratings_rooms=ratings['rooms'],
                      titles=titles,
                      helpful_votes=helpful_votes,
                      lengths=lengths)
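# Usage sketch (illustrative): the multi-aspect hotel-review batches above,
# with all six ratings rescaled from 1-5 to the unit interval. `ds` is an
# instance of the enclosing dataset class.
def _demo_hotel_batches(ds):
    ds.open()
    batch = ds.next_batch(batch_size=16, pad=200, rescale=(0.0, 1.0))
    print(batch.ratings[:3], batch.ratings_service[:3],
          batch.helpful_votes[:3])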