Example #1
def load_data():
    trustworthy_hotel_positive = read_from_dir('deception_dataset/hotel/positive/truthful/') 
    trustworthy_hotel_negative = read_from_dir('deception_dataset/hotel/negative/truthful/')
    trustworthy_restaurant = read_from_dir('deception_dataset/restaurant/truthful/')
    trustworthy_doctor = read_from_dir('deception_dataset/doctor/truthful/')

    untrustworthy_hotel_positive = read_from_dir('deception_dataset/hotel/positive/deceptive_turker/')
    untrustworthy_hotel_negative = read_from_dir('deception_dataset/hotel/negative/deceptive_turker/') 
    untrustworthy_hotel_expert_positive =  read_from_dir('deception_dataset/hotel/positive/deceptive_expert/')
    untrustworthy_hotel_expert_negative =  read_from_dir('deception_dataset/hotel/negative/deceptive_expert/')
    
    untrustworthy_restaurant = read_from_dir('deception_dataset/restaurant/deceptive_MTurk/')
    untrustworthy_doctor = read_from_dir('deception_dataset/doctor/deceptive_MTurk/')

    TRUSTWORTHY_REVIEWS_DICT = {
        "hotel":trustworthy_hotel_positive + trustworthy_hotel_negative,
        "restaurant":trustworthy_restaurant,
        "doctor":trustworthy_doctor
    }

    UNTRUSTWORTHY_REVIEWS_DICT = { 
        "hotel":untrustworthy_hotel_positive + untrustworthy_hotel_negative,
        "restaurant":untrustworthy_restaurant,
        "doctor":untrustworthy_doctor
    }

    TRUSTWORTHY_REVIEWS  = trustworthy_hotel_positive   + trustworthy_hotel_negative   + trustworthy_restaurant   + trustworthy_doctor
    UNTRUSTWORTHY_REVIEWS = untrustworthy_hotel_positive + untrustworthy_hotel_negative + untrustworthy_restaurant + untrustworthy_doctor
    VOCAB, VOCAB_INV =  data_helpers.build_vocab(TRUSTWORTHY_REVIEWS + UNTRUSTWORTHY_REVIEWS, vocab_size=30001)

    return TRUSTWORTHY_REVIEWS_DICT, UNTRUSTWORTHY_REVIEWS_DICT, VOCAB, VOCAB_INV
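Most of the examples on this page call a build_vocab helper that returns a word-to-index dictionary plus its inverse list (Example #16 uses a variant that also returns an embedding matrix). The helper itself is not shown here; the following is only a minimal sketch of the common pattern, where capping at the vocab_size most frequent tokens and reserving index 0 for a <PAD/> token are assumptions, not the confirmed behaviour of data_helpers:

from collections import Counter
from itertools import chain

def build_vocab(tokenized_texts, vocab_size=None):
    # Sketch only: count tokens, keep the most frequent ones, and build
    # word -> index and index -> word mappings. The real data_helpers.build_vocab
    # may order, cap, or pad the vocabulary differently.
    counts = Counter(chain.from_iterable(tokenized_texts))
    limit = None if vocab_size is None else vocab_size - 1
    vocabulary_inv = ["<PAD/>"] + [word for word, _ in counts.most_common(limit)]
    vocabulary = {word: idx for idx, word in enumerate(vocabulary_inv)}
    return vocabulary, vocabulary_inv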
Example #2
def get_cross_domain_dataset(test_keys=[]):
    train_keys = ["hotel"]
    train_data, train_labels = get_data(keys=train_keys, name="cross_domain")
    test_data,  test_labels  = get_data(keys=test_keys, name="cross_domain")
    vocab, vocab_inv = data_helpers.build_vocab(np.vstack((train_data,test_data)), vocab_size=30001)

    return train_data, train_labels, test_data, test_labels, vocab, vocab_inv
Example #3
def preencode(df):
    sentences = make_text_matrix(df)
    tokenized = [x.split() for x in sentences['text'].values]
    labels = sentences['target'].values
    sentences_padded = pad_sentences(tokenized)
    vocabulary, vocabulary_inv = build_vocab(sentences_padded)
    x, y = build_input_data(sentences_padded, labels, vocabulary)
    return x, y, vocabulary, vocabulary_inv
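Example #4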
def load_need_data(dataset: str):
    """
    Loads and preprocesses data for the dataset.
    Returns input vectors, labels, vocabulary, and inverse vocabulary.
    """
    # Load and preprocess data
    sentences, labels = load_need_and_labels(dataset=dataset)
    sentences_padded = sentences  # don't have to pad sentences
    # sentences_padded = pad_sentences(sentences)
    vocabulary, vocabulary_inv = build_vocab(sentences_padded)

    [x, y], label_voc, label_voc_inv = build_need_input_data(
        sentences_padded, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv, label_voc, label_voc_inv]
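Example #5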
    def __init__(self,
                 positive_file=real_T_file_,
                 negative_file=real_U_file_,
                 fold=FOLD,
                 is_test_data=False):  # change real_U_file to fake_U_file
        super(Data, self).__init__()
        self.fold = fold

        ###### BEGIN #####
        # load data
        trustworthy_reviews_for_training, trustworthy_reviews_for_testing, untrustworthy_reviews_for_training, untrustworthy_reviews_for_testing = load_data(
            fold=fold)
        train = trustworthy_reviews_for_training + untrustworthy_reviews_for_training
        test = trustworthy_reviews_for_testing + untrustworthy_reviews_for_testing

        # generate labels
        train_labels1 = [[1, 0]
                         for _ in range(len(trustworthy_reviews_for_training))]
        train_labels0 = [
            [0, 1] for _ in range(len(untrustworthy_reviews_for_training))
        ]
        test_labels1 = [[1, 0]
                        for _ in range(len(trustworthy_reviews_for_testing))]
        test_labels0 = [[0, 1]
                        for _ in range(len(untrustworthy_reviews_for_testing))]

        train_labels = np.array(train_labels1 + train_labels0)
        test_labels = np.array(test_labels1 + test_labels0)

        # convert word2idx
        vocabulary, vocabulary_inv = data_helpers.build_vocab(train + test,
                                                              vocab_size=30001)
        train = torch.as_tensor(
            data_helpers.build_input_data(train, vocabulary))
        test = torch.as_tensor(data_helpers.build_input_data(test, vocabulary))

        assert train.shape[0] == train_labels.shape[0]
        assert test.shape[0] == test_labels.shape[0]

        if not is_test_data:
            self.data = train
            self.labels = train_labels
        else:
            self.data = test
            self.labels = test_labels
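Assuming the class this constructor belongs to is a torch Dataset (named Data, as the constructor suggests) and also defines __len__ and __getitem__ elsewhere, a purely illustrative way to consume it is through a DataLoader:

from torch.utils.data import DataLoader

# Hypothetical usage; Data, FOLD and the *_file_ constants come from the example above,
# and the class is assumed to return (input, label) pairs from __getitem__.
train_set = Data(fold=1, is_test_data=False)
test_set = Data(fold=1, is_test_data=True)

train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = DataLoader(test_set, batch_size=64, shuffle=False)

for batch_inputs, batch_labels in train_loader:
    # batch_inputs: word-index tensors, batch_labels: one-hot labels of shape (batch, 2)
    pass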
Example #6
def get_mix_domain_dataset(fold=1):
    keys = ["hotel", "restaurant", "doctor"]
    data, labels = get_data(keys=keys, name="mix_domain")

    MAX_FOLD = 5
    LEN_DATA = len(data)
    DELTA = int(LEN_DATA / MAX_FOLD)
    start_idx = (fold-1)*DELTA
    end_idx   = start_idx + DELTA

    # training split: everything outside the held-out fold [start_idx, end_idx)
    train_data   = np.concatenate((data[0:start_idx, :],  data[end_idx:, :]))
    train_labels = np.concatenate((labels[0:start_idx, :], labels[end_idx:, :]))

    test_data   = data[start_idx:end_idx]
    test_labels = labels[start_idx:end_idx]

    vocab, vocab_inv = data_helpers.build_vocab(data, vocab_size=30001)

    return train_data, train_labels, test_data, test_labels, vocab, vocab_inv
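A hypothetical 5-fold usage loop over this helper, assuming the 1-based fold numbering used above:

for fold in range(1, 6):
    train_data, train_labels, test_data, test_labels, vocab, vocab_inv = get_mix_domain_dataset(fold=fold)
    print("fold %d: %d train / %d test examples" % (fold, len(train_data), len(test_data)))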
Example #7
    def __init__(self,
                 train_keys=[],
                 test_keys=[]):  # change real_U_file to fake_U_file
        super(MixedDomainDataset, self).__init__()

        trustworthy_reviews, untrustworthy_reviews = load_data()
        reviews = trustworthy_reviews + untrustworthy_reviews

        # generate labels
        labels_trustworthy = [[1, 0] for _ in range(len(trustworthy_reviews))]
        labels_untrustworthy = [[0, 1]
                                for _ in range(len(untrustworthy_reviews))]

        self.labels = np.array(labels_trustworthy + labels_untrustworthy)

        # convert word2idx
        vocabulary, vocabulary_inv = data_helpers.build_vocab(
            trustworthy_reviews + untrustworthy_reviews, vocab_size=30001)
        self.data = torch.as_tensor(
            data_helpers.build_input_data(reviews, vocabulary))

        print("data len: ", self.data.shape[0])
        print("labels len: ", self.labels.shape[0])
Example #8
# Define Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("sentence", "the movie was bad", "sentence to classify")

FLAGS = tf.flags.FLAGS

#######################################################################################################################
# process the raw sentence
new_review = data_helpers.clean_senetnce(FLAGS.sentence)

# load vocabulary
sentences, _ = data_helpers.load_data_and_labels()
sequence_length = max(len(x) for x in sentences)
sentences_padded = data_helpers.pad_sentences(sentences)
vocabulary, vocabulary_inv = data_helpers.build_vocab(sentences_padded)

num_padding = sequence_length - len(new_review)
new_sentence = new_review + ["<PAD/>"] * num_padding

# convert sentence to input matrix
array = []
for word in new_sentence:
    try:
        word_index = vocabulary[word]
    except KeyError:
        word_index = vocabulary["<PAD/>"]
    array.append(word_index)
x = np.array([array])
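The try/except lookup above maps every out-of-vocabulary word to the <PAD/> index; the same mapping can be written more compactly with dict.get (reusing vocabulary, new_sentence and np from the snippet above):

pad_index = vocabulary["<PAD/>"]
x = np.array([[vocabulary.get(word, pad_index) for word in new_sentence]])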

#######################################################################################################################
Example #9
def main(argv):
    parser = argparse.ArgumentParser(
        description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset',
                        dest='trainset',
                        action='store',
                        metavar='TRAINSET',
                        required=True,
                        help='')
    parser.add_argument('--testset',
                        dest='testset',
                        action='store',
                        metavar='TESTSET',
                        required=True,
                        help='')
    parser.add_argument('--dataroot',
                        dest='dataroot',
                        action='store',
                        required=True,
                        metavar='PATH',
                        help='')

    args = parser.parse_args()

    train_utters = []
    trainset = dataset_walker.dataset_walker(args.trainset,
                                             dataroot=args.dataroot,
                                             labels=True,
                                             translations=True)
    sys.stderr.write('Loading training instances ... ')
    for call in trainset:
        context_utters = []
        context_utter_str = '<PAD/>'
        context_labels = []
        context_label = ['INI_OPENING']
        last_speaker = None
        for (log_utter, translations, label_utter) in call:
            transcript = data_helpers.tokenize_and_lower(
                log_utter['transcript'])
            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += [
                    '%s_%s' % (sa['act'], attr) for attr in sa['attributes']
                ]
            sa_label_list = sorted(set(sa_label_list))

            if last_speaker is not None and log_utter[
                    'speaker'] != last_speaker:
                if len(context_utters) > 0:
                    context_utter_str = ' <pause> '.join(context_utters)
                    context_label = context_labels[-1]
                else:
                    context_utter_str = '<PAD/>'
                    context_label = ['INI_OPENING']

                context_utters = []
                context_labels = []
                last_speaker = None

            if last_speaker is None or log_utter['speaker'] == last_speaker:
                context_utters += [transcript]  # cumulate context utters
                context_labels += [sa_label_list]

            last_speaker = log_utter['speaker']
            train_utters += [
                (transcript, context_utter_str, log_utter['speaker'],
                 sa_label_list, log_utter['utter_index'], context_label)
            ]
            # train_utters += [(transcript, context_utter_str, log_utter['speaker'], sa_label_list, log_utter['utter_index'], sa_label_list)]

    sys.stderr.write('Done\n')

    test_utters = []
    testset = dataset_walker.dataset_walker(args.testset,
                                            dataroot=args.dataroot,
                                            labels=True,
                                            translations=True)
    sys.stderr.write('Loading testing instances ... ')
    for call in testset:
        context_utters = []
        context_utter_str = '<PAD/>'
        context_labels = []
        context_label = ['INI_OPENING']
        last_speaker = None
        for (log_utter, translations, label_utter) in call:
            try:
                translation = data_helpers.tokenize_and_lower(
                    translations['translated'][0]['hyp'])
            except:
                translation = ''

            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += [
                    '%s_%s' % (sa['act'], attr) for attr in sa['attributes']
                ]
            sa_label_list = sorted(set(sa_label_list))

            if last_speaker is not None and log_utter[
                    'speaker'] != last_speaker:
                if len(context_utters) > 0:
                    context_utter_str = ' <pause> '.join(context_utters)
                    context_label = context_labels[-1]
                else:
                    context_utter_str = ''
                    context_label = ['INI_OPENING']

                context_utters = []
                context_labels = []
                last_speaker = None

            if last_speaker is None or log_utter['speaker'] == last_speaker:
                context_utters += [translation]  # cumulate context utters
                context_labels += [sa_label_list]

            last_speaker = log_utter['speaker']

            test_utters += [
                (translation, context_utter_str, log_utter['speaker'],
                 sa_label_list, log_utter['utter_index'], context_label)
            ]
            # test_utters += [(translation, context_utter_str, log_utter['speaker'], sa_label_list, log_utter['utter_index'], sa_label_list)]

    # pprint(train_utters[:2])
    # pprint(test_utters[:2])

    # dump_corpus(train_utters, "dstc5_train.txt")
    # dump_corpus(test_utters, "dstc5_test.txt")

    # load parameters
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)

    # build vocabulary
    utters = [utter[0].split(' ') for utter in train_utters]
    ctx_utters = [utter[1].split(' ') for utter in train_utters]
    print("max context utter length: %d " %
          max([len(ctx_utter) for ctx_utter in ctx_utters]))
    max_sent_len = int(params['max_sent_len'])
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)
    pad_ctx_utters = data_helpers.pad_sentences(ctx_utters, max_sent_len)

    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_ctx_utters)
    print("vocabulary size: %d" % len(vocabulary))

    # build input
    train_inputs = data_helpers.build_input_data(pad_utters, vocabulary)
    train_ctx_inputs = data_helpers.build_input_data(pad_ctx_utters,
                                                     vocabulary)

    utters = [utter[0].split(' ') for utter in test_utters]
    ctx_utters = [utter[1].split(' ') for utter in test_utters]
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)
    pad_ctx_utters = data_helpers.pad_sentences(ctx_utters, max_sent_len)
    test_inputs = data_helpers.build_input_data(pad_utters, vocabulary)
    test_ctx_inputs = data_helpers.build_input_data(pad_ctx_utters, vocabulary)

    # build labels
    sa_train_labels = [utter[3] for utter in train_utters]
    sa_test_labels = [utter[3] for utter in test_utters]
    sa_train_ctx_labels = [utter[5] for utter in train_utters]
    sa_test_ctx_labels = [utter[5] for utter in test_utters]

    label_binarizer = preprocessing.MultiLabelBinarizer()
    label_binarizer.fit(sa_train_labels + sa_test_labels)

    train_labels = label_binarizer.transform(sa_train_labels)
    test_labels = label_binarizer.transform(sa_test_labels)
    train_ctx_labels = label_binarizer.transform(sa_train_ctx_labels)
    test_ctx_labels = label_binarizer.transform(sa_test_ctx_labels)

    # split speakers into two sets
    tourist_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[2].lower() == 'tourist'
    ]
    guide_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[2].lower() == 'guide'
    ]

    tourist_test_indices = [
        i for i, utter in enumerate(test_utters)
        if utter[2].lower() == 'tourist'
    ]
    guide_test_indices = [
        i for i, utter in enumerate(test_utters) if utter[2].lower() == 'guide'
    ]

    np.random.shuffle(tourist_train_indices)
    np.random.shuffle(guide_train_indices)

    tourist_train_inputs = train_inputs[tourist_train_indices]
    tourist_train_ctx_inputs = train_ctx_inputs[tourist_train_indices]
    tourist_train_labels = train_labels[tourist_train_indices]
    tourist_train_ctx_labels = train_ctx_labels[tourist_train_indices]

    guide_train_inputs = train_inputs[guide_train_indices]
    guide_train_ctx_inputs = train_ctx_inputs[guide_train_indices]
    guide_train_labels = train_labels[guide_train_indices]
    guide_train_ctx_labels = train_ctx_labels[guide_train_indices]

    tourist_test_inputs = test_inputs[tourist_test_indices]
    tourist_test_ctx_inputs = test_ctx_inputs[tourist_test_indices]
    tourist_test_labels = test_labels[tourist_test_indices]
    tourist_test_ctx_labels = test_ctx_labels[tourist_test_indices]

    guide_test_inputs = test_inputs[guide_test_indices]
    guide_test_ctx_inputs = test_ctx_inputs[guide_test_indices]
    guide_test_labels = test_labels[guide_test_indices]
    guide_test_ctx_labels = test_ctx_labels[guide_test_indices]

    # load pre-trained word embeddings
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(
        vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    run_slu_task(embedding_matrix, vocabulary, label_binarizer,
                 tourist_train_inputs, tourist_train_ctx_inputs,
                 tourist_train_labels, tourist_train_ctx_labels,
                 tourist_test_inputs, tourist_test_ctx_inputs,
                 tourist_test_labels, tourist_test_ctx_labels)

    run_slu_task(embedding_matrix, vocabulary, label_binarizer,
                 guide_train_inputs, guide_train_ctx_inputs,
                 guide_train_labels, guide_train_ctx_labels, guide_test_inputs,
                 guide_test_ctx_inputs, guide_test_labels,
                 guide_test_ctx_labels)

    print("")
Example #10
# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file,
                                              FLAGS.negative_data_file)
x_eval = data_helpers.load_test_data(FLAGS.test_data_file)

# Pad sentences
sentences_padded_all, max_length = data_helpers.pad_sentences(x_text + x_eval)
sentences_padded, max_length = data_helpers.pad_sentences(x_text, max_length)

# Build vocabulary
vocabulary, vocabulary_inv = data_helpers.build_vocab(sentences_padded_all)
x, y = data_helpers.build_input_data(sentences_padded, y, vocabulary)

# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# Split train/test set
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
print("Vocabulary Size: {:d}".format(len(vocabulary)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
Example #11
#TODO: After complete all training, use argparse to store the params.
positive_data_file = "./data/rt-polaritydata/rt-polarity.pos"
negative_data_file = "./data/rt-polaritydata/rt-polarity.neg"

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(positive_data_file,
                                              negative_data_file)

# Pad sentence
print("Padding sentences...")
x_text = data_helpers.pad_sentences(x_text)
print("The sequence length is: ", len(x_text[0]))

# Build vocabulary
vocabulary, vocabulary_inv = data_helpers.build_vocab(x_text)

# Represent sentence with word index, using word index to represent a sentence
x = data_helpers.build_index_sentence(x_text, vocabulary)
y = y.argmax(
    axis=1)  # y: [1, 1, 1, ...., 0, 0, 0]. 1 for positive, 0 for negative

# Shuffle data
np.random.seed(42)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# Split train and test
# TODO: training_rate could be set by user as a parameter
training_rate = 0.9
Example #12
def main(argv):
    parser = argparse.ArgumentParser(
        description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset',
                        dest='trainset',
                        action='store',
                        metavar='TRAINSET',
                        required=True,
                        help='')
    parser.add_argument('--testset',
                        dest='testset',
                        action='store',
                        metavar='TESTSET',
                        required=True,
                        help='')
    parser.add_argument('--dataroot',
                        dest='dataroot',
                        action='store',
                        required=True,
                        metavar='PATH',
                        help='')

    args = parser.parse_args()

    # load parameters
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)
    ctx_len = int(params['context_length'])

    train_utters = []
    trainset = dataset_walker.dataset_walker(args.trainset,
                                             dataroot=args.dataroot,
                                             labels=True,
                                             translations=True)
    sys.stderr.write('Loading training instances ... ')
    transcript_contexts = []
    for call in trainset:
        for i, (log_utter, translations, label_utter) in enumerate(call):
            transcript = data_helpers.tokenize_and_lower(
                log_utter['transcript'])
            transcript_contexts += [transcript]

            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += [
                    '%s_%s' % (sa['act'], attr) for attr in sa['attributes']
                ]
            sa_label_list = sorted(set(sa_label_list))
            # train_utters += [(transcript_contexts[max(0, i+1-ctx_len):i+1], log_utter['speaker'], sa_label_list, log_utter['utter_index'])]
            train_utters += [(transcript, log_utter['speaker'], sa_label_list,
                              log_utter['utter_index'])]

    sys.stderr.write('Done\n')

    test_utters = []
    testset = dataset_walker.dataset_walker(args.testset,
                                            dataroot=args.dataroot,
                                            labels=True,
                                            translations=True)
    sys.stderr.write('Loading testing instances ... ')
    transcript_contexts = []
    for call in testset:
        for i, (log_utter, translations, label_utter) in enumerate(call):
            try:
                translation = data_helpers.tokenize_and_lower(
                    translations['translated'][0]['hyp'])
            except:
                translation = ''
            transcript_contexts += [translation]

            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += [
                    '%s_%s' % (sa['act'], attr) for attr in sa['attributes']
                ]
            sa_label_list = sorted(set(sa_label_list))
            # test_utters += [(transcript_contexts[max(0, i+1-ctx_len):i+1], log_utter['speaker'], sa_label_list, log_utter['utter_index'])]
            test_utters += [(translation, log_utter['speaker'], sa_label_list,
                             log_utter['utter_index'])]

    # pprint(train_utters[:2])
    # pprint(test_utters[:2])

    # dump_corpus(train_utters, "dstc5_train.txt")
    # dump_corpus(test_utters, "dstc5_test.txt")

    # build vocabulary
    utters = [utter[0].split(' ') for utter in train_utters]
    max_sent_len = int(params['max_sent_len'])
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)

    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_utters)
    print("vocabulary size: %d" % len(vocabulary))

    # build input
    train_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    utters = [utter[0].split(' ') for utter in test_utters]
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)
    test_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    # make windowed input data as context
    train_inputs = data_helpers.build_windowed_input(train_inputs, ctx_len)
    test_inputs = data_helpers.build_windowed_input(test_inputs, ctx_len)

    # build labels
    sa_train_labels = [utter[2] for utter in train_utters]
    sa_test_labels = [utter[2] for utter in test_utters]
    label_binarizer = preprocessing.MultiLabelBinarizer()
    label_binarizer.fit(sa_train_labels + sa_test_labels)

    train_labels = label_binarizer.transform(sa_train_labels)
    test_labels = label_binarizer.transform(sa_test_labels)

    # split speakers into two sets
    tourist_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'guide'
    ]

    tourist_test_indices = [
        i for i, utter in enumerate(test_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_test_indices = [
        i for i, utter in enumerate(test_utters) if utter[1].lower() == 'guide'
    ]

    np.random.shuffle(tourist_train_indices)
    np.random.shuffle(guide_train_indices)
    # np.random.shuffle(tourist_test_indices)
    # np.random.shuffle(guide_test_indices)

    tourist_train_inputs = train_inputs[tourist_train_indices]
    tourist_train_labels = train_labels[tourist_train_indices]

    guide_train_inputs = train_inputs[guide_train_indices]
    guide_train_labels = train_labels[guide_train_indices]

    tourist_test_inputs = test_inputs[tourist_test_indices]
    tourist_test_labels = test_labels[tourist_test_indices]

    guide_test_inputs = test_inputs[guide_test_indices]
    guide_test_labels = test_labels[guide_test_indices]

    # load pre-trained word embeddings
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(
        vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    run_slu_sequence_task(embedding_matrix, vocabulary, label_binarizer,
                          tourist_train_inputs, tourist_train_labels,
                          tourist_test_inputs, tourist_test_labels)

    run_slu_sequence_task(embedding_matrix, vocabulary, label_binarizer,
                          guide_train_inputs, guide_train_labels,
                          guide_test_inputs, guide_test_labels)

    print("")
Example #13
def main(argv):
    parser = argparse.ArgumentParser(description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset', dest='trainset', action='store', metavar='TRAINSET', required=True, help='')
    parser.add_argument('--testset', dest='testset', action='store', metavar='TESTSET', required=True, help='')
    parser.add_argument('--dataroot', dest='dataroot', action='store', required=True, metavar='PATH',  help='')
    parser.add_argument('--roletype', dest='roletype', action='store', choices=['guide',  'tourist'], required=True,  help='speaker')

    args = parser.parse_args()
    threshold_predictor = None

    train_utters = []
    trainset = dataset_walker.dataset_walker(args.trainset, dataroot=args.dataroot, labels=True, translations=True)
    sys.stderr.write('Loading training instances ... ')
    for call in trainset:
        for (log_utter, translations, label_utter) in call:
            if log_utter['speaker'].lower() != args.roletype:
                continue
            transcript = data_helpers.tokenize_and_lower(log_utter['transcript'])

            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += ['%s_%s' % (sa['act'], attr) for attr in sa['attributes']]
            sa_label_list = sorted(set(sa_label_list))
            train_utters += [(transcript, log_utter['speaker'], sa_label_list)]
    sys.stderr.write('Done\n')

    test_utters = []
    testset = dataset_walker.dataset_walker(args.testset, dataroot=args.dataroot, labels=True, translations=True)
    sys.stderr.write('Loading testing instances ... ')
    for call in testset:
        for (log_utter, translations, label_utter) in call:
            if log_utter['speaker'].lower() != args.roletype:
                continue
            try:
                translation = data_helpers.tokenize_and_lower(translations['translated'][0]['hyp'])
            except:
                translation = ''

            speech_act = label_utter['speech_act']
            sa_label_list = []
            for sa in speech_act:
                sa_label_list += ['%s_%s' % (sa['act'], attr) for attr in sa['attributes']]
            sa_label_list = sorted(set(sa_label_list))
            test_utters += [(translation, log_utter['speaker'], sa_label_list)]

    pprint(train_utters[:2])
    pprint(test_utters[:2])

    # load parameters
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)
    num_epochs = int(params['num_epochs'])
    validation_split = float(params['validation_split'])
    batch_size = int(params['batch_size'])
    multilabel = params['multilabel']=="true"

    # build vocabulary
    sents = [utter[0].split(' ') for utter in train_utters]
    max_sent_len = int(params['max_sent_len'])
    pad_sents = data_helpers.pad_sentences(sents, max_sent_len)
    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_sents)
    print("vocabulary size: %d" % len(vocabulary))
    # params['max_sent_len'] = max_sent_len

    # build inputs
    train_inputs = data_helpers.build_input_data(pad_sents, vocabulary)

    test_sents = [utter[0].split(' ') for utter in test_utters]
    test_pad_sents = data_helpers.pad_sentences(test_sents, max_sent_len)
    test_inputs = data_helpers.build_input_data(test_pad_sents, vocabulary)

    # build labels
    sa_train_labels = [utter[2] for utter in train_utters]
    sa_test_labels = [utter[2] for utter in test_utters]
    label_binarizer = preprocessing.MultiLabelBinarizer()
    label_binarizer.fit(sa_train_labels+sa_test_labels)

    train_labels = label_binarizer.transform(sa_train_labels)
    test_labels = label_binarizer.transform(sa_test_labels)

    # split and shuffle data
    indices = np.arange(train_inputs.shape[0])
    np.random.shuffle(indices)
    train_inputs = train_inputs[indices]
    train_labels = train_labels[indices]
    num_validation = int(validation_split * train_inputs.shape[0])

    # x_train = train_inputs[:-num_validation]
    # y_train = train_labels[:-num_validation]
    # x_val = train_inputs[-num_validation:]
    # y_val = train_labels[-num_validation:]
    x_train = train_inputs
    y_train = train_labels

    x_test = test_inputs
    y_test = test_labels

    # construct a pytorch data_loader
    x_train = torch.from_numpy(x_train).long()
    y_train = torch.from_numpy(y_train).float()
    dataset_tensor = data_utils.TensorDataset(x_train, y_train)
    train_loader = data_utils.DataLoader(dataset_tensor, batch_size=batch_size, shuffle=True, num_workers=4,
                                         pin_memory=False)

    x_test = torch.from_numpy(x_test).long()
    y_test = torch.from_numpy(y_test).long()
    dataset_tensor = data_utils.TensorDataset(x_test, y_test)
    test_loader = data_utils.DataLoader(dataset_tensor, batch_size=batch_size, shuffle=False, num_workers=4,
                                         pin_memory=False)


    # load pre-trained word embeddings
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    # load model
    model = SluConvNet(params, embedding_matrix, len(vocabulary), y_train.shape[1])

    if torch.cuda.is_available():
        model = model.cuda()
    learning_rate = float(params['learning_rate'])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    loss_fn = nn.MultiLabelSoftMarginLoss()
    # loss_fn = nn.BCEWithLogitsLoss()

    for epoch in range(num_epochs):
        model.train()   # set the model to training mode (apply dropout etc)
        for i, (inputs, labels) in enumerate(train_loader):
            inputs, labels = autograd.Variable(inputs), autograd.Variable(labels)
            if torch.cuda.is_available():
                inputs, labels = inputs.cuda(), labels.cuda()

            preds = model(inputs)
            if torch.cuda.is_available():
                preds = preds.cuda()

            loss = loss_fn(preds, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                print("current loss: %.4f" % loss)

        model.eval()        # set the model to evaluation mode
        # if threshold_predictor is None:
        threshold_predictor = train_threshold(model, train_loader, y_train.numpy())
        # count_predictor = train_count(model, train_loader, y_train.numpy())
        true_acts, pred_acts, metrics = evaluate(model, label_binarizer, test_loader, y_test, multilabel, threshold_predictor)
        # true_acts, pred_acts, metrics = evaluate_count(model, label_binarizer, test_loader, y_test, multilabel, count_predictor)
        print("Precision: %.4f\tRecall: %.4f\tF1-score: %.4f\n" % (metrics[0], metrics[1], metrics[2]))

    # end of training
    true_acts, pred_acts, metrics = evaluate(model, label_binarizer, test_loader, y_test, multilabel)
    print("Precision: %.4f\tRecall: %.4f\tF1-score: %.4f\n" % (metrics[0], metrics[1], metrics[2]))

    with open(("pred_result_%s.txt" % args.roletype), "w") as f:
        for pred_act, true_act in zip(pred_acts, true_acts):
            f.write("pred: %s\ntrue: %s\n\n" % (', '.join(pred_act), ', '.join(true_act)))
Example #14
def main(argv):
    parser = argparse.ArgumentParser(
        description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset',
                        dest='trainset',
                        action='store',
                        metavar='TRAINSET',
                        required=True,
                        help='')
    parser.add_argument('--devset',
                        dest='devset',
                        action='store',
                        metavar='DEVSET',
                        required=True,
                        help='')
    parser.add_argument('--testset',
                        dest='testset',
                        action='store',
                        metavar='TESTSET',
                        required=True,
                        help='')
    parser.add_argument('--dataroot',
                        dest='dataroot',
                        action='store',
                        required=True,
                        metavar='PATH',
                        help='')

    args = parser.parse_args()

    # load parameters
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)

    trainset = dataset_walker.dataset_walker(args.trainset,
                                             dataroot=args.dataroot,
                                             labels=True,
                                             translations=True)
    devset = dataset_walker.dataset_walker(args.devset,
                                           dataroot=args.dataroot,
                                           labels=True,
                                           translations=True)
    testset = dataset_walker.dataset_walker(args.testset,
                                            dataroot=args.dataroot,
                                            labels=True,
                                            translations=True)
    train_utters, dev_utters, test_utters = data_helpers.load_dstc5_dataset_multitask(
        trainset, devset, testset)

    train_utters += dev_utters

    context_case = 1
    # TODO: build the previous-labels context here!
    # 1) the previous N speech acts (regardless of speaker)
    # 2) all speech acts of the other speaker's utterances in the previous turn (n of them)
    if context_case == 1:

        pass

    else:
        pass

    # pprint(train_utters[:2])
    # pprint(test_utters[:2])

    # dump_corpus(train_utters, "dstc5_train.txt")
    # dump_corpus(test_utters, "dstc5_test.txt")

    # build vocabulary
    utters = [[char for char in utter[0]] for utter in train_utters]
    max_sent_len = int(params['max_sent_len'])
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)

    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_utters)
    print("vocabulary size: %d" % len(vocabulary))

    # build input
    train_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    utters = [[char for char in utter[0]] for utter in test_utters]
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)
    test_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    # build labels
    train_labels_category = [utter[3] for utter in train_utters]
    test_labels_category = [utter[3] for utter in test_utters]
    train_labels_attr = [utter[4] for utter in train_utters]
    test_labels_attr = [utter[4] for utter in test_utters]
    train_labels_sa = [utter[5] for utter in train_utters]
    test_labels_sa = [utter[5] for utter in test_utters]

    label_binarizer_category = preprocessing.MultiLabelBinarizer()
    label_binarizer_category.fit(train_labels_category + test_labels_category)

    label_binarizer_attr = preprocessing.MultiLabelBinarizer()
    label_binarizer_attr.fit(train_labels_attr + test_labels_attr)

    label_binarizer_sa = preprocessing.MultiLabelBinarizer()
    label_binarizer_sa.fit(train_labels_sa + test_labels_sa)

    train_labels_category = label_binarizer_category.transform(
        train_labels_category)
    test_labels_category = label_binarizer_category.transform(
        test_labels_category)
    train_labels_attr = label_binarizer_attr.transform(train_labels_attr)
    test_labels_attr = label_binarizer_attr.transform(test_labels_attr)
    train_labels_sa = label_binarizer_sa.transform(train_labels_sa)
    test_labels_sa = label_binarizer_sa.transform(test_labels_sa)

    # split speakers into two sets
    tourist_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'guide'
    ]

    tourist_test_indices = [
        i for i, utter in enumerate(test_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_test_indices = [
        i for i, utter in enumerate(test_utters) if utter[1].lower() == 'guide'
    ]

    np.random.shuffle(tourist_train_indices)
    np.random.shuffle(guide_train_indices)
    # np.random.shuffle(tourist_test_indices)
    # np.random.shuffle(guide_test_indices)

    tourist_train_inputs = train_inputs[tourist_train_indices]
    tourist_train_labels_category = train_labels_category[
        tourist_train_indices]
    tourist_train_labels_attr = train_labels_attr[tourist_train_indices]
    tourist_train_labels_sa = train_labels_sa[tourist_train_indices]
    tourist_train_labels = (tourist_train_labels_category,
                            tourist_train_labels_attr, tourist_train_labels_sa)

    guide_train_inputs = train_inputs[guide_train_indices]
    guide_train_labels_category = train_labels_category[guide_train_indices]
    guide_train_labels_attr = train_labels_attr[guide_train_indices]
    guide_train_labels_sa = train_labels_sa[guide_train_indices]
    guide_train_labels = (guide_train_labels_category, guide_train_labels_attr,
                          guide_train_labels_sa)

    tourist_test_inputs = test_inputs[tourist_test_indices]
    tourist_test_labels_category = test_labels_category[tourist_test_indices]
    tourist_test_labels_attr = test_labels_attr[tourist_test_indices]
    tourist_test_labels_sa = test_labels_sa[tourist_test_indices]
    tourist_test_labels = (tourist_test_labels_category,
                           tourist_test_labels_attr, tourist_test_labels_sa)

    guide_test_inputs = test_inputs[guide_test_indices]
    guide_test_labels_category = test_labels_category[guide_test_indices]
    guide_test_labels_attr = test_labels_attr[guide_test_indices]
    guide_test_labels_sa = test_labels_sa[guide_test_indices]
    guide_test_labels = (guide_test_labels_category, guide_test_labels_attr,
                         guide_test_labels_sa)

    # load pre-trained word embeddings
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(
        vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    run_slu_task(embedding_matrix, vocabulary, label_binarizer_sa,
                 tourist_train_inputs, tourist_train_labels,
                 tourist_test_inputs, tourist_test_labels)

    run_slu_task(embedding_matrix, vocabulary, label_binarizer_sa,
                 guide_train_inputs, guide_train_labels, guide_test_inputs,
                 guide_test_labels)
Example #15
def main(argv):
    parser = argparse.ArgumentParser(
        description='CNN baseline for DSTC5 SAP Task')
    parser.add_argument('--trainset',
                        dest='trainset',
                        action='store',
                        metavar='TRAINSET',
                        required=True,
                        help='')
    parser.add_argument('--devset',
                        dest='devset',
                        action='store',
                        metavar='DEVSET',
                        required=True,
                        help='')
    parser.add_argument('--testset',
                        dest='testset',
                        action='store',
                        metavar='TESTSET',
                        required=True,
                        help='')
    parser.add_argument('--dataroot',
                        dest='dataroot',
                        action='store',
                        required=True,
                        metavar='PATH',
                        help='')

    args = parser.parse_args()

    # load parameters
    params = data_helpers.load_params("parameters/cnn.txt")
    pprint(params)

    trainset = dataset_walker.dataset_walker(args.trainset,
                                             dataroot=args.dataroot,
                                             labels=True,
                                             translations=True)
    devset = dataset_walker.dataset_walker(args.devset,
                                           dataroot=args.dataroot,
                                           labels=True,
                                           translations=True)
    testset = dataset_walker.dataset_walker(args.testset,
                                            dataroot=args.dataroot,
                                            labels=True,
                                            translations=True)
    train_utters, dev_utters, test_utters = data_helpers.load_dstc5_dataset(
        trainset, devset, testset)

    train_utters += dev_utters

    # pprint(train_utters[:2])
    # pprint(test_utters[:2])

    # dump_corpus(train_utters, "dstc5_train.txt")
    # dump_corpus(test_utters, "dstc5_test.txt")

    # build vocabulary
    utters = [[char for char in utter[0]] for utter in train_utters]
    max_sent_len = int(params['max_sent_len'])
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)

    vocabulary, inv_vocabulary = data_helpers.build_vocab(pad_utters)
    print("vocabulary size: %d" % len(vocabulary))

    # build input
    train_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    utters = [[char for char in utter[0]] for utter in test_utters]
    pad_utters = data_helpers.pad_sentences(utters, max_sent_len)
    test_inputs = data_helpers.build_input_data(pad_utters, vocabulary)

    # build labels
    sa_train_labels = [utter[2] for utter in train_utters]
    sa_test_labels = [utter[2] for utter in test_utters]
    label_binarizer = preprocessing.MultiLabelBinarizer()
    label_binarizer.fit(sa_train_labels + sa_test_labels)

    train_labels = label_binarizer.transform(sa_train_labels)
    test_labels = label_binarizer.transform(sa_test_labels)

    # split speakers into two sets
    tourist_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_train_indices = [
        i for i, utter in enumerate(train_utters)
        if utter[1].lower() == 'guide'
    ]

    tourist_test_indices = [
        i for i, utter in enumerate(test_utters)
        if utter[1].lower() == 'tourist'
    ]
    guide_test_indices = [
        i for i, utter in enumerate(test_utters) if utter[1].lower() == 'guide'
    ]

    np.random.shuffle(tourist_train_indices)
    np.random.shuffle(guide_train_indices)
    # np.random.shuffle(tourist_test_indices)
    # np.random.shuffle(guide_test_indices)

    tourist_train_inputs = train_inputs[tourist_train_indices]
    tourist_train_labels = train_labels[tourist_train_indices]

    guide_train_inputs = train_inputs[guide_train_indices]
    guide_train_labels = train_labels[guide_train_indices]

    tourist_test_inputs = test_inputs[tourist_test_indices]
    tourist_test_labels = test_labels[tourist_test_indices]

    guide_test_inputs = test_inputs[guide_test_indices]
    guide_test_labels = test_labels[guide_test_indices]

    # load pre-trained word embeddings
    embedding_dim = int(params['embedding_dim'])
    embedding_matrix = data_helpers.load_embedding(
        vocabulary, embedding_dim=embedding_dim, embedding=params['embedding'])

    run_slu_task(embedding_matrix, vocabulary, label_binarizer,
                 tourist_train_inputs, tourist_train_labels,
                 tourist_test_inputs, tourist_test_labels)

    run_slu_task(embedding_matrix, vocabulary, label_binarizer,
                 guide_train_inputs, guide_train_labels, guide_test_inputs,
                 guide_test_labels)

    print("")
Example #16
tf.flags.DEFINE_boolean("log_device_placement", False,
                        "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
assert (FLAGS.batch_size == FLAGS.pools_size)

print(("\nParameters:"))
for attr, value in sorted(FLAGS.__flags.items()):
    print(("{}={}".format(attr.upper(), value)))
print((""))

timeStamp = time.strftime("%Y%m%d%H%M%S", time.localtime(int(time.time())))

print(("Loading data..."))
vocab, embd = data_helpers.build_vocab(FLAGS.dataset,
                                       FLAGS.pretrained_embeddings_path)
if len(FLAGS.pretrained_embeddings_path) > 0:
    assert (embd.shape[1] == FLAGS.embedding_dim)
    with open('{}/embd.pkl'.format(FLAGS.dataset), 'wb') as fout:
        pickle.dump(embd, fout)
with open('{}/vocab.pkl'.format(FLAGS.dataset), 'wb') as fout:
    pickle.dump(vocab, fout)
alist = data_helpers.read_alist_standalone(FLAGS.dataset, "vocab.txt",
                                           FLAGS.max_sequence_length_a,
                                           FLAGS.padding)
raw, raw_dict = data_helpers.read_raw(FLAGS.dataset)
devList = data_helpers.loadTestSet(FLAGS.dataset, "valid.data")
testList = data_helpers.loadTestSet(FLAGS.dataset, "test.data")
testallList = data_helpers.loadTestSet(FLAGS.dataset, "test.data")  # testall

print("Load done...")
Example #17
print('10 fold CV starting')
for train, test in kfold.split(sentences_padded, y_class):
    # split train & test set
    print('splitting train and test set')
    X_train = list()
    X_test = list()
    for index in train:
        X_train.append(sentences_padded[index])
    for index in test:
        X_test.append(sentences_padded[index])
    y_train = y_class[train]
    y_test = y_class[test]

    # building vocabulary on train set
    print('building vocabulary on train set')
    vocabulary, vocabulary_inv = build_vocab(X_train)

    # Maps sentences to vectors based on vocabulary
    print('Mapping sentences to vectors based on vocabulary')
    X_train, y_train = build_input_data(X_train, y_train, vocabulary)
    # print(X_train.shape)
    X_test, y_test = build_input_data(X_test, y_test, vocabulary)
    # all x and y for predicting
    x, y_class = build_input_data(sentences_padded, y_class, vocabulary)
    # print(X_test.shape)
    vocabulary_size = len(vocabulary_inv)

    # building embedding matrix using GloVe word embeddings
    print('building embedding matrix using GloVe word embeddings')
    embedding_matrix = create_embedding_matrix('./dataset/myGloVe200d.txt', vocabulary, embedding_dim)
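create_embedding_matrix presumably reads the GloVe text file and fills a (len(vocabulary), embedding_dim) matrix row by row using the word indices in vocabulary; a hedged sketch of that pattern (the real helper may handle missing words differently, e.g. with random initialisation instead of zeros):

import numpy as np

def create_embedding_matrix(glove_path, vocabulary, embedding_dim):
    # Sketch: rows default to zeros; words found in the GloVe file get their pre-trained vector.
    embedding_matrix = np.zeros((len(vocabulary), embedding_dim))
    with open(glove_path, encoding="utf-8") as f:
        for line in f:
            parts = line.rstrip().split(" ")
            word, values = parts[0], parts[1:]
            if word in vocabulary and len(values) == embedding_dim:
                embedding_matrix[vocabulary[word]] = np.asarray(values, dtype="float32")
    return embedding_matrix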