Code example #1
File: word_average.py  Project: harperjiang/TTIC31210
        emds.append(emd)
        labels.append(label)

    return emds, labels


# Load data
train_embed, train_label = load(train_file)
dev_embed, dev_label = load(dev_file)
test_embed, test_label = load(test_file)

train_ds = nds.VarLenDataSet(train_embed, train_label)
dev_ds = nds.VarLenDataSet(dev_embed, dev_label)
test_ds = nds.VarLenDataSet(test_embed, test_label)

# Build Computation Graph
graph = ng.Graph(nl.LogLoss(), ns.Adam(eta=0.01))

# Word Embedding Matrix using Xavier
word_embedding = graph.param_of([len(word_dict), wv_dim])
# Weight vector
weight = graph.param_of([wv_dim, 1])

# Model: sigmoid( average(embed(x)) . weight )
input_node = graph.input()
embed = nd.Embed(input_node, word_embedding)
average = nd.Average(embed)
dot = nd.Dot(average, weight)
sigmoid = nd.Sigmoid(dot)
graph.output(sigmoid)

epochs = 100
batch_size = 50
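
The graph above encodes a simple word-average classifier: look up the embedding of each word in a sentence, average them, project onto a weight vector, and squash with a sigmoid. As an illustration only (plain NumPy, not the ng/nd graph API; all names here are hypothetical), the same forward computation and the binary log loss presumably computed by nl.LogLoss look like this:

import numpy as np

def forward(word_vectors, sentence_ids, w):
    # word_vectors: [vocab, wv_dim], sentence_ids: word ids of one sentence, w: [wv_dim]
    embedded = word_vectors[sentence_ids]      # Embed: [len, wv_dim]
    avg = embedded.mean(axis=0)                # Average over the variable-length sentence
    return 1.0 / (1.0 + np.exp(-avg.dot(w)))   # Dot + Sigmoid: probability of the positive class

def log_loss(p, label):
    # Binary cross-entropy for a single example
    return -(label * np.log(p) + (1 - label) * np.log(1 - p))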
Code example #2
        labels.append(label)

    return emds, labels


# Load data
train_embed, train_label = load(train_file)
dev_embed, dev_label = load(dev_file)
test_embed, test_label = load(test_file)

train_ds = nds.VarLenDataSet(train_embed, train_label)
dev_ds = nds.VarLenDataSet(dev_embed, dev_label)
test_ds = nds.VarLenDataSet(test_embed, test_label)

# Build Computation Graph
graph = ng.Graph(nl.LogLoss(), ns.SGD(eta=0.1, decay=0.95))

# Word Embedding Matrix using Xavier
word_embedding = graph.param_of([len(word_dict), wv_dim])
# Weight vector
weight = graph.param_of([wv_dim, 1])
attention_weight = graph.param_of([wv_dim])

# Relative Position weight
relative_len = 20
relative_pos = graph.param_of([relative_len])


# Custom graph node combining the embeddings, the attention weight, and the relative-position parameters
class EmbedMap(nd.Node):
    def __init__(self, embed, weight, rel_pos):
        super(EmbedMap, self).__init__([embed, weight, rel_pos])
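
Example #2 extends the word average with an attention weight vector and a learned relative-position bias, wrapped in a custom EmbedMap node whose body is not shown in this excerpt. Purely as an assumption about the intent (plain NumPy, not the nd.Node API; names are illustrative), a position-biased attention average could look like this:

import numpy as np

def attention_average(embedded, attn_w, rel_pos):
    # embedded: [len, wv_dim], attn_w: [wv_dim], rel_pos: [relative_len]
    n = embedded.shape[0]
    # Score each position: content term plus a position bias, clipped to relative_len
    pos = np.minimum(np.arange(n), len(rel_pos) - 1)
    scores = embedded.dot(attn_w) + rel_pos[pos]
    # Softmax over positions, then a weighted average of the embeddings
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()
    return weights.dot(embedded)               # [wv_dim]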
Code example #3
        labels.append(label)

    return emds, labels


# Load data
train_embed, train_label = load(train_file)
dev_embed, dev_label = load(dev_file)
test_embed, test_label = load(test_file)

train_ds = nds.VarLenDataSet(train_embed, train_label)
dev_ds = nds.VarLenDataSet(dev_embed, dev_label)
test_ds = nds.VarLenDataSet(test_embed, test_label)

# Build Computation Graph
graph = ng.Graph(nl.LogLoss(), ns.SGD(eta=0.01))

# Word Embedding Matrix from dict
word_embedding = graph.param_of([len(word_dict), wv_dim])

word_embed_file = "/home/harper/Downloads/glove.840B.300d.txt"


# Holds pretrained GloVe vectors loaded from word_embed_file
class WordEmbedDict:
    def __init__(self):
        self.buffer = {}

    def load(self, keyset):
        # Scan the pretrained embedding file line by line
        lines = open(word_embed_file, "rb").readlines()
        for line in lines:
            pieces = line.split()
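
WordEmbedDict reads the pretrained GloVe file and, judging by the keyset argument, keeps only vectors for words that actually occur in the data; the rest of load is cut off above. As a standalone illustration (not the class's actual implementation; the text-mode read and the dimension check are assumptions), a filtered GloVe load can be written as:

import numpy as np

def load_glove_subset(path, keyset, dim=300):
    vectors = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            pieces = line.rstrip().split(" ")
            # Skip malformed lines and words outside the vocabulary
            if len(pieces) == dim + 1 and pieces[0] in keyset:
                vectors[pieces[0]] = np.asarray(pieces[1:], dtype=np.float32)
    return vectors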
Code example #4
        labels.append(label)

    return emds, labels


# Load data
train_embed, train_label = load(train_file)
dev_embed, dev_label = load(dev_file)
test_embed, test_label = load(test_file)

train_ds = nds.VarLenDataSet(train_embed, train_label)
dev_ds = nds.VarLenDataSet(dev_embed, dev_label)
test_ds = nds.VarLenDataSet(test_embed, test_label)

# Build Computation Graph
graph = ng.Graph(nl.LogLoss(), ns.RMSProp(eta=0.005))

# Word Embedding Matrix using Xavier
word_embedding = graph.param_of([len(word_dict), wv_dim])
# Weight vector
weight = graph.param_of([wv_dim, 1])
attention_weight = graph.param_of([wv_dim])


# Custom graph node combining the embeddings with the attention weight vector
class EmbedMap(nd.Node):
    def __init__(self, embed, weight):
        super(EmbedMap, self).__init__([embed, weight])
        self.embed = embed
        self.weight = weight

    def compute(self):
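
Here EmbedMap takes only the embeddings and the attention weight, without the relative-position term, and the compute body is truncated. Assuming it performs the same softmax-attention pooling as example #2, a NumPy sketch of the forward pass and its gradients (the pieces a hand-written graph node typically needs) would be:

import numpy as np

def embed_map_forward(embedded, attn_w):
    # embedded: [len, wv_dim], attn_w: [wv_dim]
    scores = embedded.dot(attn_w)               # one score per word
    a = np.exp(scores - scores.max())           # numerically stable softmax
    a /= a.sum()
    return a.dot(embedded), a                   # pooled vector and the attention weights

def embed_map_backward(embedded, attn_w, a, grad_out):
    # grad_out is dL/d(pooled vector), shape [wv_dim]
    dL_da = embedded.dot(grad_out)              # [len]
    dL_ds = a * (dL_da - a.dot(dL_da))          # backprop through the softmax
    grad_embed = np.outer(a, grad_out) + np.outer(dL_ds, attn_w)
    grad_attn_w = embedded.T.dot(dL_ds)
    return grad_embed, grad_attn_w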