Example #1
# note: argparse applies bool() to the raw argument string, so any non-empty value (even "False") parses as True
parser.add_argument('--use_feature', help = 'whether to use input features', type = bool, default = True)
parser.add_argument('--update_emb', help = 'whether to update the embedding when optimizing the supervised loss', type = bool, default = True)
parser.add_argument('--layer_loss', help = 'whether to incur loss on hidden layers', type = bool, default = True)
args = parser.parse_args()

import cPickle
import numpy as np

def comp_accu(tpy, ty):
    # fraction of examples whose predicted class (argmax) matches the true class
    return (np.argmax(tpy, axis = 1) == np.argmax(ty, axis = 1)).sum() * 1.0 / tpy.shape[0]
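
# quick sanity check of comp_accu on toy one-hot arrays (illustrative, made-up values):
# both rows' predicted argmax matches the label, so the expected accuracy is 1.0
assert comp_accu(np.array([[0.9, 0.1], [0.2, 0.8]]),
                 np.array([[1, 0], [0, 1]])) == 1.0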

# load the data: x, y, tx, ty, allx, graph
NAMES = ['x', 'y', 'tx', 'ty', 'allx', 'graph']
OBJECTS = []
for name in NAMES:
    with open("data/ind.{}.{}".format(DATASET, name), 'rb') as f:
        OBJECTS.append(cPickle.load(f))
x, y, tx, ty, allx, graph = tuple(OBJECTS)
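
# Here x and tx are typically sparse feature matrices for the labeled training and
# test instances, y and ty the corresponding one-hot label matrices, allx the
# features of all training instances (labeled and unlabeled), and graph a dict
# mapping each node index to a list of neighbor indices.
print x.shape, tx.shape, allx.shape, len(graph)   # quick shape sanity check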

m = model(args)                                                 # initialize the model
m.add_data(x, y, allx, graph)                                   # add data
m.build()                                                       # build the model
m.init_train(init_iter_label = 10000, init_iter_graph = 400)    # pre-training
iter_cnt, max_accu = 0, 0
while True:
    m.step_train(max_iter = 1, iter_graph = 0.1, iter_inst = 1, iter_label = 0) # perform a training step
    tpy = m.predict(tx)                                                         # predict the dev set
    accu = comp_accu(tpy, ty)                                                   # compute the accuracy on the dev set
    print iter_cnt, accu, max_accu
    iter_cnt += 1
    if accu > max_accu:
        m.store_params()                                                        # store the model if better result is obtained
        max_accu = accu
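
# Note: the loop above runs until it is interrupted; since store_params() is called
# only when the dev accuracy improves, the parameters from the best dev step are the
# ones kept when training is stopped.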
Example #2
train_x, train_y, train_le, train_labels = getData(trainFile)
dev_x, dev_y, dev_le, dev_labels = getData(devFile)

graph = getGraph(graphFile)
allx = getAllData(allxFile)

print(len(graph))
print(allx.shape)
## load the data: x, y, tx, ty, allx, graph
#NAMES = ['x', 'y', 'tx', 'ty', 'allx', 'graph']
#OBJECTS = []
#for i in range(len(NAMES)):
#    OBJECTS.append(cPickle.load(open("data/ind.{}.{}".format(DATASET, NAMES[i]))))
#x, y, tx, ty, allx, graph = tuple(OBJECTS)

m = model(args)  # initialize the model
m.add_data(train_x, train_y, allx, graph)  # add data
m.build()  # build the model
m.init_train(init_iter_label=100, init_iter_graph=100)  # pre-training
iter_cnt, max_accu = 0, 0
num_epochs = 100

for _ in range(num_epochs):
    m.step_train(max_iter=1, iter_graph=0.1, iter_inst=1,
                 iter_label=0)  # perform a training step
    tpy = m.predict(dev_x)  # predict the dev set
    accu = comp_accu(tpy, dev_y)  # compute the accuracy on the dev set
    #    print iter_cnt, accu, max_accu
    iter_cnt += 1
    accu, P, R, F1, wAUC, AUC, report = performance.performance_measure_tf(
        dev_y, tpy, dev_le, dev_labels)
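
    # A possible alternative if the custom `performance` module is unavailable
    # (an illustrative sketch, not part of the original script): scikit-learn
    # reports per-class precision/recall/F1 from the same one-hot arrays.
    import numpy as np
    from sklearn.metrics import classification_report
    print(classification_report(np.argmax(dev_y, axis=1), np.argmax(tpy, axis=1)))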
Example #3
import pickle as pkl
import numpy as np

def comp_accu(tpy, ty):
    return (np.argmax(tpy, axis=1) == np.argmax(ty, axis=1)).sum() * 1.0 / tpy.shape[0]


# load the data: x, y, tx, ty, allx, graph
NAMES = ['x', 'y', 'tx', 'ty', 'allx', 'graph']
objects = {}
for name in NAMES:
    with open("data/ind.{}.{}".format(DATASET, name), 'rb') as f:
        objects[name] = pkl.load(f, encoding='latin1')

# initialize the model
m = model(args)

# add data
m.add_data(objects['x'], objects['y'], objects['allx'], objects['graph'])

# build the model
m.build()
m.init_train(init_iter_label=10000, init_iter_graph=400)  # pre-training
iter_cnt, max_accu = 0, 0
for _ in range(1000):
    # perform a training step
    m.step_train(max_iter=1, iter_graph=0.1, iter_inst=1, iter_label=0)

    # predict the dev set
    tpy = m.predict(objects['tx'])
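
    # An illustrative continuation, mirroring Example #1: score the dev predictions
    # and keep the parameters whenever the dev accuracy improves
    # (m.store_params() is assumed to behave as it does in Example #1).
    accu = comp_accu(tpy, objects['ty'])
    iter_cnt += 1
    if accu > max_accu:
        m.store_params()
        max_accu = accu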