def test_worker():
    fout = open('test_label.txt', 'w')
    P_input = T.tensor4(dtype=theano.config.floatX)

    # load the pretrained convolutional and MLP parameter sets
    Cparams = loaddata('./para_learning_rate0.01/0rCparas.pkl')
    Mparams = loaddata('./para_learning_rate0.01/12Mparas.pkl')
    test_handle = test(P_input=P_input, Cparams=Cparams, Mparams=Mparams)
    f = theano.function(inputs=[P_input],
                        outputs=test_handle.output,
                        updates=[])

    right = 0.0
    cnt = 0.0

    prep = prepare.prepareData()
    block_size = prep.block_size
    item = prep.generate_test_from_sentence(1)
    for pair in item:
        for i in range(block_size):
            a = f(pair[i][0])
            print a
            #fout.write(str(a.mean()))
            #fout.write('\n')
            cnt = cnt + 1
            if pair[i][1].mean() == a.mean():
                right = right + 1.0
    # report the accuracy; right and cnt were otherwise computed but never used
    print 'accuracy:', right / cnt
    fout.close()
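The snippet assumes a project-local loaddata helper for the .pkl parameter files; a minimal sketch, assuming the files are plain pickles (the implementation is not shown in the source):

import cPickle

def loaddata(path):
    # hypothetical helper: unpickle a saved parameter file
    with open(path, 'rb') as fin:
        return cPickle.load(fin)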
Example #2
def train_worker():
    P_input = T.tensor4(dtype=theano.config.floatX)
    P_y = T.vector(name='P_y', dtype='int32')
    Cparams = parameters.c_random_weights()  #loaddata('./Cparams.pkl')
    Mparams = parameters.random_weights()

    train_handle = train(P_input=P_input,
                         P_y=P_y,
                         Cparams=Cparams,
                         Mparams=Mparams,
                         learning_rate=learning_rate)
    f = theano.function(inputs=[P_input, P_y],
                        outputs=train_handle.cost,
                        updates=train_handle.updates)

    prep = prepare.prepareData()
    block_size = prep.block_size

    for epoch in range(startepoch, 100):
        print "epoch", epoch
        cnt = 0.0
        cost = 0.0
        item = prep.generate_batch_from_sentence(batch_size)
        for pair in item:
            for i in range(block_size):
                # one training step: pair[i][0] is the input block, pair[i][1] the labels
                a = f(pair[i][0], pair[i][1])
                cost = cost + a
                cnt = cnt + 1
                # progress counter: write first, then flush
                sys.stdout.write(str(cnt) + '\r')
                sys.stdout.flush()
                #if cnt*batch_size % 10000 == 0:
                #    train_handle.storedata(cnt)
        # checkpoint both parameter sets once per epoch
        train_handle.storedata(epoch)
        train_handle.c_storedata(epoch)
        print cost
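train_handle.updates is constructed inside train(); a minimal sketch of the usual Theano SGD update list it would feed to theano.function, assuming a flat list of shared-variable parameters and a scalar cost (the helper name is illustrative):

import theano.tensor as T

def sgd_updates(cost, params, learning_rate):
    # hypothetical sketch: plain SGD as (shared_var, new_value) pairs,
    # the form theano.function expects for its updates argument
    grads = T.grad(cost, params)
    return [(p, p - learning_rate * g) for p, g in zip(params, grads)]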
Example #3
def train_worker(ipath, wpath):
    params = parameters.random_weights()
    P_input1 = T.matrix(name='P_input1', dtype=theano.config.floatX)
    P_input2 = T.matrix(name='P_input2', dtype=theano.config.floatX)
    P_y = T.vector(name='P_y', dtype='int32')
    RCNN = train_model(learning_rate, params)
    p_y, cost = RCNN.training(P_input1, P_input2, P_y)
    rcnn = theano.function(inputs=[P_input1, P_input2, P_y],
                           outputs=[p_y, cost],
                           updates=RCNN.updates)
    print 'reading dataset'
    prep = prepare.prepareData(ipath)
    prep.Reading_traindata()
    block_size = prep.block_size
    session_size = prep.train_session_num

    for epoch in range(iternum):
        print "epoch", epoch
        for k in range(session_size):
            item = prep.generate_batch_from_sentence(k)
            out, cost = rcnn(item[0], item[1], item[2])
            sys.stdout.write(str(k) + '\r')
            sys.stdout.flush()
        # checkpoint the weights after each epoch
        RCNN.storedata(epoch, wpath)
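A hypothetical invocation; both paths are placeholders, not values from the source:

if __name__ == '__main__':
    # placeholder paths for the input data and the weight checkpoints
    train_worker('./data/train_sessions.txt', './weights/')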
Example #4
    def forward(self, x):
        # the source snippet starts mid-method; the earlier layers are elided
        x = F.relu(self.fc1(x))
        output = torch.sigmoid(self.fc2(x))

        return output


print('train dataset: ', dataset_train)
xtrain, train_y = load_dataset_text(dataset_train, False)

dataset_name = 'sp-a'
dataset_name = basePath + dataset_name
print('test dataset: ', dataset_name)
xvalid, valid_y = load_dataset_text(dataset_name, False)

prepare = prepareData(EMBEDDING_DIM, MAXLEN_CLAIM, TRIM_NUM)
[train_sets,
 dev_sets] = prepare.data_process_pair(xtrain, train_y, xvalid, valid_y,
                                       separate, sampleNum)
pretrained_embeddings = torch.tensor(prepare.get_embedding_matrix()).to(device)

dataset = myDataset_pair(train_sets[0], train_sets[1])
trainloader = torch.utils.data.DataLoader(dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False)

dataset = myDataset(dev_sets[0], dev_sets[1])
devloader = torch.utils.data.DataLoader(dataset,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False)
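myDataset and myDataset_pair are project-local wrappers whose definitions are not shown; a minimal sketch of the single-input variant, assuming it simply pairs inputs with labels:

from torch.utils.data import Dataset

class myDataset(Dataset):
    # hypothetical sketch of the dataset wrapper assumed above
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]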
Example #5
def multilayer_perceptron(x):
    # reconstructed from the standard TF 1.x pattern; the hidden-layer lines
    # were cut off in the source snippet (ReLU activations are an assumption)
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
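# The weights/biases dictionaries referenced above are defined earlier in the
# source file; a hypothetical sketch, with n_input, n_hidden_1, n_hidden_2,
# and n_classes as placeholder size constants:
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])),
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes])),
}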

# Construct model
logits = multilayer_perceptron(X)
pred = tf.nn.sigmoid(logits)  # apply sigmoid to the logits to get probabilities

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Initializing the variables
init = tf.global_variables_initializer()

features, label = prepareData()
features = features[:, 0, 0, :]
label = label[:, np.newaxis]
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = 30
        # Loop over all batches
        for i in range(total_batch):
            idx = np.random.choice(features.shape[0], batch_size)
            batch_x, batch_y = features[idx], label[idx]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,