Example #1
    def get_final_result_multi(self):

        final_result_multi = dict()
        prediction_function = NNMultiClassModel.predict_based_on_bert_classifier

        valid_2016_context_embs = self.data_embd['valid_2016']['context_embds']
        valid_2016_ending_0_embs = self.data_embd['valid_2016'][
            'ending0_embds']
        valid_2016_ending_1_embs = self.data_embd['valid_2016'][
            'ending1_embds']
        valid_2016_data = self.dataset['valid_2016']
        prediction_2016 = prediction_function(self, valid_2016_context_embs,
                                              valid_2016_ending_0_embs,
                                              valid_2016_ending_1_embs)
        final_result_multi['valid_2016'] = compute_accuracy(
            valid_2016_data, prediction_2016)

        valid_2018_context_embs = self.data_embd['valid_2018']['context_embds']
        valid_2018_ending_0_embs = self.data_embd['valid_2018'][
            'ending0_embds']
        valid_2018_ending_1_embs = self.data_embd['valid_2018'][
            'ending1_embds']
        valid_2018_data = self.dataset['valid_2018']
        prediction_2018 = prediction_function(self, valid_2018_context_embs,
                                              valid_2018_ending_0_embs,
                                              valid_2018_ending_1_embs)
        final_result_multi['valid_2018'] = compute_accuracy(
            valid_2018_data, prediction_2018)

        test_context_embs = self.data_embd['test']['context_embds']
        test_ending_0_embs = self.data_embd['test']['ending0_embds']
        test_ending_1_embs = self.data_embd['test']['ending1_embds']
        test_data = self.dataset['test']
        prediction_test = prediction_function(self, test_context_embs,
                                              test_ending_0_embs,
                                              test_ending_1_embs)
        final_result_multi['test'] = compute_accuracy(test_data,
                                                      prediction_test)

        return final_result_multi
Example #2
    def get_final_result_binary(self):

        final_result_binary = dict()
        prediction_function = NNBinaryClassModel.predict_based_on_bert_binary_classifier

        test_context_embs = self.data_embd['test']['context_embds']
        test_ending_0_embs = self.data_embd['test']['ending0_embds']
        test_ending_1_embs = self.data_embd['test']['ending1_embds']
        test_data = self.dataset['test']
        prediction_test = prediction_function(self, test_context_embs,
                                              test_ending_0_embs,
                                              test_ending_1_embs)
        final_result_binary['test'] = compute_accuracy(
            test_data, prediction_test)

        return final_result_binary
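In Examples #1 and #2, compute_accuracy receives a dataset split and the corresponding model predictions and returns a single score. The helper itself is not shown on this page; a minimal sketch, assuming each entry in the split carries a gold label (the field name right_ending and the label encoding are assumptions), could look like this:

def compute_accuracy(dataset_split, predictions):
    # Fraction of examples whose predicted ending matches the gold label.
    # `right_ending` is a hypothetical field name; the real dataset may
    # store its gold labels under a different key or encoding.
    gold_labels = [example['right_ending'] for example in dataset_split]
    correct = sum(1 for gold, pred in zip(gold_labels, predictions)
                  if gold == pred)
    return correct / len(gold_labels)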
Example #3
File: train1.py  Project: Debanitrkl/ATFORD
max_checkpoints_to_keep = 3
save_dir = "data/checkpoints"
train_vars = 'models/fc8-pets/weights:0,models/fc8-pets/biases:0'

# Export
export_dir = "/tmp/export/"
export_name = "pet-model"
export_version = 2

images, labels = datasets.input_pipeline(dataset_dir, batch_size,
                                         is_training=True)
test_images, test_labels = datasets.input_pipeline(dataset_dir, batch_size,
                                                   is_training=False)

with tf.variable_scope("models") as scope:
    logits = nets.inference(images, is_training=True)
    scope.reuse_variables()
    test_logits = nets.inference(test_images, is_training=False)

total_loss = models.compute_loss(logits, labels)
train_accuracy = models.compute_accuracy(logits, labels)
test_accuracy = models.compute_accuracy(test_logits, test_labels)

global_step = tf.Variable(0, trainable=False)
learning_rate = models.get_learning_rate(global_step, initial_learning_rate,
                                         decay_steps, decay_rate)
train_op = models.train(total_loss, learning_rate, global_step, train_vars)

saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
checkpoints_dir = os.path.join(save_dir,
                               datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
if not os.path.exists(save_dir):
    os.mkdir(save_dir)
if not os.path.exists(checkpoints_dir):
    os.mkdir(checkpoints_dir)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coords = tf.train.Coordinator()
Example #4
File: train.py  Project: xjustusc/mlwithtf
                                            dtypes=[tf.string, tf.int64],
                                            shapes=[(num_frames, ), ()])

train_enqueue_op = train_input_queue.enqueue_many(
    [image_paths_placeholder, labels_placeholder])

frames_batch, labels_batch = input_pipeline(train_input_queue,
                                            batch_size=batch_size,
                                            image_size=image_size)

with tf.variable_scope("models") as scope:
    logits, _ = nets.inference(frames_batch, is_training=True)

total_loss, cross_entropy_loss, reg_loss = models.compute_loss(
    logits, labels_batch)
train_accuracy = models.compute_accuracy(logits, labels_batch)

global_step = tf.Variable(0, trainable=False)
learning_rate = models.get_learning_rate(global_step, initial_learning_rate,
                                         decay_steps, decay_rate)
train_op = models.train(total_loss, learning_rate, global_step)

tf.summary.scalar("learning_rate", learning_rate)
tf.summary.scalar("train/accuracy", train_accuracy)
tf.summary.scalar("train/total_loss", total_loss)
tf.summary.scalar("train/cross_entropy_loss", cross_entropy_loss)
tf.summary.scalar("train/regularization_loss", reg_loss)

summary_op = tf.summary.merge_all()

saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
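Examples #3 and #4 (and Example #6 below) build models.compute_accuracy(logits, labels) as an in-graph TensorFlow 1.x op. Its implementation is not included on this page; a plausible sketch, assuming logits of shape [batch, num_classes] and integer class labels, is:

import tensorflow as tf

def compute_accuracy(logits, labels):
    # Compare the arg-max class of each logit row with the integer label
    # and average the matches into a scalar accuracy tensor.
    predictions = tf.argmax(logits, axis=1)
    correct = tf.equal(predictions, tf.cast(labels, tf.int64))
    return tf.reduce_mean(tf.cast(correct, tf.float32))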
Example #5
            # step 2. compute the output
            y_pred = classifier(x_in=batch_dict['x_data'].float())

            # step 3. compute the loss
            loss = loss_func(y_pred, batch_dict['y_target'].float())
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (batch_index + 1)

            # step 4. use loss to produce gradients
            loss.backward()

            # step 5. use optimizer to take gradient step
            optimizer.step()
            # -----------------------------------------
            # compute the accuracy
            acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
            running_acc += (acc_t - running_acc) / (batch_index + 1)

            # update bar
            # train_bar.set_postfix(loss=running_loss,
            #                       acc=running_acc,
            #                       epoch=epoch_index)
            # train_bar.update()

        train_state['train_loss'].append(running_loss)
        train_state['train_acc'].append(running_acc)

        # Iterate over val dataset

        # setup: batch generator, set loss and acc to 0; set eval mode on
        dataset.set_split('val')
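Example #5 calls compute_accuracy(y_pred, batch_dict['y_target']) inside a PyTorch training loop that uses a binary loss. A minimal sketch for that binary case, assuming y_pred holds raw logits that are thresholded after a sigmoid (the 0.5 threshold and the percentage scaling are assumptions), might be:

import torch

def compute_accuracy(y_pred, y_target):
    # Threshold sigmoid-activated logits at 0.5 and report the
    # percentage of predictions that match the targets.
    y_pred_indices = (torch.sigmoid(y_pred) > 0.5).long()
    n_correct = torch.eq(y_pred_indices, y_target.long()).sum().item()
    return n_correct / len(y_pred_indices) * 100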
Example #6
            logits_split, _ = nets.inference(frames_batch_split[i], is_training=True)
            labels_split = labels_batch_split[i]

            total_loss, cross_entropy_loss, reg_loss = models.compute_loss(logits_split, labels_split)

            grads = optimizer.compute_gradients(total_loss)

            total_gradients.append(grads)

            tf.get_variable_scope().reuse_variables()

with tf.device('/cpu:0'):
    gradients = models.average_gradients(total_gradients)
    train_op = optimizer.apply_gradients(gradients, global_step)

    train_accuracy = models.compute_accuracy(logits_split, labels_split)

    tf.summary.scalar("learning_rate", learning_rate)
    tf.summary.scalar("train/accuracy", train_accuracy)
    tf.summary.scalar("train/total_loss", total_loss)
    tf.summary.scalar("train/cross_entropy_loss", cross_entropy_loss)
    tf.summary.scalar("train/regularization_loss", reg_loss)

summary_op = tf.summary.merge_all()

saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
time_stamp = datetime.now().strftime("multi_%Y-%m-%d_%H-%M-%S")
checkpoints_dir = os.path.join(save_dir, time_stamp)
summary_dir = os.path.join(checkpoints_dir, "summaries")

train_writer = tf.summary.FileWriter(summary_dir, flush_secs=10)