Code example #1
                    enqueue_op = gpu_queue.enqueue([
                        input_placeholder, output_placeholder,
                        epoch_ended_placeholder
                    ])
                    tf.add_to_collection('enqueue', enqueue_op)
                    close_op = gpu_queue.close(cancel_pending_enqueues=True)
                    tf.add_to_collection('close_queue', close_op)

                data, labels, epoch_ended = gpu_queue.dequeue()
                data_per_replica = tf.split(data, MODEL_REPLICA, axis=1)

                last_fully_activations = []
                with tf.variable_scope('model_replicas'):
                    for j in range(MODEL_REPLICA):
                        last_fully_activations.append(
                            C3Dmodel.inference(tf.squeeze(data_per_replica[j],
                                                          axis=1),
                                               EXAMPLES_PER_GPU,
                                               dropout_placeholder,
                                               is_training_placeholder,
                                               NUM_CLASSES,
                                               collection='network_output',
                                               tensor_to_return='fully'))
                        tf.get_variable_scope().reuse_variables()

                # Concatenate the replicas' last fully-connected activations and add a fully-connected output layer.
                concatenated_tensor = tf.concat(last_fully_activations, axis=1)

                with tf.variable_scope('out'):
                    wout = C3Dmodel.model_variable(
                        'wout', [MODEL_REPLICA * 4096, NUM_CLASSES],
                        C3Dmodel.weight_init, C3Dmodel.WEIGHT_DECAY)()
                    bout = C3Dmodel.model_variable('bout', [NUM_CLASSES],
                                                   C3Dmodel.bias_init)()
                    out = tf.matmul(concatenated_tensor, wout) + bout
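For the `tf.split(data, MODEL_REPLICA, axis=1)` / `tf.squeeze(..., axis=1)` pair above to work, each dequeued batch needs an explicit replica axis. A minimal sketch of the implied input shape, using example constant values (the real `EXAMPLES_PER_GPU`, `MODEL_REPLICA`, and clip dimensions come from the training script):

    import tensorflow as tf

    # Example values only; the real constants are defined in the training script.
    EXAMPLES_PER_GPU = 2
    MODEL_REPLICA = 4
    TEMPORAL_DEPTH, INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNELS = 16, 112, 112, 3

    # One dequeued batch: one clip per model replica.
    # Shape: [batch, replica, frames, height, width, channels]
    data = tf.placeholder(
        tf.float32,
        [EXAMPLES_PER_GPU, MODEL_REPLICA, TEMPORAL_DEPTH,
         INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNELS])

    # Splitting along the replica axis and squeezing it away gives each replica
    # a standard 5-D C3D input of shape [batch, frames, height, width, channels].
    data_per_replica = tf.split(data, MODEL_REPLICA, axis=1)
    replica_inputs = [tf.squeeze(t, axis=1) for t in data_per_replica]

This also explains the `MODEL_REPLICA * 4096` width of `wout`: one 4096-wide `fully` activation per replica is concatenated before the shared output layer.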
Code example #2
    # TODO: capture current adam learning rate
    
    dropout_placeholder = tf.placeholder(tf.float32, name='dropout_placeholder')
    is_training_placeholder = tf.placeholder(tf.bool, name='is_training_placeholder')
    tf.add_to_collection("dropout", dropout_placeholder)
    tf.add_to_collection("training", is_training_placeholder)
    
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(NUM_GPUS):
            with tf.device('/gpu:%d'%i), tf.name_scope('Tower%d'%i) as scope:
                
                input_placeholder, output_placeholder, epoch_ended_placeholder = queue_input_placeholders()
                
                with tf.variable_scope('model_replicas'):
                    network_output = C3Dmodel.inference(
                        input_placeholder, EXAMPLES_PER_GPU, dropout_placeholder, is_training_placeholder, NUM_CLASSES,
                        collection='network_output')
                xentropy_loss, regularization_loss = C3Dmodel.loss(
                    network_output, output_placeholder,
                    collection='xentropy_loss', scope=scope)
                
                # train_step = C3Dmodel.train(xentropy_loss, 1e-04, global_step, collection='train_step')
    
                train_step = optimizer.minimize(xentropy_loss, global_step=global_step)

def run_training():
    with tf.Session(graph=my_graph,
                    config=tf.ConfigProto(log_device_placement=True,
                                          allow_soft_placement=True)) as sess:
        # with tf.Session(graph=my_graph, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        assert(tf.get_default_graph() == my_graph)
        sess.run(tf.global_variables_initializer())
        my_graph.finalize()
        
        starttime = time.time()
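Both training snippets also reference an `optimizer` and a `global_step` that are created outside the excerpts. A minimal sketch of that setup, assuming an Adam optimizer with the 1e-04 learning rate shown in the commented-out `C3Dmodel.train` call:

    import tensorflow as tf

    my_graph = tf.Graph()
    with my_graph.as_default():
        # Step counter incremented by optimizer.minimize() / apply_gradients().
        global_step = tf.Variable(0, trainable=False, name='global_step')
        # Learning rate taken from the commented-out C3Dmodel.train call; the
        # choice of Adam and its hyperparameters is an assumption.
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-04)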
Code example #3
                    enqueue_op = gpu_queue.enqueue([
                        input_placeholder, output_placeholder,
                        epoch_ended_placeholder
                    ])
                    tf.add_to_collection('enqueue', enqueue_op)
                    close_op = gpu_queue.close(cancel_pending_enqueues=True)
                    tf.add_to_collection('close_queue', close_op)

                data, labels, epoch_ended = gpu_queue.dequeue()

                with tf.variable_scope('model_replicas'):
                    network_output = C3Dmodel.inference(
                        data,
                        EXAMPLES_PER_GPU,
                        dropout_placeholder,
                        is_training_placeholder,
                        NUM_CLASSES,
                        collection='network_output')
                xentropy_loss, regularization_loss = C3Dmodel.loss(
                    network_output,
                    labels,
                    collection='xentropy_loss',
                    scope=scope)

                # train_step = C3Dmodel.train(xentropy_loss, 1e-04, global_step, collection='train_step')
                grads = optimizer.compute_gradients(xentropy_loss)
                tower_gradients.append(grads)
                accuracy_op, accuracy_summary_op = C3Dmodel.accuracy(
                    network_output, labels, collection='accuracy_op')
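Code example #3 only appends each tower's `(gradient, variable)` pairs to `tower_gradients`; the step that combines them is not part of the excerpt. A common sketch of averaging the per-tower gradients and applying them once, using a hypothetical `average_gradients` helper:

    import tensorflow as tf

    def average_gradients(tower_gradients):
        """Average (gradient, variable) pairs across towers, assuming every tower
        reports gradients for the same shared variables in the same order."""
        averaged = []
        for grads_and_vars in zip(*tower_gradients):
            # grads_and_vars holds one (grad, var) pair per tower for one variable.
            grads = tf.stack([g for g, _ in grads_and_vars], axis=0)
            averaged.append((tf.reduce_mean(grads, axis=0), grads_and_vars[0][1]))
        return averaged

    # Applied once, outside the per-GPU loop:
    # train_step = optimizer.apply_gradients(average_gradients(tower_gradients),
    #                                        global_step=global_step)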
Code example #4
import os

import numpy as np
import tensorflow as tf

import C3Dmodel  # project-local model definition (import path assumed)

CKPT = os.getcwd() + "/tf_checkpoints/run-20170828082324"
MAX_INPUT_LENGTH = 3
EVAL_INDXS = np.arange(0, 165, 10)
OFFSET = 16

input_placeholder = tf.placeholder(
    tf.float32,
    [None, TEMPORAL_DEPTH, INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNELS],
    name='input_placeholder')

onehot_label_placeholder = tf.placeholder(tf.float32, [NUM_CLASSES])
with tf.variable_scope('model_replicas'):
    output = C3Dmodel.inference(input_placeholder,
                                -1,
                                1,
                                False,
                                NUM_CLASSES,
                                collection='network_output')
# softmax_output = tf.nn.softmax(output)
saver = tf.train.Saver()

# mean_prediction = tf.reduce_mean(output,0)
# correctly_classified = tf.equal(tf.argmax(mean_prediction), tf.argmax(onehot_label_placeholder))

with tf.Session() as sess:
    # path = tf.train.get_checkpoint_state("./tf_checkpoints")
    # graphsaver = tf.train.import_meta_graph(METAGRAPH)

    for model_indx in EVAL_INDXS:
        saver.restore(sess, CKPT + '/model-{}.ckpt'.format(model_indx))
        print('Loaded model {}'.format(model_indx))
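        # Sketch of one possible evaluation step (not from the original script),
        # in the spirit of the commented-out mean_prediction /
        # correctly_classified lines above. `clips` and `onehot_label` are dummy
        # stand-ins for the real test data.
        clips = np.zeros((MAX_INPUT_LENGTH, TEMPORAL_DEPTH, INPUT_HEIGHT,
                          INPUT_WIDTH, INPUT_CHANNELS), dtype=np.float32)
        onehot_label = np.zeros(NUM_CLASSES, dtype=np.float32)

        logits = sess.run(output, feed_dict={input_placeholder: clips})
        mean_prediction = logits.mean(axis=0)  # average over the clip batch
        correct = np.argmax(mean_prediction) == np.argmax(onehot_label)
        print('Model {}: correctly classified: {}'.format(model_indx, correct))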