Code Example #1
File: back_image.py  Project: yuxiaomu/codes
def model():
    is_training = tf.placeholder(tf.bool, [])
    train_images, train_label = data.get_train_data(batch_size)
    test_images, test_label = data.get_test_data(batch_size)
    x = tf.cond(is_training, lambda:train_images, lambda:test_images)
    y_ = tf.cond(is_training, lambda:train_label, lambda:test_label)
    y_ = tf.cast(y_, tf.int64)
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.crelu,
                        normalizer_fn=slim.batch_norm,
                        weights_regularizer=slim.l2_regularizer(0.005),
                        normalizer_params={'is_training': is_training, 'decay': 0.95}
                        ):
        conv1 = slim.conv2d(x, 39, [1, 1],
                            weights_initializer=tf.truncated_normal_initializer(mean=0.54, stddev=0.24))
        conv2 = slim.conv2d(conv1, 8, [6, 6],
                            weights_initializer=tf.truncated_normal_initializer(mean=0.07, stddev=0.57))
        pool1 = slim.avg_pool2d(conv2, [4,4], stride=4, padding='SAME')
        flatten = slim.flatten(pool1)
        full1 = slim.fully_connected(flatten, 1598,
                                     weights_initializer=tf.truncated_normal_initializer(mean=-0.1043610, stddev=0.40),
                                     biases_initializer=tf.constant_initializer(0.1, dtype=tf.float32))
        logits = slim.fully_connected(full1, 10, activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(mean=-0.244986, stddev=0.477889),
                                      biases_initializer=tf.constant_initializer(0.1, dtype=tf.float32))
        correct_prediction = tf.equal(tf.argmax(logits, 1), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
        cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits)) + regularization_loss
        step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)

#         lr = tf.train.exponential_decay(0.1,
#                                   step,
#                                   550*30,
#                                   0.9,
#                                   staircase=True)
#
#
#         optimizer = tf.train.GradientDescentOptimizer(lr)
        optimizer = tf.train.AdamOptimizer(0.001)
#         lr_summary = tf.summary.scalar('lr', lr)
        train_step = slim.learning.create_train_op(cross_entropy, optimizer, global_step=step)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            cross_entropy = control_flow_ops.with_dependencies([updates], cross_entropy)

        loss_summary = tf.summary.scalar('loss', cross_entropy)
        accuracy_summary = tf.summary.scalar('accuracy', accuracy)
        merge_summary = tf.summary.merge([loss_summary, accuracy_summary])
        return is_training, train_step, step, accuracy, cross_entropy, merge_summary
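
model() only builds the graph and hands back the relevant ops; a minimal sketch of one way they might be driven, assuming data.get_train_data/get_test_data feed batches through TF input queues (the loop below is not part of the original file):

# Hypothetical driver loop, not in the original project.
is_training, train_step, step, accuracy, cross_entropy, merge_summary = model()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Assumption: the data pipeline is queue-based, so queue runners must be started.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(1000):
        _, loss_v = sess.run([train_step, cross_entropy], {is_training: True})
        if i % 100 == 0:
            acc_v = sess.run(accuracy, {is_training: False})
            print('step {}: loss {:.4f}, test accuracy {:.4f}'.format(i, loss_v, acc_v))
    coord.request_stop()
    coord.join(threads)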
Code Example #2
def model():

    x = tf.placeholder(dtype=tf.float32,
                       shape=[batch_size, 32, 32, 1],
                       name='Input')
    y = tf.placeholder(dtype=tf.float32, shape=[batch_size], name='True_Y')
    y = tf.cast(y, tf.int64)
    keep_prob = tf.placeholder(dtype=tf.float32, shape=(), name='dropout')
    is_training = tf.placeholder(tf.bool, shape=())

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.crelu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={
                            'is_training': is_training,
                            'decay': 0.95
                        }):
        h = slim.conv2d(inputs=x,
                        num_outputs=100,
                        kernel_size=2,
                        weights_regularizer=slim.l2_regularizer(0.0010))
        h = slim.conv2d(inputs=h,
                        num_outputs=82,
                        kernel_size=2,
                        weights_regularizer=slim.l2_regularizer(0.0018))
        h = slim.conv2d(inputs=h,
                        num_outputs=100,
                        kernel_size=2,
                        weights_regularizer=slim.l2_regularizer(0.0001))
        h = slim.conv2d(inputs=h,
                        num_outputs=100,
                        kernel_size=2,
                        weights_regularizer=slim.l2_regularizer(0.0001))
        h = slim.max_pool2d(h, kernel_size=2, stride=2)
        flatten = slim.flatten(h)
        full = slim.fully_connected(flatten, 512)
        drop_full = slim.dropout(full, keep_prob)
        with tf.name_scope('accuracy'):
            logits = slim.fully_connected(drop_full, 10, activation_fn=None)
            correct_prediction = tf.equal(tf.argmax(logits, 1), y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        with tf.name_scope('loss'):
            loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=y, logits=logits)) + tf.add_n(
                        tf.losses.get_regularization_losses())
        with tf.name_scope('train'):
            optimizer = tf.train.AdamOptimizer()
            step = tf.get_variable("step", [],
                                   initializer=tf.constant_initializer(0.0),
                                   trainable=False)
            train_op = slim.learning.create_train_op(loss,
                                                     optimizer,
                                                     global_step=step)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            if update_ops:
                updates = tf.group(*update_ops)
                loss = control_flow_ops.with_dependencies([updates], loss)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            train_data, train_label = get_data.get_train_data()
            validate_data, validate_label = get_data.get_test_data()
            epochs = total_epochs
            for current_epoch in range(epochs):
                train_loss_list = []
                train_accu_list = []
                total_length = train_data.shape[0]
                idx = np.arange(total_length)
                np.random.shuffle(idx)
                train_data = train_data[idx]
                train_label = train_label[idx]
                total_steps = total_length // batch_size
                for step in range(total_steps):
                    batch_train_data = train_data[step *
                                                  batch_size:(step + 1) *
                                                  batch_size]
                    batch_train_label = train_label[step *
                                                    batch_size:(step + 1) *
                                                    batch_size]
                    _, loss_v, accuracy_str = sess.run(
                        [train_op, loss, accuracy], {
                            x: batch_train_data,
                            y: batch_train_label,
                            keep_prob: 0.5,
                            is_training: True
                        })
                    train_loss_list.append(loss_v)
                    train_accu_list.append(accuracy_str)
                # evaluate on the held-out test set
                test_length = validate_data.shape[0]
                test_steps = test_length // batch_size
                test_loss_list = []
                test_accu_list = []
                for test_step in range(test_steps):
                    batch_test_data = validate_data[test_step * batch_size:
                                                    (test_step + 1) * batch_size]
                    batch_test_label = validate_label[test_step * batch_size:
                                                      (test_step + 1) * batch_size]
                    loss_v, accuracy_str = sess.run(
                        [loss, accuracy], {
                            x: batch_test_data,
                            y: batch_test_label,
                            keep_prob: 1.0,
                            is_training: False
                        })
                    test_loss_list.append(loss_v)
                    test_accu_list.append(accuracy_str)

                print(
                    '{}, epoch:{}/{}, step:{}/{}, loss:{:.6f}, accu:{:.4f}, test loss:{:.6f}, accu:{:.4f}'
                    .format(datetime.now(), current_epoch, total_epochs,
                            total_steps * current_epoch + step,
                            total_steps * epochs, np.mean(train_loss_list),
                            np.mean(train_accu_list), np.mean(test_loss_list),
                            np.mean(test_accu_list)))
Code Example #3
vocab_size = 6773
embed_size = 200
hidden_size = 200
sentence_len = 7
root_path = os.path.abspath('../')

# Load the vocabulary
vocab_path = root_path + '\\Data\\word_vocab_for_rnn.pkl'
with open(vocab_path, 'rb') as f:
    vocab = pickle.load(f)
wd2Idx = {wd: idx for idx, wd in enumerate(vocab)}
idx2Wd = {idx: wd for idx, wd in enumerate(vocab)}

# Load the test set
test_data_path = root_path + '\\Data\\qtest7'
testline, testvec = get_test_data(test_data_path, wd2Idx, sentence_len)
testbatch = get_test_batch(testvec, BATCH_SIZE, 0)  # get one batch of test data

# Load the network
net7 = Net(sentence_len=sentence_len,
           batch_size=BATCH_SIZE,
           vocab_size=vocab_size,
           embed_size=embed_size,
           hidden_size=hidden_size)
net_path = root_path + '\\Models\\rnn\\rnn7_epoch_1.pth'
net7.load_state_dict(torch.load(net_path))
net7.eval()  # switch the network to evaluation mode (no training)
testbatch = np.array(testbatch)
print(testbatch.shape)

output = net7(testbatch, False)  # output for one test batch
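
The snippet stops at the forward pass; a minimal sketch of decoding the result, assuming output holds per-position scores of shape [batch, sentence_len, vocab_size] (the decoding step below is an assumption, not part of the original file):

# Hypothetical decoding step, based on the shape assumption above.
pred_idx = output.argmax(dim=-1)  # most likely word index at each position
predicted_lines = [''.join(idx2Wd[int(i)] for i in row) for row in pred_idx]
print(predicted_lines[0])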
Code Example #4
vocab_size = 6773
embed_size = 200
hidden_size = 200
sentence_len = 5
root_path = os.path.abspath('../')

# Load the vocabulary
vocab_path = root_path + '\\Data\\word_vocab_for_rnn.pkl'
with open(vocab_path, 'rb') as f:
    vocab = pickle.load(f)
wd2Idx = {wd: idx for idx, wd in enumerate(vocab)}
idx2Wd = {idx: wd for idx, wd in enumerate(vocab)}

# Load the test set
test_data_path = root_path + '\\Data\\qtest5'
testline, testvec = get_test_data(test_data_path, wd2Idx, sentence_len)
testbatch = get_test_batch(testvec, BATCH_SIZE, 0)  # get one batch of test data

# Load the network
net5 = Net(sentence_len=sentence_len,
           batch_size=BATCH_SIZE,
           vocab_size=vocab_size,
           embed_size=embed_size,
           hidden_size=hidden_size)
net_path = root_path + '\\Models\\rnn\\rnn5_epoch_4.pth'
net5.load_state_dict(torch.load(net_path))
net5.eval()  # switch the network to evaluation mode (no training)
testbatch = np.array(testbatch)
print(testbatch.shape)

output = net5(testbatch, False)  # output for one test batch
Code Example #5
File: mainfile.py  Project: yazici/Audio-Vision
#embedding_matrix = prepare_embeddings(vocabulary_size, word_emb_dim, metadata, embedding_matrix_filename, glove_path)
#print(embedding_matrix.shape)
model = san_atten(common_word_emb_dim, img_vec_dim, activation_1, activation_2,
                  dropout, vocabulary_size, num_hidden_units_lstm,
                  max_ques_length, word_emb_dim, num_hidden_layers_mlp,
                  num_hidden_units_mlp, nb_classes, class_activation,
                  filter_sizes, num_attention_layers)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
model.summary()  # print the model's layers, output shapes, and parameter counts

train_X, train_Y = get_train_data(input_img_train_h5, input_ques_h5)

test_X, test_Y, multi_val_y = get_test_data(input_img_test_h5, input_ques_h5,
                                            metadata, val_file)

model.fit(train_X,
          train_Y,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(test_X, test_Y),
          verbose=1)

print("Evaluating Accuracy on validation set:")
metric_vals = model.evaluate(test_X, test_Y)
print("")
for metric_name, metric_val in zip(model.metrics_names, metric_vals):
    print(metric_name, " is ", metric_val)

# Comparing prediction against multiple choice answers
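
The snippet ends at that comment; a minimal sketch of one way such a comparison might look, assuming multi_val_y holds, for each test question, the set of acceptable answer class indices (none of this is taken from the original file):

# Hypothetical multiple-choice check, not part of the original mainfile.py.
pred_classes = np.argmax(model.predict(test_X, batch_size=batch_size), axis=-1)
num_correct = sum(1 for pred, valid in zip(pred_classes, multi_val_y) if pred in valid)
print("Multiple-choice accuracy:", num_correct / len(pred_classes))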
Code Example #6
def get_session(gpu_fraction=0.3):
    """
    This function is to allocate GPU memory a specific fraction
    Assume that you have 6GB of GPU memory and want to allocate ~2GB
    """
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


a, b, c, d = get_test_data()
(X_train, y_train), (X_test, y_test) = (a, c), (b, d)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

KTF.set_session(get_session(0.8))
import numpy as np
np.random.seed(1337)  # for reproducibility

batch_size = 16
nb_classes = 20

nb_epoch = 12
# input image dimensions
Code Example #7
        return np.mean(test_pure_loss_list)

    def max_unpool_2x2(self, x, name):
        # Approximate 2x2 max-unpooling by resizing the feature map to twice
        # its spatial size (a plain image resize, not a true unpool with switches).
        height = x.get_shape()[1].value
        width = x.get_shape()[2].value
        inference = tf.image.resize_images(x, [height * 2, width * 2])
        return inference


if __name__ == '__main__':
    # restrict the job to a single GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    unlabeldata = get_data.get_unlabeled_data()
    train_data, train_label = get_data.get_train_data()
    test_data, test_label = get_data.get_test_data()

    params = {}
    params['unlabel_data'] = unlabeldata
    params['train_data'] = train_data
    params['train_label'] = train_label
    params['test_data'] = test_data
    params['test_label'] = test_label
    params['pop_size'] = 50          # population size
    params['num_class'] = 10         # number of output classes
    params['cae_length'] = 5
    params['x_prob'] = 0.9           # crossover probability
    params['x_eta'] = 20             # crossover distribution index
    params['m_prob'] = 0.1           # mutation probability
    params['m_eta'] = 20             # mutation distribution index
    params['total_generation'] = 50  # number of generations to evolve