Code example #1
x = tf.placeholder("float", [None, n_input]) # array of one_hot_encoded vectors shape=(S, n_input)
y_true = tf.placeholder("float", [None, 3]) # hard label, augmented to the number of lambda functions
# each labeling function lambdas[i] is assumed to map n_input features to 3 classes: shape (n_input, 3)

# Build model distribution and predictor
# num_lambda=10
Alpha = tf.Variable(tf.random_normal([10]))  # one entry per labeling function
Beta = tf.Variable(tf.random_normal([10]))   # one entry per labeling function
Lambda = tf.stack([  # tf.vstack does not exist; tf.stack is presumably meant
    tf.matmul(x, lambdas[0]),  # tf.matmul takes two tensors, not a single product
    tf.matmul(x, lambdas[1]),
    tf.matmul(x, lambdas[2]),
    # ... (lambdas[3] through lambdas[8] elided in the source)
    tf.matmul(x, lambdas[9]),  # with num_lambda=10 the last valid index is 9
])  # -> shape = (10, S, 3)
log_model_distribution = tf.reduce_sum(  # tf.reduce does not exist; tf.reduce_sum assumed
    Alpha*Beta*Lambda[0:1] + (1-Alpha)*Beta*Lambda[1:2] + Beta*Lambda[2:3])
# -> scalar

# y_pred = tf.  <- truncated in the source; a logits tensor of shape (S, 3) is expected here

# Define loss and optimiser
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true)
cost = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Launch the graph
with tf.Session() as sess:
	# Initialise the variables and run
	with tf.device("/cpu:0"):
		init = tf.global_variables_initializer()
		sess.run(init)
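
The session block above only initializes the variables; a minimal training step, assuming hypothetical NumPy arrays train_x and train_y, might look like:

_, c = sess.run([optimizer, cost], feed_dict={x: train_x, y_true: train_y})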
Code example #2
File: model.py  Project: SunNYNO1/pix2pix
def create_model(input_img, target_img):

    epsilon = 1e-12
    # Generator
    output_channels = int(target_img.get_shape()[-1])
    generator = create_generator(input_img, output_channels)

    # Discriminator on the real image
    real_discriminator = create_discriminator(input_img, target_img)

    # Discriminator on the generated image
    fake_discriminator = create_discriminator(generator, target_img)

    # Generator losses
    gen_gan_loss = tf.reduce_mean(-tf.log(fake_discriminator + epsilon))
    gen_l1_loss = tf.reduce_mean(tf.abs(target_img - generator))  # tf.reduce -> tf.reduce_mean
    gen_loss = args.gen_wight * gen_gan_loss + args.l1_wight * gen_l1_loss
    # Discriminator loss
    dis_loss = tf.reduce_mean(-(tf.log(real_discriminator + epsilon) +
                                tf.log(1 - fake_discriminator + epsilon)))

    # Variables the generator needs to update
    gen_vars = [
        var for var in tf.trainable_variables()
        if var.name.startswith('generator')
    ]
    # Variables the discriminator needs to update
    dis_vars = [
        var for var in tf.trainable_variables()
        if var.name.startswith("discriminator")
    ]

    # Generator optimizer
    gen_optimizer = tf.train.AdamOptimizer(0.0002, 0.5)
    # Discriminator optimizer
    dis_optimizer = tf.train.AdamOptimizer(0.0002, 0.5)

    # Use the losses to update only the listed variables.
    # How does this differ from optimizer.minimize()? See the note after this function.
    gen_gradients = gen_optimizer.compute_gradients(gen_loss,
                                                    var_list=gen_vars)
    gen_train = gen_optimizer.apply_gradients(gen_gradients)
    dis_gradients = dis_optimizer.compute_gradients(dis_loss,
                                                    var_list=dis_vars)
    dis_train = dis_optimizer.apply_gradients(dis_gradients)

    # Track exponential moving averages of the losses
    ema = tf.train.ExponentialMovingAverage(decay=0.99)
    update_losses = ema.apply([dis_loss, gen_gan_loss, gen_l1_loss])

    global_step = tf.train.get_or_create_global_step()
    incr_global_step = tf.assign(global_step, global_step + 1)

    return Model(generator=generator,
                 real_discriminator=real_discriminator,
                 fake_discriminator=fake_discriminator,
                 discrim_loss=ema.average(dis_loss),
                 gen_loss_GAN=ema.average(gen_gan_loss),
                 gen_loss_L1=ema.average(gen_l1_loss),
                 gen_gradients=gen_gradients,
                 dis_gradients=dis_gradients,
                 train=tf.group(update_losses, incr_global_step, gen_train))
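
On the question left in the comments above: in TF1, optimizer.minimize(loss, var_list=...) is exactly compute_gradients() followed by apply_gradients(). Splitting the two calls, as this function does, is useful when the gradients should be inspected or transformed before being applied; a hypothetical clipping variant:

gen_gradients = gen_optimizer.compute_gradients(gen_loss, var_list=gen_vars)
# Clip each gradient before applying it (illustrative; not in the source)
clipped = [(tf.clip_by_norm(g, 5.0), v) for g, v in gen_gradients if g is not None]
gen_train = gen_optimizer.apply_gradients(clipped)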
Code example #3
def get_tensor_size(tensor):
    # Python's functools.reduce (tf.reduce does not exist) multiplies
    # the static dimensions of the tensor's shape together.
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
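
A quick usage check; note the shape must be fully defined, or d.value is None:

t = tf.zeros([4, 5])
print(get_tensor_size(t))  # -> 20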
Code example #4
keep_prob: float, the probability that each element is kept, i.e. the
            probability that a neuron is selected; a placeholder at graph
            construction time, given a concrete value at run time
noise_shape: a 1-D int32 tensor describing the shape of the randomly
            generated "keep or drop" flags
seed: int, random seed
name: a name for the operation
'''
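
# The tf.nn.dropout call this docstring belongs to is cut off above; in the
# standard TensorFlow MNIST tutorial it reads (h_fc1 is the preceding
# fully connected layer, so h_fc1_drop below is defined):
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)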

# Output layer
# The fully connected layer's output has size 1024 and the result has size 10,
# so the weight W has shape [1024, 10]
w_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)

# Loss function and optimizer
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=1))  # sum over classes, mean over the batch
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)


# Test accuracy, the same as for the softmax regression model
correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

# Save the training results
savePath = './mnist_conv/'
saveFile = savePath + 'mnist_conv.ckpt'
if not os.path.exists(savePath):
    os.mkdir(savePath)

saver = tf.train.Saver()
....
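
The snippet breaks off at the ellipsis; a minimal save call, assuming an active session sess after training, would be:

saver.save(sess, saveFile)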
Code example #5
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by convolutional, max-pooling and softmax layers.
    """
    def __init__(self, sequence_length, num_classes, vocab_size,
                embedding_size, filter_sizes, num_filters):
        #Placeholders for input, output, and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.int32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W"
            )
            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s"%fiter_size):
                # convolution layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="w")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv"    
                    )
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)
        # combine all the pooled features
        num_filters_total = num_filters*len(filter_sizes)
        self.h_pool = tf.concat(3, pooled_outputs)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
        # scores and predictions
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
            self.loss = tf.reduce_mean(losses)
        # calculate Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
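
A hypothetical instantiation, with illustrative hyperparameters not taken from the source:

cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=18000,
              embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128)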
Code example #6
#tf.print("Y: ", Y, output_stream=sys.stdout)

# Loss function - simple linear distance between output and ideal results
print("      ... loss function ...")
#LossFunction = tf.reduce_sum(np.abs(Output - Y)/NumberOfTestLocations)

# Accumulators (their initialization is not shown in the source)
MeanAngularDeviation = 0.0
RMSAngularDeviation = 0.0
for l in range(0, TrainingBatchSize):
    Real = M.MVector()
    Real.SetMagThetaPhi(1.0, Y[l, 0], Y[l, 1])
    Reconstructed = M.MVector()
    Reconstructed.SetMagThetaPhi(1.0, Output[l, 0], Output[l, 1])
    AngularDeviation = math.degrees(Real.Angle(Reconstructed))
    MeanAngularDeviation += AngularDeviation
    RMSAngularDeviation += math.pow(AngularDeviation, 2)

LossFunction = tf.reduce_sum(MeanAngularDeviation / TrainingBatchSize)  # tf.reduce does not exist; tf.reduce_sum assumed
#LossFunction = tf.reduce_sum(MeanAngularDeviation / (NumberOfTrainingBatches*TrainingBatchSize))

#LossFunction = tf.reduce_sum(tf.pow(Output - Y, 2))/NumberOfTestLocations
#LossFunction = tf.losses.mean_squared_error(Output, Y)

# Minimizer
print("      ... minimizer ...")
Trainer = tf.train.AdamOptimizer().minimize(LossFunction)

# Session configuration
print("      ... configuration ...")
Config = tf.ConfigProto()
Config.gpu_options.allow_growth = True

# Create and initialize the session
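The snippet ends at the comment; a minimal continuation that uses the Config defined above would be (the variable name Session is an assumption):

Session = tf.Session(config=Config)
Session.run(tf.global_variables_initializer())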
Code example #7
def attnmask(e):
    # Zero out attention on padded encoder positions, then renormalize
    # so each row of the attention distribution sums to 1 again.
    attndist = tf.nn.softmax(e)  # nn.softmax -> tf.nn.softmax
    attndist *= encodepadding    # 0/1 padding mask over encoder positions
    mask = tf.reduce_sum(attndist, axis=1)  # tf.reduce -> tf.reduce_sum
    return attndist / tf.reshape(mask, [-1, 1])
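
Here encodepadding is assumed to be a batch of 0/1 masks with the same shape as e; a small illustrative call:

e = tf.constant([[2.0, 1.0, 0.5]])
encodepadding = tf.constant([[1.0, 1.0, 0.0]])  # last position is padding
masked = attnmask(e)  # rows renormalized over the unpadded positions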
Code example #8
def get_var_count(self):
    # Python's functools.reduce (tf.reduce does not exist) multiplies the
    # static dimensions of each variable, giving the total parameter count.
    from functools import reduce
    count = 0
    for v in list(self.var_dict.values()):
        count += reduce(lambda x, y: x * y, v.get_shape().as_list())
    return count
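
For comparison, an equivalent one-liner using NumPy's prod over the static shapes (var_dict stands in for the self.var_dict attribute above):

import numpy as np
count = sum(int(np.prod(v.get_shape().as_list())) for v in var_dict.values())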