Example #1
def build(input_var, type='residual', n=1, num_filters=8, num_class=10, feat_dim=60, max_length=100):
    # input_var: symbolic input variable for the network
    # type: which architecture to build ('vgg16' or 'residual')
    # n: depth parameter for the residual network
    # num_filters: number of filters in the first convolutional block
    if type == 'vgg16':
        import vgg16
        network = vgg16.build()
    elif type == 'residual':
        import residual_network
        network = residual_network.build_cnn(input_var, n=n, num_filters=num_filters,
                                             cudnn='no', num_class=num_class,
                                             feat_dim=feat_dim, max_length=max_length)
    else:
        raise ValueError("unknown network type: %s" % type)
    return network
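A minimal usage sketch for this helper, assuming a Theano/Lasagne environment (the tensor shape and variable name below are illustrative, not from the source):

import theano.tensor as T

# 4-D input: (batch, channels, feat_dim, max_length) -- illustrative layout
input_var = T.tensor4('inputs')
network = build(input_var, type='residual', n=1, num_filters=8)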
Example #2
from keras.layers import concatenate
from keras.models import Model
# layers.VGGNormalize, add_style_loss, and add_content_loss are project-local helpers.

def loss_net(res_net, width, height, style_image_path, content_weight,
             style_weight):
    # Stack the transform net's output with its input so VGG sees both the
    # stylized image and the original in a single batch.
    x = concatenate([res_net.output, res_net.input], axis=0)
    x = layers.VGGNormalize(name="vgg_normalize")(x)
    vgg = vgg16.build(include_top=False, input_tensor=x)

    vgg_output_dict = dict([(layer.name, layer.output)
                            for layer in vgg.layers[-18:]])
    vgg_layers = dict([(layer.name, layer) for layer in vgg.layers[-18:]])

    if style_weight > 0:
        add_style_loss(vgg, style_image_path, vgg_layers, vgg_output_dict,
                       width, height, style_weight)
    if content_weight > 0:
        add_content_loss(vgg_layers, vgg_output_dict, content_weight)

    # Freeze VGG: it serves only as a fixed feature extractor for the losses.
    vgg.trainable = False
    for layer in vgg.layers:
        layer.trainable = False

    # Rewire: the transform net followed by the (frozen) VGG layers.
    st_input = res_net.input
    x = res_net(st_input)
    for i in range(1, len(vgg.layers)):
        x = vgg.layers[i](x)
    model = Model(st_input, x)
    return model
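A hedged usage sketch: assuming res_net is a Keras Model that maps an image batch to a stylized batch of the same shape, and that training uses a dummy loss because the style/content terms are already attached inside the graph. The sizes, paths, and weights below are illustrative:

from keras import backend as K

model = loss_net(res_net, width=256, height=256,
                 style_image_path='style.jpg',
                 content_weight=1.0, style_weight=5.0)
# The style/content losses live inside the graph, so a dummy loss that
# simply averages the network output is enough to drive training.
model.compile(optimizer='adam', loss=lambda y_true, y_pred: K.mean(y_pred))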
Example #3
# Shuffled batches for training
train_batch, train_label_batch = tf.train.shuffle_batch(
    [train_img, train_label],
    batch_size=batch_size,
    capacity=capacity,
    min_after_dequeue=min_after_dequeue)

test_batch, test_label_batch = tf.train.batch([test_img, test_label],
                                              batch_size=batch_size,
                                              capacity=capacity)

# ----------------- Build the network -----------------
# Placeholders
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None], name='y_')
# train_mode = tf.placeholder(tf.bool)

# vgg = vgg16.Vgg16()
logits = vgg16.build(x)
# print(vgg16.get_var_count())

# --------------------------- End of network ---------------------------

# Exponentially decaying learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(learn_rate_create,
                                           global_step,
                                           100,
                                           decay_rate=0.98,
                                           staircase=True)
with tf.name_scope('loss'):
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
    tf.summary.scalar('loss', loss)
# Pass global_step so each update advances the decay schedule.
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
    loss, global_step=global_step)
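Because this snippet uses TF1.x queue-based input pipelines, the session must start the queue runners before training. A minimal training-loop sketch, assuming TF1.x graph mode; num_steps and the fetched names are illustrative:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(num_steps):  # num_steps assumed defined elsewhere
            imgs, labels = sess.run([train_batch, train_label_batch])
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={x: imgs, y_: labels})
    finally:
        coord.request_stop()
        coord.join(threads)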
Example #4
def build_encoder(input_height, input_width, input_var):
    # Build a VGG16 encoder for the given input size and load pretrained weights.
    encoder = vgg16.build(input_height, input_width, input_var)
    set_pretrained_weights(encoder)
    return encoder
Example #5
def build_encoder():
    # Builds the VGG16 encoder and loads pretrained weights.
    # set_pretrained_weights also freezes the first 3 layers (20 parameters),
    # so train.py must not backpropagate the loss into the 3rd layer.
    encoder = vgg16.build()
    set_pretrained_weights(encoder)
    return encoder
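set_pretrained_weights itself is not shown in these examples. A hypothetical sketch of what it might do, assuming a Lasagne-style model (suggested by the input_var signature in the neighboring examples) and the standard vgg16.pkl weights file from Lasagne Recipes; the path and n_frozen default are assumptions:

import pickle
import lasagne

def set_pretrained_weights(encoder, weights_path='vgg16.pkl', n_frozen=3):
    # Load saved parameter values into the network.
    with open(weights_path, 'rb') as f:
        values = pickle.load(f)['param values']
    lasagne.layers.set_all_param_values(encoder, values)
    # Drop the 'trainable' tag on the early layers so the optimizer's
    # update step ignores their parameters.
    for layer in lasagne.layers.get_all_layers(encoder)[:n_frozen]:
        for param in layer.params:
            layer.params[param].discard('trainable')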
Example #6
def build_encoder(net, input_height, input_width):
    # Pretrained weights are intentionally left unset here (see the
    # commented-out call below).
    encoder = vgg16.build(None, input_height, input_width, connect=False)
    # set_pretrained_weights(encoder)
    return encoder