Example #1
            i = (i + 1) % n
        yield (np.array(X_train), np.array(Y_train))
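
# The two lines above are the tail of this example's training-data generator.
# Below is a minimal self-contained sketch of such a generator: the function
# name, the random stand-in data, and the (HEIGHT/2 * WIDTH/2, n_classes)
# label layout are assumptions for illustration, not code from the original.
import numpy as np


def generate_batches(lines, batch_size, height, width, n_classes):
    """Cycle through `lines` forever, yielding one (images, labels) batch at a time."""
    n = len(lines)
    i = 0
    while True:
        X_train, Y_train = [], []
        for _ in range(batch_size):
            name = lines[i].strip()
            # Real code would load and preprocess the image/label pair that
            # `name` refers to; random arrays stand in so the sketch runs.
            X_train.append(np.random.rand(height, width, 3))
            Y_train.append(np.random.randint(0, 2, size=((height // 2) * (width // 2), n_classes)))
            i = (i + 1) % n
        yield (np.array(X_train), np.array(Y_train))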


def loss(y_true, y_pred):
    crossloss = K.binary_crossentropy(y_true, y_pred)
    # The multiply/divide turns the summed cross-entropy into an average,
    # i.e. roughly the cross-entropy per pixel.
    loss = 4 * K.sum(crossloss) / HEIGHT / WIDTH
    return loss
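

# A quick standalone check of the custom loss above on random tensors (a
# sketch, not part of the original example): the batch size of 1, the
# flattened (HEIGHT/2 * WIDTH/2, NCLASSES) layout and the use of K.eval are
# assumptions for illustration.
def check_loss():
    shape = (1, (HEIGHT // 2) * (WIDTH // 2), NCLASSES)
    y_true = K.constant(np.random.randint(0, 2, size=shape).astype('float32'))
    y_pred = K.constant(np.random.rand(*shape).astype('float32'))
    print(K.eval(loss(y_true, y_pred)))  # prints a single scalar loss value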


if __name__ == "__main__":
    log_dir = "logs/"
    # Build the segmentation model
    model = convnet_segnet(n_classes=NCLASSES,
                           input_height=HEIGHT,
                           input_width=WIDTH)
    # model.summary()
    # Download the ImageNet-pretrained VGG16 weights (without the top classifier)
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
    weights_path = get_file(
        'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
        WEIGHTS_PATH_NO_TOP,
        cache_subdir='models')

    # by_name=True loads only the layers whose names match the VGG16 encoder
    model.load_weights(weights_path, by_name=True)
    # Open the dataset's txt file
    with open(r"/SegNet_Conv2/dataset2/train.txt", "r") as f:
        lines = f.readlines()

    # Shuffle the lines; this txt file is only used to tell the training
    # code which samples to read, and shuffled data trains better.
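
    # The original example is truncated here. The rest is a minimal sketch of
    # how the shuffled lines, the generator sketched near the top (which here
    # yields random placeholder data), and the custom loss would typically be
    # wired together; the split ratio, batch size, optimizer and epoch count
    # are assumptions, not values from the original.
    np.random.shuffle(lines)

    # hold out 10% of the lines for validation (assumed ratio)
    num_val = int(len(lines) * 0.1)
    num_train = len(lines) - num_val
    batch_size = 4

    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    model.fit_generator(
        generate_batches(lines[:num_train], batch_size, HEIGHT, WIDTH, NCLASSES),
        steps_per_epoch=max(1, num_train // batch_size),
        validation_data=generate_batches(lines[num_train:], batch_size, HEIGHT, WIDTH, NCLASSES),
        validation_steps=max(1, num_val // batch_size),
        epochs=20)
    model.save_weights(log_dir + 'last_weights.h5')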

Example #2

from nets.segnet import convnet_segnet

model = convnet_segnet(2, input_height=416, input_width=416)
model.summary()
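
# A quick smoke test (not part of the original example): push one random
# image through the freshly built network and inspect the prediction shape.
# The three-channel, channels-last input and the half-resolution, flattened
# output shape mentioned in the comment are assumptions about convnet_segnet.
import numpy as np

dummy = np.random.rand(1, 416, 416, 3).astype('float32')
pred = model.predict(dummy)
print(pred.shape)  # expected to look like (1, 208 * 208, 2)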