Code example #1
def visualize(args):
    testimg_dir = args.testimg_dir
    testmsk_dir = args.testmsk_dir
    test_list = range(args.test_size)

    file1 = open(args.save_dir + "recent.txt", "r")
    args.weight = file1.read()
    file1.close()

    try:
        model = segnet(args.input_shape, args.n_labels, args.kernel,
                       args.pool_size, args.output_mode, int(args.weight[-1]))
        model.load_weights(args.save_dir + args.weight + ".hdf5")
        print(args.save_dir + args.weight + ".hdf5")
    except (OSError, ValueError):
        print('Could not load model weights from ' + args.save_dir + args.weight + '.hdf5')
    else:
        for i in test_list:
            img = []

            # If a grayscale image is used as input instead:
            # original_img = cv2.imread(testimg_dir + str(i) + ".jpg", 0)
            # resized_img = cv2.resize(original_img, (args.input_shape[0], args.input_shape[1]))
            # imag = np.stack([resized_img, resized_img, resized_img], axis=2)
            # array_img = img_to_array(imag) / 255
            # img.append(imag)

            original_img = cv2.imread(testimg_dir + str(i) + ".jpg")
            resized_img = cv2.resize(
                original_img, (args.input_shape[0], args.input_shape[1]))
            array_img = img_to_array(resized_img) / 255
            img.append(resized_img)

            array_img = np.expand_dims(array_img, axis=0)
            output = model.predict(array_img)
            mask = cv2.imread(testmsk_dir + str(i) + ".jpg")
            resized_mask = cv2.resize(
                mask * 255, (args.input_shape[0], args.input_shape[1]),
                interpolation=cv2.INTER_NEAREST)
            img.append(resized_mask)
            resized_image = np.reshape(
                output[0] * 255,
                (args.input_shape[0], args.input_shape[1], args.n_labels))
            img.append(resized_image[:, :, 1])
            fig = plt.figure()
            ax = []
            for j in range(3):
                ax.append(fig.add_subplot(1, 3, j + 1))
                if j == 0:
                    plt.imshow(img[j])
                else:
                    plt.imshow(img[j], cmap='gray', vmin=0, vmax=255)

            ax[0].title.set_text("Original Image")
            ax[1].title.set_text("Ground Truth")
            ax[2].title.set_text("Predicted Mask")

            fig.savefig(args.save_dir + "images/" + str(i) + ".png",
                        dpi=fig.dpi)
            plt.close('all')
Code example #2
File: test.py Project: FCeruzzi/segnet
def main(args):
    # set the necessary list
    test_file = open(args.test_list, "r")
    test_list = test_file.readlines()

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=Adam(lr=0.0001),
                  metrics=["accuracy"])

    print("Load Model: " + args.resume)
    model.load_weights(args.resume)

    list_images = image_list(test_list)

    size = (args.input_shape[0], args.input_shape[1])

    for image in list_images:
        print(image)
        original_img = cv2.imread(image)[:, :, ::-1]
        converted_img = convert_image(original_img, size)
        result = model.predict(converted_img)[0]
        colored_image = color_image(result, size, args.n_labels, color_table)
        last_slash = image.rfind('/')
        name = image[last_slash + 1:]
        cv2.imwrite(args.save_dir + "/" + name, colored_image)
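Note: image_list, convert_image and color_image are helper functions defined elsewhere in the FCeruzzi/segnet project and are not shown in this excerpt. A minimal sketch of what the two image helpers plausibly do (hypothetical implementations for illustration, not the project's actual code):

import cv2
import numpy as np


def convert_image(original_img, size):
    # Hypothetical: resize to the model input size, scale to [0, 1], add a batch axis.
    resized = cv2.resize(original_img, size)
    array = resized.astype(np.float32) / 255.0
    return np.expand_dims(array, axis=0)  # shape (1, H, W, 3)


def color_image(result, size, n_labels, color_table):
    # Hypothetical: map per-pixel class probabilities to a color image via color_table.
    labels = np.argmax(result.reshape(size[0], size[1], n_labels), axis=-1)
    colored = np.zeros((size[0], size[1], 3), dtype=np.uint8)
    for label in range(n_labels):
        colored[labels == label] = color_table[label]
    return colored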
Code example #3
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])
    model.fit_generator(train_gen,
                        steps_per_epoch=args.epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=args.val_steps)

    model.save_weights(args.save_dir + str(args.n_epochs) + ".hdf5")
    print("sava weight done..")
Code example #4
def train(args):
    train_list = range(args.train_size)
    val_list = range(args.val_size)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode, args.model_number)
    print(model.summary())

    with open(args.save_dir + 'model' + str(args.model_number) + '.txt',
              'w') as fh:
        model.summary(print_fn=lambda x: fh.write(x + '\n'))

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])

    print("Model Compiled")
    tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
    model.fit_generator(train_gen,
                        steps_per_epoch=args.epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=args.val_steps,
                        callbacks=[tensorboard])
    model.save(args.save_dir + args.new_model_name + ".hdf5")
    print("Model Saved")

    K.set_learning_phase(0)
    session = K.get_session()
    # NOTE: do not re-run tf.global_variables_initializer() here;
    # that would overwrite the trained weights before the graph is frozen.
    frozen_graph = freeze_session(
        session, output_names=[out.op.name for out in model.outputs])
    print([out.op.name for out in model.outputs])
    tf.train.write_graph(frozen_graph,
                         args.save_dir,
                         args.new_model_name + ".pb",
                         as_text=False)
    session.close()
    file1 = open(args.save_dir + "recent.txt", "w+")
    file1.write(args.new_model_name)
    file1.close()
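Note: freeze_session is not defined in this excerpt. A commonly used TF1-era helper with this call signature looks roughly like the sketch below (an assumption, not necessarily the author's version):

import tensorflow as tf


def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    # Convert the variables in the session's graph to constants so the graph can be
    # written out as a standalone .pb file (TF1-style freezing).
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        return tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names or [], freeze_var_names)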
Code example #5
def main(args):
    # set the necessary list
    train_file = open(args.train_list, "r")
    train_list = train_file.readlines()
    random.shuffle(train_list)

    val_file = open(args.val_list, "r")
    val_list = val_file.readlines()

    test_file = open(args.test_list, "r")
    test_list = test_file.readlines()

    train_gen = data_gen_small(
        train_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    val_gen = data_gen_small(
        val_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=Adam(lr=0.001),
                  metrics=["accuracy"])

    if args.resume:
        print("Load Model: " + args.resume)
        model.load_weights(args.resume)

    model.fit_generator(
        train_gen,
        steps_per_epoch=args.epoch_steps,
        epochs=args.n_epochs,
        validation_data=val_gen,
        validation_steps=args.val_steps,
    )

    model.save_weights(args.save_dir + str(args.n_epochs) + ".hdf5")
    print("save weight done..")

    test_gen = test_data_generator(
        test_list,
        #args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    model.evaluate(test_gen)
Code example #6
File: test.py Project: templeblock/tf-keras-SegNet
def main(args):
    # set the necessary list i.e. list of names of images to be read
    test_list = pd.read_csv(args.test_list, header=None)

    # set the necessary directories
    testimg_dir = args.testimg_dir
    testmsk_dir = args.testmsk_dir

    # Generate batch data for SGD
    # NOTE: This helps control the batch size for each test set
    test_gen = data_gen_small(testimg_dir, testmsk_dir, test_list,
                              args.batch_size,
                              [args.input_shape[0], args.input_shape[1]],
                              args.n_labels)

    # Create a model
    if args.model == 'unet':
        model = unet(args.input_shape, args.n_labels, args.kernel,
                     args.pool_size, args.output_mode, args.gpus)
    elif args.model == 'segnet':
        model = segnet(args.input_shape, args.n_labels, args.kernel,
                       args.pool_size, args.output_mode, args.gpus)
    elif args.model == 'segunet':
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool_size, args.output_mode, args.gpus)

    # Configure the model for evaluation
    optimizer = Adadelta(
        lr=1.0, rho=0.95, epsilon=None, decay=0.01
    )  # NOTE: the optimizer settings don't matter here because the model is not being trained
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])

    # Set model trained weights
    model.load_weights(args.model_weights)

    # Keras model summary, for confirmation
    print(model.summary())

    # Test the model on data generated batch-by-batch by a Python generator
    # NOTE: evaluate_generator is used because the data comes from a generator that yields fixed-size batches
    tensorboard = TensorBoard(log_dir=args.save_dir)
    eval_start_time = time()
    model.evaluate_generator(test_gen,
                             steps=args.epoch_steps,
                             verbose=args.verbose,
                             callbacks=[tensorboard])
    print('### Model evaluation time (s): ', time() - eval_start_time)
Code example #7
def main():
    #test_list = pd.read_csv('gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/test_2.csv')
    test_list = pd.read_csv('gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/test.csv')
    testing_img_dir = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/test_partition_images/'
    weights_path = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/650.hdf5'
    PRED_FOLDER = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/'
    img_path = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/test_partition_images/full_color_training_1.png'

    batch_size = 10
    input_shape = (256,256,3)
    print('test_list')
    print(test_list)

    test_gen = data_gen_small(
        testing_img_dir,
        test_list,
        batch_size,
        #[args.input_shape[0], args.input_shape[1]],
        (input_shape[0], input_shape[1]),
    )

    n_labels = 3
    kernel = 3
    pool_size = (2,2)
    output_mode = 'softmax'
    model = segnet(
        input_shape, n_labels, kernel, pool_size, output_mode
    )

    
    model.load_weights(weights_path)

    '''
    original_img = cv2.imread(img_path)[:, :, ::-1]
    #print(original_img.shape)
    #resized_img = cv2.resize(original_img, dims + [3])
    resized_img = cv2.resize(original_img, (256,256) )
    array_img = img_to_array(resized_img) / 255

    output = model.predict(np.array([array_img]))

    image = np.reshape(output, (256,256,3))
    p = np.argmax(image, axis=-1)
    p = np.expand_dims(p, axis=-1)
    p = p * (255/3)
    p = p.astype(np.int32)
    print(p.shape)
    p = np.concatenate([p, p, p], axis=2)
    '''

    '''
    image_2 = visualize(np.argmax(image,axis=-1).reshape((256,256)), False)
    print(image_2.shape)
    print(image_id)
    print(np.unique(image_2))
    '''

    '''
    pred_dir = '4_epochs_predictions'
    name = 'img_1' + '.png'
    cv2.imwrite(os.path.join(PRED_FOLDER, pred_dir, name), p)
    '''


    imgs_mask_test = model.predict_generator(
        test_gen,
        steps=np.ceil(len(test_list) / batch_size)-1)


    '''
    #original_img = cv2.imread(img_path)[:, :, ::-1]
    original_img = np.array([cv2.imread(img_path)])
    #nop = np.array([None])
    #original_img = np.append(nop, original_img)
    print(original_img.shape)
    output = model.predict(original_img)
    pred = visualize(np.argmax(output[0],axis=1).reshape((256,256)), False)
    print(pred.shape)
    #plt.imshow(pred)
    #plt.show()
    cv2.imwrite(os.path.join('gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/img_2_300_epoch.png'), output)
    #cv2.imwrite(os.path.join('gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/img_50_epoch.png'), pred)
  
    '''

    pred_dir = '650_epochs_predictions'
    if not os.path.exists(os.path.join(PRED_FOLDER, pred_dir)):
        os.mkdir(os.path.join(PRED_FOLDER, pred_dir))


    for image, image_id in zip(imgs_mask_test, test_list.iloc()):
        #image = (image[:, :, 0]).astype(np.float32)
        #print(image_id)
        #print(image.shape)
        #print(np.unique(image[:,0] == image[:,1]))
        image = np.reshape(image, (256,256,3))
        #print('Image shape')
        #print(image.shape)
        #print(np.unique(image))
        #print(np.argmax(image,axis=-1))
        #print(np.unique((np.argmax(image,axis=-1))))
        #print('Here')
        p = np.argmax(image, axis=-1)
        p = np.expand_dims(p, axis=-1)
        p = p * (255/3)
        p = p.astype(np.int32)
        #print(p.shape)
        p = np.concatenate([p, p, p], axis=2)
        #image_2 = visualize(np.argmax(image,axis=-1).reshape((256,256)), False)
        #print(image_2.shape)
        #print(image_id)
        #print(np.unique(image_2))
        #print(image_id[0])
        if image_id[0] % 50 == 0:
            print(image_id)
        name = str(image_id[0]) + '.png'
        cv2.imwrite(os.path.join(PRED_FOLDER, pred_dir, name), p)

    print("Saving predicted cloud masks on disk... \n")
Code example #8
    def reset(self, use_residual=False, use_argmax=True):
        self.n_classes = 2
        self.model = segnet(input_shape=(768, 768, 3), n_labels=self.n_classes,
                            kernel=3, pool_size=(2, 2), output_mode="softmax",
                            use_residual=use_residual, use_argmax=use_argmax)
Code example #9
File: train.py Project: templeblock/tf-keras-SegNet
def main(args):
    # set the necessary list i.e. list of names of images to be read
    train_list = pd.read_csv(args.train_list, header=None)
    train_list_len = len(train_list)
    epoch_steps = int(train_list_len / args.batch_size)
    val_list = pd.read_csv(args.val_list, header=None)
    val_list_len = len(val_list)
    val_steps = int(val_list_len / args.batch_size)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # Generate batch data for SGD
    # NOTE: This helps control the batch size for each training and validation set
    train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                               args.batch_size,
                               [args.input_shape[0], args.input_shape[1]],
                               args.n_labels)
    val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list, args.batch_size,
                             [args.input_shape[0], args.input_shape[1]],
                             args.n_labels)

    # Create a model
    if args.model == 'unet':
        model = unet(args.input_shape, args.n_labels, args.kernel,
                     args.pool, args.output_mode, args.gpus)
    elif args.model == 'segnet':
        model = segnet(args.input_shape, args.n_labels, args.kernel,
                       args.pool, args.output_mode, args.gpus)
    elif args.model == 'segunet':
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool, args.output_mode, args.gpus)
    elif args.model == 'fastnet':
        model = fastnet(args.input_shape, args.n_labels, args.kernel,
                        args.pool, args.output_mode, args.gpus)

    # Keras model summary
    print(model.summary())

    # Configure the model for training
    optimizer = Adadelta(lr=args.lr,
                         rho=args.rho,
                         epsilon=args.epsilon,
                         decay=args.decay)
    model.compile(loss=args.loss, optimizer=optimizer, metrics=[args.metrics])
    # model.compile(loss=args.loss, optimizer=args.optimizer, metrics=[args.metrics])

    # If pre-trained weights available, use those
    if (args.initial_weights):
        model.load_weights(args.initial_weights)
        print('Initial weights loaded')

    # Generate log for tensorboard (currently only useful parameters are added to the dir name)
    log_dir = (args.save_dir +
               'model_' + args.model +
               '_batch_size_' + str(args.batch_size) +
               '_epoch_steps_' + str(epoch_steps) +
               '_n_epochs_' + str(args.n_epochs) +
               '_lr_' + str(args.lr) +
               '_decay_' + str(args.decay) +
               '_labels_' + str(args.n_labels) +
               '/')
    tensorboard = TensorBoard(log_dir=log_dir)

    # Generate checkpoints
    checkpoint = ModelCheckpoint(
        log_dir + 'weights_at_epoch_{epoch:03d}.hdf5',
        verbose=1,
        save_best_only=False,
        save_weights_only=True,
        period=args.period)  # Save a checkpoint every args.period epochs

    # Train the model on data generated batch-by-batch by a Python generator
    # NOTE: We use fit_generator because we do provide our generated dataset with specific batch size
    fit_start_time = time()
    model.fit_generator(train_gen,
                        steps_per_epoch=epoch_steps,
                        epochs=args.n_epochs,
                        validation_data=val_gen,
                        validation_steps=val_steps,
                        verbose=args.verbose,
                        callbacks=[checkpoint, tensorboard])
    print('### Model fit time (s): ', time() - fit_start_time)

    # # NOTE: Cannot save the whole model OR the model structure if the model is not Keras Sequential;
    # # it throws "AttributeError: 'NoneType' object has no attribute 'update'".
    # # This is a bug in Keras (not sure about TensorFlow).
    # # Therefore, the model structure must be rebuilt in code every time it is needed outside of this script.
    # model.save(log_dir + 'model.hdf5') # Model architecture and final weights

    # Save final weights
    model.save_weights(log_dir + 'weights_at_epoch_%03d.hdf5' %
                       (args.n_epochs))
    print('Final model weights saved.')
Code example #10
File: test.py Project: benlin131020/SegNet
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from model import segnet, cal_loss, cal_accuracy
from data_loader import get_train_iterator, get_test_iterator
from parameter import *

iterator = get_train_iterator()

with tf.Session() as sess:
    sess.run(iterator.initializer)
    X, Y = iterator.get_next()
    X.set_shape([None, INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNEL])
    Y.set_shape([None, INPUT_HEIGHT * INPUT_WIDTH, NUM_CLASSES])
    logits, prediction = segnet(X, True)
    cross_entropy_loss = cal_loss(logits, Y)
    accuracy = cal_accuracy(prediction, Y)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    sess.run(init)
    saver.restore(sess, MODEL_PATH)
    image, pred, loss, acc = sess.run(
        [X, prediction, cross_entropy_loss, accuracy])
    image = image.astype(int)
    for y in range(INPUT_HEIGHT):
        for x in range(INPUT_WIDTH):
            if pred[0, y, x, 0] < pred[0, y, x, 1]:
                image[0, y, x, 0] = 255
                image[0, y, x, 1] = 0
                image[0, y, x, 2] = 0
    print("loss:{:.9f}".format(loss), "accuracy:{:.9f}".format(acc))
Code example #11
def main():
    model = segnet('./build.json')
    model.train('train_eval.txt', '../tgtdir/', get_batch)
Code example #12
def main(args):
    # set the necessary list
    #train_list = pd.read_csv(args.train_list, header=None)
    train_list = pd.read_csv(
        'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/train.csv'
    )
    #val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    #trainimg_dir = args.trainimg_dir
    trainimg_dir = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/train_partition_images/'
    trainmsk_dir = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/multiclassification/data/SequoiaMulti_30/train_partition_ground_truth/'
    #trainmsk_dir = args.trainmsk_dir
    #valimg_dir = args.valimg_dir
    #valmsk_dir = args.valmsk_dir

    train_gen = data_gen_small(
        trainimg_dir,
        trainmsk_dir,
        train_list,
        args.batch_size,
        #[args.input_shape[0], args.input_shape[1]],
        (args.input_shape[0], args.input_shape[1]),
        args.n_labels,
    )
    '''
    val_gen = data_gen_small(
        valimg_dir,
        valmsk_dir,
        val_list,
        args.batch_size,
        [args.input_shape[0], args.input_shape[1]],
        args.n_labels,
    )
    '''

    model = segnet(args.input_shape, args.n_labels, args.kernel,
                   args.pool_size, args.output_mode)
    print(model.summary())

    model.compile(loss=args.loss,
                  optimizer=args.optimizer,
                  metrics=["accuracy"])

    additional = 50
    train_resume = True
    weights_path = 'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/' + str(
        600) + ".hdf5"
    print(weights_path)
    if train_resume:
        model.load_weights(weights_path)
        print("\nTraining resumed...")
    else:
        print("\nTraining started from scratch... ")

    model.fit_generator(
        train_gen,
        steps_per_epoch=args.epoch_steps,
        epochs=args.n_epochs,
        #validation_data=val_gen,
        #validation_steps=args.val_steps,
    )

    model.save_weights(
        'gdrive/My Drive/Research/TTLAB/AI_For_Earth/Problem_2/test_2/' +
        str(650) + ".hdf5")
    print("sava weight done..")
Code example #13
#-- Validation Dataset --#
dataset['val'] = dataset['val'].map(load_image_test)
dataset['val'] = dataset['val'].repeat()
dataset['val'] = dataset['val'].batch(BATCH_SIZE)
dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

print(dataset['train'])
print(dataset['val'])

for image, mask in dataset['train'].take(1):
    sample_image, sample_mask = image, mask

# display_sample([sample_image[0], sample_mask[0]])

m = segnet(input_shape=(128, 128, 3), n_class=45)

m.compile(optimizer=tf.keras.optimizers.Adam(LR),
          loss="sparse_categorical_crossentropy",
          metrics=metrics)

callbacks = [
    ModelCheckpoint("files/model.h5"),
    ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=3),
    CSVLogger("files/data.csv"),
    TensorBoard(),
    EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=False)
]

STEPS_PER_EPOCH = TRAINSET_SIZE // BATCH_SIZE
VALIDATION_STEPS = VALSET_SIZE // BATCH_SIZE
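Note: the excerpt stops just before the training call. A hedged continuation using the objects defined above would look like this (EPOCHS is hypothetical and not part of the excerpt):

EPOCHS = 20  # hypothetical value, not defined in the excerpt

history = m.fit(dataset['train'],
                epochs=EPOCHS,
                steps_per_epoch=STEPS_PER_EPOCH,
                validation_data=dataset['val'],
                validation_steps=VALIDATION_STEPS,
                callbacks=callbacks)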
Code example #14
                            print("VALIDATION DATA SIZE", val_images.shape[0])
                            print("TEST DATA SIZE", test_images.shape[0])
                            print(
                                "****************************************************************************************************************"
                            )

                            # *********************** Setup model ********************

                            input_size = train_images.shape[1:4]

                            if whichmodel == "param_unet":
                                model = param_unet(input_size, filters, levels,
                                                   dropout_rate)

                            if whichmodel == "segnet":
                                model = segnet(input_size, 3, dropout_rate)

                            # *********************************** Train Model **********************************************

                            gray_image = False
                            gray_counter = 0
                            loss_counter = 0
                            best_val_loss = 1
                            acc = []
                            val_acc = []
                            loss = []
                            val_loss = []
                            for i in range(maxepochs):
                                print("-------------------- Epoch #", i + 1,
                                      "--------------------")
                                start_time = time.time()
Code example #15
        print("Building model.")
        input_shape = (256, 256, 3)

        if name_model[i] == "UNet":

            model_select = model.Unet(size=input_shape)  # build a new model
            model_select.load_weights(
                ".\\result\\model_record\\20210118_256_49461_UNet_CE.h5"
            )  # load the existing trained weights
            batch = 10
            train_flag = 0
        elif name_model[i] == "SegNet":
            continue
            model_select = model.segnet(input_shape=input_shape,
                                        n_labels=1,
                                        kernel=3,
                                        pool_size=(2, 2),
                                        output_mode="sigmoid")
            model_select.load_weights(
                ".\\result\\model_record\\20210118_256_49461_SegNet_CE.h5")
            batch = 10
            train_flag = 0
        elif name_model[i] == "DV3":
            continue
            model_select = DV3.Deeplabv3(weights=None,
                                         input_shape=input_shape,
                                         classes=1)
            model_select.load_weights(
                ".\\result\\model_record\\20210118_256_49461_" +
                name_model[i] + "_CE.h5")
            batch = 3