Code Example #1
def train():
    (_, inputs) = load_images(DIR_INPUTS, INPUT_IMAGE_SIZE)
    (_, teachers) = load_images(DIR_TEACHERS, INPUT_IMAGE_SIZE)

    network = UNet(INPUT_IMAGE_SIZE)
    model = network.model()
    model.compile(optimizer='adam', loss=dice_coef_loss)

    history = model.fit(
        inputs, teachers, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)
    model.save_weights(os.path.join(DIR_MODEL, File_MODEL))
    plotLearningCurve(history)
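The `dice_coef_loss` passed to `model.compile` above is not defined in this listing. A minimal sketch of a soft Dice loss as commonly written for Keras binary segmentation (the smoothing constant and exact formulation are assumptions, not necessarily this project's implementation):

from keras import backend as K

SMOOTH = 1.0  # assumed smoothing term to avoid division by zero

def dice_coef(y_true, y_pred):
    # flatten both tensors and compute the soft Dice coefficient
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)

def dice_coef_loss(y_true, y_pred):
    # minimizing this loss maximizes Dice overlap between prediction and teacher mask
    return 1.0 - dice_coef(y_true, y_pred)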
Code Example #2
File: main.py  Project: taashi-s/M2Det_Keras
def predict(input_dir, gpu_num=None):
    (file_names, inputs) = load_images(input_dir,
                                       INPUT_IMAGE_SHAPE,
                                       with_normalize=WITH_NORM)
    network = M2Det(INPUT_IMAGE_SHAPE, BATCH_SIZE, class_num=CLASS_NUM)
    priors = network.get_prior_boxes()
    bbox_util = BBoxUtility(CLASS_NUM, priors)

    if isinstance(gpu_num, int):
        model = network.get_parallel_model(gpu_num)
    else:
        model = network.get_model()


    # model.summary()
    print('loading weights ...')
    model.load_weights(os.path.join(DIR_MODEL, FILE_MODEL))
    print('... loaded')

    #"""
    print('predicting ...')
    preds = model.predict(inputs, BATCH_SIZE)
    print('... predicted')

    print('result saving ...')
    pred_pbox = preds[0, :, -8:]  # note: computed but not used afterwards
    results = bbox_util.detection_out(preds)
    image_data = __outputs_to_image_data(inputs, results, file_names)
    save_images(DIR_OUTPUTS,
                image_data,
                file_names,
                with_unnormalize=WITH_NORM)
    print('... finished')
Code Example #3
def predict(input_dir, gpu_num=None):
    h, w, c = INPUT_IMAGE_SHAPE
    org_h, org_w = h - (PADDING * 2), w - (PADDING * 2)
    (file_names, inputs) = load_images(input_dir, (org_h, org_w, c))
    # zero-pad height and width so the padded inputs match INPUT_IMAGE_SHAPE
    inputs = np.pad(inputs, [(0, 0), (PADDING, PADDING), (PADDING, PADDING),
                             (0, 0)],
                    'constant',
                    constant_values=0)

    network = UNet(INPUT_IMAGE_SHAPE, CLASS_NUM)

    model = network.model()
    plot_model(model, to_file='../model_plot.png')
    model.summary()
    if isinstance(gpu_num, int):
        model = multi_gpu_model(model, gpus=gpu_num)
    model.load_weights(os.path.join(DIR_MODEL, File_MODEL))
    print('predicting ...')
    preds = model.predict(inputs, BATCH_SIZE)
    print('... predicted')

    print('output saving ...')
    preds = preds[:, PADDING:org_h + PADDING, PADDING:org_w + PADDING, :]
    save_images(DIR_OUTPUTS, preds, file_names)
    print('... saved')
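Code Examples #3 and #6 zero-pad the inputs before inference and crop the predictions back afterwards. A small self-contained check of that pad-then-crop round trip, using made-up sizes unrelated to the projects above:

import numpy as np

PADDING = 16
org_h, org_w, c = 96, 96, 3                 # assumed original image size
batch = np.random.rand(2, org_h, org_w, c)  # two dummy images

# pad height and width only, exactly as in the examples above
padded = np.pad(batch,
                [(0, 0), (PADDING, PADDING), (PADDING, PADDING), (0, 0)],
                'constant', constant_values=0)
assert padded.shape == (2, org_h + 2 * PADDING, org_w + 2 * PADDING, c)

# crop back to the original region, as done on the predictions
cropped = padded[:, PADDING:org_h + PADDING, PADDING:org_w + PADDING, :]
assert cropped.shape == batch.shape
assert np.allclose(cropped, batch)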
Code Example #4
File: main.py  Project: konny0311/UNet_Keras
def predict(input_dir):
    (file_names, inputs) = load_images(input_dir, INPUT_IMAGE_SIZE)

    network = UNet(INPUT_IMAGE_SIZE)
    model = network.model()
    model.load_weights(os.path.join(DIR_MODEL, File_MODEL))
    preds = model.predict(inputs, BATCH_SIZE)

    save_images(DIR_OUTPUTS, preds, file_names)
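Every example in this listing calls a project-specific `load_images` helper that returns a pair of file names and an image array. The real implementations differ per project and are not shown here; purely as a hypothetical sketch of the contract these calls imply (all names and behavior below are assumptions):

import os
import numpy as np
from PIL import Image

def load_images(input_dir, image_size):
    # hypothetical sketch: read every image in input_dir, resize it to
    # image_size = (height, width[, channels]) and return (file_names, array)
    h, w = image_size[0], image_size[1]
    file_names, images = [], []
    for name in sorted(os.listdir(input_dir)):
        path = os.path.join(input_dir, name)
        try:
            img = Image.open(path).convert('RGB').resize((w, h))
        except OSError:
            continue  # skip files Pillow cannot read
        file_names.append(name)
        images.append(np.asarray(img, dtype=np.float32) / 255.0)
    return file_names, np.stack(images)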
Code Example #5
File: main.py  Project: taashi-s/UNet_plus_plus_Keras
def predict(input_dir, gpu_num=None):
    (file_names, inputs) = load_images(input_dir, INPUT_IMAGE_SHAPE)

    network = UNetPP(INPUT_IMAGE_SHAPE, start_filter=START_FILTER, depth=DEPTH, class_num=CLASS_NUM)
    if isinstance(gpu_num, int):
        model = network.get_parallel_model(gpu_num)
    else:
        model = network.get_model()

    print('loading weights ...')
    model.load_weights(os.path.join(DIR_MODEL, File_MODEL))
    print('... loaded')
    print('predicting ...')
    preds = model.predict(inputs, BATCH_SIZE)
    print('... predicted')

    print('result saving ...')
    save_images(DIR_OUTPUTS, preds, file_names)
    print('... finished')
Code Example #6
File: main.py  Project: taashi-s/DeepUNet_Keras
def predict(input_dir, gpu_num=None):
    h, w, c = INPUT_IMAGE_SHAPE
    org_h, org_w = h - (PADDING * 2), w - (PADDING * 2)
    (file_names, inputs) = load_images(input_dir, (org_h, org_w, c))
    inputs = np.pad(inputs, [(0, 0), (PADDING, PADDING), (PADDING, PADDING), (0, 0)], 'constant', constant_values=0)

    network = DeepUNet(INPUT_IMAGE_SHAPE, internal_filter=INTERNAL_FILTER, depth=DEPTH, class_num=CLASS_NUM)
    if isinstance(gpu_num, int):
        model = network.get_parallel_model(gpu_num)
    else:
        model = network.get_model()
    # model.summary()
    print('loading weights ...')
    model.load_weights(os.path.join(DIR_MODEL, File_MODEL))
    print('... loaded')
    print('predicting ...')
    preds = model.predict(inputs, BATCH_SIZE)
    print('... predicted')

    print('result saving ...')
    preds = preds[:, PADDING:org_h+PADDING, PADDING:org_w+PADDING, :]

    save_images(DIR_OUTPUTS, preds, file_names)
    print('... finished')
Code Example #7
File: main.py  Project: niros7/butman2
# model_name = "third"
# model_name = "fourthx"

# x_train = cifar_data[0][0]
# y_train = cifar_data[0][1]
#
# x_eval = cifar_data[1][0]
# y_eval = cifar_data[1][1]

# model = trainer.trainModel(model, classes, batch_size, epochs, model_name, x_train, y_train)
# trainer.evalModel(model, classes, x_eval, y_eval)

model = transferedModel()
model_name = "transfered"

flowers_data = load_images("flowers images", [".jpg"], 11 - 1, 11)

# model = load_model('models_checkpoint/transfered.hdf5')

# hold out the last 20% of the flower data for evaluation
train_flowers_x = np.array(flowers_data[0][:int(len(flowers_data[0]) * .80)])
train_flowers_y = np.array(flowers_data[1][:int(len(flowers_data[1]) * .80)])
eval_flowers_x = np.array(flowers_data[0][int(len(flowers_data[0]) * .80):])
eval_flowers_y = np.array(flowers_data[1][int(len(flowers_data[1]) * .80):])

train_x = np.vstack((train_flowers_x, cifar_data[0][0]))
train_y = np.vstack((train_flowers_y, cifar_data[0][1]))
eval_x = np.vstack((eval_flowers_x, cifar_data[1][0]))