Example #1
    def creat_jpg_dataset(self, data_path_list, data_size):
        # Build a repeating, shuffled, batched dataset of decoded JPEG images,
        # plus the number of steps that make up one pass over the data.
        data_list = Dataset.list_files(data_path_list, shuffle=False)
        dataset = data_list.map(self.path_process, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.shuffle(self.dataset_buffer_size).repeat()
        dataset = dataset.batch(self.batch_size)
        step_each_epoch = math.ceil(data_size / self.batch_size)

        return dataset, step_each_epoch
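
The method relies on a `self.path_process` mapping function that is not shown in this example. A minimal standalone sketch of such a function, assuming JPEG inputs, a 256x256 target size and scaling to [0, 1] (all assumptions, not from the source):

import tensorflow as tf

# Sketch of a path_process-style mapping function (target size and scaling are assumptions).
def path_process(file_path):
    img = tf.io.read_file(file_path)              # read the raw bytes
    img = tf.image.decode_jpeg(img, channels=3)   # decode into an RGB tensor
    img = tf.image.resize(img, [256, 256])        # assumed target size
    return tf.cast(img, tf.float32) / 255.0       # assumed scaling to [0, 1]

Because the returned dataset repeats forever, the paired step_each_epoch value is what marks the end of an epoch, e.g. model.fit(dataset, steps_per_epoch=step_each_epoch, ...).
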
Example #2
    def acc_test(resize_train_data_path, model_path, batch_size):
        # Evaluate the saved classifier on the resized training images.
        train_image_list = Dataset.list_files(resize_train_data_path + "*.jpg")
        train_image_dataset = train_image_list.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        train_image_dataset = train_image_dataset.shuffle(62)
        train_image_dataset = train_image_dataset.batch(batch_size)

        steps_each_epoch = math.ceil(62 / batch_size)
        cmodel = tf.keras.models.load_model(model_path + "c_model")

        step = 0
        acc_list = []
        label = ['reality', 'animation']
        train_image_dataset_iterator = iter(train_image_dataset)
        while step < steps_each_epoch:
            batch_label, batch_image = next(train_image_dataset_iterator)
            batch_label = label_process(batch_label, label)
            # Round the sigmoid output to 0/1 and compare it with the labels.
            predict = tf.round(cmodel.predict(batch_image))
            predict = tf.math.equal(batch_label, predict)
            acc_list.append(tf.reduce_mean(tf.cast(predict, tf.float32)))
            step += 1
        acc_mean = np.mean(acc_list)
        print(f"Acc: {acc_mean}")
Example #3
    training_classification = False
    if training_classification:
        cmodel_optimizer = tf.keras.optimizers.Adam()

    # Batch size
    batch_size = 1
    image_count = 1
    if first_training:
        epochs = 5000
    else:
        epochs = 200
    
    # Load the data
    if training_classification:
        train_image_list = Dataset.list_files(resize_data_path + "*.jpg")
        train_image_dataset = train_image_list.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        train_image_dataset = train_image_dataset.shuffle(62).repeat()
        train_image_dataset = train_image_dataset.batch(batch_size)
        steps_each_epoch = math.ceil(62 / batch_size)
    else:
        reality_image_list = Dataset.list_files(resize_reality_data_path + "/*.jpg")
        reality_image_dataset = reality_image_list.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        reality_image_dataset = reality_image_dataset.shuffle(38).repeat()
        reality_image_dataset = reality_image_dataset.batch(batch_size)

        Animation_image_list = Dataset.list_files(resize_animation_data_path + "/*.jpg")
        Animation_image_dataset = Animation_image_list.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        Animation_image_dataset = Animation_image_dataset.shuffle(38).repeat()
        Animation_image_dataset = Animation_image_dataset.batch(batch_size)
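
Both domain datasets repeat indefinitely, so a training loop has to count its own steps. A minimal sketch continuing from the else branch above; the per-batch train_step is hypothetical:

    reality_iterator = iter(reality_image_dataset)
    animation_iterator = iter(Animation_image_dataset)
    steps_each_epoch = math.ceil(38 / batch_size)

    for epoch in range(epochs):
        for _ in range(steps_each_epoch):
            reality_batch = next(reality_iterator)
            animation_batch = next(animation_iterator)
            # train_step(reality_batch, animation_batch)  # hypothetical per-batch update
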
Example #4
    return final_model


# %%
def process_path(file_path):
    # read_file, image, cast, float32 and preprocess_input are assumed to be
    # imported elsewhere (e.g. from tensorflow and the chosen
    # tf.keras.applications model); ROW, COL, BATCH_SIZE and AUTOTUNE are
    # likewise defined elsewhere in the original script.
    print(file_path)
    file = read_file(file_path)
    file = image.decode_jpeg(file, channels=3)
    file = cast(file, float32)
    file = preprocess_input(file)
    file = image.resize(file, [ROW, COL])

    return file


inference = Dataset.list_files(str(file_location + '/*'), shuffle=False)
inference = inference.map(process_path).batch(BATCH_SIZE).prefetch(AUTOTUNE)

# %%
checkpoints = glob.glob(os.path.join('Weights', 'IDP_Final_B*.h5'))

Models = []
for checkpoint in checkpoints:
    new_model = create_model(training=False)
    new_model.load_weights(checkpoint)
    Models.append(new_model)

print('Models loaded...')
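
The source does not show how Models is used; one plausible reading, given that several checkpoints are loaded, is an ensemble that averages their predictions over the inference dataset built above. Purely an assumption:

import numpy as np

# Hypothetical ensemble: average per-checkpoint predictions, then take the
# argmax per image.
all_predictions = np.mean([m.predict(inference) for m in Models], axis=0)
predicted_classes = np.argmax(all_predictions, axis=-1)
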

sentence = ''


def process_path(file_path):
    # Load the file; the def line and the file_name extraction were missing
    # from this fragment and are reconstructed here (splitting the path on '/'
    # is an assumption).
    file_name = tf.strings.split(file_path, '/')[-1]
    image = tf.io.read_file(file_path)
    image = decode_image(image)
    return file_name, image
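
The decode_image helper called above is not shown either; a plausible sketch (whether any rescaling happens here or later is an assumption):

import tensorflow as tf

# Hypothetical decode_image: decode JPEG bytes into an RGB uint8 tensor; any
# rescaling (e.g. to the [-1, 1] range that deprocess below reverses) is
# assumed to happen elsewhere.
def decode_image(raw_bytes):
    return tf.image.decode_jpeg(raw_bytes, channels=3)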


# Map the image back to the 0~255 range and cast it to the uint8 data type
def deprocess(img):
    img = 255 * (img + 1.0) / 2.0
    return tf.cast(img, tf.uint8)
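
A short usage sketch of deprocess; the random stand-in image and the matplotlib display are assumptions, but they show the expected [-1, 1] input range:

import matplotlib.pyplot as plt
import tensorflow as tf

# deprocess maps values from roughly [-1, 1] back to displayable 0-255 uint8.
dream_img = tf.random.uniform([224, 224, 3], minval=-1.0, maxval=1.0)  # stand-in result
plt.imshow(deprocess(dream_img).numpy())
plt.axis('off')
plt.show()
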


base_image_path = 'E:/python program/Deep dream test/*.jpg'
#base_image_path = 'E:/python program/dogs-vs-cats/test/*.jpg'
# Build a dataset containing the path of every file that matches the given pattern
image_list = Dataset.list_files(base_image_path)
# The output of Dataset.list_files is a dataset whose elements are eager tensors holding the file paths

#for file in image_list.batch(2):
#    print(file)

# Build a MapDataset object from the given function;
# the elements of the dataset are passed through the function when it is consumed.
# num_parallel_calls controls whether multiple elements are loaded in parallel.
image_dataset = image_list.map(
    process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)

#for file_name, image in image_dataset.take(1):
#    plt.imshow(image)
#    plt.show()
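
As a quick check, element_spec shows the (file_name, image) structure of the mapped dataset without iterating it; batching and prefetching would follow the same pattern as the earlier examples (the batch size here is an arbitrary placeholder):

# Inspect the element structure produced by process_path.
print(image_dataset.element_spec)

# Placeholder batching/prefetching, mirroring the earlier pipelines.
image_dataset = image_dataset.batch(2).prefetch(tf.data.experimental.AUTOTUNE)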