Example #1
    def test_dist(self):
        model_path = os.path.join(self.config.cp_dir, "triplet_loss_model.h5")

        model = load_model(
            model_path,
            custom_objects={'triplet_loss': TripletModel.triplet_loss})
        model.summary()

        (_, _), (X_test, y_test) = mnist.load_data()
        anchor = np.reshape(X_test, (-1, 28, 28, 1))
        X = {
            'anc_input': anchor,
            'pos_input': np.zeros(anchor.shape),
            'neg_input': np.zeros(anchor.shape)
        }
        s_time = datetime.datetime.now()
        res = model.predict(X)
        e_time = datetime.datetime.now() - s_time
        micro = e_time.total_seconds() * 1e6 / float(len(y_test))  # microseconds per sample
        print("TPS: %s (%s ms)" % ((1000000.0 / micro), (micro / 1000.0)))
        data = res[:, :128]
        print("Validation result shape: %s" % str(data.shape))
        log_dir = os.path.join(ROOT_DIR, self.config.tb_dir, "test")
        mkdir_if_not_exist(log_dir)
        self.tb_projector(data, y_test, log_dir)
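
Every example on this page leans on the same mkdir_if_not_exist helper, whose implementation is not shown. A minimal sketch, assuming it simply wraps os.makedirs, accepts either a single path or a list of paths (as in Examples #7 and #8), and returns its argument so it can be assigned directly (as in Example #13), could look like this:

import os


def mkdir_if_not_exist(dir_name):
    """Create the directory (or each directory in a list) if it does not exist."""
    # Accept a single path or a list/tuple of paths.
    dirs = dir_name if isinstance(dir_name, (list, tuple)) else [dir_name]
    for d in dirs:
        if d and not os.path.exists(d):
            os.makedirs(d)
    return dir_name
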
Example #2
    def test_dist(self):
        model_path = os.path.join(self.config.cp_dir,
                                  "triplet_loss_model_91_0.9989.h5")

        model = load_model(
            model_path,
            custom_objects={'triplet_loss': TripletModel.triplet_loss})
        model.summary()

        data_path = os.path.join(ROOT_DIR, 'experiments', 'data_test_200.npz')
        data_all = np.load(data_path)
        X_test = data_all['f_list']
        y_test = data_all['l_list']
        X_test = np.transpose(X_test, [0, 2, 1])

        X = {
            'anc_input': X_test,
            'pos_input': np.zeros(X_test.shape),
            'neg_input': np.zeros(X_test.shape)
        }
        start_time = datetime.now()  # start time
        res = model.predict(X)
        elapsed_time = (datetime.now() - start_time).total_seconds()
        tps = float(len(y_test)) / float(elapsed_time)
        print "Num: %s, Time: %s, TPS: %s (%s ms)" % (len(y_test),
                                                      elapsed_time, tps,
                                                      (1 / tps * 1000))

        data = res[:, :O_DIM]
        print "验证结果结构: %s" % str(data.shape)
        log_dir = os.path.join(ROOT_DIR, self.config.tb_dir, "test")
        mkdir_if_not_exist(log_dir)
        self.tb_projector(data, y_test, log_dir)
Example #3
 def init_callbacks(self):
     train_dir = os.path.join(ROOT_DIR, self.config.tb_dir, "train")
     mkdir_if_not_exist(train_dir)
     self.callbacks.append(
         TensorBoard(
             log_dir=train_dir,
             write_images=True,
             write_graph=True,
         ))
def init_config(json_file):
    """
    Parse the JSON file
    :param json_file: config file
    :return: config object
    """
    config, _ = get_config_from_json(json_file)

    mkdir_if_not_exist(MODELS_DIR)  # initialize
    return config
Example #5
    def default_dist(self):
        data_path = os.path.join(ROOT_DIR, 'experiments', 'data_test_200.npz')
        data_all = np.load(data_path)
        X_test = data_all['f_list']
        y_test = data_all['l_list']

        X_test = np.reshape(X_test, (-1, 256 * 32))
        log_dir = os.path.join(ROOT_DIR, self.config.tb_dir, 'default')
        mkdir_if_not_exist(log_dir)
        self.tb_projector(X_test, y_test, log_dir)
Example #6
def test_model(X, data_folder, trained_model, configurations):

    # Parameters
    IMG_HEIGHT = configurations.size_img
    IMG_WIDTH = configurations.size_img
    TEST_PATH = data_folder

    # Path of Image Tiles and Masks
    path = os.path.join(TEST_PATH, "img")

    _, _, files_orj = next(os.walk(path))
    files_orj = sorted(files_orj)

    # Load Trained Model
    model = load_model(trained_model,
                       custom_objects={'dice_coef': dice_coef,
                                       'dice_coef_loss': dice_coef_loss})

    # Predict
    preds_test = model.predict(X)
    preds_reshaped = np.ndarray((len(preds_test), IMG_HEIGHT, IMG_WIDTH),
                                dtype=np.float32)

    for i in range(len(preds_test)):
        preds_reshaped[i] = preds_test[i].reshape(IMG_HEIGHT, IMG_WIDTH)

    preds_upsampled = []
    for i in range(len(preds_test)):
        preds_upsampled.append(
            np.expand_dims(cv2.resize(preds_reshaped[i],
                                      (IMG_HEIGHT, IMG_WIDTH)),
                           axis=-1))
    print("[INFO] Upsampling is done!(upsampled to ({}, {}) from ({}, {})".
          format(IMG_HEIGHT, IMG_WIDTH, preds_test[i].shape[0],
                 preds_test[i].shape[1]))

    output_pred = os.path.join(configurations.output_folder, 'Prediction')
    mkdir_if_not_exist(configurations.output_folder)
    mkdir_if_not_exist(output_pred)
    threshold_pred = 0.5

    for k in range(len(preds_test)):
        img = preds_upsampled[k].copy()

        img[img > threshold_pred] = 1
        img[img <= threshold_pred] = 0
        img *= 255

        out_name = os.path.join(output_pred, "pred-" + files_orj[k])
        cv2.imwrite(out_name, img)

    print('[INFO] Finished Prediction!')

    return output_pred
Example #7
 def save(self, timestamp=None):
     # Default the timestamp at call time; putting strftime in the signature
     # would freeze it at import time.
     if timestamp is None:
         timestamp = time.strftime('%m%d_%H:%M:%S')
     if self.args is None:
         raise ValueError("Did not parse any arg")
     if isinstance(self.args, Bunch):
         config = {k: self.args[k] for k in self.args}
     else:
         config = {k: v for k, v in self.args.__dict__.items()}
     # config['timestamp'] = "{:.0f}".format(datetime.now())
     # config['local_timestamp'] = str(datetime.now())
     run_dir = _os.path.join(self.args.logdir, self.args.model, "checkpoints")
     print("[INFO] Saving config and results to {}".format(run_dir))
     mkdir_if_not_exist([run_dir])
     save_config(config, run_dir, timestamp)
Example #8
def process_config(json_file):
    """
    Parse Json file
    :param json_file: Json config file
    :return: config
    """
    config, _ = get_config_from_json(json_file)
    config.tb_dir = os.path.join("experiments", config.exp_name, "logs/")  # logs
    config.cp_dir = os.path.join("experiments", config.exp_name, "checkpoints/")  # model
    config.img_dir = os.path.join("experiments", config.exp_name, "images/")  # network

    mkdir_if_not_exist([config.tb_dir, config.cp_dir, config.img_dir])  # create folders
    return config
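
Most of the process_config variants on this page call get_config_from_json and unpack a (config, dict) pair; Example #9 uses a variant that returns the config alone. The helper itself is not shown, so the sketch below is a plausible form for the tuple-returning version, assuming it loads the JSON into a Bunch (the attribute-access dict type checked in Example #7); treat the exact return convention as an assumption:

import json

from bunch import Bunch  # assumed dependency; any attribute-access dict would do


def get_config_from_json(json_file):
    """Read a JSON config file and return (attribute-style config, raw dict)."""
    with open(json_file, 'r') as f:
        config_dict = json.load(f)
    # Bunch allows the config.exp_name-style access used throughout these examples.
    return Bunch(config_dict), config_dict
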
Example #9
def process_config(json_file):
    """
    Parse the JSON file
    :param json_file: config file
    :return: config object
    """
    config = get_config_from_json(json_file)
    config["trainer"]['tb_dir'] = os.path.join("experiments",
                                               config['exp_name'],
                                               "logs/")  # 日志
    config["trainer"]['cp_dir'] = os.path.join("experiments",
                                               config['exp_name'],
                                               "checkpoints/")  # 模型
    config["trainer"]['img_dir'] = os.path.join("experiments",
                                                config['exp_name'],
                                                "images/")  # 网络
    config["trainer"]['preds_dir'] = os.path.join("experiments",
                                                  config['exp_name'],
                                                  "preds/")  # 预测输出
    ticks = time.time()
    config["trainer"]['time'] = timestamp_2_readable(
        ticks)  # the time when starting

    mkdir_if_not_exist(config["trainer"]['tb_dir'])  # 创建文件夹
    mkdir_if_not_exist(config["trainer"]['cp_dir'])  # 创建文件夹
    mkdir_if_not_exist(config["trainer"]['img_dir'])  # 创建文件夹
    mkdir_if_not_exist(config["trainer"]['preds_dir'])  # 创建文件夹
    return config
Example #10
def train_vgg_mnist():

    print('[INFO] Loading config…')
    config_str = 'configs/try_vgg_manga.json'
    config = process_config(config_str)

    np.random.seed(47)

    print('[INFO] Loading data…')
    dl = FaceNetDL(config=config)

    base_model = tf.keras.applications.vgg16.VGG16(weights=None,
                                                   include_top=False,
                                                   input_shape=(224, 224, 1))
    x = base_model.output
    x = tf.keras.layers.Flatten()(x)
    # x = tf.keras.layers.Dense(config.fc1_num, activation='relu')(x)
    predictions = tf.keras.layers.Dense(2, activation='sigmoid')(x)

    model = tf.keras.Model(inputs=base_model.input, outputs=predictions)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('[INFO] Training the network')
    model_save_path = 'experiments/try_vgg_manga/checkpoints/'
    mkdir_if_not_exist(model_save_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        model_save_path, 'mnist_weights.hdf5'),
                                                     verbose=1,
                                                     save_weights_only=False,
                                                     monitor='loss',
                                                     mode='min',
                                                     save_best_only=True)
    model.fit_generator(dl.get_train_data(),
                        epochs=config.num_epochs,
                        verbose=2,
                        validation_data=dl.get_validation_data(),
                        callbacks=[cp_callback])
    print('[INFO] Training finished…')

    model.evaluate_generator(dl.get_test_data())
    print('[INFO] Testing finished…')
Example #11
def process_config(json_file):
    """
    Parse the JSON file
    :param json_file: config file
    :return: config object
    """
    config, _ = get_config_from_json(json_file)
    config.tb_dir = os.path.join("experiments", config.exp_name, "logs/")  # logs
    config.cp_dir = os.path.join("experiments", config.exp_name, "checkpoints/")  # model
    config.img_dir = os.path.join("experiments", config.exp_name, "images/")  # network

    mkdir_if_not_exist(config.tb_dir)  # create folder
    mkdir_if_not_exist(config.cp_dir)  # create folder
    mkdir_if_not_exist(config.img_dir)  # create folder
    return config
Example #12
def process_config(json_file):
    """
    Parse the JSON config file
    :param json_file: config file
    :return: config object
    """
    config, _ = get_config_from_json(json_file)
    config.tb_dir = os.path.join("experiments", config.exp_name,
                                 "logs/")  # 日志 | Log
    config.cp_dir = os.path.join("experiments", config.exp_name,
                                 "checkpoints/")  # 模型 | model
    config.img_dir = os.path.join("experiments", config.exp_name,
                                  "images/")  # 网络 | internet

    mkdir_if_not_exist(config.tb_dir)  # create folder
    mkdir_if_not_exist(config.cp_dir)  # create folder
    mkdir_if_not_exist(config.img_dir)  # create folder
    return config
Example #13
def train_model(X, y, configurations):

    # Parameters - IMG
    IMG_HEIGHT = int(configurations.size_img)
    IMG_WIDTH = int(configurations.size_img)
    IMG_CHANNELS = 3

    # Parameters - Model
    lr_rate = float(configurations.learning_rate)
    model_name = str(configurations.model_name)
    model_type = str(configurations.model_type)
    dir_write = mkdir_if_not_exist(str(configurations.dir_write))
    activation = str(configurations.activation)
    batch_size = int(configurations.batch_size)
    epochs = int(configurations.epoch)
    dropout_ratio = float(configurations.dropout_ratio)
    dropout_level = int(configurations.dropout_level)
    model_string = str(configurations.model_string)
    eprint(f"[INFO][train_model] {model_string}")

    # Free up RAM in case the model definition cells were run multiple times
    K.clear_session()
    # Stop training when a monitoring quantity has stopped improving
    # earlystopper = EarlyStopping(monitor='val_loss', patience=100, verbose=1)

    # Initialize the model
    if model_type.lower() == 'resunet':
        model = unetModel_residual(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, dropout_ratio=dropout_ratio, \
            lr_rate=lr_rate, activation=activation, dropout_level=dropout_level)
    else:
        model = unetModel_basic_4(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, dropout_ratio=dropout_ratio, \
            lr_rate=lr_rate, activation=activation, dropout_level=dropout_level)

    # Save the model after every epoch
    checkpointer = ModelCheckpoint(dir_write + "/" + model_string + '_main_modelCheckpoint.h5', verbose=0, monitor='val_loss', \
                                   save_best_only=True, save_weights_only=False, period=1, mode='auto')

    # Log training
    csv_logger = CSVLogger('{}/log_{}.training.csv'.format(
        dir_write, model_string))
    # Reduce lr_rate on plateau
    reduce_lr = ReduceLROnPlateau(monitor='val_dice_coef',
                                  factor=0.5,
                                  patience=10,
                                  verbose=0,
                                  mode='max',
                                  cooldown=1,
                                  min_lr=0.000001)
    # Early stopping with patience
    earlystopping = EarlyStopping(monitor='val_dice_coef',
                                  patience=25,
                                  mode='max')

    # Fit model
    eprint("[INFO][train_model] Model Fit...")
    results = model.fit(
        X,
        y,
        validation_split=0.2,
        batch_size=batch_size,
        epochs=epochs,
        callbacks=[checkpointer, csv_logger, reduce_lr, earlystopping],
        verbose=1,
        shuffle=True)  #, sample_weight=weights_train)
    eprint("[INFO][train_model] Model Fit Done!")

    # Write model history to the file
    pd.DataFrame(results.history).to_csv(dir_write + "/history_" +
                                         model_string + ".csv")

    return model, results
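
For a quick smoke test of train_model, configurations only needs the attributes read at the top of the function, so any attribute-style object will do. The values, shapes, and directory below are placeholders for illustration, not part of the original project, and the model builders (unetModel_residual / unetModel_basic_4), dice metrics, and eprint must already be importable:

from types import SimpleNamespace

import numpy as np

configurations = SimpleNamespace(
    size_img=128, learning_rate=1e-3, model_name='unet_demo',
    model_type='resunet', dir_write='experiments/unet_demo',
    activation='relu', batch_size=4, epoch=2, dropout_ratio=0.2,
    dropout_level=1, model_string='unet_demo')

# Dummy image tiles and binary masks, just to exercise the training loop.
X = np.random.rand(16, 128, 128, 3).astype(np.float32)
y = np.random.randint(0, 2, size=(16, 128, 128, 1)).astype(np.float32)

model, results = train_model(X, y, configurations)
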
Example #14
def wmcnn_hdf5(path, full=False):
    def expanddims(array, expand=2):
        for i in range(expand):
            array = np.expand_dims(array, 0)
        return array

    # from matplotlib import pyplot as plt
    # from skimage import measure

    scale = 2
    if not full:
        size_label = 96
        stride = 48
    else:
        size_label = 400

    size_input = size_label // scale  # integer size required for the HDF5 dataset shapes
    batch_size = 400
    classes = 7
    # downsizes = [1, 0.7, 0.5]

    id_files = join(path, 'id_files.txt')
    # id_files = join(path, 'test_id.txt')
    with open(id_files, 'r') as f:
        article = f.readlines()
        fnum = len(article) // classes
        order = np.random.permutation(fnum)
        training = order[:int(fnum * 0.7)]
        testing = order[len(training):]

        img_list = []
        for i, line in enumerate(article):
            img = io.imread(path + line[2:].strip('\n'))
            if i % fnum in testing:
                cls = line.split('/')[1]
                mkdir_if_not_exist(path + 'test/' + cls)
                io.imsave(path + 'test/' + line[2:].strip('\n'), img)
            else:
                if img.shape[2] == 4:
                    img = color.rgba2rgb(img)
                img_ycbcr = color.rgb2ycbcr(img) / 255
                (rows, cols, channel) = img_ycbcr.shape
                img_y, img_cb, img_cr = np.split(img_ycbcr,
                                                 indices_or_sections=channel,
                                                 axis=2)
                img_list.append(img_y)

    for idx, img_gt in enumerate(img_list):
        if idx == 0:
            hf = h5py.File('D:/data/rsscn7/full_wdata.h5', 'w')
            d_data = hf.create_dataset("data",
                                       (batch_size, 1, size_input, size_input),
                                       maxshape=(None, 1, size_input,
                                                 size_input),
                                       dtype='float32')
            d_ca = hf.create_dataset("CA",
                                     (batch_size, 1, size_input, size_input),
                                     maxshape=(None, 1, size_input,
                                               size_input),
                                     dtype='float32')
            d_ch = hf.create_dataset("CH",
                                     (batch_size, 1, size_input, size_input),
                                     maxshape=(None, 1, size_input,
                                               size_input),
                                     dtype='float32')
            d_cv = hf.create_dataset("CV",
                                     (batch_size, 1, size_input, size_input),
                                     maxshape=(None, 1, size_input,
                                               size_input),
                                     dtype='float32')
            d_cd = hf.create_dataset("CD",
                                     (batch_size, 1, size_input, size_input),
                                     maxshape=(None, 1, size_input,
                                               size_input),
                                     dtype='float32')
        else:
            hf = h5py.File('D:/data/rsscn7/full_wdata.h5', 'a')
            d_data = hf['data']
            d_ca = hf['CA']
            d_ch = hf['CH']
            d_cv = hf['CV']
            d_cd = hf['CD']

        count = 0
        if not full:
            d_data.resize([idx * batch_size + 392, 1, size_input, size_input])
            d_ca.resize([idx * batch_size + 392, 1, size_input, size_input])
            d_ch.resize([idx * batch_size + 392, 1, size_input, size_input])
            d_cv.resize([idx * batch_size + 392, 1, size_input, size_input])
            d_cd.resize([idx * batch_size + 392, 1, size_input, size_input])

            for flip in range(2):
                for degree in range(4):
                    # for downsize in downsizes:
                    img = img_gt.squeeze()
                    if flip == 1:
                        img = np.fliplr(img)

                    for turn in range(degree):
                        img = np.rot90(img)

                    # img = imresize(img, scalar_scale=downsize)
                    hei, wid = img.shape
                    # fig = plt.figure(figsize=(6, 3))
                    for x in range(0, hei - size_label, stride):
                        for y in range(0, wid - size_label, stride):
                            subim_label = img[x:x + size_label,
                                              y:y + size_label]
                            subim_data = imresize(subim_label,
                                                  scalar_scale=1 / scale)
                            coeffs2 = pywt.dwt2(subim_label, 'bior1.1')
                            LL, (LH, HL, HH) = coeffs2

                            d_data[idx * batch_size + count] = expanddims(
                                subim_data, expand=2)
                            d_ca[idx * batch_size + count] = expanddims(
                                LL, expand=2)
                            d_ch[idx * batch_size + count] = expanddims(
                                LH, expand=2)
                            d_cv[idx * batch_size + count] = expanddims(
                                HL, expand=2)
                            d_cd[idx * batch_size + count] = expanddims(
                                HH, expand=2)
                            count += 1
        else:
            d_data.resize([idx * batch_size + 1, 1, size_input, size_input])
            d_ca.resize([idx * batch_size + 1, 1, size_input, size_input])
            d_ch.resize([idx * batch_size + 1, 1, size_input, size_input])
            d_cv.resize([idx * batch_size + 1, 1, size_input, size_input])
            d_cd.resize([idx * batch_size + 1, 1, size_input, size_input])

            img = img_gt.squeeze()
            im_data = imresize(img, scalar_scale=1 / scale)
            coeffs2 = pywt.dwt2(img, 'bior1.1')
            LL, (LH, HL, HH) = coeffs2

            d_data[idx * batch_size + count] = expanddims(im_data, expand=2)
            d_ca[idx * batch_size + count] = expanddims(LL, expand=2)
            d_ch[idx * batch_size + count] = expanddims(LH, expand=2)
            d_cv[idx * batch_size + count] = expanddims(HL, expand=2)
            d_cd[idx * batch_size + count] = expanddims(HH, expand=2)
            count += 1

        batch_size = count

        hf.close()
Example #15
 def default_dist(self):
     (_, _), (X_test, y_test) = mnist.load_data()
     X_test = np.reshape(X_test, (-1, 28 * 28))
     log_dir = os.path.join(ROOT_DIR, self.config.tb_dir, 'default')
     mkdir_if_not_exist(log_dir)
     self.tb_projector(X_test, y_test, log_dir)
Example #16
def train_vgg_mnist():

    print('[INFO] Loading data…')
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    y_train = tf.keras.utils.to_categorical(y_train)
    y_test = tf.keras.utils.to_categorical(y_test)

    # x_train_new = []
    # x_test_new = []
    # for i in range(x_train.shape[0]):
    #     x_train_new.append(cv2.resize(x_train[i], (32, 32)))
    # for i in range(x_test.shape[0]):
    #     x_test_new.append(cv2.resize(x_test[i], (32, 32)))

    # x_train_new = np.expand_dims(x_train_new, axis=-1)
    # x_test_new = np.expand_dims(x_test_new, axis=-1)

    x_train = np.expand_dims(x_train, axis=-1)
    x_test = np.expand_dims(x_test, axis=-1)

    main_input = tf.keras.Input(shape=(28, 28, 1))

    x = tf.keras.layers.Convolution2D(32, (3, 3), padding='same',
                                      name='conv1')(main_input)
    x = tf.keras.layers.BatchNormalization(name='bn1')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool1')(x)

    x = tf.keras.layers.Convolution2D(48, (3, 3), padding='same',
                                      name='conv2')(x)
    x = tf.keras.layers.BatchNormalization(name='bn2')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool2')(x)

    x = tf.keras.layers.Convolution2D(64, (3, 3), padding='same',
                                      name='conv3')(x)
    x = tf.keras.layers.BatchNormalization(name='bn3')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool3')(x)

    x = tf.keras.layers.Flatten(name='fl')(x)

    x = tf.keras.layers.Dense(3168, activation='relu', name='fc1')(x)
    x = tf.keras.layers.Dense(10, activation='softmax')(x)

    model = tf.keras.Model(inputs=main_input, outputs=x)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('[INFO] Training the network')
    model_save_path = 'experiments/mnist_model/checkpoints/'
    mkdir_if_not_exist(model_save_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        model_save_path, 'mnist_weights.hdf5'),
                                                     verbose=1,
                                                     save_weights_only=False,
                                                     monitor='loss',
                                                     mode='min',
                                                     save_best_only=True)
    model.fit(x_train, y_train, epochs=10, callbacks=[cp_callback])

    print('[INFO] Training finished…')
    model.evaluate(x_test, y_test)
Example #17
            manga109_dir, 'images', anno[1]['@book'],
            '%03d.jpg' % anno[1]['elements'][cnt]['@page'])
        save_img_path = None
        if cnt % 5 == 0:  # test path
            save_img_path = os.path.join(target_test_dir,
                                         anno[0] + '_%03d.jpg' % cnt)
        elif cnt % 5 == 1:
            save_img_path = os.path.join(target_validation_dir,
                                         anno[0] + '_%03d.jpg' % cnt)
        else:
            save_img_path = os.path.join(target_train_dir,
                                         anno[0] + '_%03d.jpg' % cnt)
        img = cv2.imread(origin_img_path, 0)
        crop_image = img[anno[1]['elements'][cnt]['@ymin']:anno[1]['elements']
                         [cnt]['@ymax'], anno[1]['elements'][cnt]['@xmin']:
                         anno[1]['elements'][cnt]['@xmax']]
        cv2.imwrite(save_img_path, crop_image)

img_dir = 'manga109_body'
for subdir in ['train', 'test', 'validation']:
    dir_name = os.path.join(img_dir, subdir)
    imgs = os.listdir(dir_name)

    for img in imgs:
        if not img.endswith('.jpg'):
            continue
        img_sub_name = img.split('_')[0]
        mkdir_if_not_exist(os.path.join(dir_name, img_sub_name))
        shutil.move(os.path.join(dir_name, img),
                    os.path.join(dir_name, img_sub_name, img))
Example #18
        'stacksr_lr=1e-3_3drrn_2x.txt', 'stacksr_lr=1e-3_3lap_2x.txt'
    ]
    names = ['KTDE', 'KTDE-VDSR', 'KTDE-DRRN', 'KTDE-MS-Lapsrn']
    ls = ['-', '--', '-.', ':']
    ylabels = 'PSNR (dB)'
    # models = ['BL-VDSR', 'BL-DRRN', 'BL-MS-LapSRN']
    # fig, axes = plt.subplots(1, 2, sharey=False, figsize=(9, 4))
    x = np.linspace(0, 400000, 100)
    # plt.figure(figsize=(6, 3))
    # plt.xticks(fontsize=16)
    # plt.yticks(fontsize=16)
    ax = plt.gca()

    ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
    for j, f in enumerate(files):
        psnr_list, loss_list = get_perform_epoch(f)
        plt.plot(x, psnr_list, label=names[j], ls=ls[j])

        # plt.title('Study of Effectiveness of Ensembling Different Pre-defined SISR Methods', fontsize=16)
    plt.ylabel(ylabels, fontsize=16)
    plt.xlabel('training steps', fontsize=16)
    plt.legend()

    plt.tight_layout()
    plt.savefig('figure/ADI_plot.pdf')


if __name__ == '__main__':
    mkdir_if_not_exist('figure')
    draw_KT()
Example #19
def train_vgg_mnist():

    print('[INFO] Loading data…')
    config = process_config('configs/vgg_mnist_config.json')
    dl = FaceNetDL(config=config)

    main_input = tf.keras.Input(shape=(config.input_shape, config.input_shape,
                                       1))

    x = tf.keras.layers.Convolution2D(32, (3, 3), padding='same',
                                      name='conv1')(main_input)
    x = tf.keras.layers.BatchNormalization(name='bn1')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool1')(x)

    x = tf.keras.layers.Convolution2D(48, (3, 3), padding='same',
                                      name='conv2')(x)
    x = tf.keras.layers.BatchNormalization(name='bn2')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool2')(x)

    x = tf.keras.layers.Convolution2D(64, (3, 3), padding='same',
                                      name='conv3')(x)
    x = tf.keras.layers.BatchNormalization(name='bn3')(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='pool3')(x)

    x = tf.keras.layers.Flatten(name='fl')(x)

    x = tf.keras.layers.Dense(3168, activation='relu', name='fc1')(x)
    x = tf.keras.layers.Dense(2, activation='softmax')(x)

    model = tf.keras.Model(inputs=main_input, outputs=x)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('[INFO] Training the network')
    model_save_path = 'experiments/mnist_facenet/checkpoints'
    mkdir_if_not_exist(model_save_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        model_save_path, 'mnist_facenet_weights.hdf5'),
                                                     verbose=1,
                                                     save_weights_only=False,
                                                     monitor='val_loss',
                                                     mode='min',
                                                     save_best_only=True)
    model.fit_generator(dl.get_train_data(),
                        epochs=config.num_epochs,
                        validation_data=dl.get_validation_data(),
                        callbacks=[cp_callback])
    print('[INFO] Training finished…')

    print('[INFO] Testing the model…')
    model.evaluate_generator(dl.get_test_data())
    print('[INFO] Testing finished…')
Example #20
def get_data_test(data_folder, configurations, trained_model):

    # Parameters
    IMG_WIDTH = configurations.size_img
    IMG_HEIGHT = configurations.size_img
    IMG_CHANNELS = 3
    TEST_PATH = data_folder
    COUNT = configurations.sample_count

    # Path of Image Tiles and Masks
    path = os.path.join(TEST_PATH, "img")
    # path_mask = os.path.join(TEST_PATH, "mask")

    # total = int(sum([len(files) for r, d, files in os.walk(path)]))

    eprint(
        f'[DEBUG][get_data_test]  Getting and Resizing({IMG_WIDTH}x{IMG_HEIGHT}) Test Images and Masks... '
    )

    # Get and resize Test images and masks
    # test_cpt = int(sum([len(files) for r, d, files in os.walk(path)]))

    # X_test = np.ndarray((test_cpt, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
    # Y_test = np.ndarray((test_cpt, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.float32)  # dtype=np.bool)

    eprint(
        f'[DEBUG][get_data_test] Getting and Resizing Test Images and Masks Done!\nPath to img: {path}'
    )
    sys.stdout.flush()

    _, _, files_orj = next(os.walk(path))
    # _, _, files_mask = next(os.walk(path_mask))
    files_orj = sorted(files_orj)
    # files_mask = sorted(files_mask)

    eprint(f'[DEBUG][get_data_test] Number of Image Tiles: {len(files_orj)}')

    # for i, f in enumerate(files_orj[:COUNT]):
    #     img = cv2.imread(os.path.join(path, f))
    #     img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_AREA)
    #     img = img / 255
    #     X_test[i] = img

    # for i, fm in enumerate(files_mask[:COUNT]):
    #     img_mask = cv2.imread(os.path.join(path_mask, fm), cv2.IMREAD_GRAYSCALE)
    #     img_mask = cv2.resize(img_mask, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_AREA)
    #     img_mask = img_mask / 255
    #     img_mask = np.expand_dims(img_mask, axis=-1)
    #     Y_test[i] = img_mask
    # Load Trained Model
    model = load_model(trained_model,
                       custom_objects={
                           'dice_coef': dice_coef,
                           'dice_coef_loss': dice_coef_loss
                       })

    for i, f in enumerate(files_orj):
        X_test = np.ndarray((1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
                            dtype=np.float32)

        img = cv2.imread(os.path.join(path, f))
        img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH),
                         interpolation=cv2.INTER_AREA)
        img = img / 255
        X_test[0] = img
        # predictions = test_model(X_test, data_folder, trained_model, configurations)
        # Predict
        preds_test = model.predict(X_test)
        preds_reshaped = np.ndarray((1, IMG_HEIGHT, IMG_WIDTH),
                                    dtype=np.float32)
        preds_reshaped[0] = preds_test[0].reshape(IMG_HEIGHT, IMG_WIDTH)

        preds_upsampled = [
            np.expand_dims(cv2.resize(preds_reshaped[0],
                                      (IMG_HEIGHT, IMG_WIDTH)),
                           axis=-1)
        ]
        print("[INFO] Upsampling is done!(upsampled to ({}, {}) from ({}, {})".
              format(IMG_HEIGHT, IMG_WIDTH, preds_test[0].shape[0],
                     preds_test[0].shape[1]))

        output_pred = os.path.join(configurations.output_folder, 'Prediction')
        mkdir_if_not_exist(configurations.output_folder)
        mkdir_if_not_exist(output_pred)
        threshold_pred = 0.5

        img = preds_upsampled[0].copy()

        img_raw = img * 255
        out_name_raw = os.path.join(output_pred, "pred-raw-" + files_orj[i])
        cv2.imwrite(out_name_raw, img_raw)

        img[img > threshold_pred] = 1
        img[img <= threshold_pred] = 0
        img *= 255

        out_name = os.path.join(output_pred, "pred-" + files_orj[i])
        cv2.imwrite(out_name, img)

        print('[INFO] Finished Prediction!')
Example #21
	def predict(self, data, **kwargs):
		# eng = meng.start_matlab()
		# for name, param in self.model.named_parameters():
		#     a = param.clone().cpu().data.numpy()
		#     print(name, a.max(), a.min())
		# print('\n')

		cost_time = 0
		save_dir = os.path.join(self.config['preds_dir'], kwargs['testset'])
		mkdir_if_not_exist(save_dir)
		self.model.eval()
		psnr_list = []
		ssim_list = []
		b_psnr_list = []
		b_ssim_list = []
		gap_list = {}
		diversity = 0
		with torch.no_grad():
			for img_bundle in data:
				# print(img_bundle['name'])
				if "color" in self.config.keys() and self.config["color"]:
					x = img_bundle['origin']
					y = img_bundle['y']
					multichannel = True
				else:
					x = img_bundle['x']
					y = img_bundle['y']
					if len(y.shape) == 3:
						(rows, cols, channel) = y.shape
						y, _, _ = np.split(y, indices_or_sections=channel, axis=2)
					else:
						(rows, cols) = y.shape

					multichannel = False

				x = torch.from_numpy(x).float().view(1, -1, x.shape[0], x.shape[1])
				if self.config['cuda']:
					x = x.cuda()
				# print(x[:5])
				lr_size = (x.shape[2], x.shape[3])
				hr_size = img_bundle['size']
				if self.config['progressive']:
					inter_sizes = np_utils.interval_size(lr_size, hr_size, self.config['max_gradual_scale'])
				else:
					inter_sizes = []
				inter_sizes.append(hr_size)

				start_time = time.time()
				if self.config['net'] == 'rrgun':
					preds = self.model(x, y_sizes=inter_sizes)
				elif self.config['net'] == 'lapsrn':
					# step = len(inter_sizes)
					# if kwargs['upscale'] % 2 != 0:
					#     step = step + 1
					step = int(np.ceil(math.log(kwargs['upscale'], 2)))
					preds = self.model(x, step=step)[-1]

					# y_numpy = preds[-1].data.cpu().numpy().squeeze()
					# x = misc.imresize(y_numpy, size=hr_size,
					#                    interp='bicubic', mode='F')
					# x = np.array(x, dtype=np.float64)
					# preds = torch.from_numpy(x)

					# resize = tfs.Compose([tfs.ToPILImage(), tfs.Resize(hr_size, interpolation=Image.BICUBIC),
					#                       tfs.ToTensor()])
					# preds = resize(preds[-1].squeeze(0))
					# preds = F.upsample(preds[-1], size=hr_size, mode='bilinear')
					# preds = preds[-1]
				elif self.config['net'] == 'lapgun':
					preds = self.model(x, y_sizes=inter_sizes)
				elif self.config['net'] in ['lapinternet', 'lapmtnet']:
					# print(img.shape)
					preds = self.model(x, size=inter_sizes[-1], step=self.config['step'])
				elif self.config['net'] in ['ensemsr', 'stacksr', 'stacksr_back', 'stacksr_uni']:
					input_list = self.em_generator(x)
					preds, parts = self.model(input_list)
					parts = torch.cat(tuple(parts), 0)
					diversity += self.chisquare(parts)
					# preds = com[-1]
				elif self.config['net'] == 'wmcnn':
					preds = self.model(x)
					preds = [p.data.cpu().numpy() for p in preds]
					# preds = [matlab.double(p.data.cpu().numpy().squeeze().tolist()) for p in preds]
					# preds = eng.idwt2(*preds, 'bior1.1')
					preds = pywt.idwt2((preds[0], (preds[1:])), 'bior1.1')
				else:
					preds = self.model(x)

				# for c in com:
				#     c = c.data.cpu().numpy()
				#     continue
				cost_time += time.time() - start_time
				if isinstance(preds, list):
					preds = np.clip(preds[-1].data.cpu().numpy(), 16/255, 235/255).astype(np.float64)
					# preds = np.clip(preds[-1].data.cpu().numpy(), 0, 1).astype(np.float64)
				else:
					try:
						preds = preds.data.cpu().numpy()
					except AttributeError:
						preds = preds
					# preds = preds.mul(255).clamp(0, 255).round().div(255)
					preds = np.clip(preds, 16/255, 235/255).astype(np.float64)
					# preds = np.clip(preds, 0, 1).astype(np.float64)

				preds = preds.squeeze()
				if len(preds.shape) == 3:
					preds = preds.transpose([1, 2, 0])
				preds = modcrop(preds.squeeze(), kwargs['upscale'])
				preds_bd = shave(preds.squeeze(), kwargs['upscale'])
				y = modcrop(y.squeeze(), kwargs['upscale'])
				# y = np.round(y * 255).astype(np.uint8)
				y_bd = shave(y.squeeze(), kwargs['upscale'])#/ 255.

				# print(preds_bd.shape, y_bd.shape)
				x = x.data.cpu().numpy().squeeze()
				# bic = x
				bic = imresize.imresize(x, scalar_scale=kwargs['upscale'])
				# bic = np.clip(bic, 16 / 255, 235 / 255).astype(np.float64)
				bic = np.round(bic*255).astype(np.uint8)
				bic = shave(bic.squeeze(), kwargs['upscale']) / 255.

				b_psnr = measure.compare_psnr(bic, y_bd, data_range=1)
				# b_ssim = measure.compare_ssim(bic, y_bd, data_range=1)
				b_ssim = self.calculate_ssim(bic* 255, y_bd* 255)
				# b_ssim = self.vifp_mscale(bic, y_bd)
				b_psnr_list.append(b_psnr)
				b_ssim_list.append(b_ssim)

				m_psnr = measure.compare_psnr(preds_bd, y_bd)

				# m_ssim = measure.compare_ssim(preds_bd, y_bd, multichannel=multichannel)
				m_ssim = self.calculate_ssim(preds_bd* 255, y_bd* 255)
				# print('image {} PSNR: {} SSIM: {}'.format(img_bundle['name'], m_psnr, m_ssim))
				gap_list[m_psnr-b_psnr] = img_bundle['name']
				psnr_list.append(m_psnr)
				ssim_list.append(m_ssim)
				test_value = '{}_{}'.format(m_psnr, m_ssim)
				# self.save_preds(save_dir, test_value, preds, img_bundle, True)

		diversity = diversity / len(data)
		print('Averaged Diversity is {}'.format(diversity))
		print('Averaged PSNR is {}, SSIM is {}'.format(np.mean(np.array(psnr_list)), np.mean(np.array(ssim_list))))
		print('Averaged BIC PSNR is {}, SSIM is {}'.format(np.mean(np.array(b_psnr_list)), np.mean(np.array(b_ssim_list))))
		# print(self.model.module.w_output)
		# print(self.model.module.w_inter)
		biggest_gap = sorted(gap_list, reverse=True)
		print(biggest_gap)
		print(gap_list[biggest_gap[0]], gap_list[biggest_gap[1]])
		print('Inference cost time {}s'.format(cost_time))
Example #22
    def predict(self, data, **kwargs):
        # eng = meng.start_matlab()
        # for name, param in self.model.named_parameters():
        #     a = param.clone().cpu().data.numpy()
        #     print(name, a.max(), a.min())
        # print('\n')
        save_dir = os.path.join(self.config['preds_dir'], kwargs['testset'])
        mkdir_if_not_exist(save_dir)
        self.model.eval()
        psnr_list = []
        ssim_list = []
        b_psnr_list = []
        b_ssim_list = []
        with torch.no_grad():
            for img_bundle in data:
                # print(img_bundle['name'])
                if "color" in self.config.keys() and self.config["color"]:
                    x = img_bundle['origin']
                    y = img_bundle['y']
                    multichannel = True
                else:
                    x = img_bundle['x']
                    y = img_bundle['y']
                    (rows, cols, channel) = y.shape
                    y, _, _ = np.split(y, indices_or_sections=channel, axis=2)
                    multichannel = False

                x = torch.from_numpy(x).float().view(1, -1, x.shape[0],
                                                     x.shape[1])
                if self.config['cuda']:
                    x = x.cuda()
                # print(x[:5])
                lr_size = (x.shape[2], x.shape[3])
                hr_size = img_bundle['size']
                if self.config['progressive']:
                    inter_sizes = np_utils.interval_size(
                        lr_size, hr_size, self.config['max_gradual_scale'])
                else:
                    inter_sizes = []
                inter_sizes.append(hr_size)

                if self.config['net'] == 'wmcnn':
                    preds = self.model(x)
                    preds = [p.data.cpu().numpy() for p in preds]
                    # preds = [matlab.double(p.data.cpu().numpy().squeeze().tolist()) for p in preds]
                    # preds = eng.idwt2(*preds, 'bior1.1')
                    preds = pywt.idwt2((preds[0], (preds[1:])), 'bior1.1')
                else:
                    preds = self.model(x)

                if isinstance(preds, list):
                    # Y-channel's pixels are within [16, 235]
                    preds = np.clip(preds[-1].data.cpu().numpy(), 16 / 255,
                                    235 / 255).astype(np.float64)
                    # preds = np.clip(preds[-1].data.cpu().numpy(), 0, 1).astype(np.float64)
                else:
                    try:
                        preds = preds.data.cpu().numpy()
                    except AttributeError:
                        preds = preds
                    # preds = preds.mul(255).clamp(0, 255).round().div(255)
                    preds = np.clip(preds, 16 / 255,
                                    235 / 255).astype(np.float64)
                    # preds = np.clip(preds, 0, 1).astype(np.float64)

                preds = preds.squeeze()
                if len(preds.shape) == 3:
                    preds = preds.transpose([1, 2, 0])
                preds = modcrop(preds.squeeze(), kwargs['upscale'])
                preds_bd = shave(preds.squeeze(), kwargs['upscale'])
                y = modcrop(y.squeeze(), kwargs['upscale'])
                y_bd = shave(y.squeeze(), kwargs['upscale'])

                # print(preds_bd.shape, y_bd.shape)
                x = x.data.cpu().numpy().squeeze()
                bic = imresize.imresize(x, scalar_scale=kwargs['upscale'])
                bic = np.clip(bic, 16 / 255, 235 / 255).astype(np.float64)
                bic = shave(bic.squeeze(), kwargs['upscale'])
                b_psnr = measure.compare_psnr(bic, y_bd)
                b_ssim = measure.compare_ssim(bic, y_bd)
                b_psnr_list.append(b_psnr)
                b_ssim_list.append(b_ssim)

                m_psnr = measure.compare_psnr(preds_bd, y_bd)
                m_ssim = measure.compare_ssim(preds_bd,
                                              y_bd,
                                              multichannel=multichannel)
                print('PSNR of image {} is {}'.format(img_bundle['name'],
                                                      m_psnr))
                print('SSIM of image {} is {}'.format(img_bundle['name'],
                                                      m_ssim))
                psnr_list.append(m_psnr)
                ssim_list.append(m_ssim)
                self.save_preds(save_dir, preds, img_bundle, True)

        print('Averaged PSNR is {}'.format(np.mean(np.array(psnr_list))))
        print('Averaged SSIM is {}'.format(np.mean(np.array(ssim_list))))
        print('Averaged BIC PSNR is {}'.format(np.mean(np.array(b_psnr_list))))
        print('Averaged BIC SSIM is {}'.format(np.mean(np.array(b_ssim_list))))