    def valid(self):
        test_iter = Clip_Iterator(c.VALID_DIR_CLIPS)
        evaluator = Evaluator(self.global_step)
        i = 0
        for data in test_iter.sample_valid(self._batch):
            in_data = data[:, :self._in_seq, ...]
            if c.IN_CHANEL == 3:
                gt_data = data[:,
                               self._in_seq:self._in_seq + self._out_seq, :, :,
                               1:-1]
            elif c.IN_CHANEL == 1:
                gt_data = data[:, self._in_seq:self._in_seq + self._out_seq,
                               ...]
            else:
                raise NotImplementedError
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, pred = self.g_model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            self.logger.info(f"Iter {self.global_step} {i}: \n\t "
                             f"mse:{mse:.4f} \n\t "
                             f"mae:{mae:.4f} \n\t "
                             f"gdl:{gdl:.4f}")
            i += 1
        evaluator.done()
Example #2
    def train(self):
        iter = 350000
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=c.IN_SEQ + c.OUT_SEQ)
        try:
            SummaryWriter = tf.train.SummaryWriter
        except AttributeError:  # tf.train.SummaryWriter was renamed to tf.summary.FileWriter in TF 1.0
            SummaryWriter = tf.summary.FileWriter
        writer = SummaryWriter(c.SAVE_SUMMARY, self.model.sess.graph)
        while iter < c.MAX_ITER:
            data, *_ = train_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]
            gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, summary = self.model.train_step(in_data, gt_data)

            logging.info(
                f"Iter {iter}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            # merged=self.model.sess.run(merged,feed_dict={self.model.in_data_480:in_data_480,self.model.gt_data_480:gt_data})
            writer.add_summary(summary, iter)
            if (iter + 1) % c.SAVE_ITER == 0:
                self.model.save_model(iter)

            if (iter + 1) % c.VALID_ITER == 0:
                self.run_benchmark(iter)
            # if (iter + 1) % c.TEST_ITER == 0:
            #         self.test(iter)
            iter += 1
Example #3
    def train(self):
        iter = 0
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=c.IN_SEQ + c.OUT_SEQ)
        # Merge every summary op into a single op; running it and writing the
        # result to disk is what lets TensorBoard display the training metrics.
        # For most purposes this one call is all that is needed.
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("/extend/rain_data/Logs",
                                       self.model.sess.graph)
        while iter < c.MAX_ITER:
            data, *_ = train_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]
            gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl = self.model.train_step(in_data, gt_data)
            logging.info(
                f"Iter {iter}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")

            if (iter + 1) % c.SAVE_ITER == 0:
                self.model.save_model(iter)

            if (iter + 1) % c.VALID_ITER == 0:
                self.run_benchmark(iter)
            iter += 1
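In example #3 the merged summary op is created but never evaluated or written. Below is a minimal sketch, assuming a TF 1.x graph-mode environment, of how such a merged op is typically run and logged; the `loss` placeholder, the log directory, and the session here are hypothetical and not part of this project.

import tensorflow as tf

# Hypothetical scalar to summarize; stands in for the model's real metrics.
loss = tf.placeholder(tf.float32, shape=[], name="loss")
tf.summary.scalar("loss", loss)

merged = tf.summary.merge_all()           # single op gathering every summary
writer = tf.summary.FileWriter("/tmp/logs")

with tf.Session() as sess:
    summary = sess.run(merged, feed_dict={loss: 0.5})
    writer.add_summary(summary, global_step=0)  # written to disk for TensorBoard
    writer.flush()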
Example #4
    def train(self):
        step = 0
        train_iter = Clip_Iterator(c.TRAIN_DIR_CLIPS)
        while step < c.MAX_ITER:
            data = train_iter.sample_clips(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]

            if c.IN_CHANEL == 3:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl = self.model.train_step(in_data, gt_data)
            logging.info(f"Iter {step}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")

            if (step + 1) % c.SAVE_ITER == 0:
                self.model.save_model()

            if (step + 1) % c.VALID_ITER == 0:
                self.valid_clips(step)
            step += 1
Example #5
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
            stride = 20
        else:
            time_interval = c.RAINY_TEST
            stride = 1
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=1)
        evaluator = Evaluator(iter)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H, c.W, c.IN_CHANEL))
            gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H, c.W, 1))
            if isinstance(data, list):
                break
            in_data[...] = data[:, :c.IN_SEQ, ...]

            if c.IN_CHANEL == 3:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError

            # in_date = date_clip[0][:c.IN_SEQ]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            logging.info(f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            i += 1
            if i % stride == 0:
                if c.IN_CHANEL == 3:
                    in_data = in_data[:, :, :, :, 1:-1]

                for b in range(c.BATCH_SIZE):
                    predict_date = date_clip[b][c.IN_SEQ]
                    logging.info(f"Save {predict_date} results")
                    if mode == "Valid":
                        save_path = os.path.join(c.SAVE_VALID, str(iter), predict_date.strftime("%Y%m%d%H%M"))
                    else:
                        save_path = os.path.join(c.SAVE_TEST, str(iter), predict_date.strftime("%Y%m%d%H%M"))

                    path = os.path.join(save_path, "in")
                    save_png(in_data[b], path)

                    path = os.path.join(save_path, "pred")
                    save_png(pred[b], path)

                    path = os.path.join(save_path, "out")
                    save_png(gt_data[b], path)
        evaluator.done()
Example #6
File: g_model.py  Project: qui3n/Code
    def generate_image(self, frames):
        normalized_frames = normalize_frames(frames)
        feed_dict = {self.input_frames_test: normalized_frames}
        gen_img = denormalize_frames(
            self.sess.run(self.scale_preds_test[-1], feed_dict=feed_dict))

        return gen_img
Example #7
    def get_train_batch(self, iterator):
        data, *_ = iterator.sample(batch_size=self._batch)
        in_data = data[:, :self._in_seq, :, :, :]

        if c.IN_CHANEL == 3:
            gt_data = data[:,
                           self._in_seq:self._in_seq + self._out_seq, :, :, :]
        elif c.IN_CHANEL == 1:
            gt_data = data[:,
                           self._in_seq:self._in_seq + self._out_seq, :, :, :]
        else:
            raise NotImplementedError

        if c.NORMALIZE:
            in_data = normalize_frames(in_data)
            gt_data = normalize_frames(gt_data)
        in_data = crop_img(in_data)
        gt_data = crop_img(gt_data)
        return in_data, gt_data
Example #8
    def valid_clips(self, step):
        test_iter = Clip_Iterator(c.VALID_DIR_CLIPS)
        evaluator = Evaluator(step)
        i = 0
        for data in test_iter.sample_valid(c.BATCH_SIZE):
            in_data = data[:, :c.IN_SEQ, ...]
            if c.IN_CHANEL == 3:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            logging.info(f"Iter {step} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            i += 1
        evaluator.done()
Example #9
def get_full_clips(data_dir, num_clips):
    """
    Loads a batch of random clips from the unprocessed train or test data.
    NOTE: the target frame was moved to be the last one.
    [<HIST_LEN/2 before frames> | <HIST_LEN/2 after frames> | <frame to be interpolated>]

    @param data_dir: The directory of the data to read. Should be either c.TRAIN_DIR or c.TEST_DIR.
    @param num_clips: The number of clips to read.

    @return: An array of shape
             [num_clips, c.TRAIN_HEIGHT, c.TRAIN_WIDTH, (3 * (c.HIST_LEN + 1))].
             A batch of frame sequences with values normalized in range [-1, 1].
    """
    video_list = glob(os.path.join(data_dir, '*'))
    while True:
        video = np.random.choice(video_list)
        ok, clips_rgb = full_clips_from_video(video, num_clips)
        if ok:
            break

    shape = np.shape(clips_rgb)
    clips = np.empty([num_clips, shape[1], shape[2], (3 * (c.HIST_LEN + 1))])

    middle = int(c.HIST_LEN / 2)
    frame_indices = list(i for j in (range(middle), [c.HIST_LEN],
                                     range(middle, c.HIST_LEN))
                         for i in j)  # what is Python turning me into?
    for clip_num in range(num_clips):
        for frame_index_src in range(c.HIST_LEN + 1):
            frame_index_dest = frame_indices[frame_index_src]
            clips[clip_num, :, :, frame_index_dest * 3:(frame_index_dest + 1) *
                  3] = utils.normalize_frames(
                      clips_rgb[clip_num, :, :,
                                frame_index_src * 3:(frame_index_src + 1) * 3])

    assert (np.max(clips) <= 1.0)
    assert (np.min(clips) >= -1.0)
    return clips
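A small worked illustration of the frame_indices remapping above, assuming a hypothetical value of 4 for c.HIST_LEN (the real value lives in the project's config):

HIST_LEN = 4  # assumed stand-in for c.HIST_LEN
middle = int(HIST_LEN / 2)
frame_indices = list(i for j in (range(middle), [HIST_LEN],
                                 range(middle, HIST_LEN))
                     for i in j)
print(frame_indices)  # [0, 1, 4, 2, 3]
# Source frames [before0, before1, target, after0, after1] are copied to
# destination slots [0, 1, 4, 2, 3]: the middle (to-be-interpolated) frame
# lands in the last slot, matching the layout described in the docstring.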
Example #10
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
        else:
            time_interval = c.RAINY_TEST
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=10,
                             mode=mode)
        i = 1
        while not test_iter.use_up:

            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)

            data = np.array(data)
            if data.shape[0] == 0:
                break
            print(data.shape)
            if mode == 'Valid':
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                in_data[:, :, :, :, :] = data[:, :c.IN_SEQ, :, :, :]
                gt_data[:, :, :, :, :] = data[:, c.IN_SEQ:c.IN_SEQ +
                                              c.OUT_SEQ, :, :, :]
            else:
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.DISPLAY_IN_SEQ,
                                          c.H_TEST, c.W_TEST, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))
                in_data[:, :, :, :, :] = data[:, :c.DISPLAY_IN_SEQ, :, :, :]
                gt_data[:, :, :, :, :] = data[:, c.
                                              DISPLAY_IN_SEQ:c.DISPLAY_IN_SEQ +
                                              c.OUT_SEQ, :, :, :]

            if isinstance(data, list):
                break

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            if mode == 'Valid':
                mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
                logging.info(
                    f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
                )
            else:
                pred = self.model.pred_step(in_data[:, 5:10])
            i += 1
            for b in range(c.BATCH_SIZE):
                predict_date = date_clip[b]
                logging.info(f"Save {predict_date} results")
                if mode == "Valid":
                    save_path = os.path.join(
                        c.SAVE_VALID, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    display_path = os.path.join(
                        c.SAVE_DISPLAY, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_in_data = in_data[b]
                    save_out_data = gt_data[b]
                    save_pred_data = pred[b]
                else:
                    display_path = os.path.join(
                        c.SAVE_DISPLAY, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_path = os.path.join(
                        c.SAVE_TEST, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_in_data = np.zeros((c.DISPLAY_IN_SEQ, 900, 900, 1))
                    save_out_data = np.zeros((c.OUT_SEQ, 900, 900, 1))
                    save_pred_data = np.zeros((c.PREDICT_LENGTH, 900, 900, 1))
                    save_in_data[:, 90:-90, :, :] = in_data[b]
                    save_out_data[:, 90:-90, :, :] = gt_data[b]
                    save_pred_data[:, 90:-90, :, :] = pred[b]

                path = os.path.join(save_path, "in")
                save_png(save_in_data, path)
                if mode != 'Valid':
                    multi_process_transfer(path, display_path + '/in')

                path = os.path.join(save_path, "pred")
                save_png(save_pred_data, path)
                if mode != 'Valid':
                    os.system(r'./post_processing/postprocessing' + ' ' +
                              save_path)
                    pred_display_dir = os.path.join(display_path, 'pred')
                    multi_process_transfer(path, pred_display_dir)
                # multi_process_transfer(path, display_path + 'pred')

                path = os.path.join(save_path, "out")
                save_png(save_out_data, path)
                if mode != 'Valid':
                    multi_process_transfer(path, display_path + '/out')
    def run_benchmark(self, iter, mode="Test"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
            stride = 5
        else:
            time_interval = c.RAINY_TEST
            stride = 1
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=self._in_seq + self._out_seq,
                             stride=1)
        evaluator = Evaluator(iter, length=self._out_seq, mode=mode)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=self._batch)
            in_data = np.zeros(shape=(self._batch, self._in_seq, self._h,
                                      self._w, c.IN_CHANEL))
            gt_data = np.zeros(shape=(self._batch, self._out_seq, self._h,
                                      self._w, 1))
            if isinstance(data, list):
                break
            in_data[...] = data[:, :self._in_seq, :, :, :]

            if c.IN_CHANEL == 3:
                gt_data[...] = data[:, self._in_seq:self._in_seq +
                                    self._out_seq, :, :, :]
            elif c.IN_CHANEL == 1:
                gt_data[...] = data[:, self._in_seq:self._in_seq +
                                    self._out_seq, :, :, :]
            else:
                raise NotImplementedError

            # in_date = date_clip[0][:c.IN_SEQ]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            in_data = crop_img(in_data)
            gt_data = crop_img(gt_data)
            mse, mae, gdl, pred = self.g_model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            self.logger.info(
                f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
            )
            i += 1
            if i % stride == 0:
                if c.IN_CHANEL == 3:
                    in_data = in_data[:, :, :, :, 1:-1]

                for b in range(self._batch):
                    predict_date = date_clip[b][self._in_seq - 1]
                    self.logger.info(f"Save {predict_date} results")
                    if mode == "Valid":
                        save_path = os.path.join(
                            c.SAVE_VALID, str(iter),
                            predict_date.strftime("%Y%m%d%H%M"))
                    else:
                        save_path = os.path.join(
                            c.SAVE_TEST, str(iter),
                            predict_date.strftime("%Y%m%d%H%M"))

                    path = os.path.join(save_path, "in")
                    save_png(in_data[b], path)

                    path = os.path.join(save_path, "pred")
                    save_png(pred[b], path)

                    path = os.path.join(save_path, "out")
                    save_png(gt_data[b], path)
        evaluator.done()
        self.notifier.eval(iter, evaluator.result_path)
Example #12
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
        else:
            time_interval = c.RAINY_TEST
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=20,
                             mode=mode)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)

            if mode == 'Valid':
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
            else:
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))

            if isinstance(data, list):
                break
            in_data[:, :, :, :, :] = data[:, :c.IN_SEQ, :, :, :]
            gt_data[:, :, :, :, :] = data[:, c.IN_SEQ:c.IN_SEQ +
                                          c.OUT_SEQ, :, :, :]
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            if mode == 'Valid':
                mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
                logging.info(
                    f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
                )
            else:
                pred = self.model.pred_step(in_data)
            i += 1
            for b in range(c.BATCH_SIZE):
                predict_date = date_clip[b]
                logging.info(f"Save {predict_date} results")
                if mode == "Valid":
                    save_path = os.path.join(
                        c.SAVE_VALID, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                else:
                    save_path = os.path.join(
                        c.SAVE_TEST, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))

                path = os.path.join(save_path, "in")
                save_png(in_data[0], path)

                path = os.path.join(save_path, "pred")
                save_png(pred[0], path)

                path = os.path.join(save_path, "out")
                save_png(gt_data[0], path)
Example #13

def gdl_loss(pred, gt):
    """

    Parameters
    ----------
    pred : tensor
        Shape: (b, length, h, w, c)
    gt : tensor
        Shape: (b, length, h, w, c)
    Returns
    -------
    gdl : value
    """
    pred_diff_h = tf.abs(one_step_diff(pred, axis=2))
    pred_diff_w = tf.abs(one_step_diff(pred, axis=3))
    gt_diff_h = tf.abs(one_step_diff(gt, axis=2))
    gt_diff_w = tf.abs(one_step_diff(gt, axis=3))
    gd_h = tf.abs(pred_diff_h - gt_diff_h)
    gd_w = tf.abs(pred_diff_w - gt_diff_w)
    gdl = tf.reduce_sum(gd_h) + tf.reduce_sum(gd_w)
    return gdl
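
A NumPy sketch of the gradient-difference computation that gdl_loss performs, assuming one_step_diff takes forward differences along the given axis (that helper is not shown here, so this is only an illustration of the idea, not the project's implementation):

import numpy as np

def one_step_diff_np(x, axis):
    # Forward difference along `axis`; assumed to mirror the project's one_step_diff.
    return np.diff(x, axis=axis)

pred = np.random.rand(1, 2, 4, 4, 1)  # (b, length, h, w, c)
gt = np.random.rand(1, 2, 4, 4, 1)
gd_h = np.abs(np.abs(one_step_diff_np(pred, axis=2)) - np.abs(one_step_diff_np(gt, axis=2)))
gd_w = np.abs(np.abs(one_step_diff_np(pred, axis=3)) - np.abs(one_step_diff_np(gt, axis=3)))
gdl = gd_h.sum() + gd_w.sum()  # sum of gradient differences over height and width
print(gdl)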


if __name__ == '__main__':
    a = [[1, 2, 3, 5, 15], [25, 30, 40, 45, 50], [60, 70, 79, 1, 17],
         [60, 70, 79, 1, 17], [60, 70, 79, 1, 17]]
    a = normalize_frames(np.asarray(a))
    print(a)
    print(get_loss_weight_symbol(a))