Example #1
0
def getLossSlice(tensor):
    """
    Crop ``tensor`` to the type/height window that the loss is computed on.

    The window boundaries come from the module-level ``getTypes()`` and
    ``getHeights()`` configuration helpers.
    """
    type_range = getTypes()
    height_range = getHeights()
    return tensor[:, type_range[0]:type_range[1],
                  height_range[0]:height_range[1], :, :]
Example #2
0
def getSSIM(y, pred, single_layer=False):
    """
    Negative mean SSIM between the cropped target and the prediction.

    Both tensors are restricted to the configured type window at the single
    configured height level and transposed to channel-last (NHWC) layout, as
    required by ``tf.image.ssim``.  The mean SSIM is negated so the value can
    be minimized as a loss.
    """
    type_range = getTypes()
    height_range = getHeights()
    assert (height_range[1] -
            height_range[0] == 1), "SSIM only support single layer output"
    # Map to channel-last layout for tf.image.ssim.
    target = tf.transpose(
        y[:, type_range[0]:type_range[1], height_range[0], :, :],
        perm=[0, 2, 3, 1])
    if single_layer:
        # pred is assumed to be already cropped to one type/height slice.
        estimate = tf.transpose(pred[:, :, 0, :, :], perm=[0, 2, 3, 1])
    else:
        estimate = tf.transpose(
            pred[:, type_range[0]:type_range[1], height_range[0], :, :],
            perm=[0, 2, 3, 1])
    return -tf.reduce_mean(tf.image.ssim(target, estimate, max_val=5))
Example #3
0
def getLoss(y, pred, L1loss=False, single_layer=False, ssim=False):
    """
    Loss function is defined here.

    Dispatches to SSIM when requested, otherwise applies an L1 or L2 loss
    over the type/height window given by ``getTypes()``/``getHeights()``.
    When ``single_layer`` is True the prediction is assumed to already be
    cropped to that window.
    """
    if ssim:
        return getSSIM(y, pred, single_layer)
    type_range = getTypes()
    height_range = getHeights()
    # Reusable crop for the configured type/height window.
    window = (slice(None),
              slice(type_range[0], type_range[1]),
              slice(height_range[0], height_range[1]),
              slice(None), slice(None))
    lossfcn = (tf.losses.absolute_difference
               if L1loss else tf.losses.mean_squared_error)
    if single_layer:
        return lossfcn(y[window], pred)
    return lossfcn(y[window], pred[window])
Example #4
0
def getEvaluate(y,
                pred,
                file_comment,
                single_layer=False,
                height_seperate=False):
    """
    Get the RMS for the denormalized data.

    The squared error between ``y`` and ``pred`` (cropped to the configured
    type/height window and to the first 360 entries of axis 3) is scaled by
    the stored per-channel std so the RMS is in original data units.

    :param y: ground-truth tensor, indexed (batch, type, height, ...) per
        the slicing below
    :param pred: prediction tensor; when ``single_layer`` is True it is
        assumed to already be cropped to the type/height window
    :param file_comment: identifier locating ``std_<file_comment>.npy``
    :param single_layer: treat ``pred`` as pre-cropped (requires exactly
        one height level)
    :param height_seperate: keep type and height separated in the result;
        otherwise average over heights as well
    :return: 1-D tensor of RMS values
    """
    types = getTypes()
    heights = getHeights()
    std_path = global_macros.TF_DATA_DIRECTORY + "/std_" + file_comment + ".npy"
    std = tf.constant(np.expand_dims(np.load(std_path), axis=0),
                      dtype=tf.float32)[:, types[0]:types[1],
                                        heights[0]:heights[1], :, :]
    # "_nor" datasets only provide the first 360 entries of axis 3
    # (presumably longitude) -- keep std consistent with the crop below.
    if "_nor" in file_comment:
        std = std[:, :, :, :360, :]
    if single_layer:
        assert (heights[1] -
                heights[0] == 1), "Only single height level is supported"
        result = tf.square(
            (y[:, types[0]:types[1], heights[0]:heights[1], :360, :] -
             pred[:, :, :, :360, :]) * std)
    else:
        result = tf.square(
            (y[:, types[0]:types[1], heights[0]:heights[1], :360, :] -
             pred[:, types[0]:types[1], heights[0]:heights[1], :360, :]) * std)
    if height_seperate:
        # RMS per (type, height) pair, flattened into one axis.
        result = tf.sqrt(tf.reduce_mean(result, axis=[0, 3, 4]))
        result = tf.reshape(result, [(types[1] - types[0]) *
                                     (heights[1] - heights[0])])
    else:
        # RMS per type only (fix: removed the dead `result = result`
        # self-assignment that followed the sqrt in the original).
        result = tf.sqrt(tf.reduce_mean(result, axis=[0, 2, 3, 4]))
    return result
Example #5
0
def getCrop(y):
    """
    Return ``y`` restricted to the configured type/height window.
    """
    t = getTypes()
    h = getHeights()
    return y[:, t[0]:t[1], h[0]:h[1], :, :]
Example #6
0
    def get_predictions(self, iter_data, file_comment, load=False):
        """
        save predictions in numpy file format in directory: log/<exp>/*.npy

        One file per test sample is written, named after the sample's date,
        containing np.stack([pred, y48, x48]) after denormalization with the
        stored mean/std statistics.

        :param iter_data: iterator of test data
        :param file_comment: identifier used to locate the
            ``std_<file_comment>.npy`` / ``mean_<file_comment>.npy`` files
        :param load: False to restore from ``FLAGS.resume_iter``, otherwise
            a checkpoint path passed straight to ``saver.restore``
        :return None
        """
        # Restore model weights from an explicit checkpoint path, or from
        # the iteration number given on the command line.
        if load is not False:
            self.saver.restore(self.sess, load)
        else:
            assert (self.FLAGS.resume_iter !=
                    -1), "Need to input a model for prediction"
            model_file = osp.join(self.FLAGS.ckptdir, self.FLAGS.exp,
                                  'model_{}'.format(self.FLAGS.resume_iter))
            self.saver.restore(self.sess, model_file)
            print("Loaded from: ", model_file)
            self.global_step = tf.constant(self.FLAGS.resume_iter)

        types = getTypes()
        heights = getHeights()
        # Per-channel statistics used to undo the normalization; cropped to
        # the same type/height window as the model output.
        std_path = global_macros.TF_DATA_DIRECTORY + "/std_" + file_comment + ".npy"
        mean_path = global_macros.TF_DATA_DIRECTORY + "/mean_" + file_comment + ".npy"
        std = np.expand_dims(np.load(std_path),
                             axis=0)[:, types[0]:types[1],
                                     heights[0]:heights[1], :, :]
        mean = np.expand_dims(np.load(mean_path),
                              axis=0)[:, types[0]:types[1],
                                      heights[0]:heights[1], :, :]

        indata = iter_data.get_next()
        self.myprint("\nGetting predictions: ")
        self.sess.run(iter_data.initializer)

        # loss, pred, _ = self._test_average_loss(data,  include_pred=True)
        assert (self.FLAGS.batch_size == 1
                ), "Only batch size one is supported for prediction dump"

        # Iterate until the dataset iterator is exhausted.
        while True:
            try:
                data = self.sess.run(indata)
                train_dict = self.get_train_dict(data)
                x48 = data['Y'][:, types[0]:types[1],
                                heights[0]:heights[1], :, :]
                # NOTE(review): X channels are indexed with a +7 offset
                # relative to the type window -- presumably the relevant
                # channels start at index 7; confirm against the data layout.
                y48 = data['X'][:, 7 + types[0]:7 + types[1],
                                heights[0]:heights[1], :, :]
                spred = self.sess.run(self.pred, feed_dict=train_dict)
                date = data['Date'][0][0]

                # Denormalize and drop the leading singleton axes
                # (batch size is asserted to be 1 above).
                y48 = (y48 * std + mean)[0, 0, 0, :, :]
                x48 = (x48 * std + mean)[0, 0, 0, :, :]
                pred = (spred * std + mean)[0, 0, 0, :, :]
                # concatenate the ens forecast
                pred = np.concatenate([pred, y48[360:, :]], axis=0)
                value = np.stack([pred, y48, x48], axis=0)

                datestr = str(date)
                dumpdir = osp.join(self.FLAGS.logdir, self.FLAGS.exp, datestr)
                np.save(dumpdir, value)
            except tf.errors.OutOfRangeError:
                break

        return