Code Example #1
File: refmse.py  Project: sprite2200/deep-weather
    def _buildnet(self):
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE, name='X')
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE, name='Y')
        with tf.name_scope('MSE_loss'):
            y = self.y
            # pred is just a slice of the input (no learned transform), so
            # this model acts as a reference baseline for the MSE metric.
            self.pred = getLossSlice(
                self.x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :])
            self.loss = getLoss(y, self.pred, single_layer=True)
            self.evaluate_op = getEvaluate(y, self.pred, self.file_comment,
                                           single_layer=True)
            with tf.name_scope('summary'):
                tf.summary.scalar('MSE loss', self.loss)
        with tf.name_scope('train'):
            # No optimizer here: running train_op only evaluates the loss.
            self.train_op = self.loss
        self.summary_op = tf.summary.merge_all()
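Note: getLoss, getLossSlice, and getEvaluate are project helpers not shown on this page. For orientation only, a minimal MSE-style getLoss consistent with the call above might look like the following; this is a hypothetical sketch, not the project's actual implementation:

import tensorflow as tf

def getLoss(y, pred, single_layer=False):
    # Hypothetical sketch: plain mean squared error over all elements.
    # The real helper may weight parameters/levels differently and use
    # single_layer to slice the target first.
    return tf.reduce_mean(tf.squared_difference(y, pred))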
Code Example #2
File: one2one.py  Project: sprite2200/deep-weather
    def _buildnet(self):
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE, name='X')
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE, name='Y')

        x0 = self.x[:, :X_SHAPE[1] // 2, :, :, :]  # first half of axis 1 (unused here)
        x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]  # second half of axis 1

        with tf.name_scope('one2one'):
            # Residual correction: the one2one layer predicts an adjustment
            # that is added back onto x1.
            self.pred = self.one2oneLayer(x1, "l1") + x1
            self.loss = getLoss(self.y, self.pred)
            self.evaluate_op = getEvaluate(self.y, self.pred,
                                           self.file_comment)
            with tf.name_scope('summary'):
                tf.summary.scalar('MSE loss', self.loss)
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(
                learning_rate=self.FLAGS.lr).minimize(self.loss)
        self.summary_op = tf.summary.merge_all()
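Note: one2oneLayer is defined elsewhere in one2one.py. Judging from the name and the residual use above, a plausible reading is an element-wise affine map (one trainable scale and bias per non-batch position). A hypothetical sketch under that assumption:

    def one2oneLayer(self, x, namespace):
        # Hypothetical: out = w * x + b with one weight and one bias per
        # (non-batch) position; assumes x has a static shape.
        with tf.variable_scope(namespace):
            shape = x.get_shape().as_list()[1:]
            weights = tf.get_variable('weights', shape,
                                      initializer=tf.ones_initializer())
            bias = tf.get_variable('bias', shape,
                                   initializer=tf.zeros_initializer())
            return x * weights + bias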
Code Example #3
    def _buildnet(self):
        with tf.variable_scope('2DUnet'):
            self.x = tf.placeholder(
                tf.float32, shape=X_SHAPE)  # batch dim in X_SHAPE is None so it can be fed later
            self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

            x0 = self.x[:, :X_SHAPE[1] // 2, :, :, :]
            x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]

            # Fold axes 1 and 2 into a single channel axis so the 5D inputs
            # can be fed to the 2D (channels_first) layers below.
            x0d2 = tf.reshape(
                x0, [-1, X_SHAPE[1] * X_SHAPE[2] // 2, X_SHAPE[3], X_SHAPE[4]])
            x1d2 = tf.reshape(
                x1, [-1, X_SHAPE[1] * X_SHAPE[2] // 2, X_SHAPE[3], X_SHAPE[4]])

            xd2 = tf.reshape(
                self.x, [-1, X_SHAPE[1] * X_SHAPE[2], X_SHAPE[3], X_SHAPE[4]])
            yd2 = tf.reshape(
                self.y, [-1, Y_SHAPE[1] * Y_SHAPE[2], Y_SHAPE[3], Y_SHAPE[4]])

            conv_0_1 = self.conv_batch_relu2d(xd2,
                                              base_n_filt,
                                              is_training=self.FLAGS.train)
            conv_0_2 = self.conv_batch_relu2d(conv_0_1,
                                              base_n_filt * 2,
                                              is_training=self.FLAGS.train)
            # Level one
            max_1_1 = tf.layers.max_pooling2d(conv_0_2, [2, 2], [2, 2],
                                              data_format="channels_first")
            conv_1_1 = self.conv_batch_relu2d(max_1_1,
                                              base_n_filt * 2,
                                              is_training=self.FLAGS.train)
            conv_1_2 = self.conv_batch_relu2d(conv_1_1,
                                              base_n_filt * 4,
                                              is_training=self.FLAGS.train)
            # Level two
            max_2_1 = tf.layers.max_pooling2d(conv_1_2, [2, 2], [2, 2],
                                              data_format="channels_first")
            conv_2_1 = self.conv_batch_relu2d(max_2_1,
                                              base_n_filt * 4,
                                              is_training=self.FLAGS.train)
            conv_2_2 = self.conv_batch_relu2d(conv_2_1,
                                              base_n_filt * 8,
                                              is_training=self.FLAGS.train)
            # Level one (decoder path)
            up_conv_2_1 = self.upconvolve2d(conv_2_2,
                                            base_n_filt * 8,
                                            kernel=[2, 2],
                                            stride=[2, 2])
            concat_1_1 = self.centre_crop_and_concat(conv_1_2, up_conv_2_1)
            conv_1_3 = self.conv_batch_relu2d(concat_1_1,
                                              base_n_filt * 4,
                                              is_training=self.FLAGS.train)
            conv_1_4 = self.conv_batch_relu2d(conv_1_3,
                                              base_n_filt * 4,
                                              is_training=self.FLAGS.train)
            # Level zero (decoder path)
            up_conv_1_0 = self.upconvolve2d(conv_1_4,
                                            base_n_filt * 4,
                                            kernel=[2, 2],
                                            stride=[2, 2])
            concat_0_1 = self.centre_crop_and_concat(conv_0_2, up_conv_1_0)
            concat_0_2 = self.centre_crop_and_concat(concat_0_1, xd2)
            conv_0_3 = self.conv_batch_relu2d(concat_0_2,
                                              base_n_filt * 2,
                                              is_training=self.FLAGS.train)
            conv_0_4 = self.conv_batch_relu2d(conv_0_3,
                                              base_n_filt * 2,
                                              is_training=self.FLAGS.train)
            conv_0_5 = tf.layers.conv2d(conv_0_4,
                                        1, [1, 1], [1, 1],
                                        padding='same',
                                        data_format="channels_first")
            self.pred = conv_0_5 + x1d2  # residual connection onto the reshaped x1 slice

        with tf.name_scope('train'):
            self.loss = getLoss(yd2, self.pred)
            self.evaluate_op = self.loss
            self.trainer = tf.train.AdamOptimizer(learning_rate=self.FLAGS.lr)
            self.extra_update_ops = tf.get_collection(
                tf.GraphKeys.UPDATE_OPS
            )  # Ensure correct ordering for batch-norm to work
            with tf.control_dependencies(self.extra_update_ops):
                self.train_op = self.trainer.minimize(self.loss)
            self.summary_op = tf.summary.merge_all()
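Note: centre_crop_and_concat is another project helper. The standard U-Net skip connection crops the encoder feature map to the decoder map's spatial size before concatenating along channels; a minimal channels_first sketch under that assumption (hypothetical, not the confirmed implementation):

    def centre_crop_and_concat(self, enc, dec):
        # Hypothetical sketch: centre-crop `enc` spatially to match `dec`,
        # then concatenate along the channel axis (channels_first).
        enc_shape = tf.shape(enc)
        dec_shape = tf.shape(dec)
        off_h = (enc_shape[2] - dec_shape[2]) // 2
        off_w = (enc_shape[3] - dec_shape[3]) // 2
        cropped = enc[:, :, off_h:off_h + dec_shape[2],
                      off_w:off_w + dec_shape[3]]
        return tf.concat([cropped, dec], axis=1)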
Code Example #4
    def _buildnet(self):
        temp_only = self.FLAGS.temp_only

        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

        if not temp_only:
            x0 = self.x[:, :X_SHAPE[1] // 2, :, :, :]
            x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]
            x = self.x
            y = self.y
        else:
            # temp_only: keep only the first parameter from each half of the
            # input (and the first target parameter).
            x0 = self.x[:, 0:1, :, :, :]
            x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1] // 2 + 1, :, :, :]
            x = tf.concat([x0, x1], axis=1)
            y = self.y[:, 0:1, :, :, :]

        # Fold axes 1 and 2 into a single channel axis for the 2D conv below.
        xd2 = tf.reshape(self.x,
                         [-1, X_SHAPE[1] * X_SHAPE[2], X_SHAPE[3], X_SHAPE[4]])
        x0d2 = tf.reshape(
            x0, [-1, X_SHAPE[1] * X_SHAPE[2] // 2, X_SHAPE[3], X_SHAPE[4]])
        x1d2 = tf.reshape(
            x1, [-1, X_SHAPE[1] * X_SHAPE[2] // 2, X_SHAPE[3], X_SHAPE[4]])
        yd2 = tf.reshape(self.y,
                         [-1, Y_SHAPE[1] * Y_SHAPE[2], Y_SHAPE[3], Y_SHAPE[4]])

        with tf.variable_scope("tile_CNN",
                               reuse=False,
                               initializer=base_init,
                               regularizer=None):
            if Y_SHAPE[2] == 1:
                # Single vertical level: fall back to an ordinary shared 2D conv.
                lcn_kernel = int(self.FLAGS.lcn_kernel[0])
                self.pred = tf.layers.conv2d(x1d2,
                                             1,
                                             kernel_size=lcn_kernel,
                                             strides=[1, 1],
                                             padding='SAME',
                                             data_format="channels_first")
                self.pred = self.pred + x1d2
                self.loss = getLoss(y, self.pred)

            else:
                lcn_kernel = (int(self.FLAGS.lcn_kernel[0]),
                              int(self.FLAGS.lcn_kernel[1]),
                              int(self.FLAGS.lcn_kernel[2]))
                self.myprint("TileCNN: the kernel size is: " + str(lcn_kernel))
                div = (2, 3)  # tile grid passed to tile_conv_layer
                self.pred = tile_conv_layer(x1,
                                            div,
                                            1,
                                            kernel_size=lcn_kernel,
                                            strides=[1, 1, 1],
                                            data_format="channels_first")
                self.pred = self.pred + x1
                self.loss = getLoss(y, self.pred)
                self.evaluate_op = getEvaluate(y, self.pred, self.file_comment)

        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(
                learning_rate=self.FLAGS.lr).minimize(self.loss)
            self.summary_op = tf.summary.merge_all()
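Note: tile_conv_layer is not shown in these snippets. A plausible reading of the name and the div argument is a convolution whose weights are not shared across the whole grid: the field is split into div[0] x div[1] spatial tiles and each tile gets its own conv. A hypothetical 3D sketch under that assumption:

import tensorflow as tf

def tile_conv_layer(x, div, filters, kernel_size, strides,
                    data_format="channels_first"):
    # Hypothetical sketch: unshared convolutions over a div[0] x div[1]
    # tiling of the spatial grid. Assumes x is [N, C, D, H, W] and that
    # H and W are divisible by div[0] and div[1] respectively.
    out_rows = []
    for i, row in enumerate(tf.split(x, div[0], axis=3)):
        out_tiles = []
        for j, tile in enumerate(tf.split(row, div[1], axis=4)):
            out_tiles.append(
                tf.layers.conv3d(tile, filters, kernel_size, strides,
                                 padding='same', data_format=data_format,
                                 name='tile_%d_%d' % (i, j)))
        out_rows.append(tf.concat(out_tiles, axis=4))
    return tf.concat(out_rows, axis=3)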
Code Example #5
    def _buildnet(self):
        temp_only = self.FLAGS.temp_only

        self.x = tf.placeholder(tf.float32, shape=X_SHAPE, name='X')
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE, name='Y')

        if not temp_only:
            x0 = self.x[:, :X_SHAPE[1] // 2, :, :, :]
            x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]
            x = self.x
            y = self.y
            predshape = [-1, Y_SHAPE[1], Y_SHAPE[3], Y_SHAPE[4]]
        else:
            x0 = self.x[:, 0:1, :, :, :]
            x1 = self.x[:, X_SHAPE[1] // 2:X_SHAPE[1] // 2 + 1, :, :, :]
            x = tf.concat([x0, x1], axis=1)
            y = self.y[:, 0:1, :, :, :]
            predshape = [-1, 1, Y_SHAPE[3], Y_SHAPE[4]]

        with tf.variable_scope("LCN0", reuse=False, regularizer=reg_init):

            lcn_kernel = (int(self.FLAGS.lcn_kernel[0]),
                          int(self.FLAGS.lcn_kernel[1]),
                          int(self.FLAGS.lcn_kernel[2]))
            self.myprint("LCN0: the kernel size is: " + str(lcn_kernel))
            out, reg0 = LCN3D_layer(x1,
                                    channels=1,
                                    kernel=lcn_kernel,
                                    namespace='conv_local3',
                                    regularize=self.FLAGS.regularize,
                                    alpha=self.FLAGS.alpha,
                                    with_affine=True)

        with tf.variable_scope("Unet",
                               reuse=False,
                               initializer=base_init,
                               regularizer=reg_init):
            conv_0_1 = conv_batch_relu3d_layer(out,
                                               base_n_filt,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_0_2 = conv_batch_relu3d_layer(conv_0_1,
                                               base_n_filt * 2,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level one
            max_1_1 = tf.layers.max_pooling3d(
                conv_0_2, [1, 2, 2], [1, 2, 2], data_format="channels_first"
            )  # stride/kernel previously [2, 2, 2]
            # pool_size: an integer or tuple/list of 3 ints (pool_depth, pool_height, pool_width)

            conv_1_1 = conv_batch_relu3d_layer(max_1_1,
                                               base_n_filt * 2,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_1_2 = conv_batch_relu3d_layer(conv_1_1,
                                               base_n_filt * 4,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level two
            max_2_1 = tf.layers.max_pooling3d(
                conv_1_2, [1, 2, 2], [1, 2, 2], data_format="channels_first"
            )  # Stride, Kernel previously [2,2,2]
            conv_2_1 = conv_batch_relu3d_layer(max_2_1,
                                               base_n_filt * 4,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_2_2 = conv_batch_relu3d_layer(conv_2_1,
                                               base_n_filt * 8,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level three
            max_3_1 = tf.layers.max_pooling3d(
                conv_2_2, [1, 2, 2], [1, 2, 2], data_format="channels_first"
            )  # Stride, Kernel previously [2,2,2]
            conv_3_1 = conv_batch_relu3d_layer(max_3_1,
                                               base_n_filt * 8,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_3_2 = conv_batch_relu3d_layer(conv_3_1,
                                               base_n_filt * 16,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level two (decoder path)
            up_conv_3_2 = upconv3d_layer(
                conv_3_2, base_n_filt * 16
            )  # kernel=2, stride=[1, 2, 2] args disabled; stride previously [2, 2, 2]
            concat_2_1 = centre_crop_and_concat_layer(conv_2_2, up_conv_3_2)
            conv_2_3 = conv_batch_relu3d_layer(concat_2_1,
                                               base_n_filt * 8,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_2_4 = conv_batch_relu3d_layer(conv_2_3,
                                               base_n_filt * 8,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level one (decoder path)
            up_conv_2_1 = upconv3d_layer(
                conv_2_4, base_n_filt * 8
            )  # kernel=2, stride=[1, 2, 2] args disabled; stride previously [2, 2, 2]
            concat_1_1 = centre_crop_and_concat_layer(conv_1_2, up_conv_2_1)
            conv_1_3 = conv_batch_relu3d_layer(concat_1_1,
                                               base_n_filt * 4,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_1_4 = conv_batch_relu3d_layer(conv_1_3,
                                               base_n_filt * 4,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            # Level zero (decoder path)
            up_conv_1_0 = upconv3d_layer(
                conv_1_4, base_n_filt * 4
            )  # kernel=2, stride=[1, 2, 2] args disabled; stride previously [2, 2, 2]
            concat_0_1 = centre_crop_and_concat_layer(conv_0_2, up_conv_1_0)
            conv_0_3 = conv_batch_relu3d_layer(concat_0_1,
                                               base_n_filt * 2,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)
            conv_0_4 = conv_batch_relu3d_layer(conv_0_3,
                                               base_n_filt * 2,
                                               is_training=self.FLAGS.train,
                                               is_pad=self.FLAGS.is_pad)

            conv_0_5 = tf.layers.conv3d(
                conv_0_4,
                y.shape[1], [1, 1, 1], [1, 1, 1],
                padding='same',
                data_format="channels_first")  # output channels match y rather than OUTPUT_CLASSES
            out = conv_0_5

        with tf.variable_scope("LCN0", reuse=False, regularizer=reg_init):

            lcn_kernel = (int(self.FLAGS.lcn_kernel[0]),
                          int(self.FLAGS.lcn_kernel[1]),
                          int(self.FLAGS.lcn_kernel[2]))
            self.myprint("LCN1: the kernel size is: " + str(lcn_kernel))
            out, reg1 = LCN3D_layer(out,
                                    channels=1,
                                    kernel=lcn_kernel,
                                    namespace='conv_local3',
                                    regularize=self.FLAGS.regularize,
                                    alpha=self.FLAGS.alpha,
                                    with_affine=True)
        self.pred = out + x1

        with tf.name_scope('train'):
            self.loss = getLoss(y, self.pred) + reg0 + reg1
            self.evaluate_op = getEvaluate(y, self.pred, self.file_comment)
            self.trainer = tf.train.AdamOptimizer(learning_rate=self.FLAGS.lr)
            self.extra_update_ops = tf.get_collection(
                tf.GraphKeys.UPDATE_OPS
            )  # Ensure correct ordering for batch-norm to work
            with tf.control_dependencies(self.extra_update_ops):
                self.train_op = self.trainer.minimize(self.loss)
            self.summary_op = tf.summary.merge_all()
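Note: conv_batch_relu3d_layer is used throughout this example but defined elsewhere. A minimal sketch of the conventional conv → batch-norm → ReLU block it presumably wraps, with the is_pad flag assumed to toggle 'same' vs. 'valid' padding:

import tensorflow as tf

def conv_batch_relu3d_layer(x, filters, kernel=(3, 3, 3), stride=(1, 1, 1),
                            is_training=True, is_pad=True):
    # Hypothetical sketch: 3D conv, then batch norm on the channel axis,
    # then ReLU. is_pad is assumed to select the padding mode.
    conv = tf.layers.conv3d(x, filters, kernel, stride,
                            padding='same' if is_pad else 'valid',
                            data_format="channels_first")
    norm = tf.layers.batch_normalization(conv, axis=1,
                                         training=is_training)
    return tf.nn.relu(norm)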
Code Example #6
    def _buildnet(self):
        self.varlist = None
        if self.FLAGS.recompute:
            # varlist is passed through to the U-Net builders below when the
            # recompute option is enabled.
            self.varlist = []
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

        # Random crop: div sets the crop grid passed to data_crop.
        div = [9, 3]

        x, y = self.data_crop(div=div,
                              crop=self.FLAGS.crop,
                              stack=self.FLAGS.crop_stack)

        if self.FLAGS.img_emb:
            # Learned embedding image, appended as an extra input channel.
            # Static shapes are needed here: tf.shape() returns a tensor,
            # and list(tf.shape(x)[2:]) is not iterable in graph mode.
            emb_img = tf.Variable(
                tf.zeros([1, 1] + x.get_shape().as_list()[2:]),
                name='emb_img')
            x = tf.concat(
                [x, tf.tile(emb_img, [tf.shape(x)[0], 1, 1, 1, 1])], axis=1)

        # Forecast half of the input: the residual base added back onto the
        # output of the 1-, 2-, and 3-level U-Nets below.
        x1 = x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]

        with tf.variable_scope("Unet",
                               reuse=False,
                               initializer=base_init,
                               regularizer=reg_init):
            if self.FLAGS.unet_levels == 3:
                self.pred = unet3_l3(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 2:
                self.pred = unet3_l2(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 1:
                self.pred = unet3_l1(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 0:
                self.pred = simple_conv(self.FLAGS.nfilters,
                                        x,
                                        y,
                                        deconv=False,
                                        is_training=self.FLAGS.train,
                                        is_pad=self.FLAGS.is_pad,
                                        varlist=self.varlist) + x1
            else:
                assert 0, "unet_levels not supported; only 0, 1, 2, 3 are supported"

        self.loss = getLoss(y,
                            self.pred,
                            self.FLAGS.L1_loss,
                            ssim=self.FLAGS.ssim)
        variable_summaries(self.loss, 'loss')
        self.evaluate_op = getEvaluate(y, self.pred, self.file_comment)
        self.build_train_op()
        self.summary_op = tf.summary.merge_all()
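Note: variable_summaries follows the standard TensorFlow summary-helper pattern; a sketch assuming that convention (not necessarily the project's exact helper):

import tensorflow as tf

def variable_summaries(var, name):
    # Hypothetical sketch: scalar statistics plus a histogram, all picked
    # up later by tf.summary.merge_all().
    with tf.name_scope(name + '_summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        tf.summary.scalar('stddev',
                          tf.sqrt(tf.reduce_mean(tf.square(var - mean))))
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)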