Code Example #1
    def max_depool(self,
                   in_layer=None,
                   depth_factor=2,
                   height_factor=2,
                   width_factor=2):
        if in_layer is None:
            in_layer = self.layer
        self.layer = in_layer  # operate on the layer that was passed in

        # Alternative using a deconvolution layer:
        # lo, li = self.deconvolutional_layer(1, [1,1], None, stride=[2,2], name="g_D1", reuse=reuse, batch_norm=use_batch_norm, train=train)
        # return lo
        # Earlier 2D variant based on tf.image.resize_images:
        # if len(self.layer.get_shape()) == 4:
        #     outWidth = in_layer.get_shape()[2] * window_stride[0] + window_size[0] - window_stride[0]
        #     outHeight = in_layer.get_shape()[1] * window_stride[1] + window_size[1] - window_stride[1]
        #     self.layer = tf.image.resize_images(in_layer, [int(outHeight), int(outWidth)], 1)  # 1 = ResizeMethod.NEAREST_NEIGHBOR
        #     print("Max Depool {}: {}".format(window_size, self.layer.get_shape()))
        # kb is the Keras backend (e.g. from tensorflow.keras import backend as kb)
        if len(self.layer.get_shape()) == 4:  # 2D data: (batch, height, width, channels)
            #self.layer = tf.contrib.keras.backend.resize_images(self.layer, height_factor, width_factor, 'channels_last')
            self.layer = kb.resize_images(self.layer, height_factor,
                                          width_factor, 'channels_last')
            print("Max Depool : {}".format(self.layer.get_shape()))
        if len(self.layer.get_shape()) == 5:  # 3D data: (batch, depth, height, width, channels)
            #self.layer = tf.contrib.keras.backend.resize_volumes(self.layer, depth_factor, height_factor, width_factor, 'channels_last')
            self.layer = kb.resize_volumes(self.layer, depth_factor,
                                           height_factor, width_factor,
                                           'channels_last')
            print("Max Depool : {}".format(self.layer.get_shape()))
        return self.layer
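
A minimal sketch of what the two resize calls above do (nearest-neighbor
repetition along each spatial axis), assuming kb is the Keras backend:

import numpy as np
import tensorflow.keras.backend as kb

x = kb.constant(np.arange(8.0).reshape(1, 2, 2, 2, 1))  # (batch, depth, height, width, channels)
y = kb.resize_volumes(x, 2, 2, 2, 'channels_last')
print(kb.int_shape(y))  # (1, 4, 4, 4, 1): every voxel repeated twice per axis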
Code Example #2
File: autoencoder_layers.py  Project: pkainz/keras
    def call(self, x, mask=None):
        output = K.resize_volumes(x, self.size[0], self.size[1], self.size[2],
                                  self.dim_ordering)
        # gate the upsampled values by the gradient of the paired (master)
        # pooling layer; assumes the Theano backend, where K.gradients
        # returns a tensor rather than a list
        f = K.gradients(K.sum(self._master_layer.output),
                        self._master_layer.input) * output

        return f
Code Example #3
    def call(self, x, mask=None):
        # max-unpooling ('th' ordering): upsample, zero-pad to the recorded
        # pre-pooling shape, then keep only the argmax positions in self.ind
        # TODO: implement for dim_ordering='tf'
        img = K.resize_volumes(x, self.size[0], self.size[1], self.size[2],
                               self.dim_ordering)
        padded = T.zeros((img.shape[0], img.shape[1], self.shape[0],
                          self.shape[1], self.shape[2]))
        padded = T.set_subtensor(
            padded[:, :, :img.shape[2], :img.shape[3], :img.shape[4]], img)
        return T.switch(self.ind, padded, T.zeros_like(padded))
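
A minimal sketch of the masking step at the end of call(), in raw Theano
(assumes Theano is installed; ind here is a hypothetical stand-in for
self.ind, the 0/1 mask of argmax positions recorded during pooling):

import theano
import theano.tensor as T

padded = T.TensorType('float32', (False,) * 5)('padded')
ind = T.TensorType('float32', (False,) * 5)('ind')
unpooled = T.switch(ind, padded, T.zeros_like(padded))  # zero out non-argmax cells
unpool_fn = theano.function([padded, ind], unpooled)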
Code Example #4
File: GAN.py  Project: wangqifan444/tempoGAN1
    def max_depool(self,
                   in_layer=None,
                   depth_factor=2,
                   height_factor=2,
                   width_factor=2):
        if in_layer is None:
            in_layer = self.layer
        self.layer = in_layer  # operate on the layer that was passed in

        if len(self.layer.get_shape()) == 4:  # 2D data: (batch, height, width, channels)
            self.layer = kb.resize_images(self.layer, height_factor,
                                          width_factor, 'channels_last')
            print("Max Depool : {}".format(self.layer.get_shape()))
        if len(self.layer.get_shape()) == 5:  # 3D data: (batch, depth, height, width, channels)
            self.layer = kb.resize_volumes(self.layer, depth_factor,
                                           height_factor, width_factor,
                                           'channels_last')
            print("Max Depool : {}".format(self.layer.get_shape()))
        return self.layer
Code Example #5
File: mpool.py  Project: imlab-uiip/keras-segnet
def m_maxpool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
                dim_ordering='th'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 4, 1, 2, 3))

    # TODO: check dimensions manipulations
    # pooling over conv_dim2, conv_dim1 (the last two spatial dimensions)
    out_shape = x.shape
    output, ind1 = m_maxpool_2d_op(input=x,
                                   ds=(pool_size[1], pool_size[2]),
                                   st=(strides[1], strides[2]),
                                   ignore_border=ignore_border,
                                   padding=padding)

    # pooling over conv_dim3
    pool_out, ind2 = m_maxpool_2d_op(input=output.dimshuffle(0, 1, 4, 3, 2),
                                     ds=(1, pool_size[0]),
                                     st=(1, strides[0]),
                                     ignore_border=ignore_border,
                                     padding=padding)

    pool_out = pool_out.dimshuffle(0, 1, 4, 3, 2)
    ind2 = ind2.dimshuffle(0, 1, 4, 3, 2)

    ind2 = K.resize_volumes(ind2, 1, pool_size[1], pool_size[2], dim_ordering)
    padded_ind2 = T.zeros(out_shape)
    padded_ind2 = T.set_subtensor(padded_ind2[:, :, :ind2.shape[2], :ind2.shape[3], :ind2.shape[4]], ind2)
    ind = padded_ind2 * ind1
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))

    return pool_out, ind
Code Example #6
File: model_i3d_flow.py  Project: aayushjr/ssa2d
def adjust(x, depth_factor, height_factor, width_factor, reps):
    x = K.resize_volumes(x, depth_factor, height_factor, width_factor,
                         'channels_last')
    x = K.repeat_elements(x, rep=reps, axis=-1)

    return x
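
Hypothetical usage of adjust() above (the input shape and factors here are
made up), assuming K is the Keras backend:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.zeros((1, 2, 4, 4, 8), dtype='float32'))
y = adjust(x, depth_factor=2, height_factor=2, width_factor=2, reps=2)
print(K.int_shape(y))  # (1, 4, 8, 8, 16): spatial dims doubled, channels repeated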
Code Example #7
    def call(self, inputs):
        return K.resize_volumes(inputs, self.size[0], self.size[1],
                                self.size[2], self.data_format)
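
For reference, a sketch showing that this call matches Keras's built-in
UpSampling3D layer (which uses K.resize_volumes internally); the shape here
is arbitrary:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = K.constant(np.random.rand(1, 2, 3, 4, 5).astype('float32'))
a = K.resize_volumes(x, 2, 2, 2, 'channels_last')
b = tf.keras.layers.UpSampling3D(size=(2, 2, 2))(x)
print(np.allclose(K.eval(a), K.eval(b)))  # True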
Code Example #8
def main():
    # mirrored_strategy = tf.distribute.MirroredStrategy()

    # with mirrored_strategy.scope():
    # with tf.compat.v1.Session(config=tf_config) as sess:
    tfrecords_test_vol_filename = DATA_DIR + '/output/fmri/tf_data/XXX.tfrecords'
    # train
    # tfrecords_tr_vol_filename = DATA_DIR + '/output/fmri/tf_data/train_data_vol.tfrecords'
    # validation
    # tfrecords_val_vol_filename = DATA_DIR + '/output/fmri/tf_data/val_data_vol.tfrecords'

    global batch_cost
    inputs_ = tf.compat.v1.placeholder(tf.float32, input_shape, name='inputs')
    targets_ = tf.compat.v1.placeholder(tf.float32, input_shape, name='targets')

    # encoder

    print('shape input:', inputs_.shape)
    conv1 = tf.keras.layers.Conv3D(
        filters=16, kernel_size=(3, 3, 3), strides=stride, padding=padding, activation=tf.nn.relu)(inputs_)
    maxpool1 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(3, 2, 2), padding=padding)(conv1)
    print('shape maxpool1:', maxpool1.shape)
    conv2 = tf.keras.layers.Conv3D(
        filters=32, kernel_size=(3, 3, 3), strides=stride, padding=padding, activation=tf.nn.relu)(maxpool1)
    maxpool2 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(3, 3, 2), padding=padding)(conv2)

    print('shape maxpool2:', maxpool2.shape)
    conv3 = tf.keras.layers.Conv3D(
        filters=96, kernel_size=(2, 2, 2), strides=stride, padding=padding, activation=tf.nn.relu)(maxpool2)
    maxpool3 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(1, 1, 2), padding=padding)(conv3)
    print('shape maxpool3:', maxpool3.shape)
    # decoder
    unpool1 = K.resize_volumes(maxpool3, 1, 1, 2, "channels_last")
    deconv1 = tf.keras.layers.Conv3DTranspose(filters=96, kernel_size=(2, 2, 2), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool1)
    print('shape deconv1:', deconv1.shape)
    unpool2 = K.resize_volumes(deconv1, 3, 3, 2, "channels_last")
    deconv2 = tf.keras.layers.Conv3DTranspose(filters=32, kernel_size=(3, 3, 3), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool2)

    print('shape deconv2:', deconv2.shape)
    # (64, 24, 48, 32, 32)
    unpool3 = K.resize_volumes(deconv2, 3, 2, 2, "channels_last")
    deconv3 = tf.keras.layers.Conv3DTranspose(filters=16, kernel_size=(3, 3, 3), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool3)

    print('shape deconv3:', deconv3.shape)
    # (64, 72, 96, 64, 16)
    output = tf.keras.layers.Dense(
        units=1, activation=None)(deconv3)
    print(output.shape)
    output = tf.reshape(output, input_shape)
    loss = tf.divide(tf.norm(tf.subtract(targets_, output), ord='fro', axis=[1, 2]),
                     tf.norm(targets_, ord='fro', axis=[1, 2]))

    # loss = tf.divide(tf.norm(tf.subtract(targets_, output), ord='fro', axis=[-2, -1]),
    #                  tf.norm(targets_, ord='fro', axis=[-2, -1]))
    cost = tf.reduce_mean(loss)
    opt = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(cost)

    # output shape = input shape

    all_saver = tf.compat.v1.train.Saver(max_to_keep=None)

    # initializing a saver to save weights
    # enc_saver = tf.compat.v1.train.Saver({'conv1': conv1, 'maxpool1': maxpool1,
    #                             'conv2': conv2, 'maxpool2': maxpool2, 'conv3': conv3, 'maxpool3': maxpool3})
    # initializing a restorer to restore weights
    # res_saver = tf.compat.v1.train.import_meta_graph(ws_path+'weights.meta')

    # summary nodes
    # tf.summary.scalar("loss", loss)
    tf.summary.scalar("cost", cost)
    tf.summary.histogram("conv1", conv1)
    # tf.summary.histogram("conv1_1", conv1_1)
    tf.summary.histogram("maxpool1", maxpool1)
    tf.summary.histogram("conv2", conv2)
    tf.summary.histogram("maxpool2", maxpool2)
    tf.summary.histogram("conv3", conv3)
    tf.summary.histogram("maxpool3", maxpool3)
    # tf.summary.histogram("conv4", conv4)
    # tf.summary.histogram("deconv4", deconv4)
    tf.summary.histogram("unpool3", unpool3)
    tf.summary.histogram("deconv3", deconv3)
    tf.summary.histogram("unpool2", unpool2)
    tf.summary.histogram("deconv2", deconv2)
    tf.summary.histogram("unpool1", unpool1)
    # tf.summary.histogram("deconv1_1", deconv1_1)
    tf.summary.histogram("deconv1", deconv1)

    # summary operation and a writer to save it.
    summary_op = tf.compat.v1.summary.merge_all()
    writer = tf.compat.v1.summary.FileWriter(logs_path, graph=tf.compat.v1.get_default_graph())

    # end of tensorflow graph

    # start of training
    counter = 0
    try:
        sess = tf.compat.v1.Session()
        # initialize all graph variables before training
        sess.run(tf.compat.v1.global_variables_initializer())
        # making operation-variables to run our methods whenever needed during training
        # (build the input pipeline once, outside the loop, so the graph does
        # not grow on every iteration)
        fetch_op_tr = input_pipeline(tfrecords_test_vol_filename)
        # coordinator and queue runners to manage parallel sampling of batches from the input pipeline
        coord = tf.compat.v1.train.Coordinator()
        threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
        while not coord.should_stop():
            print('\nEpoch\t' + str(counter + 1) + '/' + str(n_epochs))
            # # print(n_batches) 5670
            for i in range(n_batches):
                # fetching a batch and training on the denoising objective:
                # noisy volume as input, clean volume as target
                nvol = sess.run(fetch_op_tr)
                batch_cost, _ = sess.run([cost, opt],
                                         feed_dict={inputs_: nvol + noise_factor * np.random.randn(*nvol.shape),
                                                    targets_: nvol})
                # if i % 1000 == 0:
                #     print("batch_cost:", batch_cost)
                print('\r' + str(((i + 1) * 100) / n_batches) + '%', end='')
                sys.stdout.flush()
            counter = counter + 1
            # print("Epoch: {}/{}...".format(counter, n_epochs), "Training loss: {:.4f}".format(batch_cost))
            # save weights and biases of the model
            all_saver.save(sess, ws_path + "model.ckpt", global_step=counter)
            # save weights and biases of the encoder
            # enc_saver.save(sess, ws_path + "enc.ckpt", global_step=counter)
            print('Weights saved')

            # saving summary
            # summary, _ = sess.run([summary_op, opt], feed_dict={inputs_: nvol, targets_: nvol})
            # writer.add_summary(summary, counter)
            print('Summary saved')

            if counter >= n_epochs:
                break
        # checking validation error
        # vol = sess.run(fetch_op_val)
        # nvol = np.asarray(vol)
        # batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: nvol, targets_: nvol})
        # print('Validation error' + str(batch_cost))
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')

    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Code Example #9
def main():
    # start of tensorflow graph
    # input and target placeholders
    global nvol, batch_cost
    # print("input_shape:", input_shape)
    # inputs_ = tf.Variable(shape=input_shape, name="inputs")
    # targets_ = tf.Variable(shape=input_shape, name="targets")

    inputs_ = tf.placeholder(tf.float32, input_shape, name='inputs')
    targets_ = tf.placeholder(tf.float32, input_shape, name='targets')

    conv1 = tf.keras.layers.Conv3D(
        filters=16, kernel_size=(3, 3, 3), strides=stride, padding=padding, activation=tf.nn.relu)(inputs_)
    maxpool1 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(3, 2, 2), padding=padding)(conv1)
    # print('shape maxpool1:', maxpool1.shape)
    conv2 = tf.keras.layers.Conv3D(
        filters=32, kernel_size=(3, 3, 3), strides=stride, padding=padding, activation=tf.nn.relu)(maxpool1)
    maxpool2 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(3, 3, 2), padding=padding)(conv2)

    # print('shape:maxpool2', maxpool2.shape)
    conv3 = tf.keras.layers.Conv3D(
        filters=96, kernel_size=(2, 2, 2), strides=stride, padding=padding, activation=tf.nn.relu)(maxpool2)
    maxpool3 = tf.keras.layers.MaxPool3D(
        pool_size=(2, 2, 2), strides=(1, 1, 2), padding=padding)(conv3)
    # print('shape maxpool3:', maxpool3.shape)
    # decoder
    unpool1 = K.resize_volumes(maxpool3, 1, 1, 2, "channels_last")
    deconv1 = tf.keras.layers.Conv3DTranspose(filters=96, kernel_size=(2, 2, 2), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool1)
    # print('shape deconv1:', deconv1.shape)
    unpool2 = K.resize_volumes(deconv1, 3, 3, 2, "channels_last")
    deconv2 = tf.keras.layers.Conv3DTranspose(filters=32, kernel_size=(3, 3, 3), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool2)

    # print('shape deconv2:', deconv2.shape)
    # (64, 24, 48, 32, 32)
    unpool3 = K.resize_volumes(deconv2, 3, 2, 2, "channels_last")
    deconv3 = tf.keras.layers.Conv3DTranspose(filters=16, kernel_size=(3, 3, 3), strides=stride,
                                              padding=padding, activation=tf.nn.relu)(unpool3)

    # print('shape deconv3:', deconv3.shape)
    # (64, 72, 96, 64, 16)
    output = tf.keras.layers.Dense(
        units=1, activation=None)(deconv3)

    loss = tf.divide(tf.norm(tf.subtract(targets_, output), ord='fro', axis=[0, -1]),
                     tf.norm(targets_, ord='fro', axis=[0, -1]))
    # print(loss.shape)
    print("loss:", loss)
    cost = tf.reduce_mean(loss, name='loss')
    # print(cost)
    print("cost:", cost)
    opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    print("opt:", opt)

    all_saver = tf.train.Saver(max_to_keep=None)
    # conv1_v = tf.assign("conv1_v", conv1)
    # maxpool1_v = tf.assign("maxpool1_v", maxpool1)
    # conv2_v = tf.assign("conv2_v", conv2)
    # maxpool2_v = tf.assign("maxpool2_v", maxpool2)
    # conv3_v = tf.assign("conv3_v", conv3)
    # maxpool3_v = tf.assign("maxpool3_v", maxpool3)
    # enc_saver = tf.train.Saver({'conv1': conv1, 'maxpool1': maxpool1,
    #                             'conv2': conv2, 'maxpool2': maxpool2,
    #                             'conv3': conv3, 'maxpool3': maxpool3})
    # # initializing a saver to save weights
    # enc_saver = tf.train.Saver({'conv1': conv1_v, 'maxpool1': maxpool1_v,
    #                             'conv2': conv2_v, 'maxpool2': maxpool2_v,
    #                             'conv3': conv3_v, 'maxpool3': maxpool3_v})
    # initializing a restorer to restore weights
    # res_saver = tf.train.import_meta_graph('/weights/model.ckpt-1.meta')
    #
    # summary nodes
    tf.summary.scalar("loss", loss)
    tf.summary.scalar("cost", cost)
    tf.summary.histogram("conv1", conv1)
    tf.summary.histogram("maxpool1", maxpool1)
    tf.summary.histogram("conv2", conv2)
    tf.summary.histogram("maxpool2", maxpool2)
    tf.summary.histogram("conv3", conv3)
    tf.summary.histogram("maxpool3", maxpool3)
    tf.summary.histogram("unpool3", unpool3)
    tf.summary.histogram("deconv3", deconv3)
    tf.summary.histogram("unpool2", unpool2)
    tf.summary.histogram("deconv2", deconv2)
    tf.summary.histogram("unpool1", unpool1)
    tf.summary.histogram("deconv1", deconv1)

    # summary operation and a writer to save it.
    summary_op = tf.summary.merge_all(key='summaries')
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    # end of tensorflow graph

    # initializing tensorflow graph and a session
    init_op = tf.global_variables_initializer()
    sess = tf.Session(config=config)
    sess.run(init_op)

    # making operation-variables to run our methods whenever needed during training
    fetch_op_tr = input_pipeline_tr()
    fetch_op_val = input_pipeline_val()

    # coordinator and queue runners to manage parallel sampling of batches from the input pipeline
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # start of training
    counter = 0
    try:

        while not coord.should_stop():
            print('\nEpoch\t' + str(counter + 1) + '/' + str(n_epochs))

            for i in range(n_batches):
                # fetching a batch
                vol = sess.run(fetch_op_tr)
                nvol = np.asarray(vol)
                noisy_nvol = nvol + noise_factor * np.random.randn(*nvol.shape)
                batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_nvol, targets_: nvol})
                if i % 1000 == 0:
                    print("batch_cost", batch_cost)
                print('\r' + str(((i + 1) * 100) / n_batches) + '%', end='')
                sys.stdout.flush()
            counter = counter + 1
            print("Epoch: {}/{}...".format(counter, n_epochs), "Training loss: {:.4f}".format(batch_cost))
            print("time cost: {}".format(time.time()))
            # save weights and biases of the model
            all_saver.save(sess, ws_path + "model.ckpt", global_step=counter)
            # save weights and biases of the encoder
            # enc_saver.save(sess, ws_path + "enc.ckpt", global_step=counter)
            print('Weights saved')

            # saving summary
            # print(nvol.shape)
            # print(nvol.shape)
            # summary, _ = sess.run([summary_op, opt], feed_dict={inputs_: nvol, targets_: nvol})
            # print("summary:", summary)
            # print("counter:", counter)
            # writer.add_summary(summary, counter)
            print('Summary saved')

            if counter >= n_epochs:
                break
        # checking validation error (evaluate the cost only; running the
        # optimizer here would train on validation data)
        vol = sess.run(fetch_op_val)
        nvol = np.asarray(vol)
        batch_cost = sess.run(cost, feed_dict={inputs_: nvol, targets_: nvol})
        print('Validation error: ' + str(batch_cost))
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')

    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()

    # code to restore weights
    with tf.Session(config=config) as sess:
        # checkpoints were saved with a global_step suffix (model.ckpt-N),
        # so restore the most recent one
        all_saver.restore(sess, tf.train.latest_checkpoint(ws_path))
        print("Model restored.")