Code Example #1
File: test_env.py  Project: filick/predrnn-pp
    def __init__(self):
        # inputs
        self.x = tf.placeholder(tf.float32, [1, 20, 16, 16, 16])

        self.mask_true = tf.placeholder(tf.float32, [1, 9, 16, 16, 16])

        loss_train = []
        self.pred_seq = []
        self.tf_lr = 0.001
        num_hidden = [128, 64, 64, 64]
        num_layers = len(num_hidden)
        with tf.variable_scope(tf.get_variable_scope()):
            # define a model
            output_list = models_factory.construct_model(
                'predrnn_pp', self.x, self.mask_true, num_layers, num_hidden,
                5, 1, 20, 10, True)  # filter_size, stride, seq_length, input_length, layer_norm (cf. Example #2)
            gen_ims = output_list[0]
            loss = output_list[1]
            pred_ims = gen_ims[:, 9:]
            self.loss_train = loss / 1  # divide by batch size (1 here)
            # collect the predicted frames
            self.pred_seq.append(pred_ims)

        self.train_op = tf.train.AdamOptimizer(self.tf_lr).minimize(loss)

        # session
        variables = tf.global_variables()
        self.saver = tf.train.Saver(variables)
        init = tf.global_variables_initializer()
        configProt = tf.ConfigProto()
        configProt.gpu_options.allow_growth = True
        configProt.allow_soft_placement = True
        self.sess = tf.Session(config=configProt)
        self.sess.run(init)
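
A minimal usage sketch for this example (the enclosing class is not shown in the excerpt, so `TestEnv` is an assumed name; `numpy` is assumed to be available):

    import numpy as np

    env = TestEnv()  # assumed class name wrapping the __init__ above
    # dummy inputs whose shapes match the placeholders defined above
    x_batch = np.zeros((1, 20, 16, 16, 16), dtype=np.float32)
    mask_batch = np.zeros((1, 9, 16, 16, 16), dtype=np.float32)
    _, loss_value = env.sess.run(
        [env.train_op, env.loss_train],
        feed_dict={env.x: x_batch, env.mask_true: mask_batch})
    print('training loss:', loss_value)
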
Code Example #2
    def __init__(self):
        # inputs
        self.x = tf.placeholder(tf.float32,
                                [None,
                                 FLAGS.seq_length,
                                 FLAGS.img_height,
                                 FLAGS.img_width,
                                 int(FLAGS.patch_size_height*FLAGS.patch_size_width*FLAGS.img_channel)])

        self.mask_true = tf.placeholder(tf.float32,
                                        [None,
                                         FLAGS.seq_length-FLAGS.input_length-1,
                                         FLAGS.img_height,
                                         FLAGS.img_width,
                                         int(FLAGS.patch_size_height*FLAGS.patch_size_width*FLAGS.img_channel)])
        self.batchsize = tf.placeholder(tf.int32, [], name='batchsize')

        grads = []
        self.pred_seq = []
        self.tf_lr = tf.placeholder(tf.float32, shape=[])
        num_hidden = [int(x) for x in FLAGS.num_hidden.split(',')]
        print("hidden shape is ", num_hidden, flush=True)
        num_layers = len(num_hidden)
        with tf.variable_scope(tf.get_variable_scope()):
            # define a model
            output_list = models_factory.construct_model(
                FLAGS.model_name, self.x,
                self.mask_true,
                num_layers, num_hidden,
                FLAGS.filter_size, FLAGS.stride,
                FLAGS.seq_length, FLAGS.input_length,
                FLAGS.layer_norm,
                self.batchsize)
            gen_ims = output_list[0]
            loss = output_list[1]
            pred_ims = gen_ims[:, FLAGS.input_length - 1:]
            self.loss_train = loss
            # gradients (computed here but unused; the train_op below calls minimize directly)
            all_params = tf.trainable_variables()
            grads.append(tf.gradients(loss, all_params))
            self.pred_seq.append(pred_ims)

        self.train_op = tf.train.AdamOptimizer(FLAGS.lr).minimize(loss)

        # session
        variables = tf.global_variables()
        self.saver = tf.train.Saver(variables)
        init = tf.global_variables_initializer()
        configProt = tf.ConfigProto()
        configProt.gpu_options.allow_growth = True
        configProt.allow_soft_placement = True
        self.sess = tf.Session(config=configProt)
        self.sess.run(init)
        if FLAGS.pretrained_model:
            try:
                self.saver.restore(self.sess, tf.train.latest_checkpoint(FLAGS.pretrained_model))
            except Exception:
                # fall back to the fresh initialization when no checkpoint can be restored
                pass
Code Example #3
    def __init__(self):
        self.gpus = tf_util.available_gpus()
        self.num_gpus = len(self.gpus)
        if self.num_gpus:
            assert FLAGS.batch_size % self.num_gpus == 0, "Batch size should be an integral multiple of number of GPUs"
        # inputs
        self.x = tf.placeholder(tf.float32, [
            FLAGS.batch_size, FLAGS.input_length, FLAGS.img_width //
            FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,
            FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel
        ])

        x_splits = tf.split(self.x, max(self.num_gpus, 1))

        num_hidden = [int(x) for x in FLAGS.num_hidden.split(',')]
        num_layers = len(num_hidden)

        pred_seq = []
        devices = self.gpus or ['/cpu:0']
        with tf.variable_scope(tf.get_variable_scope()) as outer_scope:
            for i, d in enumerate(devices):
                with tf.device(d), tf.name_scope('tower_%d' % i):
                    pred_ims = models_factory.construct_model(
                        FLAGS.model_name, x_splits[i], None, num_layers,
                        num_hidden, FLAGS.filter_size, FLAGS.stride,
                        FLAGS.pred_length, FLAGS.input_length,
                        FLAGS.layer_norm)
                    pred_seq.append(pred_ims)
                    outer_scope.reuse_variables()

        # inference only: no gradients here, just gather the per-tower predictions
        with tf.name_scope("apply_gradients"), tf.device(devices[0]):
            self.pred_seq = tf.concat(pred_seq, 0)

        # session
        # exclude per-sequence state variables from the checkpoint
        variables = [v for v in tf.global_variables()
                     if 'states_layer' not in v.name and 'states_global' not in v.name]
        self.saver = tf.train.Saver(variables)
        init = tf.global_variables_initializer()
        configProt = tf.ConfigProto()
        configProt.gpu_options.allow_growth = True
        configProt.allow_soft_placement = True
        self.sess = tf.Session(config=configProt)
        self.sess.run(init)
        if FLAGS.pretrained_model:
            self.saver.restore(self.sess, FLAGS.pretrained_model)
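
A minimal inference sketch for Example #3 (again, `Model` is an assumed class name; the array shape must match the `self.x` placeholder above):

    import numpy as np

    model = Model()  # assumed class name wrapping the __init__ above
    frames = np.zeros(
        (FLAGS.batch_size, FLAGS.input_length,
         FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,
         FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel),
        dtype=np.float32)
    predictions = model.sess.run(model.pred_seq, feed_dict={model.x: frames})
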
Code Example #4
    def __init__(self):
        self.gpus = tf_util.available_gpus()
        self.num_gpus = len(self.gpus)
        if self.num_gpus:
            assert FLAGS.batch_size % self.num_gpus == 0, "Batch size should be an integral multiple of number of GPUs"
        # inputs
        self.x = tf.placeholder(tf.float32, [
            FLAGS.batch_size, FLAGS.seq_length, FLAGS.img_width //
            FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,
            FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel
        ])

        x_splits = tf.split(self.x, max(self.num_gpus, 1))

        self.tf_lr = tf.placeholder(tf.float32, shape=[])
        num_hidden = [int(x) for x in FLAGS.num_hidden.split(',')]
        print(num_hidden)
        num_layers = len(num_hidden)

        opt = tf.train.AdamOptimizer(FLAGS.lr)
        # opt = tf.train.GradientDescentOptimizer(FLAGS.lr)

        pred_seq = []
        tower_grads = []
        tower_losses = []
        devices = self.gpus or ['/cpu:0']
        with tf.variable_scope(tf.get_variable_scope()) as outer_scope:
            for i, d in enumerate(devices):
                with tf.device(d), tf.name_scope('tower_%d' % i):
                    output_list = models_factory.construct_model(
                        FLAGS.model_name, x_splits[i], None, num_layers,
                        num_hidden, FLAGS.filter_size, FLAGS.stride,
                        FLAGS.seq_length, FLAGS.input_length, FLAGS.layer_norm)
                    gen_ims = output_list[0]
                    loss = output_list[1]
                    pred_ims = gen_ims
                    # self.loss_train = loss / FLAGS.batch_size
                    # gradients
                    with tf.name_scope("compute_gradients"):
                        grads = opt.compute_gradients(loss)
                        tower_grads.append(grads)

                    tower_losses.append(loss)
                    pred_seq.append(pred_ims)
                    outer_scope.reuse_variables()

        with tf.name_scope("apply_gradients"), tf.device(devices[0]):
            self.loss_train = tf.add_n(tower_losses) / FLAGS.batch_size
            mean_grads = average_gradients(tower_grads)
            global_step = tf.train.get_or_create_global_step()
            self.train_op = opt.apply_gradients(mean_grads, global_step)
            self.pred_seq = tf.concat(pred_seq, 0)

        # session
        variables = tf.global_variables()
        self.saver = tf.train.Saver(variables)
        init = tf.global_variables_initializer()
        configProt = tf.ConfigProto()
        configProt.gpu_options.allow_growth = True
        configProt.allow_soft_placement = True
        self.sess = tf.Session(config=configProt)
        self.sess.run(init)
        if FLAGS.pretrained_model:
            self.saver.restore(self.sess, FLAGS.pretrained_model)
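
Example #4 relies on an `average_gradients` helper that is not part of the excerpt. A common TF1-style implementation of the multi-GPU tower averaging pattern (shown here as an assumption; the project's actual helper may differ) would be:

    def average_gradients(tower_grads):
        # tower_grads: one list of (gradient, variable) pairs per tower,
        # as produced by opt.compute_gradients above
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
            grad = tf.reduce_mean(tf.concat(grads, 0), 0)
            # variables are shared across towers, so the first tower's handle suffices
            average_grads.append((grad, grad_and_vars[0][1]))
        return average_grads
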
Code Example #5
File: train.py  Project: zjdcts/PredCNN
    def __init__(self):
        # inputs
        self.x = [
            tf.placeholder(tf.float32, [
                FLAGS.batch_size, FLAGS.seq_length, FLAGS.img_width //
                FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,
                FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel
            ]) for i in range(FLAGS.n_gpu)
        ]

        self.mask_true = tf.placeholder(tf.float32, [
            FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
            FLAGS.img_width // FLAGS.patch_size,
            FLAGS.img_width // FLAGS.patch_size,
            FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel
        ])

        grads = []
        loss_train = []
        self.pred_seq = []
        self.tf_lr = tf.placeholder(tf.float32, shape=[])
        self.params = dict()
        if 'predcnn' in FLAGS.model_name:
            self.params['encoder_length'] = FLAGS.encoder_length
            self.params['decoder_length'] = FLAGS.decoder_length
        num_hidden = [int(x) for x in FLAGS.num_hidden.split(',')]
        for i in range(FLAGS.n_gpu):
            with tf.device('/gpu:%d' % i):
                with tf.variable_scope(tf.get_variable_scope(),
                                       reuse=True if i > 0 else None):
                    # define a model
                    output_list = models_factory.construct_model(
                        FLAGS.model_name, self.x[i], self.params,
                        self.mask_true, num_hidden, FLAGS.filter_size,
                        FLAGS.seq_length, FLAGS.input_length)

                    gen_ims = output_list[0]
                    loss = output_list[1]
                    pred_ims = gen_ims[:,
                                       FLAGS.input_length - FLAGS.seq_length:]
                    loss_train.append(loss / FLAGS.batch_size)
                    # gradients
                    all_params = tf.trainable_variables()
                    grads.append(tf.gradients(loss, all_params))
                    self.pred_seq.append(pred_ims)

        if FLAGS.n_gpu == 1:
            self.train_op = tf.train.AdamOptimizer(FLAGS.lr).minimize(loss)
        else:
            # add losses and gradients together and get training updates
            with tf.device('/gpu:0'):
                for i in range(1, FLAGS.n_gpu):
                    loss_train[0] += loss_train[i]
                    for j in range(len(grads[0])):
                        grads[0][j] += grads[i][j]
            # keep track of moving average
            ema = tf.train.ExponentialMovingAverage(decay=0.9995)
            maintain_averages_op = tf.group(ema.apply(all_params))
            self.train_op = tf.group(
                optimizer.adam_updates(all_params,
                                       grads[0],
                                       lr=self.tf_lr,
                                       mom1=0.95,
                                       mom2=0.9995), maintain_averages_op)

        self.loss_train = loss_train[0] / FLAGS.n_gpu

        # session
        variables = tf.global_variables()
        self.saver = tf.train.Saver(variables)
        init = tf.global_variables_initializer()
        configProt = tf.ConfigProto()
        configProt.gpu_options.allow_growth = True
        configProt.allow_soft_placement = True
        self.sess = tf.Session(config=configProt)
        self.sess.run(init)
        if FLAGS.pretrained_model:
            self.saver.restore(self.sess, FLAGS.pretrained_model)
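
Example #5 calls `optimizer.adam_updates`, which is also not included in the excerpt. A rough sketch of a manual Adam update in the same spirit (an assumption, not the project's actual helper; `tf` is TensorFlow 1.x as in the examples above):

    def adam_updates(params, gradients, lr=0.001, mom1=0.9, mom2=0.999):
        updates = []
        t = tf.Variable(1., trainable=False, name='adam_t')
        t_new = t.assign_add(1.)
        for p, g in zip(params, gradients):
            # first and second moment accumulators for this parameter
            m = tf.Variable(tf.zeros(p.get_shape()), trainable=False)
            v = tf.Variable(tf.zeros(p.get_shape()), trainable=False)
            m_new = m.assign(mom1 * m + (1. - mom1) * g)
            v_new = v.assign(mom2 * v + (1. - mom2) * tf.square(g))
            # bias-corrected step
            m_hat = m_new / (1. - tf.pow(mom1, t_new))
            v_hat = v_new / (1. - tf.pow(mom2, t_new))
            updates.append(p.assign_sub(lr * m_hat / (tf.sqrt(v_hat) + 1e-8)))
        return tf.group(*updates)
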