Example #1
0
 def setUp(self):
     """Create the per-test fixtures: a log path and the shared
     virtual-chemistry data set built from the expanded SDF files."""
     self.logpath = 'test-log.json'
     vc_files = read_dir(
         "/data/htdocs/cccid/build/compounds-db/data-files/virtual_chemistry/",
         match="Virtual_Chemistry*.sdf.expanded"
     )
     self.VC = CompoundDataSet(name='virtual_chemistry',
                               filepaths=list(vc_files))
Example #2
0
 def test_factory(self):
     """make_data_set(**config) must yield the same file set as direct
     construction of CompoundDataSet from the same directory/match."""
     dl1 = CompoundDataSet(
         name='druglike',
         filepaths=list(read_dir(
             "/data/htdocs/cccid/build/compounds-db/data-files/druglike/",
             match="Drug-Like*.sdf.expanded"
         ))
     )
     config = {
         'name': 'druglike',
         'directory': "/data/htdocs/cccid/build/compounds-db/data-files/druglike/",
         'match': "Drug-Like*.sdf.expanded"
     }
     dl2 = make_data_set(**config)

     # Compare the full sorted lists. The original pairwise zip() loop
     # silently stopped at the shorter list, so extra or missing paths in
     # either data set could never fail the test.
     self.assertEqual(sorted(dl1.filepaths), sorted(dl2.filepaths))
Example #3
0
 def test_DrugLike(self):
     """Verify file listing, expected length, and sequence protocol of the
     drug-like data set."""
     base = '/data/htdocs/cccid/build/compounds-db/data-files/druglike/'
     druglike = CompoundDataSet(
         filepaths=list(read_dir(base, match="Drug-Like*.sdf.expanded"))
     )
     first_expected = base + 'Drug-Like_Compounds_MMFF_Ligprep_01.sdf.expanded'
     expected_len = 29

     # The filepaths property exposes the discovered files.
     self.assertEqual(first_expected, druglike.filepaths[0])
     self.assertEqual(len(druglike.filepaths), expected_len)
     # The data set object itself behaves like a sequence.
     self.assertEqual(first_expected, druglike[0])
     self.assertEqual(len(druglike), expected_len)
Example #4
0
 def test_VirtualChemistry(self):
     """Verify file listing, expected length, and sequence protocol of the
     virtual-chemistry data set."""
     base = '/data/htdocs/cccid/build/compounds-db/data-files/virtual_chemistry/'
     virtual_chem = CompoundDataSet(
         filepaths=list(read_dir(base, match="Virtual_Chemistry*.sdf.expanded"))
     )
     first_expected = base + 'Virtual_Chemistry_01.sdf.expanded'
     expected_len = 48

     # The filepaths property exposes the discovered files.
     self.assertEqual(first_expected, virtual_chem.filepaths[0])
     self.assertEqual(len(virtual_chem.filepaths), expected_len)
     # The data set object itself behaves like a sequence.
     self.assertEqual(first_expected, virtual_chem[0])
     self.assertEqual(len(virtual_chem), expected_len)
Example #5
0
    def test(self):
        """Evaluate the trained model on the test set and encode every image.

        For each test image, network predictions and contexts for patches
        2-4 are computed, each patch is run through the external encoder,
        and the per-image bits-per-pixel and timing figures are appended to
        'encoding.txt'.  Average losses over the whole test set are printed
        at the end.  Exits the process when no checkpoint is available or
        the test directory is empty.
        """
        GPU_NUM = self.args.gpu_num
        DATA_DIR = self.args.data_dir
        CKPT_DIR = self.args.ckpt_dir + str(self.args.channel_type) + '/'
        CHANNEL_NUM = self.args.channel_num
        CHANNEL_TYPE = self.args.channel_type

        # Assign GPU
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_NUM)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        # Read dataset
        test_data = read_dir(DATA_DIR + self.args.test_data_dir)
        if not test_data:
            # Guard the end-of-run averages against division by zero.
            print("No test data found")
            sys.exit(1)

        start = time.time()

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=1)
            ckpt = tf.train.get_checkpoint_state(CKPT_DIR)

            # Load model; evaluation without trained weights is meaningless.
            if ckpt:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model Loaded")
            else:
                print("No model to load")
                sys.exit(1)

            loss_pred_epoch_2 = loss_pred_epoch_3 = loss_pred_epoch_4 = 0
            loss_ctx_epoch_2 = loss_ctx_epoch_3 = loss_ctx_epoch_4 = 0

            img_idx = 0
            elapsed = 0
            # 'with' guarantees the stats file is closed even if an encoder
            # call raises (the original left an open handle named 'file',
            # shadowing the builtin).
            with open('encoding.txt', 'a') as stats_file:
                stats_file.write(
                    "bpp_2 bpp_3 bpp_4 encoding_time2 encoding_time3 encoding_time4 total_time CNN_time\n"
                )

                for test_name in tqdm.tqdm(test_data):
                    starttime = time.time()

                    # The model consumes RGB; cv2 loads BGR.
                    test_sample = cv2.cvtColor(cv2.imread(test_name),
                                               cv2.COLOR_BGR2RGB)
                    test_sample = np.expand_dims(test_sample, axis=0)

                    feed_dict_test = {self.input: test_sample}

                    t_1 = time.time()

                    # i0 is fetched alongside the rest; only i1-i4 and the
                    # predictions/contexts are used below.
                    i0, i1, i2, i3, i4, p2, p3, p4, c2, c3, c4 = sess.run(
                        [
                            self.input_0, self.input_1, self.input_2,
                            self.input_3, self.input_4, self.pred_2,
                            self.pred_3, self.pred_4, self.ctx_2, self.ctx_3,
                            self.ctx_4
                        ],
                        feed_dict=feed_dict_test)

                    t_2 = time.time()

                    loss_p_2, loss_p_3, loss_p_4, loss_c_2, loss_c_3, loss_c_4 = sess.run(
                        [
                            self.loss_pred_2, self.loss_pred_3,
                            self.loss_pred_4, self.loss_ctx_2,
                            self.loss_ctx_3, self.loss_ctx_4
                        ],
                        feed_dict=feed_dict_test)

                    loss_pred_epoch_2 += loss_p_2
                    loss_pred_epoch_3 += loss_p_3
                    loss_pred_epoch_4 += loss_p_4

                    loss_ctx_epoch_2 += loss_c_2
                    loss_ctx_epoch_3 += loss_c_3
                    loss_ctx_epoch_4 += loss_c_4

                    height = i1.shape[1]
                    width = i1.shape[2]

                    # Time each patch encoder individually.  The original
                    # computed encode_4 as (now - t_0 - encode_3), which
                    # wrongly folded patch 2's encoding time into encode_4.
                    t_0 = time.time()
                    bpp_2 = ICIP_Compression.runencoder(
                        height, width, img_idx, 2,
                        i2[0, :, :, 0].astype(int), p2[0, :, :, 0],
                        c2[0, :, :, 0], "data/compressed.bin", CHANNEL_TYPE)
                    print("encoded 2")
                    encode_2 = time.time() - t_0

                    t_0 = time.time()
                    bpp_3 = ICIP_Compression.runencoder(
                        height, width, img_idx, 3,
                        i3[0, :, :, 0].astype(int), p3[0, :, :, 0],
                        c3[0, :, :, 0], "data/compressed.bin", CHANNEL_TYPE)
                    print("encoded 3")
                    encode_3 = time.time() - t_0

                    t_0 = time.time()
                    bpp_4 = ICIP_Compression.runencoder(
                        height, width, img_idx, 4,
                        i4[0, :, :, 0].astype(int), p4[0, :, :, 0],
                        c4[0, :, :, 0], "data/compressed.bin", CHANNEL_TYPE)
                    print("encoded 4")
                    encode_4 = time.time() - t_0

                    endtime = time.time()
                    elapsed += endtime - starttime

                    stats_file.write(
                        "{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n"
                        .format(bpp_2, bpp_3, bpp_4, encode_2, encode_3,
                                encode_4, elapsed, t_2 - t_1))

                    print("total: {}s\n 2: {}s\n 3: {}s\n 4: {}s\n network: {}\n".
                          format(elapsed, encode_2, encode_3, encode_4,
                                 t_2 - t_1))

                    print('num {}'.format(img_idx))
                    img_idx += 1

            # Per-image averages over the whole test set.
            elapsed /= img_idx
            loss_pred_epoch_2 /= len(test_data)
            loss_pred_epoch_3 /= len(test_data)
            loss_pred_epoch_4 /= len(test_data)
            loss_ctx_epoch_2 /= len(test_data)
            loss_ctx_epoch_3 /= len(test_data)
            loss_ctx_epoch_4 /= len(test_data)
            loss_epoch_2 = loss_pred_epoch_2 + loss_ctx_epoch_2
            loss_epoch_3 = loss_pred_epoch_3 + loss_ctx_epoch_3
            loss_epoch_4 = loss_pred_epoch_4 + loss_ctx_epoch_4

            print(
                'test result: %04d\n' % (len(test_data)),
                '***2***   lossPred=', '{:9.4f}'.format(loss_pred_epoch_2),
                'lossContext=', '{:9.4f}'.format(loss_ctx_epoch_2), 'Loss=',
                '{:9.4f}\n'.format(loss_epoch_2), '***3***   lossPred=',
                '{:9.4f}'.format(loss_pred_epoch_3), 'lossContext=',
                '{:9.4f}'.format(loss_ctx_epoch_3), 'Loss=',
                '{:9.4f}\n'.format(loss_epoch_3), '***4***   lossPred=',
                '{:9.4f}'.format(loss_pred_epoch_4), 'lossContext=',
                '{:9.4f}'.format(loss_ctx_epoch_4), 'Loss=',
                '{:9.4f}\n'.format(loss_epoch_4), '***all*** lossPred=',
                '{:9.4f}'.format(loss_pred_epoch_2 + loss_pred_epoch_3 +
                                 loss_pred_epoch_4), 'lossContext=',
                '{:9.4f}'.format(loss_ctx_epoch_2 + loss_ctx_epoch_3 +
                                 loss_ctx_epoch_4), 'Loss=',
                '{:9.4f}'.format(loss_epoch_2 + loss_epoch_3 + loss_epoch_4))
            print("elapsed time: {}s for {} images, {}s per image.".format(
                time.time() - start, img_idx,
                (time.time() - start) / (img_idx)))
            print("purely elapsed time: {}s per image.".format(elapsed))
Example #6
0
    def build(self, encode):

        # Parameters
        DATA_DIR = self.args.data_dir

        LAYER_NUM = self.args.layer_num
        HIDDEN_UNIT = self.args.hidden_unit

        LAMBDA_CTX = self.args.lambda_ctx
        CHANNEL_NUM = self.args.channel_num
        CHANNEL_TYPE = self.args.channel_type

        LR = self.args.lr

        BATCH_SIZE = self.args.batch_size
        CROP_SIZE = self.args.crop_size

        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch

        # TFRecord
        tfrecord_name = 'train.tfrecord'

        # if train tfrecord does not exist, create dataset
        if not data_exist(DATA_DIR, tfrecord_name):
            img_list = read_dir(DATA_DIR + 'train/')
            write_tfrecord(DATA_DIR, img_list, tfrecord_name)

        self.input_crop, _, _ = read_tfrecord(DATA_DIR,
                                              tfrecord_name,
                                              num_epochs=3 * CHANNEL_EPOCH +
                                              JOINT_EPOCH,
                                              batch_size=BATCH_SIZE,
                                              min_after_dequeue=10,
                                              crop_size=CROP_SIZE)

        if encode:
            self.input = tf.placeholder(tf.int16, (None, None, None, 3))

            input_yuv = self.rgb2yuv(self.input)

            if CHANNEL_NUM == 1:
                if (CHANNEL_TYPE == 0):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 0], axis=3)
                elif (CHANNEL_TYPE == 1):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 1], axis=3)
                elif (CHANNEL_TYPE == 2):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 2], axis=3)
            elif CHANNEL_NUM == 3:
                input_img = input_yuv
            else:
                print("Invalid Channel Num")
                sys.exit(1)

            input_depth = tf.nn.space_to_depth(input_img, 2)

            original_img = tf.nn.space_to_depth(input_yuv, 2)

            self.input_0, _, _, _ = tf.split(original_img, 4, axis=3)

            self.input_1, self.input_4, self.input_3, self.input_2 = tf.split(
                input_depth, 4, axis=3)

            # Prediction of 2
            pred_2, ctx_2 = model_conv(self.input_1, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_2')

            error_pred_2 = abs(tf.subtract(pred_2, self.input_2))

            # Prediction of 3
            concat_1_2 = tf.concat([self.input_1, self.input_2], axis=3)
            pred_3, ctx_3 = model_conv(concat_1_2, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_3')

            error_pred_3 = abs(tf.subtract(pred_3, self.input_3))

            # Prediction of 4
            concat_1_2_3 = tf.concat(
                [self.input_1, self.input_2, self.input_3], axis=3)
            pred_4, ctx_4 = model_conv(concat_1_2_3, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_4')

            # Prediction error

            error_pred_4 = abs(tf.subtract(pred_4, self.input_4))

            # Losses
            loss_pred_2 = tf.reduce_mean(error_pred_2)
            loss_pred_3 = tf.reduce_mean(error_pred_3)
            loss_pred_4 = tf.reduce_mean(error_pred_4)

            loss_ctx_2 = LAMBDA_CTX * tf.reduce_mean(
                abs(tf.subtract(ctx_2, error_pred_2)))
            loss_ctx_3 = LAMBDA_CTX * tf.reduce_mean(
                abs(tf.subtract(ctx_3, error_pred_3)))
            loss_ctx_4 = LAMBDA_CTX * tf.reduce_mean(
                abs(tf.subtract(ctx_4, error_pred_4)))

            loss_2 = loss_pred_2 + loss_ctx_2
            loss_3 = loss_pred_3 + loss_ctx_3
            loss_4 = loss_pred_4 + loss_ctx_4

            total_loss = loss_2 + loss_3 + loss_4

            # Optimizer
            all_vars = tf.trainable_variables()
            vars_2 = [var for var in all_vars if 'pred_2' in var.name]
            vars_3 = [var for var in all_vars if 'pred_3' in var.name]
            vars_4 = [var for var in all_vars if 'pred_4' in var.name]

            self.optimizer_2 = tf.train.AdamOptimizer(LR).minimize(
                loss_2, var_list=vars_2)
            self.optimizer_3 = tf.train.AdamOptimizer(LR).minimize(
                loss_3, var_list=vars_3)
            self.optimizer_4 = tf.train.AdamOptimizer(LR).minimize(
                loss_4, var_list=vars_4)
            self.optimizer_all = tf.train.AdamOptimizer(LR).minimize(
                total_loss, var_list=all_vars)

            # Variables
            self.loss_2 = loss_2
            self.loss_3 = loss_3
            self.loss_4 = loss_4
            self.loss_all = loss_4 + loss_2 + loss_3

            self.loss_pred_2 = loss_pred_2
            self.loss_pred_3 = loss_pred_3
            self.loss_pred_4 = loss_pred_4
            self.loss_pred_all = loss_pred_2 + loss_pred_3 + loss_pred_4

            self.loss_ctx_2 = loss_ctx_2
            self.loss_ctx_3 = loss_ctx_3
            self.loss_ctx_4 = loss_ctx_4
            self.loss_ctx_all = loss_ctx_2 + loss_ctx_3 + loss_ctx_4

            self.pred_2 = pred_2
            self.pred_3 = pred_3
            self.pred_4 = pred_4

            self.ctx_2 = ctx_2
            self.ctx_3 = ctx_3
            self.ctx_4 = ctx_4

        else:
            '''
            self.input = tf.placeholder(tf.uint8, (None, None, None, 3))
            input_img = tf.expand_dims(self.rgb2yuv(self.input)[:,:,:,0],axis=3)
            self.input_1, _, _, _ = tf.split(tf.nn.space_to_depth(input_img, 2), 4, axis=3)
            '''
            self.input_1 = tf.placeholder(tf.int16,
                                          (None, None, None, CHANNEL_NUM))
            self.input_1 = tf.to_float(self.input_1)

            # Prediction of 2
            pred_2, ctx_2 = model_conv(self.input_1, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_2')

            self.pred_2 = pred_2
            self.ctx_2 = ctx_2

            self.input_2 = tf.placeholder(tf.int16,
                                          (None, None, None, CHANNEL_NUM))
            self.input_2 = tf.to_float(self.input_2)

            # Prediction of 3
            concat_1_2 = tf.concat([self.input_1, self.input_2], axis=3)
            pred_3, ctx_3 = model_conv(concat_1_2, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_3')

            self.pred_3 = pred_3
            self.ctx_3 = ctx_3

            self.input_3 = tf.placeholder(tf.int16,
                                          (None, None, None, CHANNEL_NUM))
            self.input_3 = tf.to_float(self.input_3)

            # Prediction of 4
            concat_1_2_3 = tf.concat(
                [self.input_1, self.input_2, self.input_3], axis=3)
            pred_4, ctx_4 = model_conv(concat_1_2_3, LAYER_NUM, HIDDEN_UNIT,
                                       'pred_4')

            self.pred_4 = pred_4
            self.ctx_4 = ctx_4

        # Original images
        '''self.input_1 = input_1
Example #7
0
    def train(self):
        """Train patch predictors 2, 3 and 4 in sequence, then all jointly.

        A persistent ``global_step`` variable drives the schedule so that
        training resumes at the correct phase after a checkpoint reload:
        CHANNEL_EPOCH epochs per individual patch optimizer, followed by
        JOINT_EPOCH epochs with the joint optimizer.  Running loss averages
        are printed every PRINT_EVERY epochs and the model is checkpointed
        every SAVE_EVERY epochs.
        """
        GPU_NUM = self.args.gpu_num
        DATA_DIR = self.args.data_dir
        CKPT_DIR = self.args.ckpt_dir + str(self.args.channel_type) + '/'
        LOAD = self.args.load
        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch
        BATCH_SIZE = self.args.batch_size
        PRINT_EVERY = self.args.print_every
        SAVE_EVERY = self.args.save_every
        CHANNEL_NUM = self.args.channel_num

        # Assign GPU
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_NUM)

        # Saved with the checkpoint, so reloading resumes the phase schedule.
        global_step = tf.Variable(0, trainable=False)
        increase = tf.assign_add(global_step, 1)
        config = tf.ConfigProto()

        config.gpu_options.allow_growth = True

        # Read dataset
        # NOTE(review): train_data is unused here — batches come from the
        # TFRecord queue (self.input_crop) built in build().
        train_data = read_dir(DATA_DIR + 'train/')

        with tf.Session(config=config) as sess:
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess,
                                                   coord=coord,
                                                   start=True)

            saver = tf.train.Saver(max_to_keep=1)
            ckpt = tf.train.get_checkpoint_state(CKPT_DIR)

            # Load model if trained before
            if ckpt and LOAD:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model Loaded")

            epoch = sess.run(global_step)

            loss_pred_epoch_2 = loss_pred_epoch_3 = loss_pred_epoch_4 = 0
            loss_ctx_epoch_2 = loss_ctx_epoch_3 = loss_ctx_epoch_4 = 0

            for a in range(JOINT_EPOCH + 3 * CHANNEL_EPOCH):
                sess.run(increase)

                # Select the optimizer for the current phase of the schedule.
                if epoch < CHANNEL_EPOCH:
                    if epoch == 0:
                        print("========== Train Patch 2 ==========")
                    optimizer = self.optimizer_2
                elif epoch < 2 * CHANNEL_EPOCH:
                    if epoch == CHANNEL_EPOCH:
                        print("========== Train Patch 3 ==========")
                    optimizer = self.optimizer_3
                elif epoch < 3 * CHANNEL_EPOCH:
                    if epoch == 2 * CHANNEL_EPOCH:
                        print("========== Train Patch 4 ==========")
                    optimizer = self.optimizer_4
                else:
                    if epoch == 3 * CHANNEL_EPOCH:
                        print("========== Train All Patches ==========")
                    optimizer = self.optimizer_all

                input_crop = sess.run(self.input_crop)

                feed_dict_train = {self.input: input_crop}

                _, loss_p_2, loss_p_3, loss_p_4, loss_c_2, loss_c_3, loss_c_4 =\
                    sess.run([optimizer, self.loss_pred_2, self.loss_pred_3, self.loss_pred_4,
                     self.loss_ctx_2, self.loss_ctx_3, self.loss_ctx_4], feed_dict=feed_dict_train)

                loss_pred_epoch_2 += loss_p_2
                loss_pred_epoch_3 += loss_p_3
                loss_pred_epoch_4 += loss_p_4

                loss_ctx_epoch_2 += loss_c_2
                loss_ctx_epoch_3 += loss_c_3
                loss_ctx_epoch_4 += loss_c_4

                if (epoch + 1) % PRINT_EVERY == 0:
                    loss_pred_epoch_2 /= PRINT_EVERY
                    loss_pred_epoch_3 /= PRINT_EVERY
                    loss_pred_epoch_4 /= PRINT_EVERY

                    loss_ctx_epoch_2 /= PRINT_EVERY
                    loss_ctx_epoch_3 /= PRINT_EVERY
                    loss_ctx_epoch_4 /= PRINT_EVERY

                    loss_epoch_2 = loss_pred_epoch_2 + loss_ctx_epoch_2
                    loss_epoch_3 = loss_pred_epoch_3 + loss_ctx_epoch_3
                    loss_epoch_4 = loss_pred_epoch_4 + loss_ctx_epoch_4

                    print(
                        '%04d\n' % (epoch + 1), '***2***   lossPred=',
                        '{:9.4f}'.format(loss_pred_epoch_2), 'lossContext=',
                        '{:9.4f}'.format(loss_ctx_epoch_2), 'Loss=',
                        '{:9.4f}\n'.format(loss_epoch_2),
                        '***3***   lossPred=',
                        '{:9.4f}'.format(loss_pred_epoch_3), 'lossContext=',
                        '{:9.4f}'.format(loss_ctx_epoch_3), 'Loss=',
                        '{:9.4f}\n'.format(loss_epoch_3),
                        '***4***   lossPred=',
                        '{:9.4f}'.format(loss_pred_epoch_4), 'lossContext=',
                        '{:9.4f}'.format(loss_ctx_epoch_4), 'Loss=',
                        '{:9.4f}\n'.format(loss_epoch_4),
                        '***all*** lossPred=',
                        '{:9.4f}'.format(loss_pred_epoch_2 +
                                         loss_pred_epoch_3 +
                                         loss_pred_epoch_4), 'lossContext=',
                        '{:9.4f}'.format(loss_ctx_epoch_2 + loss_ctx_epoch_3 +
                                         loss_ctx_epoch_4), 'Loss=',
                        '{:9.4f}'.format(loss_epoch_2 + loss_epoch_3 +
                                         loss_epoch_4))

                    # Reset the running sums so each report covers only the
                    # last PRINT_EVERY epochs.  Previously the divided
                    # averages leaked into the next window's sums, skewing
                    # every report after the first.
                    loss_pred_epoch_2 = loss_pred_epoch_3 = loss_pred_epoch_4 = 0
                    loss_ctx_epoch_2 = loss_ctx_epoch_3 = loss_ctx_epoch_4 = 0

                if (epoch + 1) % SAVE_EVERY == 0:
                    saver.save(sess,
                               CKPT_DIR + 'model_',
                               global_step=epoch + 1)
                    print("Model Saved")

                epoch = sess.run(global_step)

            # Shut the input-queue threads down cleanly before the session
            # closes (previously they were never stopped or joined).
            coord.request_stop()
            coord.join(threads)
Example #8
0
    def build(self, encode):
        """Build the prediction graph over the 12 channel-order slots.

        When ``encode`` is True, constructs the full training graph: the
        TFRecord input pipeline, one predictor network per non-base slot of
        the learning order, their losses, and per-model plus joint Adam
        optimizers.  When False, builds only the inference-time predictors
        fed from a single ``self.input_all`` tensor.

        Side effects: sets self.input_crop, self.type, self.channel,
        self.origin, and (depending on ``encode``) the pred/ctx/loss/
        optimizer attributes consumed by train()/test().
        """

        # Parameters
        DATA_DIR = self.args.data_dir

        LAYER_NUM = self.args.layer_num
        HIDDEN_UNIT = self.args.hidden_unit

        LAMBDA_CTX = self.args.lambda_ctx
        CHANNEL_NUM = self.args.channel_num
        CHANNEL_TYPE = self.args.channel_type

        LR = self.args.lr

        BATCH_SIZE = self.args.batch_size
        CROP_SIZE = self.args.crop_size

        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch
        # 12 slots in the learning order; slot 0 is never predicted (the
        # prediction loop below targets slots 1..11), leaving 11 candidates.
        NUM_PATCHES = 11

        # TFRecord
        tfrecord_name = 'train.tfrecord'

        # if train tfrecord does not exist, create dataset
        if not data_exist(DATA_DIR, tfrecord_name):
            img_list = read_dir(DATA_DIR + 'train/')
            write_tfrecord(DATA_DIR, img_list, tfrecord_name)

        self.input_crop, _, _ = read_tfrecord(DATA_DIR,
                                              tfrecord_name,
                                              num_epochs=3 * CHANNEL_EPOCH +
                                              JOINT_EPOCH,
                                              batch_size=BATCH_SIZE,
                                              min_after_dequeue=10,
                                              crop_size=CROP_SIZE)

        # Learning order: `channel` maps each slot of the concatenated order
        # tensor to a YUV channel index, `origin` to its spatial patch
        # number (1-4).
        # NOTE(review): if learning_order is neither "1234" nor "yuv",
        # neither attribute is set and `order` below is never defined —
        # later references would raise; confirm the argument is validated
        # upstream.
        self.type = self.args.learning_order
        if self.type == "1234":
            self.channel = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
            self.origin = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
        elif self.type == "yuv":
            self.channel = [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]
            self.origin = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]

        if encode:
            self.input = tf.placeholder(tf.uint8, (None, None, None, 3))

            input_yuv = self.rgb2yuv(self.input)

            # Select which YUV channel(s) the model operates on.
            if CHANNEL_NUM == 1:
                if (CHANNEL_TYPE == 0):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 0], axis=3)
                elif (CHANNEL_TYPE == 1):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 1], axis=3)
                elif (CHANNEL_TYPE == 2):
                    input_img = tf.expand_dims(input_yuv[:, :, :, 2], axis=3)
            elif CHANNEL_NUM == 3:
                input_img = input_yuv
            else:
                print("Invalid Channel Num")
                sys.exit(1)

            # Rearrange each 2x2 pixel block into 4 depth-stacked sub-patches.
            input_depth = tf.nn.space_to_depth(input_img, 2)

            #original_img = tf.nn.space_to_depth(input_yuv, 2)

            #self.input_0, _, _, _ = tf.split(original_img, 4, axis=3)

            # NOTE(review): the split deliberately assigns patches in
            # 1,4,3,2 order — confirm this matches the encoder-side patch
            # numbering.
            self.input_1, self.input_4, self.input_3, self.input_2 = tf.split(
                input_depth, 4, axis=3)

            # Concatenate the 12 (patch, channel) slices in learning order.
            if self.type == "1234":
                order = tf.concat([
                    tf.expand_dims(self.input_1[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_1[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_1[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 2], axis=3)
                ],
                                  axis=3)
                #self.channel = [0,0,0,0,1,1,1,1,2,2,2,2]
                #self.origin = [1,2,3,4,1,2,3,4,1,2,3,4]
            elif self.type == "yuv":
                order = tf.concat([
                    tf.expand_dims(self.input_1[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_1[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_1[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_2[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_3[:, :, :, 2], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 0], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 1], axis=3),
                    tf.expand_dims(self.input_4[:, :, :, 2], axis=3)
                ],
                                  axis=3)
                #self.channel = [0,1,2,0,1,2,0,1,2,0,1,2]
                #self.origin = [1,1,1,2,2,2,3,3,3,4,4,4]
            print("building order completed.\n")

            pred_li = []
            ctx_li = []
            error_pred_li = []
            loss_pred_li = []
            loss_ctx_li = []
            loss_li = []
            total_loss = 0

            # One predictor per slot whose source patch is not patch 1:
            # slot i+1 is predicted from all preceding slots order[..., :i+1].
            # Slots originating from patch 1 are skipped — presumably those
            # are stored without prediction; verify against the encoder.
            for i in range(NUM_PATCHES):
                if self.origin[i + 1] != 1:
                    pred, ctx = model_conv(order[:, :, :, :(i + 1)], LAYER_NUM,
                                           HIDDEN_UNIT, 'model_' + str(i + 1))
                    # L1 prediction error, and a context loss that trains ctx
                    # to estimate that error (weighted by LAMBDA_CTX).
                    error_pred = abs(
                        tf.subtract(
                            pred,
                            tf.expand_dims(order[:, :, :, (i + 1)], axis=3)))
                    loss_pred = tf.reduce_mean(error_pred)
                    loss_ctx = LAMBDA_CTX * tf.reduce_mean(
                        abs(tf.subtract(ctx, error_pred)))
                    pred_li.append(pred)
                    ctx_li.append(ctx)
                    error_pred_li.append(error_pred)
                    loss_pred_li.append(loss_pred)
                    loss_ctx_li.append(loss_ctx)
                    loss_li.append(loss_pred + loss_ctx)
                    total_loss += loss_pred + loss_ctx

            all_vars = tf.trainable_variables()

            # Per-model optimizers: k indexes the compacted loss_li list,
            # which skips the patch-1 slots excluded above.
            optimizer_li = []
            k = 0
            for j in range(NUM_PATCHES):
                if self.origin[j + 1] != 1:
                    # NOTE(review): `vars` shadows the Python builtin.
                    vars = [
                        var for var in all_vars
                        if 'model_' + str(j + 1) in var.name
                    ]
                    optimizer = tf.train.AdamOptimizer(LR).minimize(
                        loss_li[k], var_list=vars)
                    optimizer_li.append(optimizer)
                    k += 1

            # Expose graph handles for train()/test().
            self.pred_li = pred_li
            self.ctx_li = ctx_li
            self.error_pred_li = error_pred_li
            self.loss_pred_li = loss_pred_li
            self.loss_ctx_li = loss_ctx_li
            self.loss_li = loss_li
            self.optimizer_li = optimizer_li
            self.optimizer_all = tf.train.AdamOptimizer(LR).minimize(
                total_loss, var_list=all_vars)
            self.order = order
            # Number of predictors actually built (slots not from patch 1).
            self.true_patches = k

        else:
            '''
            self.input = tf.placeholder(tf.uint8, (None, None, None, 3))
            input_img = tf.expand_dims(self.rgb2yuv(self.input)[:,:,:,0],axis=3)
            self.input_1, _, _, _ = tf.split(tf.nn.space_to_depth(input_img, 2), 4, axis=3)
            '''
            #self.input_list = [tf.to_float(tf.placeholder(tf.int16, (None, None, None, CHANNEL_NUM))) for _ in range(NUM_PATCHES)]
            # NOTE(review): the uint8 placeholder is immediately replaced by
            # its float cast, so callers must feed the tensor now stored in
            # self.input_all; the original placeholder handle is lost.
            self.input_all = tf.placeholder(
                tf.uint8, (None, None, None, NUM_PATCHES + 1))
            self.input_all = tf.to_float(self.input_all)

            pred_li = []
            ctx_li = []

            # Inference-only predictors, mirroring the encode branch but fed
            # from the already-assembled input_all tensor.
            for i in range(NUM_PATCHES):
                if self.origin[i + 1] != 1:
                    pred, ctx = model_conv(self.input_all[:, :, :, :(i + 1)],
                                           LAYER_NUM, HIDDEN_UNIT,
                                           'model_' + str(i + 1))
                    pred_li.append(pred)
                    ctx_li.append(ctx)

            self.pred_li = pred_li
            self.ctx_li = ctx_li
Example #9
0
    def train(self):
        """Run the full training loop for the patch-prediction networks.

        Training is driven by a persisted ``global_step`` so it can resume
        from a checkpoint: first each of the ``self.true_patches`` models is
        trained alone for CHANNEL_EPOCH steps with its own optimizer, then
        all models are fine-tuned jointly for JOINT_EPOCH steps with
        ``self.optimizer_all``.  Window-averaged losses are printed every
        PRINT_EVERY steps and a checkpoint is written every SAVE_EVERY steps.
        """
        GPU_NUM = self.args.gpu_num
        DATA_DIR = self.args.data_dir
        CKPT_DIR = self.args.ckpt_dir + 'full_' + self.type + '/'
        LOAD = self.args.load
        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch
        PRINT_EVERY = self.args.print_every
        SAVE_EVERY = self.args.save_every
        TRUE_PATCHES = self.true_patches

        # Assign GPU
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_NUM)

        # Persisted step counter so training resumes where it left off.
        global_step = tf.Variable(0, trainable=False)
        increase = tf.assign_add(global_step, 1)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        # Directory listing kept for parity with the original pipeline; the
        # actual input batches come from self.input_crop (TFRecord queue).
        train_data = read_dir(DATA_DIR + 'train/')

        with tf.Session(config=config) as sess:
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess,
                                                   coord=coord,
                                                   start=True)

            saver = tf.train.Saver(max_to_keep=1)
            ckpt = tf.train.get_checkpoint_state(CKPT_DIR)

            # Load model if trained before
            if ckpt and LOAD:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model Loaded")

            epoch = sess.run(global_step)

            loss_pred_epoch = np.zeros(TRUE_PATCHES)
            loss_ctx_epoch = np.zeros(TRUE_PATCHES)

            # Write the result-file header once per run.
            with open("encoding_yuv.txt", "a") as tfile:
                if self.type == "1234":
                    tfile.write(
                        "epoch FLIF_1 bpp_y2 bpp_y3 bpp_y4 bpp_u2 bpp_u3 bpp_u4 bpp_v2 bpp_v3 bpp_v4 cnn_time enc_time\n"
                    )
                elif self.type == "yuv":
                    tfile.write(
                        "epoch FLIF_1 bpp_y2 bpp_u2 bpp_v2 bpp_y3 bpp_u3 bpp_v3 bpp_y4 bpp_u4 bpp_v4 cnn_time enc_time\n"
                    )

            for a in range(JOINT_EPOCH + TRUE_PATCHES * CHANNEL_EPOCH):
                sess.run(increase)

                # Select the optimizer from the *current* step on every
                # iteration.  The original only bound it when
                # epoch % CHANNEL_EPOCH == 0, so resuming from a checkpoint
                # mid-phase raised NameError (optimizer never assigned) and
                # phase transitions were missed on resume.
                if epoch < TRUE_PATCHES * CHANNEL_EPOCH:
                    if epoch % CHANNEL_EPOCH == 0:
                        print("========== Train Patch {} ==========".format(
                            epoch // CHANNEL_EPOCH + 2))
                    optimizer = self.optimizer_li[epoch // CHANNEL_EPOCH]
                else:
                    if epoch == TRUE_PATCHES * CHANNEL_EPOCH:
                        print("========== Train All Patches ==========")
                    optimizer = self.optimizer_all

                input_crop = sess.run(self.input_crop)
                feed_dict_train = {self.input: input_crop}

                _, loss_p, loss_c = \
                    sess.run([optimizer, self.loss_pred_li, self.loss_ctx_li], feed_dict=feed_dict_train)

                for i1 in range(TRUE_PATCHES):
                    loss_pred_epoch[i1] += loss_p[i1]
                    loss_ctx_epoch[i1] += loss_c[i1]

                if (epoch + 1) % PRINT_EVERY == 0:
                    # Report window averages.
                    loss_pred_epoch /= PRINT_EVERY
                    loss_ctx_epoch /= PRINT_EVERY
                    loss_epoch = loss_pred_epoch + loss_ctx_epoch
                    print('%04d\n' % (epoch + 1))
                    for i2 in range(TRUE_PATCHES):
                        print('*** {} ***   lossPred='.format(i2 + 1),
                              '{:9.4f}'.format(loss_pred_epoch[i2]),
                              'lossContext=',
                              '{:9.4f}'.format(loss_ctx_epoch[i2]), 'Loss=',
                              '{:9.4f}'.format(loss_epoch[i2]))
                    print('***all*** lossPred=',
                          '{:9.4f}'.format(sum(loss_pred_epoch)),
                          'lossContext=',
                          '{:9.4f}'.format(sum(loss_ctx_epoch)), 'Loss=',
                          '{:9.4f}'.format(sum(loss_epoch)))
                    # Reset the accumulators for the next window.  The
                    # original kept accumulating on top of the divided
                    # values, skewing every report after the first.
                    loss_pred_epoch[:] = 0
                    loss_ctx_epoch[:] = 0

                if (epoch + 1) % SAVE_EVERY == 0:
                    saver.save(sess,
                               CKPT_DIR + 'model_',
                               global_step=epoch + 1)
                    print("Model Saved")

                epoch = sess.run(global_step)
# Example #10  (code-search scrape marker, commented out to keep the file parseable)
    def test(self):
        """Evaluate the trained models on the MCM test set and dump data.

        For each test image this runs the graph once, accumulates per-model
        prediction/context losses, and writes the sub-sampled input patches,
        predictions, context maps and image size to disk for the external C
        compression step.  Exits with status 1 when no checkpoint exists.
        """
        GPU_NUM = self.args.gpu_num
        DATA_DIR = self.args.data_dir
        CKPT_DIR = self.args.ckpt_dir
        NUM_PATCHES = 11

        # Assign GPU
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_NUM)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        # Read dataset
        test_data = read_dir(DATA_DIR + 'mcm/')

        start = time.time()

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=1)
            ckpt = tf.train.get_checkpoint_state(CKPT_DIR)

            # Load model; evaluation without a checkpoint is meaningless.
            if ckpt:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model Loaded")
            else:
                print("No model to load")
                sys.exit(1)

            loss_pred_epoch = np.zeros(NUM_PATCHES)
            loss_ctx_epoch = np.zeros(NUM_PATCHES)

            img_idx = 0
            elapsed = 0

            for test_name in tqdm.tqdm(test_data):
                starttime = time.time()

                # OpenCV loads BGR; the graph expects RGB.
                test_sample = cv2.cvtColor(cv2.imread(test_name),
                                           cv2.COLOR_BGR2RGB)
                test_sample = np.expand_dims(test_sample, axis=0)

                feed_dict_test = {self.input: test_sample}

                i1, i2, i3, i4, p, c, loss_p, loss_c = sess.run(
                    [
                        self.input_1, self.input_2, self.input_3, self.input_4,
                        self.pred_li, self.ctx_li, self.loss_pred_li,
                        self.loss_ctx_li
                    ],
                    feed_dict=feed_dict_test)

                for i_test in range(NUM_PATCHES):
                    loss_pred_epoch[i_test] += loss_p[i_test]
                    loss_ctx_epoch[i_test] += loss_c[i_test]

                # Only the network run counts toward the "pure" time; the
                # file dumps below are excluded.
                elapsed += time.time() - starttime

                save_image_data(img_idx, 1, i1)
                save_image_data(img_idx, 2, i2)
                save_image_data(img_idx, 3, i3)
                save_image_data(img_idx, 4, i4)
                for i in range(NUM_PATCHES):
                    save_pred_data(img_idx, i, p[i])
                    save_ctx_data(img_idx, i, c[i])

                # Spatial size of the sub-sampled patches, consumed by the
                # external compressor.
                size = np.array([i1.shape[1], i1.shape[2]])
                np.savetxt('../c_compression/ICIP_Compression/data/' +
                           str(img_idx) + '_size.txt',
                           size,
                           fmt='%d')

                print('num {}'.format(img_idx))
                img_idx += 1

            # Guard against an empty test directory, which previously
            # crashed with ZeroDivisionError on the averages below.
            if img_idx == 0:
                print("No test images found")
                return

            elapsed /= img_idx
            loss_pred_epoch /= len(test_data)
            loss_ctx_epoch /= len(test_data)
            loss_epoch = loss_pred_epoch + loss_ctx_epoch

            for i2 in range(NUM_PATCHES):
                print('*** {} ***   lossPred='.format(i2 + 1),
                      '{:9.4f}'.format(loss_pred_epoch[i2]), 'lossContext=',
                      '{:9.4f}'.format(loss_ctx_epoch[i2]), 'Loss=',
                      '{:9.4f}'.format(loss_epoch[i2]))
            print('***all*** lossPred=',
                  '{:9.4f}'.format(sum(loss_pred_epoch)), 'lossContext=',
                  '{:9.4f}'.format(sum(loss_ctx_epoch)), 'Loss=',
                  '{:9.4f}'.format(sum(loss_epoch)))

            print("elapsed time: {}s for {} images, {}s per image.".format(
                time.time() - start, img_idx,
                (time.time() - start) / (img_idx)))
            print("purely elapsed time: {}s per image.".format(elapsed))
# Example #11  (code-search scrape marker, commented out to keep the file parseable)
    def build(self):
        """Construct the patch-prediction training graph.

        The RGB input is converted to YUV, sub-sampled with
        ``space_to_depth`` into four 2x2-phase patches, and the resulting 12
        planes (4 patches x Y/U/V) are stacked in the order selected by
        ``--learning_order``.  One ``model_conv`` predictor is built per
        plane after the first: model ``i+1`` predicts plane ``i+1`` from
        planes ``0..i`` and also emits a context (error-magnitude) map.
        Per-model losses/optimizers plus a joint optimizer are stored on
        ``self`` for ``train()``/``test()``.
        """
        import re  # function-scope import: only needed for scope matching below

        # Parameters
        DATA_DIR = self.args.data_dir

        LAYER_NUM = self.args.layer_num
        HIDDEN_UNIT = self.args.hidden_unit

        LAMBDA_CTX = self.args.lambda_ctx
        CHANNEL_NUM = self.args.channel_num

        LR = self.args.lr

        BATCH_SIZE = self.args.batch_size
        CROP_SIZE = self.args.crop_size

        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch
        NUM_PATCHES = 11

        # TFRecord
        tfrecord_name = 'train.tfrecord'

        # if train tfrecord does not exist, create dataset
        if not data_exist(DATA_DIR, tfrecord_name):
            img_list = read_dir(DATA_DIR + 'train/')
            write_tfrecord(DATA_DIR, img_list, tfrecord_name)

        # NOTE(review): num_epochs budgets 3 channel phases while train()
        # runs one CHANNEL_EPOCH phase per trained patch — confirm the
        # intended epoch budget.
        self.input_crop, _, _ = read_tfrecord(DATA_DIR,
                                              tfrecord_name,
                                              num_epochs=3 * CHANNEL_EPOCH +
                                              JOINT_EPOCH,
                                              batch_size=BATCH_SIZE,
                                              min_after_dequeue=10,
                                              crop_size=CROP_SIZE)

        self.input = tf.placeholder(tf.uint8, (None, None, None, 3))

        input_yuv = self.rgb2yuv(self.input)

        if CHANNEL_NUM == 1:
            # Luma only.
            input_img = tf.expand_dims(input_yuv[:, :, :, 0], axis=3)
        elif CHANNEL_NUM == 3:
            input_img = input_yuv
        else:
            print("Invalid Channel Num")
            sys.exit(1)

        # Split every 2x2 pixel block into four co-located sub-images.
        input_depth = tf.nn.space_to_depth(input_img, 2)
        input_1, input_4, input_3, input_2 = tf.split(input_depth, 4, axis=3)

        def _plane(patch, channel):
            # One Y/U/V plane of a patch, kept 4-D (N, H, W, 1) for concat.
            return tf.expand_dims(patch[:, :, :, channel], axis=3)

        patches = [input_1, input_2, input_3, input_4]
        order_type = self.args.learning_order  # renamed: don't shadow builtin `type`
        if order_type == "1234":
            # Spatial-major: Y of patches 1-4, then U of 1-4, then V of 1-4.
            planes = [_plane(p, ch) for ch in range(3) for p in patches]
        elif order_type == "yuv":
            # Patch-major: Y/U/V of patch 1, then patch 2, and so on.
            planes = [_plane(p, ch) for p in patches for ch in range(3)]
        else:
            # Previously this fell through and crashed later with an
            # undefined `order`; fail here with a clear message instead.
            raise ValueError(
                "unknown learning_order: {!r}".format(order_type))
        order = tf.concat(planes, axis=3)
        print("building order completed.\n")

        pred_li = []
        ctx_li = []
        error_pred_li = []
        loss_pred_li = []
        loss_ctx_li = []
        loss_li = []
        total_loss = 0

        for i in range(NUM_PATCHES):
            if i == 0:
                # The first model conditions only on the Y plane of patch 1.
                pred, ctx = model_conv(
                    tf.expand_dims(input_1[:, :, :, 0], axis=3), LAYER_NUM,
                    HIDDEN_UNIT, 'model_' + str(i + 1))
            else:
                # Model i+1 conditions on all previously coded planes.
                pred, ctx = model_conv(order[:, :, :, :i], LAYER_NUM,
                                       HIDDEN_UNIT, 'model_' + str(i + 1))
            # L1 prediction error against the plane being predicted.
            error_pred = abs(
                tf.subtract(pred,
                            tf.expand_dims(order[:, :, :, (i + 1)], axis=3)))
            loss_pred = tf.reduce_mean(error_pred)
            # The context head is trained to estimate the error magnitude.
            loss_ctx = LAMBDA_CTX * tf.reduce_mean(
                abs(tf.subtract(ctx, error_pred)))
            pred_li.append(pred)
            ctx_li.append(ctx)
            error_pred_li.append(error_pred)
            loss_pred_li.append(loss_pred)
            loss_ctx_li.append(loss_ctx)
            loss_li.append(loss_pred + loss_ctx)
            total_loss += loss_pred + loss_ctx

        all_vars = tf.trainable_variables()

        optimizer_li = []
        for j in range(NUM_PATCHES):
            # Match 'model_<j+1>' only when not followed by another digit.
            # The original substring test ('model_1' in name) also captured
            # model_10/model_11 variables, so the per-patch optimizers for
            # single-digit models silently updated later models as well.
            scope_re = re.compile(r'model_' + str(j + 1) + r'(?!\d)')
            model_vars = [  # renamed from `vars`: don't shadow the builtin
                var for var in all_vars if scope_re.search(var.name)
            ]
            optimizer = tf.train.AdamOptimizer(LR).minimize(loss_li[j],
                                                            var_list=model_vars)
            optimizer_li.append(optimizer)

        self.pred_li = pred_li
        self.ctx_li = ctx_li
        self.error_pred_li = error_pred_li
        self.loss_pred_li = loss_pred_li
        self.loss_ctx_li = loss_ctx_li
        self.loss_li = loss_li
        self.optimizer_li = optimizer_li
        self.optimizer_all = tf.train.AdamOptimizer(LR).minimize(
            total_loss, var_list=all_vars)

        # Original sub-sampled patches (consumed by test()).
        self.input_1 = input_1
        self.input_2 = input_2
        self.input_3 = input_3
        self.input_4 = input_4
# Example #12 — File: show.py, Project: rom1212/nns  (code-search scrape marker, commented out to keep the file parseable)
    # NOTE(review): fragment of a script body — the enclosing definition was
    # lost in the scrape.  Loads a reference price series, then scans a
    # stock directory for the historical windows that best match its last
    # `ldays` rows.
    data, dates = read_tdx('./testdata/testdata_1.txt')
    factor = 1

    # data, dates = read_tdx('./sample1.txt')
    # data, dates = read_tdx('./600030.txt')
    
    # factor = 100

    print(len(data))
    print(len(dates))
    total_days = len(data)

    # Target pattern: the final `ldays` rows of the reference series.
    ldays = 5
    target = data[total_days - ldays:,:]

    # Reload `data`/`dates` from the whole stock directory (overwrites the
    # reference series loaded above).
    data, dates = read_dir('./stock/', -1, read_tdx)

    # cands = data[:total_days - ldays,:]
    cands = data
    print('matching ... ...')
    matches = match_all(target, cands)
    print(('min:', min(matches)))
    print('sorting ... ...')
    # Lower score = better match; report the three best windows.
    top = sorted(enumerate(matches), key=itemgetter(1))
    for i in range(3):
        index, score = top[i]
        print(('date:', dates[3:][index], ', score:', score))


    # plt.yticks(range(-10, 40, 5))
    num_subplots = 6
    def build(self):
        """Construct the pixel-wise chained YUV prediction graph.

        Three MLPs predict Y, U and V for a target pixel from its causal
        context: the U model additionally sees the Y ground truth/prediction
        and the Y model's hidden state, and the V model sees Y and U.  Each
        model also emits a context output trained to estimate its own
        absolute prediction error.  Per-channel and joint losses and
        optimizers are stored on ``self``.
        """

        # Parameters
        DATA_DIR = self.args.data_dir

        LAYER_NUM = self.args.layer_num
        HIDDEN_UNIT = self.args.hidden_unit
        LAMBDA_CTX = self.args.lambda_ctx
        LAMBDA_Y = self.args.lambda_y
        LAMBDA_U = self.args.lambda_u
        LAMBDA_V = self.args.lambda_v
        LR = self.args.lr

        BATCH_SIZE = self.args.batch_size
        CROP_SIZE = self.args.crop_size

        CHANNEL_EPOCH = self.args.channel_epoch
        JOINT_EPOCH = self.args.joint_epoch

        tfrecord_name = 'train.tfrecord'

        # Build the training TFRecord on first use.
        if not data_exist(DATA_DIR, tfrecord_name):
            img_list = read_dir(DATA_DIR + 'train/')
            write_tfrecord(DATA_DIR, img_list, tfrecord_name)

        # NOTE(review): batch_size is hard-coded to 4 here rather than using
        # BATCH_SIZE — confirm whether that is intentional.
        input_crop, _, _ = read_tfrecord(DATA_DIR, tfrecord_name, num_epochs=3*CHANNEL_EPOCH+JOINT_EPOCH,
                                        batch_size=4, min_after_dequeue=10, crop_size=CROP_SIZE)

        input_data, label = self.crop_to_data(input_crop)

        # Ground-truth Y/U/V of the target pixel, each shape (N, 1).
        y_gt = tf.slice(label, [0, 0], [-1, 1])
        u_gt = tf.slice(label, [0, 1], [-1, 1])
        v_gt = tf.slice(label, [0, 2], [-1, 1])

        out_y, hidden_y = model(input_data, LAYER_NUM, HIDDEN_UNIT, 'pred_y')

        # U conditions on the Y hidden state, ground truth and prediction.
        input_f2 = tf.concat([hidden_y, input_data, y_gt, tf.expand_dims(out_y[:,0], axis=1)], axis=1)

        out_u, hidden_u = model(input_f2, LAYER_NUM, HIDDEN_UNIT, 'pred_u')

        # V conditions on both the Y and U ground truths and predictions.
        input_f3 = tf.concat([hidden_u, input_data, y_gt, tf.expand_dims(out_y[:, 0], axis=1), u_gt, tf.expand_dims(out_u[:, 0], axis=1)], axis=1)

        out_v, _, = model(input_f3, LAYER_NUM, HIDDEN_UNIT, 'pred_v')

        # Column 0 is the value prediction; column 1 the context estimate
        # (ReLU keeps the predicted error magnitude non-negative).
        pred_y = out_y[:, 0]
        pred_u = out_u[:, 0]
        pred_v = out_v[:, 0]
        ctx_y  = tf.nn.relu(out_y[:, 1])
        ctx_u  = tf.nn.relu(out_u[:, 1])
        ctx_v  = tf.nn.relu(out_v[:, 1])

        # L1 prediction errors per channel.
        predError_y = abs(tf.subtract(pred_y, tf.squeeze(y_gt, axis=1)))
        predError_u = abs(tf.subtract(pred_u, tf.squeeze(u_gt, axis=1)))
        predError_v = abs(tf.subtract(pred_v, tf.squeeze(v_gt, axis=1)))

        loss_pred_y = LAMBDA_Y * tf.reduce_mean(predError_y)
        loss_pred_u = LAMBDA_U * tf.reduce_mean(predError_u)
        loss_pred_v = LAMBDA_V * tf.reduce_mean(predError_v)

        # Context heads are trained to match the actual prediction error.
        loss_ctx_y = LAMBDA_Y * LAMBDA_CTX * tf.reduce_mean(abs(tf.subtract(ctx_y, predError_y)))
        loss_ctx_u = LAMBDA_U * LAMBDA_CTX * tf.reduce_mean(abs(tf.subtract(ctx_u, predError_u)))
        loss_ctx_v = LAMBDA_V * LAMBDA_CTX * tf.reduce_mean(abs(tf.subtract(ctx_v, predError_v)))

        loss_y = loss_pred_y + loss_ctx_y
        loss_u = loss_pred_u + loss_ctx_u
        loss_v = loss_pred_v + loss_ctx_v

        loss_yuv = loss_y + loss_u + loss_v

        # Per-channel optimizers only touch their own model's variables.
        t_vars = tf.trainable_variables()
        y_vars = [var for var in t_vars if 'pred_y' in var.name]
        u_vars = [var for var in t_vars if 'pred_u' in var.name]
        v_vars = [var for var in t_vars if 'pred_v' in var.name]

        self.optimizer_y = tf.train.AdamOptimizer(LR).minimize(loss_y, var_list=y_vars)
        self.optimizer_u = tf.train.AdamOptimizer(LR).minimize(loss_u, var_list=u_vars)
        self.optimizer_v = tf.train.AdamOptimizer(LR).minimize(loss_v, var_list=v_vars)
        self.optimizer_yuv = tf.train.AdamOptimizer(LR).minimize(loss_yuv, var_list=t_vars)

        # Variables
        self.loss_y = loss_y
        self.loss_u = loss_u
        self.loss_v = loss_v
        self.loss_yuv = loss_yuv
        self.loss_pred_y = loss_pred_y
        self.loss_pred_u = loss_pred_u
        self.loss_pred_v = loss_pred_v
        # Fixed: the original summed loss_pred_v twice and omitted the Y term
        # (loss_pred_v + loss_pred_u + loss_pred_v).
        self.loss_pred_yuv = loss_pred_y + loss_pred_u + loss_pred_v
        self.loss_ctx_y = loss_ctx_y
        self.loss_ctx_u = loss_ctx_u
        self.loss_ctx_v = loss_ctx_v
        self.loss_ctx_yuv = loss_ctx_y + loss_ctx_u + loss_ctx_v
        self.ctx_y = ctx_y
        self.ctx_u = ctx_u
        self.ctx_v = ctx_v