Example #1
File: bicubic.py Project: iwtw/tf_tools
def main(_):
    init()
    args = argparse()
    file_queue = tf.train.string_input_producer([args.inputpath])
    inputs = data_input.get_batch(file_queue, [args.height, args.width], 64, 4, 5, is_training=False)
    output_height = args.height * args.scale
    output_width = args.width * args.scale
    if args.scale < 0:
        assert args.height % args.scale == 0
        assert args.width % args.scale == 0
        output_height = args.height // args.scale
        output_width = args.width // args.scale

    outputs = tf.image.resize_bicubic(inputs, [output_height, output_width])
    # (truncated in the original excerpt: a conditional `save_images(...)` call)


    for device_index in range(args.gpus):
        with tf.device("/gpu:{}".format(device_index)):
            pass  # per-device graph construction is truncated in this excerpt



if __name__ == "__main__":
    tf.app.run(main)
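The data_input.get_batch helper used above comes from the iwtw/tf_tools project and is not shown in this excerpt. Purely as an illustration, a queue-based loader with a compatible call shape might look like the sketch below; the positional arguments 64, 4 and 5 are assumed to mean batch size, reader threads, and a capacity factor, and the whole function is a hypothetical reconstruction, not the project's actual code.

# Hypothetical sketch of a queue-based batch loader (NOT the real
# data_input.get_batch from iwtw/tf_tools; argument meanings are assumed).
import tensorflow as tf

def get_batch_sketch(file_queue, image_size, batch_size=64,
                     n_threads=4, capacity_factor=5, is_training=False):
    # Read one file from the queue and decode it as a 3-channel JPEG.
    reader = tf.WholeFileReader()
    _, raw = reader.read(file_queue)
    image = tf.image.decode_jpeg(raw, channels=3)
    image = tf.image.resize_images(image, image_size)
    image.set_shape([image_size[0], image_size[1], 3])

    capacity = capacity_factor * batch_size
    if is_training:
        # Shuffle examples while filling the batch during training.
        return tf.train.shuffle_batch([image], batch_size=batch_size,
                                      num_threads=n_threads, capacity=capacity,
                                      min_after_dequeue=batch_size)
    return tf.train.batch([image], batch_size=batch_size,
                          num_threads=n_threads, capacity=capacity)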
Example #2
def predict(self, test_data):
    # Run inference over test_data in batches; return predicted labels and probabilities.
    pbar = data_input.get_batch(
        test_data, batch_size=self.cfg['batch_size'], is_test=1)
    val_pred, val_prob = [], []
    for (t1_ids, t1_len, t2_ids, t2_len) in pbar:
        fd = self.feed_batch(t1_ids, t1_len, t2_ids, t2_len, is_test=1)
        pred_labels, pred_prob = self.sess.run(
            [self.predict_idx, self.predict_prob], feed_dict=fd)
        val_pred.extend(pred_labels)
        val_prob.extend(pred_prob)
    return val_pred, val_prob
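As a usage illustration (not part of the original project), the returned lists can be turned into NumPy arrays for downstream post-processing; model and test_data below are assumed names.

# Hypothetical caller of predict(); `model` and `test_data` are assumed names.
import numpy as np

pred, prob = model.predict(test_data)
pred = np.asarray(pred)   # predicted class indices, one per example
prob = np.asarray(prob)   # predicted probabilities, one row per example
print("predicted positives:", int((pred == 1).sum()))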
Example #3
def eval(self, test_data):
    # Evaluate on test_data in batches and return accuracy against the gold labels.
    pbar = data_input.get_batch(
        test_data, batch_size=self.cfg['batch_size'], is_test=1)
    val_label, val_pred = [], []
    for (out_ids1, m_ids1, seg_ids1, seq_len1, out_ids2, m_ids2, seg_ids2, seq_len2, label) in pbar:
        val_label.extend(label)
        fd = self.feed_batch(out_ids1, m_ids1, seg_ids1, seq_len1, out_ids2, m_ids2, seg_ids2, seq_len2, is_test=1)
        pred_labels, pred_prob = self.sess.run(
            [self.predict_idx, self.predict_prob], feed_dict=fd)
        val_pred.extend(pred_labels)
    test_acc = accuracy_score(val_label, val_pred)
    return test_acc
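accuracy_score here is presumably scikit-learn's metric; the snippet below shows the import this method would rely on and the equivalent manual computation (the import is an assumption, since it is not visible in the excerpt).

# Assumed import for the accuracy_score call above (scikit-learn).
from sklearn.metrics import accuracy_score
import numpy as np

y_true = np.array([1, 0, 1, 1])
y_pred = np.array([1, 0, 0, 1])
# accuracy_score is simply the fraction of exact label matches.
assert accuracy_score(y_true, y_pred) == (y_true == y_pred).mean()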
Example #4
def run_epoch(self, epoch, data_train, data_val):
    steps = int(math.ceil(float(len(data_train)) / self.cfg['batch_size']))
    progbar = tf.keras.utils.Progbar(steps)
    # iterate over the training data in mini-batches within this epoch
    batch_iter = data_input.get_batch(
        data_train, batch_size=self.cfg['batch_size'])
    for i, (t1_ids, t1_len, t2_ids, t2_len, label) in enumerate(batch_iter):
        fd = self.feed_batch(t1_ids, t1_len, t2_ids, t2_len, label)
        # a = sess.run([query_norm, doc_norm, prod, cos_sim_raw], feed_dict=fd)
        _, cur_loss = self.sess.run(
            [self.train_op, self.loss], feed_dict=fd)
        progbar.update(i + 1, [("loss", cur_loss)])
    # after finishing the epoch, evaluate on the validation set and report accuracy
    dev_acc = self.eval(data_val)
    print("dev set acc:", dev_acc)
    return dev_acc
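A possible outer loop around run_epoch keeps the checkpoint with the best validation accuracy; the epoch count, Saver and checkpoint path below are illustrative assumptions, not part of the project.

# Hypothetical training driver around run_epoch(); epoch count, saver and
# checkpoint path are illustrative assumptions.
best_acc = 0.0
saver = tf.train.Saver(max_to_keep=1)
for epoch in range(1, 11):
    dev_acc = model.run_epoch(epoch, data_train, data_val)
    if dev_acc > best_acc:
        best_acc = dev_acc
        saver.save(model.sess, "./ckpt/best_model")  # hypothetical path
print("best dev acc:", best_acc)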
Example #5
def predict_embedding(self, test_data):
    # Compute query-side embeddings for test_data in batches.
    pbar = data_input.get_batch(
        test_data, batch_size=self.cfg['batch_size'], is_test=1)
    val_embed = []
    for (out_ids1, m_ids1, seg_ids1, seq_len1) in pbar:
        fd = {
            self.q_ids: out_ids1,
            self.q_mask_ids: m_ids1,
            self.q_seg_ids: seg_ids1,
            self.q_seq_length: seq_len1,
            self.keep_prob_place: 1,
            self.is_train_place: 0
        }
        pred_embedding = self.sess.run(self.q_emb, feed_dict=fd)
        val_embed.extend(pred_embedding)
    return val_embed
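The embeddings returned by predict_embedding can feed a simple nearest-neighbour search; the NumPy sketch below is illustrative, with query_data and candidate_data as assumed inputs.

# Hypothetical retrieval on top of predict_embedding(); variable names are assumed.
import numpy as np

q_emb = np.asarray(model.predict_embedding(query_data))      # shape (Q, D)
c_emb = np.asarray(model.predict_embedding(candidate_data))  # shape (C, D)

# L2-normalise so that a dot product equals cosine similarity.
q_emb /= np.linalg.norm(q_emb, axis=1, keepdims=True)
c_emb /= np.linalg.norm(c_emb, axis=1, keepdims=True)
sim = q_emb.dot(c_emb.T)       # (Q, C) cosine similarities
best = sim.argmax(axis=1)      # best-matching candidate for each query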
Example #6
def run_epoch(self, epoch, d_train, d_val):
    steps = int(math.ceil(float(len(d_train)) / self.cfg['batch_size']))
    progbar = tf.keras.utils.Progbar(steps)
    # iterate over the training data in mini-batches within this epoch
    batch_iter = data_input.get_batch(
        d_train, batch_size=self.cfg['batch_size'])
    for i, (out_ids1, m_ids1, seg_ids1, seq_len1,
            label) in enumerate(batch_iter):
        fd = self.feed_batch(out_ids1, m_ids1, seg_ids1, seq_len1, label)
        # a = self.sess.run([self.is_train_place, self.q_e], feed_dict=fd)
        _, cur_loss = self.sess.run(
            [self.train_op, self.loss], feed_dict=fd)
        progbar.update(i + 1, [("loss", cur_loss)])
    # after finishing the epoch, evaluate on the validation set and report accuracy
    dev_acc = self.eval(d_val)
    print("dev set acc:", dev_acc)
    return dev_acc
Example #7
    # ... (the body of this function is truncated in this excerpt)
    return outputs


Generator = generator

DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as session:

    #image_batch = tf.placeholder( tf.uint8 , shape =(BATCH_SIZE , H*4 , W * 4 , 3 )  )
    file_queue = tf.train.string_input_producer(DATA_TRAIN)
    image_batch, label_batch = data_input.get_batch(file_queue, (112, 96),
                                                    BATCH_SIZE,
                                                    n_threads=4,
                                                    min_after_dequeue=0,
                                                    flip_flag=True)
    # image_batch ranges [0,255] , dtype = tf.uint8

    #preprocessing
    with tf.device("/gpu:0"):
        # image_batch_pre = tf.image.random_flip_left_right(image_batch)
        image_batch_pre = tf.cast(image_batch, tf.float32)
        image_batch_pre = image_batch_pre / 127.5 - 1.

        gen_losses, x_gens, x_bicubics = [], [], []
        content_losses, vgg_losses = [], []
        split_image_batch_pre = tf.split(image_batch_pre, len(DEVICES), axis=0)
    for device_index, device in enumerate(DEVICES):
        with tf.device(device):
            pass  # per-device graph construction is truncated here; see Example #8
Example #8
def build_graph(data):
    #image_batch = tf.placeholder( tf.uint8 , shape =(BATCH_SIZE , H*4 , W * 4 , 3 )  )
    file_queue = tf.train.string_input_producer(data)
    image_batch, label_batch = data_input.get_batch(file_queue, (112, 96), BATCH_SIZE,
                                                    n_threads=4, min_after_dequeue=0,
                                                    flip_flag=True)
    # image_batch ranges [0, 255], dtype = tf.uint8

    # preprocessing
    with tf.device("/gpu:0"):
        # image_batch_pre = tf.image.random_flip_left_right(image_batch)
        image_batch_pre = tf.cast(image_batch, tf.float32)
        image_batch_pre = image_batch_pre / 127.5 - 1.

        gen_losses, x_gens, x_bicubics = [], [], []
        content_losses, vgg_losses = [], []
        split_image_batch_pre = tf.split(image_batch_pre, len(DEVICES), axis=0)

    for device_index, device in enumerate(DEVICES):
        with tf.device(device):
            print("device : ", device)
            x = split_image_batch_pre[device_index]
            x_lr = tf.image.resize_bicubic(x, [H, W])
            x_bicubic = tf.image.resize_bicubic(x_lr, [H * 4, W * 4])
            x_gen = Generator(inputs=x_lr)

            vgg_inputs = tf.concat([x, x_gen], axis=0)
            vgg_inputs = (vgg_inputs + 1.0) * 127.5
            vgg = vgg19.Vgg19()
            vgg.build(vgg_inputs)
            fmap = tf.split(vgg.conv2_2, 2)
            vgg_loss = tf.losses.mean_squared_error(fmap[0], fmap[1])
            gen_content_loss = tf.losses.mean_squared_error(x, x_gen)
            gen_loss = (1 - P) * gen_content_loss + P * vgg_loss

            gen_losses.append(gen_loss)
            vgg_losses.append(vgg_loss)
            x_bicubics.append(x_bicubic)
            x_gens.append(x_gen)
            content_losses.append(gen_content_loss)

    gen_loss = tf.add_n(gen_losses) * (1.0 / len(DEVICES)) * (1.0 / BATCH_SIZE)
    vgg_loss = tf.add_n(vgg_losses) * (1.0 / len(DEVICES)) * (1.0 / BATCH_SIZE)
    content_loss = tf.add_n(content_losses) * (1.0 / len(DEVICES)) * (1.0 / BATCH_SIZE)
    x_gen = tf.concat(x_gens, axis=0)
    x_bicubic = tf.concat(x_bicubics, axis=0)
    tf.summary.scalar("gen_loss", gen_loss)
    tf.summary.scalar("vgg_loss", vgg_loss)
    tf.summary.scalar("content_loss", content_loss)

    def convert(x):
        # x ranges [-1, 1], dtype = tf.float32
        # returns: ranges [0, 255], dtype = tf.uint8
        x = tf.clip_by_value(x, -1, 1)
        x = (x + 1.0) * (255.99 / 2)
        x = tf.cast(x, tf.uint8)
        return x

    x_gen_outputs = convert(x_gen)
    x_bicubic_outputs = convert(x_bicubic)
    outputs = tf.concat([image_batch, x_gen_outputs, x_bicubic_outputs], axis=0)

    return outputs, gen_loss
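One way to drive this graph in a TF 1.x session, assuming an Adam optimizer, a fixed step count and the usual queue-runner setup (all of these choices are assumptions layered on top of the example, not the original training script; DATA_TRAIN is the file list from Example #7).

# Hypothetical training loop for build_graph(); optimizer, learning rate,
# step count and log directory are assumptions.
outputs, gen_loss = build_graph(DATA_TRAIN)
train_op = tf.train.AdamOptimizer(1e-4).minimize(gen_loss)
summary_op = tf.summary.merge_all()

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("./log", sess.graph)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(10000):
            _, loss_val, summ = sess.run([train_op, gen_loss, summary_op])
            writer.add_summary(summ, step)
    finally:
        coord.request_stop()
        coord.join(threads)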