Example #1
def eval(params):
    batch_size = params['batch_size']
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        # is_training comes from outside this function (not shown in the snippet)
        batch = dut.distorted_inputs(params, is_training=is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(batch[0], num_classes=params['n_output'], is_training=is_training)

        init_fn = ut.get_init_fn(slim, params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']

        with tf.Session(config=config) as sess:
            # local variables back the input queue's epoch counters
            sess.run(tf.initialize_local_variables())
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))

            init_fn(sess)
            num_iter = int(math.ceil(num_examples / float(batch_size)))  # float() avoids Python 2 integer division
            print('%s: Testing started.' % (datetime.now()))

            step = 0
            loss_lst = []
            run_lst = [logits]
            run_lst.extend(batch[1:])  # ground truth (and any extra tensors) from the input pipeline

            while step < num_iter and not coord.should_stop():
                try:
                    batch_res = sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print('Testing finished at step %d' % step)
                    break
                if params['write_est']:
                    ut.write_est(params, batch_res)
                est = batch_res[0]  # network estimates
                gt = batch_res[1]   # ground-truth labels
                loss = ut.get_loss(params, gt, est)
                loss_lst.append(loss)
                s = 'VAL --> batch %i/%i | error %f' % (step, num_iter, loss)
                ut.log_write(s, params)
                step += 1
            coord.request_stop()
            coord.join(threads)
            return np.mean(loss_lst)
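
ut.get_loss is a project-specific helper, but given the per-joint error built in Example #4 it very likely computes the mean per-joint position error (MPJPE) standard for Human3.6M evaluation. A minimal NumPy sketch of that metric (the function name is mine):

import numpy as np

def mpjpe(gt, est):
    # Mean per-joint position error: average Euclidean distance between
    # predicted and ground-truth joints, both shaped (batch, n_joints * 3).
    diff = (gt - est).reshape(-1, 3)  # one row per 3D joint
    return np.mean(np.sqrt(np.sum(diff ** 2, axis=1)))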
Example #2
def run_steps(params, epoch_counter):
    with tf.Graph().as_default():
        num_examples = len(params['training_files'][0])
        number_of_steps = int(math.ceil(
            num_examples / float(params['batch_size']))) - 1  # float() avoids Python 2 integer division
        number_of_steps = number_of_steps * (epoch_counter + 1)
        tf.logging.set_verbosity(tf.logging.INFO)
        batch = dut.distorted_inputs(params, is_training=is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, endpoint = vgg.vgg_19(batch[0],
                                          num_classes=params['n_output'],
                                          is_training=is_training)

        # Alternative model: inception.inception_v2(batch[0], num_classes=params['n_output'], is_training=is_training)

        # Note: tf.sub / tf.scalar_summary / tf.train.SummaryWriter are pre-TF-1.0
        # names (tf.subtract / tf.summary.scalar / tf.summary.FileWriter in 1.x).
        err = tf.sub(logits, batch[1])
        losses = tf.reduce_mean(tf.reduce_sum(tf.square(err), 1))  # mean squared error per example
        reg_loss = slim.losses.get_total_loss()  # weight-decay terms registered via the arg_scope
        total_loss = losses + reg_loss
        tf.scalar_summary('losses/total_loss', total_loss)
        tf.scalar_summary('losses/losses', losses)
        tf.scalar_summary('losses/reg_loss', reg_loss)
        summary_writer = tf.train.SummaryWriter(params["sm"])

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=params['lr'])

        train_op = slim.learning.create_train_op(total_loss,
                                                 optimizer,
                                                 summarize_gradients=True)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params[
            'per_process_gpu_memory_fraction']
        # Run the training. `learn` is a project-local wrapper here, not
        # slim.learning.train (which does not accept loss/logits/batch/endpoint):
        final_loss = learn.train(
            loss=losses,
            logits=logits,
            batch=batch,
            endpoint=endpoint,
            train_op=train_op,
            logdir=params["cp_file"],
            init_fn=ut.get_init_fn(slim, params),
            number_of_steps=number_of_steps,
            summary_writer=summary_writer,
            session_config=config,
        )
    return final_loss
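
ut.get_init_fn(slim, params) is also project code; the usual shape of such a helper in TF-Slim is slim.assign_from_checkpoint_fn, restoring every variable except the replaced output layer. A sketch under that assumption (the checkpoint key params['init_cp'] and the excluded scope are guesses):

def get_init_fn(slim, params):
    # Restore pre-trained weights, skipping the output layer whose shape
    # changed to params['n_output'].
    exclude = ['vgg_19/fc8']  # guessed scope; depends on the model in use
    variables = slim.get_variables_to_restore(exclude=exclude)
    return slim.assign_from_checkpoint_fn(params['init_cp'], variables,
                                          ignore_missing_vars=True)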
Example #3
def eval(params):
    with tf.Graph().as_default() as g:
        url = '/home/coskun/PycharmProjects/data/pose/mv_val/img/S9/Discussion 1.54138969/frame_00010.png'
        filename_queue = tf.train.string_input_producer(
            [url])  #  list of files to read

        reader = tf.WholeFileReader()
        key, value = reader.read(filename_queue)

        image_raw = tf.image.decode_png(value, channels=3)  # force a 3-channel decode

        processed_image = human36m_preprocessing.preprocess_image(
            image_raw, 224, 224, is_training=is_training)
        processed_images = tf.expand_dims(processed_image, 0)
        # image, label = dut.distorted_inputs(params,is_training=is_training)

        with slim.arg_scope(inception.inception_v2_arg_scope()):
            logits, end_points = inception.inception_v2(
                processed_images,
                num_classes=params['n_output'],
                is_training=is_training)

        init_fn = ut.get_init_fn(slim, params, load_previus_cp=True)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params[
            'per_process_gpu_memory_fraction']
        # To list endpoint tensor names: for op in g.get_operations(): print(op.name)

        features = g.get_tensor_by_name(
            'InceptionV2/InceptionV2/Mixed_3b/concat:0')
        # features = g.get_tensor_by_name('InceptionV2/InceptionV2/MaxPool_3a_3x3/MaxPool:0')

        with tf.Session(config=config) as sess:
            sess.run(tf.initialize_local_variables())
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            init_fn(sess)
            num_iter = 1
            print('%s: Model reading started.' % (datetime.now()))

            step = 0
            loss_lst = []
            while step < num_iter:
                try:
                    features_values = sess.run(features)
                except tf.errors.OutOfRangeError:
                    print('Testing finished at step %d' % step)
                    break
                print(features_values.shape)
                img_arr = np.squeeze(features_values[:, :, :, 1])  # visualize a single feature channel
                print(img_arr.shape)

                # Scale activations to 0-255 first; Image.fromarray on raw floats
                # yields a mode-'F' image whose convert('RGB') truncates values.
                img_arr = 255.0 * (img_arr - img_arr.min()) / max(img_arr.ptp(), 1e-8)
                img = Image.fromarray(img_arr.astype(np.uint8)).convert('RGB')
                img.save(
                    '/home/coskun/PycharmProjects/poseft/files/temp/my.png')
                img.show()

                step += 1
            coord.request_stop()
            coord.join(threads)
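
Saving a single raw channel, as above, gives a hard-to-read image. A common alternative is tiling the first channels of the fetched activation into one grid; a sketch using only NumPy and PIL (the function name and layout are mine):

import numpy as np
from PIL import Image

def save_feature_grid(features, path, n_cols=8):
    # features: the (1, H, W, C) array returned by sess.run(features).
    # Tiles up to n_cols * n_cols channels into one grayscale grid image.
    fmaps = features[0]
    h, w, c = fmaps.shape
    n = min(c, n_cols * n_cols)
    grid = np.zeros((n_cols * h, n_cols * w), dtype=np.uint8)
    for i in range(n):
        fmap = fmaps[:, :, i]
        fmap = 255.0 * (fmap - fmap.min()) / max(fmap.ptp(), 1e-8)  # normalize per channel
        row, col = divmod(i, n_cols)
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w] = fmap.astype(np.uint8)
    Image.fromarray(grid).save(path)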
Example #4
def run_steps(params,epoch_counter):
    with tf.Graph().as_default():
        num_examples = len(params['training_files'][0])
        number_of_steps = int(math.ceil(num_examples / float(params['batch_size']))) - 1  # float() avoids Python 2 integer division
        print('Number of steps: %i' % number_of_steps)
        number_of_steps = number_of_steps * (epoch_counter + 1)

        tf.logging.set_verbosity(tf.logging.INFO)
        batch = dut.distorted_inputs(params,is_training=is_training)

        # Create the model:
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            logits,aux, endpoint = inception_resnet_v2.inception_resnet_v2(batch[0], num_classes=params['n_output'], is_training=is_training)

        err = tf.subtract(logits, batch[1])

        losses = tf.reduce_sum(tf.square(err))  # summed squared error over the batch

        # Mean per-joint position error: Euclidean distance per 3D joint, averaged.
        err_tr = tf.reshape(err, shape=[-1, 3])
        err_tr = tf.reduce_sum(tf.square(err_tr), axis=1)
        err_tr = tf.sqrt(err_tr)
        err_tr = tf.reduce_mean(err_tr)


        # An auxiliary-head loss (0.4 * loss on `aux`) was tried and disabled.
        reg_loss = slim.losses.get_total_loss()  # regularization losses collected by slim
        total_loss = losses + reg_loss

        tf.summary.scalar('losses/total_loss', total_loss)
        tf.summary.scalar('losses/losses', losses)
        tf.summary.scalar('losses/training_loss', err_tr)
        tf.summary.scalar('losses/reg_loss', reg_loss)
        summary_writer = tf.summary.FileWriter(params["sm"])
        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=params['lr'])
        train_op = slim.learning.create_train_op(total_loss, optimizer,
                                                 summarize_gradients=False)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.95
        # Run the training; slim.learning.train returns the loss at the final step:
        final_loss = tf.contrib.slim.learning.train(
            train_op=train_op,
            logdir=params["cp_file"],
            init_fn=ut.get_init_fn(slim,params),
            number_of_steps=number_of_steps,
            summary_writer=summary_writer,
            session_config=config,
        )
    return final_loss
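
The err_tr chain above is the mean per-joint position error written as TensorFlow ops; pulled out as a self-contained helper it reads as follows (the function name is mine):

import tensorflow as tf

def mpjpe_op(logits, labels):
    # Both tensors are (batch, n_joints * 3) flattened 3D coordinates.
    err = tf.reshape(tf.subtract(logits, labels), [-1, 3])  # one row per joint
    dist = tf.sqrt(tf.reduce_sum(tf.square(err), axis=1))   # Euclidean distance per joint
    return tf.reduce_mean(dist)                             # average over all joints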
Example #5
        image_raw = tf.image.decode_png(value, channels=3)

    # image = tf.image.resize_bilinear(image_raw, [256, 256, 3], align_corners=False)
    processed_image = human36m_preprocessing.preprocess_image(
        image_raw, 299, 299, is_training=is_training)
    processed_images = tf.expand_dims(processed_image, 0)

    # image, label = dut.distorted_inputs(params,is_training=is_training)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, aux, end_points = inception.inception_v3(
            processed_images,
            num_classes=params['n_output'],
            is_training=is_training)

    init_fn = ut.get_init_fn(slim, params)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = params[
        'per_process_gpu_memory_fraction']
    loss_lst = []
    run_lst = [logits]

    with tf.Session(config=config) as sess:
        sess.run(tf.initialize_local_variables())
        coord = tf.train.Coordinator()
        threads = []
        for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
            threads.extend(
                qr.create_threads(sess, coord=coord, daemon=True, start=True))
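
This fragment is cut off here, before init_fn is called and the graph is run. Based on Examples #1 and #6 in this listing, the missing tail would look roughly like this (a sketch, not the original code):

        init_fn(sess)
        est = sess.run(run_lst)[0]  # forward pass on the single queued image
        print('Estimate shape: %s' % (est.shape,))
        coord.request_stop()
        coord.join(threads)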
Example #6
def eval(params):
    batch_size = params['batch_size']
    params['write_est'] = False  # forces the write_mid_est branch below to be skipped
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params, is_training=is_training)
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            logits,aux, end_points = inception_resnet_v2.inception_resnet_v2(batch[0],
                                                                         num_classes=params['n_output'],
                                                                         is_training=is_training)

        # Obtain the trainable variables and a saver
        # variables_to_restore = slim.get_variables_to_restore()

        init_fn = ut.get_init_fn(slim, params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']
        with tf.Session(config=config) as sess:
            init_op = tf.group(tf.initialize_all_variables(),
                               tf.initialize_local_variables())
            sess.run(init_op)
            init_fn(sess)
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))


            num_iter = int(math.ceil(num_examples / float(batch_size)))  # float() avoids Python 2 integer division
            print('%s: Testing started.' % (datetime.now()))

            step = 0
            loss_lst = []
            run_lst = [logits]
            # Optional: fetch intermediate endpoints too, e.g.
            # run_lst.append(end_points['PreLogitsFlatten'])
            run_lst.extend(batch[1:])  # ground truth (and any extra tensors) from the input pipeline

            while step < num_iter and not coord.should_stop():
                try:
                    batch_res = sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print('Testing finished at step %d' % step)
                    break
                if params['write_est']:
                    ut.write_mid_est(params, batch_res)
                est = batch_res[0]  # network estimates
                gt = batch_res[1]   # ground-truth labels
                loss, _ = ut.get_loss(params, gt, est)
                loss_lst.append(loss)
                s = 'VAL --> batch %i/%i | error %f' % (step, num_iter, loss)
                if step % 10 == 0:
                    ut.log_write(s, params, screen_print=True)
                    print('Current number of examples / mean err: %i / %f'
                          % (step * gt.shape[0], np.mean(loss_lst)))
                else:
                    ut.log_write(s, params, screen_print=False)
                step += 1
            coord.request_stop()
            coord.join(threads)
            return np.mean(loss_lst)
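
tf.initialize_all_variables and tf.initialize_local_variables, used throughout these examples, were deprecated in TensorFlow 1.0. The same initialization under the 1.x names is:

init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
sess.run(init_op)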