Example #1
def main():

    config = Config()
    with tf.Graph().as_default():

        # create a reader object
        reader = read_data.PoseReader("./labels/txt/new_annos.txt",
                                      "./data/test_imgs/", config)

        # create a model object
        model = cpm.CPM(config)

        # feedforward
        predicts = model.build_fc(False)

        # return the loss
        loss = model.loss()

        # Initializing operation
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=100)

        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:

            sess.run(init_op)
            model.restore(sess, saver, config.load_filename)

            # start testing
            for idx in xrange(config.test_num):
                with tf.device("/cpu:0"):
                    imgs, fm, coords, begins, filename_list = reader.get_batch(False, idx)

                print filename_list
                # feed data into the model
                feed_dict = {
                    model.images: imgs
                }
                with tf.device(config.gpu):
                    # run the testing operation
                    predict_points = sess.run(predicts, feed_dict=feed_dict)
                    points = get_points(predict_points)
                    print points

                    # append one annotation line per image in the batch
                    for batch_idx in xrange(config.batch_size):
                        with open("./labels/annos.txt", "a") as f:
                            f.write('0009' + str(idx * config.batch_size + batch_idx + 5781) + '.png ')
                            for i in xrange(config.points_num * 2):
                                f.write(str(int(points[batch_idx][i])) + ',')
                            f.write('1\n')
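Note that `get_points` is not defined anywhere in this snippet. A minimal sketch of what such a helper might do, assuming the FC head emits coordinates normalized to [0, 1] (the commented-out scaling in Example #2 below suggests as much) and the conventional 368x368 CPM input size; both the signature and the scaling are assumptions, not the authors' code:

# Hypothetical helper: scale normalized FC outputs of shape
# (batch, points_num * 2) to pixel coordinates.
def get_points(predict_points, img_width=368, img_height=368):
    points = []
    for pred in predict_points:            # one prediction row per image
        row = []
        for i in xrange(len(pred) // 2):
            row.append(pred[i * 2] * img_width)       # x in pixels
            row.append(pred[i * 2 + 1] * img_height)  # y in pixels
        points.append(row)
    return points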
Example #2
def main():
    config = Config()
    with tf.Graph().as_default():

        # create a reader object
        reader = read_data.PoseReader(config.annos_path,
                                      config.data_path, config)

        # create a model object
        model = cpm.CPM(config)

        # feedforward
        predicts = model.build_fc(False)  # False: inference, not training

        # Initializing operation
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=100)

        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:
            sess.run(init_op)
            model.restore(sess, saver, config.load_filename)

            # write the predict coordinates to file
            with open(config.annos_write_path, 'w') as fw:
                # start testing
                for idx in xrange(config.test_num):
                    with tf.device("/cpu:0"):
                        imgs, fm, coords, begins, filename_list = \
                            reader.get_random_batch(distort=False)
                    # feed data into the model
                    feed_dict = {
                        model.images: imgs,
                        model.coords: coords,
                        model.labels: fm
                    }
                    with tf.device(config.gpu):
                        # run the testing operation
                        predict_coords_list = sess.run(predicts, feed_dict=feed_dict)
                        # print predict_coords_list
                        for filename, predict_coords in zip(filename_list, predict_coords_list):
                            print (filename, predict_coords)
                            fw.write(filename + ' ')
                            for i in xrange(config.points_num):
                                # w = predict_coords[i*2] * config.img_width
                                # h = predict_coords[i*2 + 1] * config.img_height
                                # item = str(int(w)) + ',' + str(int(h))
                                w = predict_coords[i*2]
                                h = predict_coords[i*2 + 1]
                                item = str(w) + ',' + str(h)
                                fw.write(item + ',')
                            fw.write('1\n')
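Each line written by the loop above has the form `filename x0,y0,x1,y1,...,1`, where the trailing 1 is written as a constant. A minimal sketch of reading that format back in (my helper, not part of the source):

def parse_annos(path):
    # parse lines of the form "filename x0,y0,x1,y1,...,1"
    annos = {}
    with open(path) as f:
        for line in f:
            filename, values = line.strip().split(' ', 1)
            fields = values.split(',')
            annos[filename] = [float(v) for v in fields[:-1]]  # drop the trailing flag
    return annos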
Example #3
def main():

    config = Config()
    with tf.Graph().as_default():

        # create a reader object
        reader = read_data.PoseReader("./labels/txt/validate_annos.txt",
                                      "./data/train_imgs/", config)

        # create a model object
        model = cpm.CPM(config)

        # feedforward
        predicts = model.build_fc(False)

        # return the loss
        loss = model.loss()

        # Initializing operation
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=100)

        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:

            sess.run(init_op)
            model.restore(sess, saver, config.load_filename)

            # start testing
            for idx in xrange(config.test_num):
                with tf.device("/cpu:0"):
                    imgs, fm, coords, begins, filename_list = reader.get_batch(
                        distort=False)

                # feed data into the model
                feed_dict = {
                    model.images: imgs,
                    model.coords: coords,
                    model.labels: fm
                }
                with tf.device(config.gpu):
                    # run the testing operation
                    predict_coords = sess.run(predicts, feed_dict=feed_dict)
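Example #3 stops after computing `predict_coords` and never uses the result. A possible continuation for a validation script, assuming the predictions and the reader's ground-truth `coords` share the same (batch_size, points_num * 2) layout and scale; this is my sketch, not the authors' code:

import numpy as np

# mean absolute error between predicted and ground-truth coordinates
err = np.mean(np.abs(np.asarray(predict_coords) - np.asarray(coords)))
print "mean abs coordinate error:", err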
Example #4
def main():

    config = Config()
    with tf.Graph().as_default():
        # create a reader object
        reader = read_data.PoseReader(config)

        # create a model object
        model = cpm.CPM(config)

        # feedforward
        predicts = model.build_fc(True)

        # return the loss
        loss = model.loss()

        # training operation
        train_op = model.train_op(loss, model.global_step)
        # Initializing operation
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=100)

        sess_config = tf.ConfigProto(log_device_placement=True)
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:

            # initialize parameters or restore from previous model
            if not os.path.exists(config.params_dir):
                os.makedirs(config.params_dir)
            if os.listdir(config.params_dir) == [] or config.initialize:
                print("Initializing Network")
                sess.run(init_op)
            else:
                sess.run(init_op)
                model.restore(sess, saver, config.load_filename)

            merged = tf.summary.merge_all()
            logdir = os.path.join(config.logdir,
                                  datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))

            writer = tf.summary.FileWriter(logdir, sess.graph)
            new_start = 0
            offset = 0
            epoch_count = 1
            print "The last", config.image_count % config.batch_size, "images will be missed in every epoch of", config.image_count, "images"
            # start training
            for idx in xrange(config.max_iteration):
                with tf.device(config.gpu):
                    # start a new epoch once the reader has consumed the full image set
                    if (offset + config.image_count) <= (idx * config.batch_size):
                        offset = (idx - 1) * config.batch_size
                        epoch_count = epoch_count + 1
                        print "start of new epoch"
                    imgs, fm, coords, begins, filename_list, end_index = \
                        reader.get_random_batch(new_start, distort=config.toDistort)
                    new_start = end_index + 1
                    print "file names:", filename_list
                    print "coordinates:", coords
                # feed data into the model
                feed_dict = {
                    model.images: imgs,
                    model.coords: coords,
                    model.labels: fm
                }
                with tf.device(config.gpu):
                    # run the training operation
                    sess.run(train_op, feed_dict=feed_dict)

                with tf.device(config.gpu):
                    # write summary
                    if (idx + 1) % config.summary_iters == 0:
                        print("iter#", idx + 1)
                        tmp_global_step = model.global_step.eval()
                        summary = sess.run(merged, feed_dict=feed_dict)
                        writer.add_summary(summary, tmp_global_step)
                    # save checkpoint
                    if (idx + 1) % config.checkpoint_iters == 0:
                        tmp_global_step = model.global_step.eval()
                        save_path = model.save(sess, saver,
                                               config.save_filename,
                                               tmp_global_step)
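None of these examples show the `Config` class itself. A minimal sketch of the interface they collectively rely on; every attribute below is read somewhere in the examples, but the values are illustrative placeholders rather than the authors' settings:

class Config(object):
    # data and model dimensions
    batch_size = 16
    points_num = 15            # keypoints per image
    # schedule
    test_num = 100             # test batches (Examples #1-#3)
    max_iteration = 100000     # training steps (Examples #4, #7)
    summary_iters = 100        # TensorBoard summary interval
    checkpoint_iters = 1000    # checkpoint interval
    image_count = 10000        # images per epoch (Example #4)
    toDistort = True           # augmentation flag (Example #4)
    initialize = False         # True: train from scratch
    # devices and paths
    gpu = '/gpu:0'
    params_dir = './params/'
    logdir = './log/'
    load_filename = 'model.ckpt'
    save_filename = 'model.ckpt'
    annos_path = './labels/txt/new_annos.txt'     # Example #2
    data_path = './data/test_imgs/'               # Example #2
    annos_write_path = './labels/annos_pred.txt'  # Example #2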
Example #5
norm_image_size = (368, 368)
belief_map_size = (46, 46)
keypoints_count = len(utils.keypoints_order[category])

# 2. load data for training
train_data = utils.dataLoader(
    category=category,
    path_to_excel_file='/home/panziqi/project/fashion_ai/annotations/train/train.xlsx',
    images_prefix='/home/public/FashionAI/keypoint/season1/train/',
    norm_image_size=norm_image_size,
    belief_map_size=belief_map_size)

# 3. train
cpm = cpm.CPM(network_name='CPM_' + category,
              stage_count=4,
              norm_image_size=norm_image_size,
              belief_map_size=belief_map_size,
              keypoints_count=keypoints_count)
cpm.train(
    train_data=train_data,
    log_folder='/home/panziqi/project/fashion_ai/version_softmax/log/train/',
    params_folder='/home/panziqi/project/fashion_ai/version_softmax/params/',
    epochs=epochs,
    batch_size=batch_size,
    init_lr=0.015,
    lr_step=3,
    lr_factor=0.7,
    ctx=[mx.gpu(gpu_id) for gpu_id in range(gpu_s, gpu_e)])
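Assuming `lr_step` counts epochs and `lr_factor` is a multiplicative decay, the usual reading of a step schedule (the source does not define either), the learning rate would evolve as:

# lr(epoch) = init_lr * lr_factor ** (epoch // lr_step)
init_lr, lr_step, lr_factor = 0.015, 3, 0.7
for epoch in range(9):
    print('epoch %d: lr = %.6f' % (epoch, init_lr * lr_factor ** (epoch // lr_step)))

Under that reading, epochs 0-2 run at 0.015, epochs 3-5 at 0.0105, and epochs 6-8 at 0.00735.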
Example #6
print("read data from: " + file_name)
print("read checkpoint from: " + ckpt_file_name)

df = pd.read_csv(file_name)
if total_size > -1:
    df = df[:total_size]
pred_df = df[["image_id", "image_category"]]
input_data = train_input.data_stream(df,
                                     config.batch_size,
                                     is_train=False,
                                     scale=scale,
                                     pre_path=pre_path)

tf.reset_default_graph()
#with tf.device('/device:GPU:0'):
model = cpm.CPM(config)
predict = model.inference_pose_vgg(is_train=False)

saver = tf.train.Saver(tf.trainable_variables())

init_op = tf.global_variables_initializer()

config_cpu = tf.ConfigProto(device_count={'GPU': 0})

with tf.Session(config=config_cpu) as sess:  # use the CPU-only config defined above
    sess.run(init_op)

    #     if not os.path.exists(config.params_dir):
    #         os.makedirs(config.params_dir)
    #     if os.listdir(config.params_dir) == [] or config.initialize:
    print("Initializing Network")
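The snippet ends immediately after initialization. A possible continuation, restoring the checkpoint named above and running the pose network over the data stream; iterating `input_data` batch by batch and feeding `model.images` mirror the other examples and are assumptions here, not code from the source:

    # restore trained weights and run inference batch by batch
    saver.restore(sess, ckpt_file_name)
    for imgs in input_data:
        heatmaps = sess.run(predict, feed_dict={model.images: imgs})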
Example #7
def main():

    config = Config()
    with tf.Graph().as_default():

        # create a reader object
        reader = read_data.PoseReader("./labels/txt/validate_annos.txt",
                                      "./data/train_imgs/", config)

        # create a model object
        model = cpm.CPM(config)

        # feedforward
        predicts = model.build_fc(True)

        # return the loss
        loss = model.loss()

        # training operation
        train_op = model.train_op(loss, model.global_step)
        # Initializing operation
        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=100)

        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        with tf.Session(config=sess_config) as sess:

            # initialize parameters or restore from previous model
            if not os.path.exists(config.params_dir):
                os.makedirs(config.params_dir)
            if os.listdir(config.params_dir) == [] or config.initialize:
                print "Initializing Network"
                sess.run(init_op)
            else:
                sess.run(init_op)
                model.restore(sess, saver, config.load_filename)

            merged = tf.summary.merge_all()
            logdir = os.path.join(config.logdir,
                                  datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))

            writer = tf.summary.FileWriter(logdir, sess.graph)

            # start training
            for idx in xrange(config.max_iteration):
                with tf.device("/cpu:0"):
                    imgs, fm, coords, begins, filename_list = reader.get_random_batch()

                # feed data into the model
                feed_dict = {
                    model.images: imgs,
                    model.coords: coords,
                    model.labels: fm
                }
                with tf.device(config.gpu):
                    # run the training operation
                    sess.run(train_op, feed_dict=feed_dict)

                with tf.device('/cpu:0'):
                    # write summary
                    if (idx + 1) % config.summary_iters == 0:
                        tmp_global_step = model.global_step.eval()
                        summary = sess.run(merged, feed_dict=feed_dict)
                        writer.add_summary(summary, tmp_global_step)
                    # save checkpoint
                    if (idx + 1) % config.checkpoint_iters == 0:
                        tmp_global_step = model.global_step.eval()
                        model.save(sess, saver, config.save_filename,
                                   tmp_global_step)
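A caveat that applies to all of the examples above: in TF1, `tf.device` assigns devices when ops are created, so wrapping `sess.run(...)` in `with tf.device(...)` has no effect; the ops execute wherever they were placed at graph-construction time. To actually pin the model to a device, build it under the device scope, roughly:

with tf.Graph().as_default():
    # placement is decided here, as the ops are created
    with tf.device(config.gpu):
        model = cpm.CPM(config)
        predicts = model.build_fc(True)
        loss = model.loss()
        train_op = model.train_op(loss, model.global_step)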