# Assumes module-level imports: numpy as np, os, sys, glob, PIL.Image as Image,
# scipy.io.loadmat, plus project-level FLAGS, LSPGlobals, and b() (basename helper).
def parse_resize_image_and_labels():
    print('Resizing and packing images and labels to bin files.\n')
    np.random.seed(1701)  # fix the random test-set split
    if not os.path.exists(FLAGS.resized_dir):
        os.mkdir(FLAGS.resized_dir)

    jnt_fn = FLAGS.data_dir + 'joints.mat'
    joints = loadmat(jnt_fn)
    joints = joints['joints'].swapaxes(0, 2).swapaxes(1, 2)

    # Zero out joints marked invisible, then drop the visibility column.
    invisible_joints = joints[:, :, 2] < 0.5
    joints[invisible_joints] = 0
    joints = joints[..., :2]

    # Hold out a random 10% of the samples for testing.
    N_test = int(len(joints) * 0.1)
    permTest = np.random.permutation(len(joints))[:N_test].tolist()

    imagelist = sorted(glob.glob(FLAGS.orimage_dir + '*.jpg'))

    fp_train = open(os.path.join(FLAGS.data_dir, FLAGS.trainLabels_fn), 'w')
    fp_test = open(os.path.join(FLAGS.data_dir, FLAGS.testLabels_fn), 'w')

    for index, img_fn in enumerate(imagelist):
        imgFile = Image.open(img_fn)
        (imWidth, imHeight) = imgFile.size
        imgFile = imgFile.resize((FLAGS.input_size, FLAGS.input_size), Image.ANTIALIAS)

        newFileName = os.path.join(FLAGS.resized_dir, b(img_fn).replace("jpg", "bin"))

        # Scale joint coordinates to the resized image.
        joints[index, :, 0] *= FLAGS.input_size / float(imWidth)
        joints[index, :, 1] *= FLAGS.input_size / float(imHeight)

        # Pack labels followed by raw image bytes into a single binary record.
        im_label_pack = np.concatenate(
            (joints[index, :, :].reshape(LSPGlobals.TotalLabels),
             np.asarray(imgFile).reshape(LSPGlobals.TotalImageBytes)))
        im_label_pack.astype(np.uint8).tofile(newFileName)

        if index in permTest:
            print(newFileName, file=fp_test)
        else:
            print(newFileName, file=fp_train)

        if index % 100 == 0:
            sys.stdout.write("\r%d done" % index)  # "\r" overwrites the progress line
            sys.stdout.flush()

    fp_train.close()
    fp_test.close()
    sys.stdout.write("\r")
    sys.stdout.flush()
    print('Done.')
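A minimal read-back sketch (an assumption, not part of the source): since each record is the joint labels followed by the raw pixels, a .bin file can be split back apart with the same constants used to write it.

# Sketch only: assumes FLAGS/LSPGlobals match the values used at write time.
def read_packed_record(bin_fn):
    raw = np.fromfile(bin_fn, dtype=np.uint8)
    labels = raw[:LSPGlobals.TotalLabels].astype(np.float32)
    image = raw[LSPGlobals.TotalLabels:].reshape(
        (FLAGS.input_size, FLAGS.input_size, -1))
    return labels, image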
def transform_and_write(im_path, joints, resized_dir, transforms=[[]], dry_run=False):
    """Performs transforms on image and joints, and writes to binary."""
    written_files = []
    pose_image = PoseImage.from_filename(im_path, joints)
    for transform in transforms:
        cur_image = pose_image.copy()
        should_write = True
        desc = ''
        for obj in transform:
            desc += obj['func'] + ''.join(map(str, obj['args']))
            if obj['func'] == 'random_crop_square':
                bound_sq = scale_square(bounding_rect(cur_image.joints), DEFAULT_SCALE)
                side_length = max(
                    MIN_SIDE_LENGTH_RATIO * FLAGS.input_size,  # Bigger than ratio of input_size
                    (bound_sq[2] - bound_sq[0]) * 1.00001,     # Bigger than bound_sq
                    min(cur_image.im_width, cur_image.im_height) * CROP_RATIO,
                )
                if side_length <= min(cur_image.im_width, cur_image.im_height):
                    cur_image.random_crop_square(side_length, bound_sq)
                else:
                    # No transform would have been done
                    should_write = False
            else:
                getattr(cur_image, obj['func'])(*obj['args'])
                if obj['func'] == 'rotate_and_crop':
                    for x, y in [p for p in cur_image.joints
                                 if not np.array_equal(p, [0., 0.])]:
                        if (x < 0. or x > cur_image.im_width
                                or y < 0. or y > cur_image.im_height):
                            # Joint out of bounds
                            should_write = False
        cur_image.resize((FLAGS.input_size, FLAGS.input_size))
        if should_write:
            new_path = os.path.join(resized_dir, b(im_path).replace(".jpg", desc + '.bin'))
            written_files.append(new_path)
            if dry_run:
                cur_image.show()
            else:
                cur_image.save_binary(new_path)
    return written_files
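A hypothetical usage sketch: the transform names come from the function above, but the argument values and file paths are illustrative assumptions only.

# Each transform is a list of {'func', 'args'} steps applied in order.
written = transform_and_write(
    'images/im0001.jpg', joints[0], FLAGS.resized_dir,
    transforms=[
        [],                                            # identity: plain resize-and-write
        [{'func': 'random_crop_square', 'args': []}],  # crop around the pose; args unused here
    ],
    dry_run=True)  # display results instead of writing .bin files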
from __future__ import print_function
import argparse
import glob
import re

import numpy as np
from scipy.io import loadmat
from os.path import basename as b  # assumed alias: b() is used as basename below

parser = argparse.ArgumentParser()
parser.add_argument('--datadir', type=str, default='data/lspet_dataset')
args = parser.parse_args()
print(args)

jnt_fn = '%s/joints.mat' % args.datadir
joints = loadmat(jnt_fn)
joints = joints['joints'].swapaxes(0, 2).swapaxes(1, 2)
joints = joints[:, :, :2]  # drop the visibility column

# Hold out a random 10% of the samples for testing.
N_test = int(len(joints) * 0.1)
perm = np.random.permutation(len(joints))[:N_test].tolist()
print(perm)

fp_train = open('%s/train_joints.csv' % args.datadir, 'w')
fp_test = open('%s/test_joints.csv' % args.datadir, 'w')
for img_fn in sorted(glob.glob('%s/images/*.jpg' % args.datadir)):
    # Image filenames look like im00001.jpg; recover the 0-based index.
    index = int(re.search(r'im([0-9]+)', b(img_fn)).groups()[0]) - 1
    str_j = [str(j) for j in joints[index].flatten().tolist()]

    out_list = [b(img_fn)]
    out_list.extend(str_j)
    out_str = ','.join(out_list)

    if index in perm:
        print(out_str, file=fp_test)
    else:
        print(out_str, file=fp_train)
fp_train.close()
fp_test.close()
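A small read-back sketch (an assumption, not part of the script): each CSV row is a filename followed by the 14 flattened (x, y) joint coordinates of the LSP skeleton.

import csv

def load_joint_csv(csv_fn):
    # Returns a list of (filename, joints) pairs; joints has shape (14, 2).
    pairs = []
    with open(csv_fn) as f:
        for row in csv.reader(f):
            pairs.append((row[0], np.array(row[1:], dtype=np.float32).reshape(-1, 2)))
    return pairs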
# Assumes: TensorFlow (0.x-era API), numpy as np, math, time, PIL.Image as Image,
# datetime, plus project-level FLAGS, LSPModels, resizeLabels, draw, and b().
def evalDeepPose(imagelist):
    with tf.Graph().as_default():
        # Placeholder for the batch of input images.
        dataShape = [FLAGS.batch_size, FLAGS.input_size, FLAGS.input_size, FLAGS.input_depth]
        input_images = tf.placeholder(tf.float32, shape=dataShape)

        # Build a Graph that computes the logits predictions from the inference model.
        logits = LSPModels.inference(input_images, FLAGS.batch_size, keep_prob=1)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()

        with tf.Session() as sess:
            # Start populating the filename queue.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            sess.run(init)

            ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print("No checkpoint found!")
                return

            # One step per batch, rounding up so the final partial batch is included.
            numSteps = int(math.ceil(len(imagelist) / float(FLAGS.batch_size)))

            resizeValues = np.empty([FLAGS.batch_size, 2], dtype=np.float32)
            input_pack = np.empty(dataShape, dtype=np.float32)

            for step in range(numSteps):
                for i in range(FLAGS.batch_size):
                    index = i + step * FLAGS.batch_size
                    if index < len(imagelist):
                        imgFile = Image.open(imagelist[index])
                        (imWidth, imHeight) = imgFile.size
                        imgFile = imgFile.resize((FLAGS.input_size, FLAGS.input_size),
                                                 Image.ANTIALIAS)
                        # Remember the scale factors so predictions can be mapped
                        # back to the original image size.
                        resizeValues[i, 0] = float(imWidth) / FLAGS.input_size
                        resizeValues[i, 1] = float(imHeight) / FLAGS.input_size
                        input_pack[i, ...] = np.asarray(imgFile)
                    # Rows past the end of imagelist keep stale data from the previous
                    # batch; their predictions are discarded below.

                start_time = time.time()
                labels = sess.run([logits], feed_dict={input_images: input_pack})
                duration = time.time() - start_time

                labels_reshaped = np.asarray(labels).reshape(
                    [FLAGS.batch_size, FLAGS.label_count, FLAGS.label_size])
                resizedLabels = resizeLabels(labels_reshaped, resizeValues)

                for i in range(FLAGS.batch_size):
                    index = i + step * FLAGS.batch_size
                    if index < len(imagelist):
                        imgFile = Image.open(imagelist[index])
                        img = np.asarray(imgFile).astype(np.uint8)
                        draw(img, resizedLabels[i, ...], FLAGS.output_dir, 0,
                             fname=b(imagelist[index]), reshaped=True)

                examples_per_sec = FLAGS.batch_size / duration
                format_str = '%s: Done: %d (%.1f examples/sec)'
                print(format_str % (datetime.now(), (step + 1) * FLAGS.batch_size,
                                    examples_per_sec))

            coord.request_stop()
            coord.join(threads)
    print('Process Finished...')
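A hypothetical entry point for the function above; FLAGS.eval_dir is an invented name used only for illustration.

if __name__ == '__main__':
    # Hypothetical: FLAGS.eval_dir is assumed, not defined in the source.
    imagelist = sorted(glob.glob(os.path.join(FLAGS.eval_dir, '*.jpg')))
    evalDeepPose(imagelist)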
print(colored(datum, COL_INFO))
for country in ['kenya']:
    print(colored(country, COL_INFO))

    ### Import data
    IMPORT_PATH = j(ROOT, 'uncompressed', datum)
    print(IMPORT_PATH)
    #EXPORT_PATH = '%s/%s/%s' % (ROOT, datum, country)
    EXPORT_PATH = '%s/%s-%s' % (
        "/Volumes/DATA/Datasets/Geography_Data/viz2", country, datum)
    left, right, top, bottom = convert_bbox(BBOX[country], FORMAT)

    months_data = {}
    for file_ in os.listdir(IMPORT_PATH):
        if not file_.endswith(".txt"):
            continue
        print("  processing", file_)
        month = b(file_)[:7]
        data = np.loadtxt(j(IMPORT_PATH, file_)).reshape(FORMAT)
        months_data[month] = data[bottom:top, left:right]
    print(months_data.keys())

    output = EXPORT_PATH
    np.save("%s.npy" % output, months_data)
    save_mat(months_data, "%s.mat" % output)
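A read-back sketch (an assumption about later use): np.save on a dict stores a zero-dimensional object array, so .item() is needed to recover the {month: 2-D array} mapping.

# allow_pickle is required on newer NumPy versions for object arrays.
months_data = np.load("%s.npy" % output, allow_pickle=True).item()
print(sorted(months_data.keys()))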