def main():
    """Run the generated-data pipeline end to end.

    Fetches the source image set on first run, reads experience ratings,
    synthesizes provider collages from them, then visualizes and
    classifies the generated data.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (os, ImageLoader, ImageGenerator, Evaluator, load_experience_data).
    """
    images_dir = './data/images'
    categories_path = '../categories.json'

    # Download the source images only if they are not already on disk.
    if not os.path.isdir(images_dir):
        ImageLoader(output_directory=images_dir,
                    categories_file=categories_path).load()

    # The ratings file determines how many collages to synthesize and
    # which category ids each one draws from.
    n_experiences, category_ids = load_experience_data(
        './data/experience-new-ratings.csv', n_ratings=2)

    generator = ImageGenerator(images_dir, categories_path,
                               './data/images/features-images-1')
    generator.generate(
        number_of_true_images_for_provider=8,
        number_of_noise_images_for_provider=2,
        number_of_images=n_experiences,
        ids=category_ids,
        number_of_images_in_collage=6,
        output_directory='./data/generated-data')

    evaluator = Evaluator('./data/generated-data',
                          features_file='./data/features-generated-data')
    evaluator.visualize(show='ratings')
    evaluator.classify()
from images2gif import writeGif
from image_generator import ImageGenerator
from PIL import Image
import os


class Something(object):
    """Minimal attribute bag standing in for parsed CLI arguments."""
    pass


if __name__ == '__main__':
    args = Something()
    args.fontfile = '/Users/timoeemelikoola/Downloads/Cardo/Cardo-Regular.ttf'

    # Render one frame per counter value and collect the saved images.
    imgs = []
    for i in xrange(20):  # Python 2 script (xrange, images2gif)
        ig = ImageGenerator({"text": str(i), "line": str(i)}, args)
        ig.generate()
        ig.save()
        imgs.append(Image.open(ig.file_name))

    # BUG FIX: the original iterated an undefined name `images` here and
    # in the writeGif call below (NameError at runtime); the frame list
    # is called `imgs`.
    size = (150, 150)
    for im in imgs:
        im.thumbnail(size, Image.ANTIALIAS)

    filename = "images/my_gif.GIF"
    writeGif(filename, imgs, duration=0.2)
# NOTE(review): fragment — the opening `if args.generator == "...":` of
# this dispatch chain, and the inner `if` that owns the `else:` below,
# both precede this chunk; the indentation here is reconstructed from
# the flattened source.
        # One-shot mode (presumably): dump the captured depth frame as CSV.
        np.savetxt(args.destination, depth, delimiter=",", fmt="%s")
    else:
        # Interactive mode: stream camera + depth frames until 'q'.
        print("Playing camera feed. Press \'q\' to quit")
        while True:
            bgr, depth = depth_live_generator.generate()
            cv2.imshow("Video", bgr)
            cv2.imshow("Depth", depth)
            key = cv2.waitKey(1)
            # 'c' captures the current depth frame to CSV, if a
            # destination path was supplied on the command line.
            if key == ord('c') and args.destination is not None:
                np.savetxt(args.destination, depth, delimiter=",", fmt="%s")
            if key == ord('q'):
                break
elif args.generator == "ImageGenerator":
    # Single still image: generate RGB + depth once and report the shape.
    image_generator = ImageGenerator(args.image)
    rgb, depth = image_generator.generate()
    print(rgb.shape)
elif args.generator == "VideoLiveGenerator":
    video_live_generator = VideoLiveGenerator(1)
    rgb, depth = video_live_generator.generate()
    print(rgb.shape)
    if args.play:
        # Live preview: generator yields RGB, so convert to BGR for cv2.
        print("Playing camera feed. Press \'q\' to quit")
        while True:
            rgb, _ = video_live_generator.generate()
            cv2.imshow("Live video", cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
# termination criteria criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # Squares are 1.25" across # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6 * 7, 3), np.float32) objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. for image in os.listdir(args.dir): # print(image) generator = ImageGenerator(args.dir + "/" + image) img, _ = generator.generate() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Find the chess board corners ret, corners = cv2.findChessboardCorners(gray, (7, 6), None) # If found, add object points, image points (after refining them) print("object points: ", objp) if ret: objpoints.append(objp) corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria) imgpoints.append(corners) # Draw and display the cornerss cv2.drawChessboardCorners(img, (7, 6), corners2, ret) cv2.imshow('img', img) cv2.waitKey() ret, mtx, dist, rvec, tvec = cv2.calibrateCamera(