Code Example #1
	def validate_output(self, sess, num_shapes, epochs):

		print('===== validation output')
		valid_results_folder = 'epoch-%.2f' % epochs
		names, images = sess.run([self.names, self.valid_images])

		for k in range(len(names)):
			shape_name, view_name = names[k].decode('utf8').split('--')
			if view_name == '0':
				print(shape_name)
			
			name_output = os.path.join(self.config['train_dir'], valid_results_folder, shape_name, ('output--'+view_name+'.png'))
			name_gt = os.path.join(self.config['train_dir'], valid_results_folder, shape_name, ('gt--'+view_name+'.png'))
			image.write_image(name_output, images[0, k])
			image.write_image(name_gt, images[1, k])
			
			name_normal = os.path.join(self.config['train_dir'], valid_results_folder, shape_name, ('normal--'+view_name+'.png'))
			name_depth = os.path.join(self.config['train_dir'], valid_results_folder, shape_name, ('depth--'+view_name+'.png'))
			name_mask = os.path.join(self.config['train_dir'], valid_results_folder, shape_name, ('mask--'+view_name+'.png'))
			image.write_image(name_normal, images[2, k])
			image.write_image(name_depth, images[3, k])
			image.write_image(name_mask, images[4, k])

		# loop over all remaining shapes in the queue...
		num_processed_shapes = self.config['batch_size']
		while num_processed_shapes < num_shapes:
			sess.run(self.names)
			num_processed_shapes += self.config['batch_size']
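The image.write_image helper used above is project-local and its implementation is not shown on this page (note that Example #4 below calls a variant with a different, keyword-based signature). As a rough orientation only, here is a minimal sketch of a (path, array)-style helper, assuming float image data in [0, 1] and using imageio; the projects' actual helpers may differ:

import os

import imageio
import numpy as np


def write_image(path, array):
    # Hypothetical sketch, not the projects' real helper.
    # Create nested output folders on demand, since the examples above write
    # into per-shape subdirectories.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    # Convert a float array in [0, 1] to 8-bit and write it as a PNG.
    data = np.clip(array, 0.0, 1.0)
    imageio.imwrite(path, (data * 255.0).astype(np.uint8))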
Code Example #2
File: patnet.py Project: Ansire/pattern_grouping
	def test(self, sess, num_patterns):

		print('Testing...')

		self.saver = tf.train.Saver()
		ckpt = tf.train.get_checkpoint_state(self.config.train_dir)
		if ckpt and ckpt.model_checkpoint_path:
			self.saver.restore(sess, ckpt.model_checkpoint_path)
			self.step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
		else:
			print('Cannot find any checkpoint file')
			return

		coord = tf.train.Coordinator()
		threads = tf.train.start_queue_runners(sess=sess, coord=coord)
		self.summarizer = tf.summary.FileWriter(self.config.test_dir, sess.graph)

		output_features_folder = 'features'
		output_visuals_folder = 'visuals'
		output_count = 0
		finished = False
		while not finished:
			if self.config.visualize_feature:
				names, visuals = sess.run([self.names, self.visual_features])
			else:
				names, features = sess.run([self.names, self.image_features])
			for k in range(len(names)):

				image_name = names[k].decode('utf8')
				print('Processed %d: %s' % (output_count, image_name))

				if self.config.visualize_feature:
					# visualize feature maps
					visual_path = os.path.join(self.config.test_dir, output_visuals_folder, image_name)
					if not os.path.exists(visual_path):
						os.makedirs(visual_path)
					for j in range(self.config.feature_size):
						offset = k*self.config.feature_size+j
						visual_name = os.path.join(visual_path, str(j)+'.png')
						image.write_image(visual_name, visuals[offset])
				else:
					# extract element feature
					element_feature = data.extract_element_feature(features[k], image_name, self.config.data_dir)

					# export results
					name_output = os.path.join(self.config.test_dir, output_features_folder, image_name+'.txt')
					# data.write_bin_data(name_output, element_feature)
					np.savetxt(name_output, element_feature)

				# check termination
				output_count += 1
				if output_count >= num_patterns:
					finished = True
					break

		coord.request_stop()
		coord.join(threads)
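The per-pattern features exported above with np.savetxt are plain text matrices, so they can be read back with np.loadtxt for downstream use. A small round-trip sketch (the file name is only a placeholder, not a file from the project):

import numpy as np

# Rows correspond to pattern elements, columns to feature dimensions.
element_feature = np.loadtxt('features/pattern_0001.txt')  # placeholder path
print(element_feature.shape)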
Code Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--xtiles-per-chunk",
                        type=int,
                        required=False,
                        default=256,
                        help="Number of tiles per chunk in width direction")
    parser.add_argument("--ytiles-per-chunk",
                        type=int,
                        required=False,
                        default=256,
                        help="Number of tiles per chunk in height direction")
    parser.add_argument("--heightmap",
                        type=str,
                        required=True,
                        help="Heightmap filename")
    parser.add_argument("--scaling",
                        type=float,
                        required=False,
                        default=1.0,
                        help="Scaling for converting image pixels to tiles")
    parser.add_argument("-o",
                        type=str,
                        required=False,
                        default="./World",
                        help="Directory for root of the world")
    parser.add_argument("-i",
                        "--image",
                        type=str,
                        required=False,
                        help="Generate an image of the tilemap")

    args = parser.parse_args()

    scaling = args.scaling
    img = Image.open(args.heightmap).convert("L")
    if scaling != 1.0:
        img = img.resize(
            (int(img.size[0] * scaling), int(img.size[1] * scaling)),
            Image.BILINEAR)

    width_tiles, height_tiles = img.size
    xtiles_per_chunk = args.xtiles_per_chunk
    ytiles_per_chunk = args.ytiles_per_chunk
    output_directory = args.o

    create_from_heightmap(
        output_directory,
        # Image.getdata() is row-major, so reshape to (rows, cols) = (height, width).
        np.array(img.getdata()).reshape(height_tiles, width_tiles),
        xtiles_per_chunk, ytiles_per_chunk)
    if args.image:
        image.write_image(
            args.image,
            image.create_image(
                WorldReader(output_directory).get_tiles(
                    0, 0, width_tiles, height_tiles)))
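One detail worth noting in the reshape above: PIL's Image.getdata() returns pixels row by row, so the flat sequence reshapes to (height, width), with the first axis indexing image rows. A tiny illustrative check:

import numpy as np
from PIL import Image

# Build a 3x2 (width x height) test image so the orientation is obvious.
img = Image.new('L', (3, 2))
img.putdata([0, 1, 2, 10, 11, 12])

tiles = np.array(img.getdata()).reshape(img.size[1], img.size[0])
print(tiles)
# [[ 0  1  2]
#  [10 11 12]]  -> first axis is the image row (height), second is the column.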
Code Example #4
def main():
    # Image
    aspect_ratio = 16.0 / 9.0
    image_width = 256
    image_height = int(image_width / aspect_ratio)  # keep an integer pixel count
    background = Vector3(0, 0, 0)
    samples_per_pixel = 20
    max_depth = 1

    # World
    world = HittableList()

    # material_ground = Lambertian(Vector3(0.8, 0.8, 0.0))
    # material_center = Lambertian(Vector3(0.7, 0.3, 0.3))
    # material_left = Metal(Vector3(0.8, 0.8, 0.8))
    material_emissive = DiffuseLight(Vector3(1,1,1), Vector3(1,1,1))
    # material_right = Metal(Vector3(0.8, 0.6, 0.2))

    # world.append(Sphere(Vector3(0, 0, -1), 0.5, material_center))
    # world.append(Sphere(Vector3(1, 0.5, -0.5), 1, material_emissive))
    # world.append(Sphere(Vector3(-1, 0, -1), 0.5, material_right))
    # world.append(Sphere(Vector3(0, -100.5, -1), 100, material_ground))
    sphere_count = 35
    sphere_dist = 100
    for i in range(0, sphere_count):
        world.append(Sphere(Vector3(utils.rand_range(-50, 50), utils.rand_range(-40, 40),
                                    sphere_dist),
                            1,
                            material_emissive))

    # Camera
    look_from = Vector3(0, 0, 0)
    look_at = Vector3(0, 0, 1)
    v_up = Vector3(0, 1, 0)
    dist_to_focus = (look_from - look_at).length()
    f_stop = 8
    aperture = 1/f_stop
    cam = Camera(look_from, look_at, v_up, 30.0, aspect_ratio, aperture, dist_to_focus)

    # Render
    render_data = list()
    print("Commencing Rendering.")
    start_time = datetime.now()
    for j in reversed(range(0, int(image_height))):
        print("Scanlines remaining: %s" % j)
        for i in range(0, image_width):
            pixel_colour = Vector3(0, 0, 0)
            for s in range(0, samples_per_pixel):
                u = (i + utils.rand()) / (image_width-1)
                v = (j + utils.rand()) / (image_height-1)
                r = cam.get_ray(u, v, s)
                pixel_colour += ray_colour(r, background, world, max_depth)
            render_data.append(colour.write_colour(pixel_colour, samples_per_pixel))
    print("\nDone.\nTime Spent: %s" % (datetime.now() - start_time))

    file = image.write_image(
        width=image_width,
        height=image_height,
        data=render_data
    )
    return file
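colour.write_colour is not shown in this excerpt. In renderers that follow the "Ray Tracing in One Weekend" structure it typically averages the accumulated samples, applies gamma correction, and clamps each channel before converting to 8-bit values; the sketch below assumes that convention and a Vector3 exposing .x/.y/.z, and may differ from the project's actual helper:

import math


def write_colour(pixel_colour, samples_per_pixel):
    # Assumed behaviour: average the samples, gamma-correct with gamma = 2
    # (square root), then scale each channel to an 8-bit value.
    def clamp(v, lo=0.0, hi=0.999):
        return max(lo, min(v, hi))

    scale = 1.0 / samples_per_pixel
    r = math.sqrt(pixel_colour.x * scale)
    g = math.sqrt(pixel_colour.y * scale)
    b = math.sqrt(pixel_colour.z * scale)
    return (int(256 * clamp(r)), int(256 * clamp(g)), int(256 * clamp(b)))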
Code Example #5
File: post.py Project: mauzepeda/tatamee
 def post(self):
   id = self.request.get("id")
   if id != '':
     item_key = db.Key(id)
     item = db.get(item_key)
     item_parts_to_write = []
     user_id = users.get_current_user().user_id()
     user = User.get_by_key_name('key:' + user_id)
     if (item.parent().user == users.get_current_user()):
       item.title = self.request.get("title")
       item.price = float(self.request.get("price"))
       item.description = self.request.get("description")
       item.category = self.request.get("category")
       item.tags = self.request.get("tags").lower()
       item.location = user.home
       item.currency = user.currency
       
       item.timestamp = datetime.datetime.now()
       item_parts_to_write.append(item)
       
       category_string = pluralize(clean_word(self.request.get("category").lower()))
       
       if self.request.get("img"):
         image = images.Image(self.request.get("img"))
         image_post.write_image(item, image, 0, "")
       
       tags = remove_duplicates([clean_word(word) for word in self.request.get("tags").split(',')])
       extra_tags = ['']  
       for tag in tags:
           if len(tag.split()) > 1:
               tags.extend(tag.split())
               extra_tags.extend(tag.split())
       if category_string:
           tags.append(category_string)
           extra_tags.append(category_string)
       singulars = get_singulars(tags)
       tags += singulars   
       extra_tags += singulars    
       title_words = [clean_word(word) for word in item.title.split(' ')] 
       tags += title_words
       extra_tags += title_words
        
       # Delete the old ItemLight so that, when sorted by __key__ ascending
       # (the default), the oldest items (about to expire) show first.
       # TODO: check whether re-putting (touching) the entity is enough.
       db.delete(item.itemlight_set[0])
       item_light = ItemLight(parent=user, title=item.title, item_key=item.key(), location=user.home, area=user.area, tags=tags, extra_tags=extra_tags)
       item_parts_to_write.append(item_light)
       
       db.put(item_parts_to_write)  
       logging.info('Item was updated: %s, by user: %s' % (item.title, item.parent().user.nickname()))
       
   else:
     user_id = users.get_current_user().user_id()
     user = User.get_by_key_name('key:' + user_id)
     item = Item(parent=user, location=user.home)
     item.currency = user.currency
     item.title = self.request.get("title")
     item.category = self.request.get("category")
     item.tags = self.request.get("tags").lower()
     item.price = float(self.request.get("price"))
     item.description = self.request.get("description")
     item_key = item.put()
     
     category_string = pluralize(clean_word(self.request.get("category").lower()))
     
     if self.request.get("img"):
         image = images.Image(self.request.get("img"))
         image_post.write_image(item, image, 0, "")
     
     tags = remove_duplicates([clean_word(word) for word in self.request.get("tags").split(',')])
     extra_tags = ['']  
     for tag in tags:
         if len(tag.split()) > 1:
             tags.extend(tag.split())
             extra_tags.extend(tag.split())
     if category_string:
         tags.append(category_string)
         extra_tags.append(category_string)
     singulars = get_singulars(tags)
     tags += singulars   
     extra_tags += singulars    
     title_words = [clean_word(word) for word in item.title.split(' ')] 
     tags += title_words
     extra_tags += title_words
       
     item_light = ItemLight(parent=user, title=item.title, item_key=item_key, location=user.home, area=user.area, tags=tags, extra_tags=extra_tags)
     item_light.put()
     
     logging.info('Item was created: %s, by user: %s' % (item.title, item.parent().user.nickname()))
     
   self.response.out.write(cgi.escape(MyEncoder.encode(MyEncoder(), item)))
   self.response.headers.add_header('Location', '/item/' + str(item.key()))
   self.response.set_status(201)
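clean_word, pluralize, get_singulars and remove_duplicates are application helpers that are not included in this excerpt. For orientation only, an order-preserving duplicate filter like the one the tag handling above relies on might be sketched as follows (an assumption, not the project's code):

def remove_duplicates(words):
    # Keep the first occurrence of each word, preserving the original order.
    seen = set()
    result = []
    for word in words:
        if word not in seen:
            seen.add(word)
            result.append(word)
    return result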
Code Example #6
File: monnet.py Project: syrilzhang/SketchModeling
    def test(self, sess, views, num_shapes):

        print('Testing...')

        self.saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(self.config.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(sess, ckpt.model_checkpoint_path)
            try:
                self.step = int(
                    ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            except ValueError:
                self.step = 0
        else:
            print('Cannot find any checkpoint file')
            return

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        self.summarizer = tf.summary.FileWriter(self.config.test_dir,
                                                sess.graph)

        output_count = 0
        output_prefix = 'dn14'
        output_images_folder = 'images'
        output_results_folder = 'results'

        log_file_name = os.path.join(self.config.test_dir, 'log.txt')
        log_file = open(log_file_name, 'a')

        started = False
        finished = False
        last_shape_name = ''
        last_view_name = ''
        while not finished:
            names, results, errors, images = sess.run(
                [self.names, self.results, self.errors, self.pngs])
            for k in range(len(names)):
                shape_name, view_name = names[k].decode('utf8').split('--')
                if last_shape_name == shape_name:
                    view_name = ('%s' % (int(last_view_name) + 1))
                last_shape_name = shape_name
                last_view_name = view_name
                print('Processed %d: %s--%s %f' %
                      (output_count, shape_name, view_name, errors[k]))

                if view_name == '0' and started:
                    log_file.write('\n')
                started = True
                log_file.write('%6f ' % errors[k])

                # export images
                name_input = os.path.join(self.config.test_dir,
                                          output_images_folder, shape_name,
                                          'input.png')
                image.write_image(name_input, images[0, k])
                name_gt = os.path.join(
                    self.config.test_dir, output_images_folder, shape_name,
                    ('gt-' + output_prefix + '--' + view_name + '.png'))
                name_output = os.path.join(
                    self.config.test_dir, output_images_folder, shape_name,
                    ('pred-' + output_prefix + '--' + view_name + '.png'))
                image.write_image(name_gt, images[1, k])
                image.write_image(name_output, images[2, k])

                name_normal = os.path.join(
                    self.config.test_dir, output_images_folder, shape_name,
                    ('normal-' + output_prefix + '--' + view_name + '.png'))
                name_depth = os.path.join(
                    self.config.test_dir, output_images_folder, shape_name,
                    ('depth-' + output_prefix + '--' + view_name + '.png'))
                name_mask = os.path.join(
                    self.config.test_dir, output_images_folder, shape_name,
                    ('mask-' + output_prefix + '--' + view_name + '.png'))
                image.write_image(name_normal, images[3, k])
                image.write_image(name_depth, images[4, k])
                image.write_image(name_mask, images[5, k])

                # export results
                name_output = os.path.join(
                    self.config.test_dir, output_results_folder, shape_name,
                    (output_prefix + '-' + view_name + '.png'))
                image.write_image(name_output, images[2, k])

                # check termination
                output_count += 1
                if output_count >= num_shapes * views.num_views:
                    finished = True
                    break

        coord.request_stop()
        coord.join(threads)
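The log.txt written above ends up with one line per shape and one space-separated error per view (a newline is emitted whenever a new shape starts). Assuming that layout, the per-shape mean error can be recovered with a few lines of numpy; the path below is an assumption based on the code above:

import numpy as np

with open('log.txt') as f:  # assumed to live in config.test_dir
    per_shape = [np.array(line.split(), dtype=float)
                 for line in f if line.strip()]

for i, errors in enumerate(per_shape):
    print('shape %d: mean error %.6f over %d views' % (i, errors.mean(), len(errors)))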