def test_vec_point_multiplication(self):
    m = Transformation(
        m=[
            [1.0, 2.0, 3.0, 4.0],
            [5.0, 6.0, 7.0, 8.0],
            [9.0, 9.0, 8.0, 7.0],
            [0.0, 0.0, 0.0, 1.0],
        ],
        invm=[
            [-3.75, 2.75, -1.0, 0.0],
            [5.75, -4.75, 2.0, 1.0],
            [-2.25, 2.25, -1.0, -2.0],
            [0.0, 0.0, 0.0, 1.0],
        ],
    )
    assert m.is_consistent()

    expected_v = Vec(14.0, 38.0, 51.0)
    assert expected_v.is_close(m * Vec(1.0, 2.0, 3.0))

    expected_p = Point(18.0, 46.0, 58.0)
    assert expected_p.is_close(m * Point(1.0, 2.0, 3.0))

    expected_n = Normal(-8.75, 7.75, -3.0)
    assert expected_n.is_close(m * Normal(3.0, 2.0, 4.0))
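# Why the Normal assertion above expects (-8.75, 7.75, -3.0): normals must be
# transformed with the *transpose of the inverse* matrix rather than with `m`
# itself, otherwise they stop being perpendicular to their surface under
# non-uniform scalings. A minimal self-contained check of that arithmetic,
# using plain lists and the 3×3 linear part of the `invm` from the test:
invm = [
    [-3.75, 2.75, -1.0],
    [5.75, -4.75, 2.0],
    [-2.25, 2.25, -1.0],
]
n = (3.0, 2.0, 4.0)
# n'_i = sum_j invm[j][i] * n_j, i.e. multiplication by the transposed matrix
transformed = [sum(invm[j][i] * n[j] for j in range(3)) for i in range(3)]
assert transformed == [-8.75, 7.75, -3.0]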
def importFromFile(self):
    tlist = Transformation.getAll()
    if len(tlist) > 0:
        ret = QMessageBox.question(
            self, self.tr("Import"),
            self.tr("Do you want to keep the defined transformations?"),
            QMessageBox.Yes | QMessageBox.No)

    infile = QFileDialog.getOpenFileName(
        self, self.tr("Select the file containing transformations"),
        self.lastImportFile(), self.tr("XML file (*.xml)"))
    if infile.isEmpty():
        return
    self.setLastImportFile(infile)

    if len(tlist) > 0 and ret == QMessageBox.No:
        # clear all existing transformations before continuing
        for t in tlist:
            t.deleteData()

    if not Transformation.importFromXml(infile):
        return False

    self.table.model().reloadData()
    self.table.model().reset()
    return True
def test_is_close(self):
    m1 = Transformation(
        m=[
            [1.0, 2.0, 3.0, 4.0],
            [5.0, 6.0, 7.0, 8.0],
            [9.0, 9.0, 8.0, 7.0],
            [6.0, 5.0, 4.0, 1.0],
        ],
        invm=[
            [-3.75, 2.75, -1.0, 0.0],
            [4.375, -3.875, 2.0, -0.5],
            [0.5, 0.5, -1.0, 1.0],
            [-1.375, 0.875, 0.0, -0.5],
        ],
    )
    assert m1.is_consistent()

    # Not using "deepcopy" here would make Python pass references to the
    # *same* matrices and vectors
    m2 = Transformation(m=deepcopy(m1.m), invm=deepcopy(m1.invm))
    assert m1.is_close(m2)

    m3 = Transformation(m=deepcopy(m1.m), invm=deepcopy(m1.invm))
    m3.m[2][2] += 1.0  # Note: this makes "m3" inconsistent (m3.is_consistent() == False)
    assert not m1.is_close(m3)

    m4 = Transformation(m=deepcopy(m1.m), invm=deepcopy(m1.invm))
    m4.invm[2][2] += 1.0  # Note: this makes "m4" inconsistent (m4.is_consistent() == False)
    assert not m1.is_close(m4)
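# A minimal self-contained demonstration of the aliasing issue the deepcopy
# comment above warns about, using plain nested lists:
from copy import deepcopy

a = [[1.0, 2.0], [3.0, 4.0]]
b = a            # `b` aliases the very same row lists as `a`
c = deepcopy(a)  # `c` owns independent copies
b[0][0] = 99.0
assert a[0][0] == 99.0  # mutation through the alias is visible in `a`...
assert c[0][0] == 1.0   # ...while the deep copy is unaffected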
def test_multiplication(self):
    m1 = Transformation(
        m=[
            [1.0, 2.0, 3.0, 4.0],
            [5.0, 6.0, 7.0, 8.0],
            [9.0, 9.0, 8.0, 7.0],
            [6.0, 5.0, 4.0, 1.0],
        ],
        invm=[
            [-3.75, 2.75, -1.0, 0.0],
            [4.375, -3.875, 2.0, -0.5],
            [0.5, 0.5, -1.0, 1.0],
            [-1.375, 0.875, 0.0, -0.5],
        ],
    )
    assert m1.is_consistent()

    m2 = Transformation(
        m=[
            [3.0, 5.0, 2.0, 4.0],
            [4.0, 1.0, 0.0, 5.0],
            [6.0, 3.0, 2.0, 0.0],
            [1.0, 4.0, 2.0, 1.0],
        ],
        invm=[
            [0.4, -0.2, 0.2, -0.6],
            [2.9, -1.7, 0.2, -3.1],
            [-5.55, 3.15, -0.4, 6.45],
            [-0.9, 0.7, -0.2, 1.1],
        ],
    )
    assert m2.is_consistent()

    expected = Transformation(
        m=[
            [33.0, 32.0, 16.0, 18.0],
            [89.0, 84.0, 40.0, 58.0],
            [118.0, 106.0, 48.0, 88.0],
            [63.0, 51.0, 22.0, 50.0],
        ],
        invm=[
            [-1.45, 1.45, -1.0, 0.6],
            [-13.95, 11.95, -6.5, 2.6],
            [25.525, -22.025, 12.25, -5.2],
            [4.825, -4.325, 2.5, -1.1],
        ],
    )
    assert expected.is_consistent()
    assert expected.is_close(m1 * m2)
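# Note on the expected `invm` above: matrix inversion reverses products,
# (A·B)⁻¹ = B⁻¹·A⁻¹, so the inverse of m1·m2 is m2.invm·m1.invm and *not*
# m1.invm·m2.invm. A quick self-contained check of that identity on 2×2
# matrices with hand-picked exact values:
a = [[1.0, 2.0], [3.0, 5.0]]        # det(a) = -1
a_inv = [[-5.0, 2.0], [3.0, -1.0]]
b = [[2.0, 1.0], [1.0, 1.0]]        # det(b) = 1
b_inv = [[1.0, -1.0], [-1.0, 2.0]]

def matmul2(p, q):
    return [[sum(p[i][k] * q[k][j] for k in range(2)) for j in range(2)]
            for i in range(2)]

identity = [[1.0, 0.0], [0.0, 1.0]]
assert matmul2(matmul2(a, b), matmul2(b_inv, a_inv)) == identity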
import numpy as np

def main():
    v_trans = [90, 50, 0]
    v_scale = [2, 2, 2]
    v = np.array([[220, 290, 0, 1]])
    vv = np.array([[-220, -290, 0, 1]])

    # m = Matrix(matrix=v)
    # m1 = Matrix(matrix=mx)
    # cisalhamento = Transformation(30, [0, 0, 0], [0, 0, 0], 0).get_matrix(0)  # shear
    translacao = Transformation(0, v_trans, [0, 0, 0], 0).get_matrix(6)  # translation
    # scala = Transformation(0, [0, 0, 0], v_scale, 0).get_matrix(7)            # scaling
    # rotacao = Transformation(0, [0, 0, 0], [0, 0, 0], 270).get_matrix(9)      # rotation
    # espelho = Transformation(0, [0, 0, 0], [0, 0, 0], 0).get_matrix(11)       # mirror

    axis = np.array([[2.88, -5, 2.88, 0]])
    vector = np.array([[2.88], [0], [-11.18], [1]])
    q = quaternion(axis, 90)
    # vv = vector / la.norm(vector)
    m = run_quaternion(vector, q)
    # m = np.array(translacao)
    print(np.dot(m, vector))
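# Independent of the local `quaternion`/`run_quaternion` helpers above (whose
# exact conventions are not shown here), this is the standard axis-angle →
# unit-quaternion construction they presumably implement, plus the rotation
# matrix obtained by conjugating with the quaternion. Sanity check: rotating
# the x-axis by 90° around z must yield the y-axis.
def axis_angle_to_quaternion(axis, theta_deg):
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    half = np.radians(theta_deg) / 2.0
    return np.concatenate(([np.cos(half)], np.sin(half) * axis))

def rotate(v, q):
    w, x, y, z = q
    # rotation matrix equivalent to the conjugation v' = q·v·q⁻¹
    r = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])
    return r @ np.asarray(v, dtype=float)

assert np.allclose(rotate([1, 0, 0], axis_angle_to_quaternion([0, 0, 1], 90)),
                   [0, 1, 0])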
def main():
    parser = argparse.ArgumentParser(
        description="Compute a specific dataset for a model using the chosen metrics")
    parser.add_argument('--output', type=str,
                        help='desired output file name (.train and .test)')
    parser.add_argument('--folder', type=str,
                        help='folder where the generated data are available',
                        required=True)
    parser.add_argument('--features', type=str,
                        help="list of features chosen to compute the data",
                        default='svd_reconstruction, ipca_reconstruction',
                        required=True)
    parser.add_argument('--params', type=str,
                        help="list of specific params for each chosen metric (see README.md for further information on 3D mode)",
                        default='100, 200 :: 50, 25',
                        required=True)
    parser.add_argument('--sequence', type=int, help='expected sequence length', required=True)
    parser.add_argument('--size', type=str, help="size of input images", default="100, 100")
    parser.add_argument('--selected_zones', type=str,
                        help='file which contains all selected zones of the scene',
                        required=True)

    args = parser.parse_args()

    p_filename = args.output
    p_folder = args.folder
    p_features = list(map(str.strip, args.features.split(',')))
    p_params = list(map(str.strip, args.params.split('::')))
    p_sequence = args.sequence
    p_size = args.size  # no need to split here
    p_selected_zones = args.selected_zones

    selected_zones = {}
    with open(p_selected_zones, 'r') as f:
        for line in f.readlines():
            data = line.split(';')
            del data[-1]
            scene_name = data[0]
            thresholds = data[1:]
            selected_zones[scene_name] = [int(t) for t in thresholds]

    # create the list of Transformation objects
    transformations = []
    for id, feature in enumerate(p_features):
        if feature not in features_choices:
            raise ValueError("Unknown metric {0}, please select a correct metric: {1}"
                             .format(feature, features_choices))
        transformations.append(Transformation(feature, p_params[id], p_size))

    if transformations[0].getName() == 'static':
        raise ValueError("The first transformation in the list cannot be `static`")

    # create the database using the image folder (generated the first time only)
    generate_data_model(p_filename, transformations, p_folder, selected_zones, p_sequence)
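# Example invocation (script name and paths are hypothetical):
#   python generate_dataset_sequence.py --output data/model \
#       --folder generated/ \
#       --features "svd_reconstruction, ipca_reconstruction" \
#       --params "100, 200 :: 50, 25" \
#       --sequence 5 --size "100, 100" --selected_zones zones.csv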
def reloadData(self):
    res = Transformation.getByCrs(self.crsA, self.crsB, self.enabledOnly, True)
    # build lists (not lazy map objects) so that len() below works in Python 3
    self.transformations = [x[0] for x in res]
    self.isInverse = [x[1] for x in res]
    self.row_count = len(self.transformations)
def __init__(self, aspect_ratio=1.0, transformation=Transformation()):
    """Create a new orthographic camera

    The parameter `aspect_ratio` defines how much wider than its height the image is.
    For fullscreen images, you should probably set `aspect_ratio` to 16/9, as this is
    the aspect ratio most commonly used by modern monitors.

    The `transformation` parameter is an instance of the :class:`.Transformation` class."""
    self.aspect_ratio = aspect_ratio
    self.transformation = transformation
def exportToFile(self):
    outfile = QFileDialog.getSaveFileName(
        self, self.tr("Select where to export transformations"),
        self.lastImportFile(), self.tr("XML file (*.xml)"))
    if outfile.isEmpty():
        return
    if not outfile.endsWith(".xml", Qt.CaseInsensitive):
        outfile += ".xml"
    self.setLastImportFile(outfile)
    return Transformation.exportToXml(outfile)
def test_inverse(self):
    m1 = Transformation(
        m=[
            [1.0, 2.0, 3.0, 4.0],
            [5.0, 6.0, 7.0, 8.0],
            [9.0, 9.0, 8.0, 7.0],
            [6.0, 5.0, 4.0, 1.0],
        ],
        invm=[
            [-3.75, 2.75, -1.0, 0.0],
            [4.375, -3.875, 2.0, -0.5],
            [0.5, 0.5, -1.0, 1.0],
            [-1.375, 0.875, 0.0, -0.5],
        ],
    )
    m2 = m1.inverse()
    assert m2.is_consistent()

    prod = m1 * m2
    assert prod.is_consistent()
    assert prod.is_close(Transformation())
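# A plausible sketch of `inverse()` for a class that stores both `m` and
# `invm` (the actual implementation may differ): since `invm` is maintained
# as the inverse of `m`, inverting just swaps the two matrices, with no
# numerical matrix inversion needed:
#
#     def inverse(self):
#         return Transformation(m=self.invm, invm=self.m)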
def __init__(self, screen_distance=1.0, aspect_ratio=1.0, transformation=Transformation()):
    """Create a new perspective camera

    The parameter `screen_distance` tells how far from the observer's eye the
    screen is, and it influences the so-called «aperture» (the field-of-view
    angle along the horizontal direction).

    The parameter `aspect_ratio` defines how much wider than its height the image is.
    For fullscreen images, you should probably set `aspect_ratio` to 16/9, as this is
    the aspect ratio most commonly used by modern monitors.

    The `transformation` parameter is an instance of the :class:`.Transformation` class."""
    self.screen_distance = screen_distance
    self.aspect_ratio = aspect_ratio
    self.transformation = transformation
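# A numeric sketch of the «aperture» remark above, assuming the screen spans
# [-1, 1] along the horizontal axis at distance `screen_distance` from the
# eye (a convention assumed here, not stated by this class): the horizontal
# field-of-view angle is then 2·atan(1/d), so the default d = 1.0 gives 90°.
import math

def horizontal_fov_deg(screen_distance: float) -> float:
    return math.degrees(2.0 * math.atan(1.0 / screen_distance))

assert abs(horizontal_fov_deg(1.0) - 90.0) < 1e-9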
def parse_transformation(input_file, scene: Scene):
    result = Transformation()

    while True:
        transformation_kw = expect_keywords(input_file, [
            KeywordEnum.IDENTITY,
            KeywordEnum.TRANSLATION,
            KeywordEnum.ROTATION_X,
            KeywordEnum.ROTATION_Y,
            KeywordEnum.ROTATION_Z,
            KeywordEnum.SCALING,
        ])

        if transformation_kw == KeywordEnum.IDENTITY:
            pass  # Do nothing (this is a primitive form of optimization!)
        elif transformation_kw == KeywordEnum.TRANSLATION:
            expect_symbol(input_file, "(")
            result *= translation(parse_vector(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_X:
            expect_symbol(input_file, "(")
            result *= rotation_x(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_Y:
            expect_symbol(input_file, "(")
            result *= rotation_y(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.ROTATION_Z:
            expect_symbol(input_file, "(")
            result *= rotation_z(expect_number(input_file, scene))
            expect_symbol(input_file, ")")
        elif transformation_kw == KeywordEnum.SCALING:
            expect_symbol(input_file, "(")
            result *= scaling(parse_vector(input_file, scene))
            expect_symbol(input_file, ")")

        # We must peek at the next token to check whether another transformation
        # is being chained or the sequence ends. Thus, this is an LL(1) parser.
        next_kw = input_file.read_token()
        if (not isinstance(next_kw, SymbolToken)) or (next_kw.symbol != "*"):
            # Pretend we never read this token and put it back!
            input_file.unread_token(next_kw)
            break

    return result
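# Usage sketch for the "*"-chaining handled above, in the same style as the
# sources parsed in test_parser below (values made up for illustration):
#
#     stream = InputStream(StringIO("translation([1, 2, 3]) * rotation_z(90)"))
#     t = parse_transformation(stream, Scene())
#     assert t.is_close(translation(Vec(1, 2, 3)) * rotation_z(90))
#
# Each chained factor is multiplied into `result` from left to right, so the
# textual order of the factors matches the mathematical composition order.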
def createNew(self):
    self.editTransformation(Transformation())
def main():
    parser = argparse.ArgumentParser(
        description="Compute and prepare feature data for all scenes, using a specific interval if necessary")
    parser.add_argument('--features', type=str,
                        help="list of features chosen to compute the data",
                        default='svd_reconstruction, ipca_reconstruction',
                        required=True)
    parser.add_argument('--params', type=str,
                        help="list of specific params for each chosen feature (see README.md for further information on 3D mode)",
                        default='100, 200 :: 50, 25',
                        required=True)
    parser.add_argument('--folder', type=str,
                        help='folder where the dataset is available',
                        required=True)
    parser.add_argument('--output', type=str,
                        help='output folder where data are saved',
                        required=True)
    parser.add_argument('--thresholds', type=str,
                        help='file which contains all thresholds',
                        required=True)
    parser.add_argument('--size', type=str,
                        help="specific size of image",
                        default='100, 100',
                        required=True)
    parser.add_argument('--replace', type=int,
                        help='replace previously generated pictures',
                        default=1)

    args = parser.parse_args()

    p_features = list(map(str.strip, args.features.split(',')))
    p_params = list(map(str.strip, args.params.split('::')))
    p_folder = args.folder
    p_output = args.output
    p_thresholds = args.thresholds
    p_size = args.size
    p_replace = bool(args.replace)

    # list of transformations
    transformations = []
    for id, feature in enumerate(p_features):
        if feature not in cfg.features_choices_labels or feature == 'static':
            raise ValueError(
                "Unknown feature {0}, please select a correct feature (`static` excluded): {1}"
                .format(feature, cfg.features_choices_labels))
        transformations.append(Transformation(feature, p_params[id], p_size))

    human_thresholds = {}

    # 3. retrieve human_thresholds
    # construct zones folder
    with open(p_thresholds) as f:
        thresholds_line = f.readlines()

    for line in thresholds_line:
        data = line.split(';')
        del data[-1]  # remove the unused trailing `\n` element
        current_scene = data[0]
        thresholds_scene = data[1:]

        if current_scene != '50_shades_of_grey':
            human_thresholds[current_scene] = [int(threshold) for threshold in thresholds_scene]

    # generate all or specific feature data
    for transformation in transformations:
        generate_data(transformation, p_folder, p_output, human_thresholds, p_replace)
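# Example invocation (script name and paths are hypothetical):
#   python generate_all_data.py \
#       --features "svd_reconstruction, ipca_reconstruction" \
#       --params "100, 200 :: 50, 25" \
#       --folder dataset/ --output generated/ \
#       --thresholds thresholds.csv --size "100, 100" --replace 1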
def reloadData(self):
    self.transformations = Transformation.getAll(self.enabledOnly)
    self.row_count = len(self.transformations)
def get_transformation_for_city(self, city):
    # lazily build and cache a single Transformation per tour
    tour = self.reverse_mapping[city]
    if tour not in self.transformations:
        self.transformations[tour] = Transformation(tour)
    return self.transformations[tour]
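# The same lazy caching could be written with dict.setdefault:
#
#     return self.transformations.setdefault(tour, Transformation(tour))
#
# but setdefault evaluates its second argument eagerly, constructing a
# throwaway Transformation on every cache hit, so the explicit `if` above is
# preferable when the constructor is not trivial.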
def __init__(self, transformation=Transformation(), material: Material = Material()):
    """Create an xy plane, potentially associating a transformation with it"""
    super().__init__(transformation, material)
def test_parser(self):
    stream = StringIO("""
    float clock(150)

    material sky_material(
        diffuse(uniform(<0, 0, 0>)),
        uniform(<0.7, 0.5, 1>)
    )

    # Here is a comment

    material ground_material(
        diffuse(checkered(<0.3, 0.5, 0.1>,
                          <0.1, 0.2, 0.5>, 4)),
        uniform(<0, 0, 0>)
    )

    material sphere_material(
        specular(uniform(<0.5, 0.5, 0.5>)),
        uniform(<0, 0, 0>)
    )

    plane (sky_material, translation([0, 0, 100]) * rotation_y(clock))
    plane (ground_material, identity)

    sphere(sphere_material, translation([0, 0, 1]))

    camera(perspective, rotation_z(30) * translation([-4, 0, 1]), 1.0, 2.0)
    """)

    scene = parse_scene(input_file=InputStream(stream))

    # Check that the float variables are ok
    assert len(scene.float_variables) == 1
    assert "clock" in scene.float_variables.keys()
    assert scene.float_variables["clock"] == 150.0

    # Check that the materials are ok
    assert len(scene.materials) == 3
    assert "sphere_material" in scene.materials
    assert "sky_material" in scene.materials
    assert "ground_material" in scene.materials

    sphere_material = scene.materials["sphere_material"]
    sky_material = scene.materials["sky_material"]
    ground_material = scene.materials["ground_material"]

    assert isinstance(sky_material.brdf, DiffuseBRDF)
    assert isinstance(sky_material.brdf.pigment, UniformPigment)
    assert sky_material.brdf.pigment.color.is_close(Color(0, 0, 0))

    assert isinstance(ground_material.brdf, DiffuseBRDF)
    assert isinstance(ground_material.brdf.pigment, CheckeredPigment)
    assert ground_material.brdf.pigment.color1.is_close(Color(0.3, 0.5, 0.1))
    assert ground_material.brdf.pigment.color2.is_close(Color(0.1, 0.2, 0.5))
    assert ground_material.brdf.pigment.num_of_steps == 4

    assert isinstance(sphere_material.brdf, SpecularBRDF)
    assert isinstance(sphere_material.brdf.pigment, UniformPigment)
    assert sphere_material.brdf.pigment.color.is_close(Color(0.5, 0.5, 0.5))

    assert isinstance(sky_material.emitted_radiance, UniformPigment)
    assert sky_material.emitted_radiance.color.is_close(Color(0.7, 0.5, 1.0))
    assert isinstance(ground_material.emitted_radiance, UniformPigment)
    assert ground_material.emitted_radiance.color.is_close(Color(0, 0, 0))
    assert isinstance(sphere_material.emitted_radiance, UniformPigment)
    assert sphere_material.emitted_radiance.color.is_close(Color(0, 0, 0))

    # Check that the shapes are ok
    assert len(scene.world.shapes) == 3
    assert isinstance(scene.world.shapes[0], Plane)
    assert scene.world.shapes[0].transformation.is_close(
        translation(Vec(0, 0, 100)) * rotation_y(150.0))
    assert isinstance(scene.world.shapes[1], Plane)
    assert scene.world.shapes[1].transformation.is_close(Transformation())
    assert isinstance(scene.world.shapes[2], Sphere)
    assert scene.world.shapes[2].transformation.is_close(translation(Vec(0, 0, 1)))

    # Check that the camera is ok
    assert isinstance(scene.camera, PerspectiveCamera)
    assert scene.camera.transformation.is_close(
        rotation_z(30) * translation(Vec(-4, 0, 1)))
    assert pytest.approx(1.0) == scene.camera.aspect_ratio
    assert pytest.approx(2.0) == scene.camera.screen_distance
def __init__(self, transformation=Transformation()):
    """Create a unit sphere, potentially associating a transformation with it"""
    super().__init__(transformation)
def __init__(self, transformation=Transformation()):
    """Create a shape, potentially associating a transformation with it"""
    self.transformation = transformation
def __init__(self, transformation: Transformation = Transformation(),
             material: Material = Material()):
    """Create a shape, potentially associating a transformation and a material with it"""
    self.transformation = transformation
    self.material = material