def shape_net_test():
    std_out_path = "/home/zgong8/PerceptualMetric/examples/data_out/decimation_low_{}.obj"
    std_out_path_ori = "/home/zgong8/PerceptualMetric/examples/data_out/decimation_low_ori_{}.obj"
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = std_out_path.format(i)
        out_path_ori = std_out_path_ori.format(i)

        p = Pipeline(1)
        d = None
        while d is None:
            c = {}
            for k, f in DecimationDeformer.get_applicable_configs().items():
                q = np.random.uniform()
                c[k] = f(q)
            tmp_d = DecimationDeformer(c)
            if tmp_d.get_score() == 1:
                d = tmp_d

        p.plug(d)

        p.process_shape_file(in_path, out_path)

        p = Pipeline(0)
        p.process_shape_file(in_path, out_path_ori)
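# The rejection-sampling loop above (draw a random config, keep the deformer only
# if it lands on the target perceptual score) recurs in several examples below.
# A minimal helper sketch, assuming every deformer class exposes
# get_applicable_configs() and get_score() as used above and that np is numpy;
# the helper name and max_tries cap are assumptions, not part of the original API.
def sample_deformer_with_score(deformer_cls, target_score, max_tries=1000):
    for _ in range(max_tries):
        c = {k: f(np.random.uniform())
             for k, f in deformer_cls.get_applicable_configs().items()}
        d = deformer_cls(c)
        if d.get_score() == target_score:
            return d
    return None
# e.g. d = sample_deformer_with_score(DecimationDeformer, 1)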
def string_to_pipeline(string):
    each_deformation_finder = re.compile("<.*?}")
    deformation_strings = each_deformation_finder.findall(string)
    p = Pipeline(len(deformation_strings))
    for deformation_string in deformation_strings:
        p.plug(substring_to_deformation(deformation_string))
    return p
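# Quick illustration of the "<.*?}" pattern used by string_to_pipeline on a
# hypothetical serialized string; the exact format produced by str(Pipeline) is
# an assumption here, the point is only the lazy match up to the first "}".
sample = "<DecimationDeformer {'ratio': 3.0}><SubdivisionDeformer {'steps': 1}>"
print(re.compile("<.*?}").findall(sample))
# ["<DecimationDeformer {'ratio': 3.0}", "<SubdivisionDeformer {'steps': 1}"]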
Example #3
def shape_net_test():
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = os.path.join(curr_dir, data_out_dir,
                                "arap_high_{}.obj".format(i))
        out_path_ori = os.path.join(curr_dir, data_out_dir,
                                    "arap_high_{}_ori.obj".format(i))

        p = Pipeline(1)
        d = None
        while d is None:
            c = {}
            for k, f in ARAPDeformer.get_applicable_configs().items():
                q = np.random.uniform()
                c[k] = f(q)
            tmp_d = ARAPDeformer(c)
            if tmp_d.get_score() == 3:
                d = tmp_d

        p.plug(d)

        p.process_shape_file(in_path, out_path)

        p = Pipeline(0)
        p.process_shape_file(in_path, out_path_ori)
def string_to_pipelines(string):
    each_deformation_finder = re.compile("<.*?}")
    deformation_strings = each_deformation_finder.findall(string)
    pipelines = []
    for deformation_string in deformation_strings:
        p = Pipeline(1)
        p.plug(substring_to_deformation(deformation_string))
        pipelines.append(p)
    return pipelines
Example #5
def random_shape_sample():
    for i in range(5, 10):
        in_path = random_shape_net_object()
        out_path_ori = r"/home/zgong8/data_out/base-" + str(i) + "-ori.obj"
        out_path1 = r"/home/zgong8/data_out/base-" + str(i) + "-1.obj"
        file_path1 = r"/home/zgong8/data_out/base-" + str(i) + "-1.txt"
        out_path2 = r"/home/zgong8/data_out/base-" + str(i) + "-2.obj"
        file_path2 = r"/home/zgong8/data_out/base-" + str(i) + "-2.txt"
        empty_pipeline = Pipeline(0)
        empty_pipeline.process_shape_file(in_path, out_path_ori)

        deformation_level = i % 3 + 1

        status = False
        while not status:
            ppl, status = run_leveled_pipeline(3, in_path, out_path1)
            with open(file_path1, 'w') as f:
                f.write(in_path)
                f.write("\n")
                f.write(str(ppl))
                f.write("\n")
                f.write("status: ")
                f.write(str(status))
                f.write("\n")
                f.write("score: ")
                f.write(str(ppl.estimate_deform_score()))
                if status:
                    f.write("\n")
                    f.write("manifold: ")
                    f.write(str(is_manifold(file=out_path1)))
                    f.write("\n")
                    f.write("cut_at: ")
                    f.write(str(ppl.cut_by_point_cloud))
        status = False
        while not status:
            ppl, status = run_leveled_pipeline(1, in_path, out_path2)
            with open(file_path2, 'w') as f:
                f.write(in_path)
                f.write("\n")
                f.write(str(ppl))
                f.write("\n")
                f.write("status: ")
                f.write(str(status))
                f.write("\n")
                f.write("score: ")
                f.write(str(ppl.estimate_deform_score()))
                if status:
                    f.write("\n")
                    f.write("manifold: ")
                    f.write(str(is_manifold(file=out_path2)))
                    f.write("\n")
                    f.write("cut_at: ")
                    f.write(str(ppl.cut_by_point_cloud))
        print("{} done!".format(str(i)))
def get_random_pipeline():
    q = np.random.uniform()
    slots = int(np.floor(q * 10 + 1))
    p = Pipeline(slots)
    while not p.is_full():
        p.plug(get_random_deformation())
        # p_str = str(p)
        # vd = p_str.find("VoxelizeDeformer")
        # if vd != -1:
        #     dd = p_str.find("DecimationDeformer", vd)
        #     if dd != -1:
        #         p = None
    return p
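# The commented-out check above inspects str(p) for a DecimationDeformer that
# comes after a VoxelizeDeformer; a minimal sketch of the same idea on the
# deformer list itself. p._deformers is assumed here, mirroring the
# validate_pipeline example further down in this listing.
def decimation_after_voxelize(p):
    seen_voxelize = False
    for d in p._deformers:
        name = type(d).__name__
        if name == "VoxelizeDeformer":
            seen_voxelize = True
        elif name == "DecimationDeformer" and seen_voxelize:
            return True
    return False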
Example #7
def insurance_cross_sell_prediction():
    test_json = request.get_json()
    
    if test_json: #there is data
        if isinstance(test_json, dict):
            teste_raw = pd.DataFrame(test_json, index=[0]) #unique example
        else:
            teste_raw = pd.DataFrame(test_json, columns=test_json[0].keys()) #multiple examples

        # Instantiate
        pipeline = Pipeline()

        # Data Cleaning
        df1 = pipeline.data_cleaning(teste_raw)
        # Feature Engineering
        df2 = pipeline.feature_engineering(df1)
        # Data Preparation
        df3 = pipeline.data_preparation(df2)
        # Prediction
        df_response = pipeline.predict(model, df3, teste_raw)
        
        return df_response
    
    else:
        return Response('{}', status=200, mimetype='application/json')
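# insurance_cross_sell_prediction reads request.get_json() and returns a
# flask.Response, so it is presumably registered on a Flask app; a minimal
# wiring sketch. The route, endpoint name, and model artifact path below are
# assumptions, not the original setup.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/predict', 'insurance_cross_sell_prediction',
                 insurance_cross_sell_prediction, methods=['POST'])
# model would be loaded once at startup, e.g. with pickle (hypothetical path):
# model = pickle.load(open('model/model.pkl', 'rb'))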
def test():
    in_path = os.path.join(curr_dir, data_out_dir, "mc5b.obj")
    out_path = os.path.join(curr_dir, data_out_dir, "mc5b_decimation.obj")

    p = Pipeline(1)
    d = DecimationDeformer({"ratio": 5.0})
    p.plug(d)

    p.process_shape_file(in_path, out_path)
Example #9
def head_test():
    in_path = os.path.join(curr_dir, data_in_dir, file_name)
    out_path = os.path.join(curr_dir, data_out_dir, "marching_cubes_head_test.obj")

    p = Pipeline(1)
    d = PointCloudDeformer({})
    p.plug(d)

    p.process_shape_file(in_path, out_path)
Example #10
def shape_net_test():
    # in_path = random_shape_net_object()
    in_path = "/home/zgong8/ShapeNetCore.v2/03593526/2872c4b8e2f86201b7d0f3d89bbac230/models/model_normalized.obj"
    for i in range(1):
        print(in_path)

        out_path = os.path.join(curr_dir, data_out_dir,
                                "rc_low_{}.obj".format(i))
        out_path_ori = os.path.join(curr_dir, data_out_dir,
                                    "rc_low_{}_ori.obj".format(i))

        p = Pipeline(1)
        d = None
        while d is None:
            c = {
                't_x': 0.2905716521257016,
                't_y': 0.4142014801845495,
                't_z': 0.4329088231382898,
                'theta': 0.22074621042125056,
                'phi': 0.04610239008672457,
                'pivots': 0.8523162841916193
            }

            for k, f in RandomCageDeformer.get_applicable_configs().items():
                q = np.random.uniform()
                c[k] = f(q)
            tmp_d = RandomCageDeformer(c)
            if tmp_d.get_score() == 1:
                d = tmp_d

        p.plug(d)
        print(p)

        p.process_shape_file(in_path, out_path)

        p = Pipeline(0)
        p.process_shape_file(in_path, out_path_ori)
Example #11
def test():
    in_path = os.path.join(curr_dir, data_out_dir, "mc5b_decimation.obj")
    out_path = os.path.join(curr_dir, data_out_dir, "mc5b_arap.obj")

    p = Pipeline(1)
    c = {}
    for k, f in ARAPDeformer.get_applicable_configs().items():
        q = np.random.uniform()
        c[k] = f(q)
    d = ARAPDeformer(c)
    p.plug(d)

    p.process_shape_file(in_path, out_path)
Example #12
def shape_net_test():
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = os.path.join(curr_dir, data_out_dir, "pcd{}.obj".format(i))

        p = Pipeline(1)
        d = PointCloudDeformer({})
        p.plug(d)

        p.process_shape_file(in_path, out_path)
def shape_net_test2():
    std_out_path = "/home/zgong8/PerceptualMetric/examples/data_out/mc{}.obj"
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = std_out_path.format(i)

        p = Pipeline(1)
        d = MarchingCubesDeformer({})
        p.plug(d)

        p.process_shape_file(in_path, out_path)
def validate_pipeline(p: Pipeline, level):
    if level != -1:
        if p.estimate_deform_score() != level:
            return False
    has_decimation = False
    has_reformation = False
    has_scaling = False
    has_voxel = False
    has_sub = False
    for d in p._deformers:
        d_string = str(type(d))
        if "Voxel" in d_string:
            if has_reformation:
                return False
            else:
                has_reformation = True
                has_voxel = True
        elif "Decimation" in d_string:
            if has_decimation:
                return False
            elif has_voxel:
                return False
            else:
                has_decimation = True
        elif "Point" in d_string or "Marching" in d_string:
            if has_reformation:
                return False
            else:
                has_reformation = True
        elif "Length" in d_string:
            if has_scaling:
                return False
            else:
                has_scaling = True
        elif "Subdivision" in d_string:
            if has_sub:
                return False
            else:
                has_sub = True
    return True
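# A minimal sketch combining validate_pipeline with the random pipeline
# generator from an earlier example in this listing (get_random_pipeline); the
# retry loop and max_tries cap are assumptions, not part of the original code.
def get_valid_random_pipeline(level=-1, max_tries=100):
    for _ in range(max_tries):
        p = get_random_pipeline()
        if validate_pipeline(p, level):
            return p
    return None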
Example #15
def test():
    in_path = os.path.join(curr_dir, data_out_dir, file_name)
    out_path = os.path.join(curr_dir, data_out_dir, "mc5b_biharmonic.obj")

    p = Pipeline(1)
    c = {
        PIVOTS: np.floor(random() * 4 + 1),
        FIRST: random(),
        SECOND: random(),
        THIRD: random(),
        FOURTH: random()
    }
    d = BiharmonicDeformer(c)
    p.plug(d)

    p.process_shape_file(in_path, out_path)
Example #16
def shape_net_test():
    std_out_path = "/home/zgong8/PerceptualMetric/examples/data_out/bi_high_{}.obj"
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = std_out_path.format(i)

        p = Pipeline(1)
        c = {
            PIVOTS: 1,
            FIRST: random(),
            SECOND: random(),
            THIRD: random(),
            FOURTH: random(),
            DIS: 1.6
        }
        d = BiharmonicDeformer(c)
        p.plug(d)

        p.process_shape_file(in_path, out_path)
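# The two biharmonic examples above build very similar config dicts by hand; a
# small helper sketch using the same PIVOTS/FIRST/SECOND/THIRD/FOURTH/DIS
# constants and random()/np as above. The helper name and defaults are assumptions.
def random_biharmonic_config(pivots=None, dis=None):
    c = {
        PIVOTS: pivots if pivots is not None else np.floor(random() * 4 + 1),
        FIRST: random(),
        SECOND: random(),
        THIRD: random(),
        FOURTH: random(),
    }
    if dis is not None:
        c[DIS] = dis
    return c
# e.g. BiharmonicDeformer(random_biharmonic_config(pivots=1, dis=1.6))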
Example #17
    def __init__(self):
        self.pipeline = Pipeline()
Example #18
class NeritFacade(object):



	
	def __init__(self):
		
		self.pipeline=Pipeline()



	def createPosTagger(self,config_file):

        # create a POS Tagger, using PostModelGenerator as a helper

		postModelGenerator=PostModelGenerator(config_file)
		model=postModelGenerator.createModel()
		return model

	def createChunkTagger(self,config_file):

        # create a Tagger trained from the parameters in /config/taggers.ini, using ChunkerModelGenerator as a "helper" class
		


		config = ConfigParser.RawConfigParser()
		config.optionxform = str 			
		config.read(config_file)
		
		phrases=[ph for tag,ph in config.items('chunkers.phrases')]
		files=[fname for key,fname in config.items('chunk_training_corpus.corpus')]
		featureName=config.get('chunker.features','featureExtractor')
		chunkerGenerator=ChunkerModelGenerator(config.get('chunkers','save_to'),config.get('chunk_training_corpus','corpora'),files,phrases,config.getfloat('chunk_training_corpus','training_portion'),config.get('chunkers','ext_file'),ContextFeatureExtractor())

		model=chunkerGenerator.createModel()
		return model

	
	def getTokenizerStage(self,tokenizers_file,abbreviations,src_field='text',dst_field='text'):

        # build the full tokenization stage and adapt it so it plugs into the Pipe

		tokenizerFactory=TokenizerFactory(tokenizers_file,abbreviations)
		tokenizerStages=tokenizerFactory.toStage(src_field,dst_field)
		return tokenizerStages


	def getTaggerStage(self,model_path,src_field='text',dst_field='tagged',decorator=None):

        # build the POS-tagging stage and adapt it so it fits into the Pipe

		factory=TaggerFactory(model_path)
		factory.set_decorator(decorator)
		stage=factory.toStage(src_field,dst_field)
		return stage

	
	def getChunkerModelStage(self,model_path,src_field='tagged',dst_field='chunked',decorator=None):

        # build the chunk-recognition stage based on a trained model, adapted to the pipe.
		
		factory=ModelChunkerFactory(model_path)
		factory.set_decorator(decorator)
		
		stage=factory.toStage(src_field,dst_field)
		return stage	


	
	def getPersistenceStage(self,iFilter,save_to,corpus_size=-1,f_ext=".tcs",src_field='chunked',dst_field='chunked'):

        # Optional: keep this as one more stage in the pipe in case you want to build a corpus from the pipe's output.

		factory=PersistenceFactory(save_to,f_ext,corpus_size,iFilter)
		stage=factory.toStage(src_field,dst_field)	
		return stage


	def createPipeline(self,stage_list):

        # assemble the pipe with all the stages given in stage_list

		for s in stage_list:
			if isinstance(s,list):
				for s_i in s:
					self.pipeline.addStage(s_i)
			else:
				self.pipeline.addStage(s)
				
		return self.pipeline


	def add_finalStage(self,stages):

        # set the final stage of the pipe. Remember the Pipe is an Observer, but also a Subject, and can emit notifications when its processing finishes.

		if isinstance(stages,list):
			for stage in stages:
				self.pipeline.addFinalStage(stage)
		else:
			self.pipeline.addFinalStage(stages)

	
	def pipe(self):
		return self.pipeline
	

	def getDataLineConnection(self,filename):

        # instantiate a connection for line-by-line files

		return DataLineConnection(filename)



	def getFileConnection(self,filename):

        # instantiate a connection to JSON-formatted files

		return FileConnection(filename)




	def getTwitterConnection(self,hashtags=[],timeLine=True):

        # connect to Twitter: either use hashtags or process the timeline

		return TwitterConnection(hashtags=hashtags,timeLine=timeLine)

	
	def getChunkerStrategy(self,grammar_location=locations,grammar_events=events,grammar_words=words,grammar_clean=clean):

        # returns just a strategy. This is more of a test....
	
		locations=PostPatternStrategy(grammar_location) #loop=3
		
		location_words=PostPatternStrategy(grammar_words)#loop=1
		locations=SequentialStrategy(locations,location_words)
				
		iobFixer=IOBFixerStrategy()
		locations_words_strategy=SequentialStrategy(locations,iobFixer)
		events=PostPatternStrategy(grammar_events) #loop=1
		events=SequentialStrategy(locations_words_strategy,events)
		events=SequentialStrategy(events,iobFixer)
		
		return events

	def getChunkerDecorator(self,grammar_location=locations,grammar_events=events,grammar_words=words,grammar_clean=clean):

        # a decorator (which detects noun phrases, adverbial phrases, etc.) extended with functionality to detect addresses and events
		
	
		locations=PostPatternStrategy(grammar_location)
		
		location_words=PostPatternStrategy(grammar_words)
		locations=SequentialStrategy(locations,location_words)
		
		
		iobFixer=IOBFixerStrategy()
		locations_words_strategy=SequentialStrategy(locations,iobFixer)
		
        # look for events inside the chunk tree
		events=PostPatternStrategy(grammar_events)
		events=SequentialStrategy(locations_words_strategy,events)

		events=SequentialStrategy(events,iobFixer)
		
        # decorator
		decorator=WrappedStrategyTagger()
		decorator.set_strategy(events)
		
		return decorator


	def getChunkerStage(self,grammar_chunks=chunks,src_field='tagged',dst_field='chunked',decorator=None):	


		factory=RegexpChunkerFactory(grammar_chunks)
		factory.set_decorator(decorator)
		
		stage=factory.toStage(src_field,dst_field)

		return stage

	

	def getChunkerWrappedStage(self,model_path,grammar_chunks=chunks,src_field='tagged',dst_field='chunked',strategy=None):

        # This is mainly useful for testing

        # build a chunker that detects phrases using a corpus and then the grammars (from scratch: noun phrases, prepositional phrases, etc.)
        # builds a chunker by combining two decorators and applying them in sequence.


		factory=ModelChunkerFactory(model_path)
		modelChunker=factory.createInstance()
		
		factory=RegexpChunkerFactory(grammar_chunks)
		regexpChunker=factory.createInstance()
		
		wrappedRegexpChunker=WrappedStrategyTagger(regexpChunker)
        # strategy to recognize addresses on top of the standard phrases
		wrappedRegexpChunker.set_strategy(strategy)	

		compositeTagger=CompositeWrapperTagger(modelChunker,wrappedRegexpChunker)
		chunkerStage=TaggerAdapter(compositeTagger,src_field,dst_field)

		return chunkerStage
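# A minimal usage sketch for NeritFacade, chaining tokenizer -> tagger ->
# chunker stages into the pipe via createPipeline; the config and model file
# paths below are placeholders/assumptions, not the project's real files.
facade = NeritFacade()
stages = [
    facade.getTokenizerStage('config/tokenizers.ini', 'config/abbreviations.txt'),
    facade.getTaggerStage('models/pos.model'),
    facade.getChunkerModelStage('models/chunker.model'),
]
pipe = facade.createPipeline(stages)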
Example #19
import os

from pipeline.Pipeline import Pipeline
from module.deformers.VoxelizeDeformer import VoxelizeDeformer, NUM_CUBES

curr_dir = os.path.dirname(os.path.realpath(__file__))
data_in_dir = "data_in"
data_out_dir = "data_out"
file_name = "head.obj"

in_path = os.path.join(curr_dir, data_in_dir, file_name)
out_path = os.path.join(curr_dir, data_out_dir, "vox_head_test_32.obj")

p = Pipeline(1)
d = VoxelizeDeformer({NUM_CUBES: 128})
p.plug(d)

p.process_shape_file(in_path, out_path)
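# A small extension sketch: run the same input at several voxel resolutions;
# the resolution list and output naming below are assumptions.
for n in (32, 64, 128):
    p = Pipeline(1)
    p.plug(VoxelizeDeformer({NUM_CUBES: n}))
    p.process_shape_file(in_path,
                         os.path.join(curr_dir, data_out_dir,
                                      "vox_head_test_{}.obj".format(n)))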
Example #20
def run_pipeline(logging, frames, models, model_classes, models_array,
                 model_paths, args, start_time, out, save=False):

    end_time = time.time()

    face, landmarks, pose_estimation, gaze = models

    logging.info("""{f} Frame(s) loading time: {t}""".format(f=len(frames), 
    t=(end_time - start_time)))

    logging.info("Creating Image Frame pipeline: ")

    default_pipeline = Pipeline(model_type=None, model_class=None, objects={}, 
    networks={}, objs={}, nets={}, logging=logging)

    # load the network objects
    default_pipeline.run(model_classes, models_array, model_paths, args)

    image_frame = ImageFrame(model_type=face, model_class=model_classes, 
    objects=default_pipeline.objects, networks=default_pipeline.networks, 
    objs=default_pipeline.objs, nets=default_pipeline.nets)

    face_detection = Face(model_type=landmarks, model_class=model_classes.tolist()[1], 
    objects=default_pipeline.objects, networks=default_pipeline.networks, 
    objs=default_pipeline.objs, nets=default_pipeline.nets)

    gaze_estimation = Gaze(model_type=gaze, model_class=model_classes.tolist()[3], 
    objects=default_pipeline.objects, networks=default_pipeline.networks, 
    objs=default_pipeline.objs, nets=default_pipeline.nets)

    start_time = time.time()

    image_frame.run(args, frames, model_classes)

    logging.info("""Preprocess and exec async for face detection: {t}""".format(t=(time.time() - start_time)))

    # for each n batches and for each batch_size
    start_time = time.time()

    gen_frames, faces, face_boxes = \
        image_frame.produce(args, frames, model_classes)

    logging.info("""Post-process face detection: {t}""".format(t=(time.time() - start_time)))

    start_time = time.time()
    
    face_detection.run(args, frames, faces, model_classes)

    logging.info("""Preprocess and exec async for facial landmarks: {t}""".format(t=(time.time() - start_time)))

    start_time = time.time()

    batch_gen_frames, cropped_left, \
        cropped_right, left_eye, right_eye, \
        nose, left_lip, right_lip = \
            face_detection.produce(args, frames, gen_frames, faces, face_boxes, 
    model_classes)

    logging.info("""Post-process facial landmarks: {t}""".format(t=(time.time() - start_time)))

    start_time = time.time()

    pose_model = Pose(model_type=pose_estimation, 
    model_class=model_classes.tolist()[2], 
    objects=default_pipeline.objects, networks=default_pipeline.networks, 
    objs=default_pipeline.objs, nets=default_pipeline.nets)

    pose_model.run(args, frames, faces, model_classes)
    
    logging.info("""Preprocess and exec async head pose: {t}""".format(t=(time.time() - start_time)))

    start_time = time.time()

    head_pose_angles = pose_model.produce(args, frames, 
    batch_gen_frames, model_classes)

    logging.info("""Post-process head pose: {t}""".format(t=(time.time() - start_time)))

    gen_frames = None

    start_time = time.time()
    # preprocessing the gaze and executing the landmarks detection
    gaze_estimation.run(args, frames, faces, 
    cropped_left, cropped_right, 
    head_pose_angles, model_classes)

    logging.info("""Preprocess and exec async gaze estimation: {t}""".format(t=(time.time() - start_time)))

    faces = None

    start_time = time.time()
    # post process gaze vector
    gaze_vector = gaze_estimation.produce(args, frames, batch_gen_frames, 
    model_classes)

    logging.info("""Post-process gaze: {t}""".format(t=(time.time() - start_time)))

    start_time = time.time()
    ext = os.path.splitext(args.output_path)[1]

    if ext in is_video():
        default_pipeline.finalize_pipeline(out, frames, 
        args, batch_gen_frames, face_boxes, 
        left_eye, right_eye, 
        nose, left_lip, right_lip, 
        gaze_vector, save=save)
    else:
        frame = default_pipeline.write_pipeline(0, frames, batch_gen_frames, face_boxes, 
        left_eye, right_eye, nose, left_lip, right_lip, 
        gaze_vector)
        cv2.imwrite(out, frame)

    logging.info("""Post-process video writing and painting time: {t}""".format(t=(time.time() - start_time)))

    return batch_gen_frames
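# is_video() is called above but not shown; a plausible sketch (an assumption,
# not the original helper) returning the video extensions the writer handles:
def is_video():
    return ['.mp4', '.avi', '.mkv']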
Example #21
def test():
    std_out_path = "/home/zgong8/PerceptualMetric/examples/data_out/decimation_sub_{}.obj"
    std_out_path_ori = "/home/zgong8/PerceptualMetric/examples/data_out/decimation_sub_{}_ori.obj"
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path = std_out_path.format(i)
        out_path_ori = std_out_path_ori.format(i)

        p = Pipeline(2)
        d = DecimationDeformer({"ratio": 3.0})
        p.plug(d)
        df = SubdivisionDeformer({STEPS: 1})
        p.plug(df)
        p.process_shape_file(in_path, out_path)

        p = Pipeline(0)
        p.process_shape_file(in_path, out_path_ori)
Example #22
    def __init__(self):

        self.pipeline = Pipeline()
Example #23
class NeritFacade(object):
    def __init__(self):

        self.pipeline = Pipeline()

    def createPosTagger(self, config_file):

        # create a POS Tagger, using PostModelGenerator as a helper

        postModelGenerator = PostModelGenerator(config_file)
        model = postModelGenerator.createModel()
        return model

    def createChunkTagger(self, config_file):

        # create a Tagger trained from the parameters in /config/taggers.ini, using ChunkerModelGenerator as a "helper" class

        config = ConfigParser.RawConfigParser()
        config.optionxform = str
        config.read(config_file)

        phrases = [ph for tag, ph in config.items('chunkers.phrases')]
        files = [
            fname
            for key, fname in config.items('chunk_training_corpus.corpus')
        ]
        featureName = config.get('chunker.features', 'featureExtractor')
        chunkerGenerator = ChunkerModelGenerator(
            config.get('chunkers', 'save_to'),
            config.get('chunk_training_corpus', 'corpora'), files, phrases,
            config.getfloat('chunk_training_corpus', 'training_portion'),
            config.get('chunkers', 'ext_file'), ContextFeatureExtractor())

        model = chunkerGenerator.createModel()
        return model

    def getTokenizerStage(self,
                          tokenizers_file,
                          abbreviations,
                          src_field='text',
                          dst_field='text'):

        # build the full tokenization stage and adapt it so it plugs into the Pipe

        tokenizerFactory = TokenizerFactory(tokenizers_file, abbreviations)
        tokenizerStages = tokenizerFactory.toStage(src_field, dst_field)
        return tokenizerStages

    def getTaggerStage(self,
                       model_path,
                       src_field='text',
                       dst_field='tagged',
                       decorator=None):

        # build the POS-tagging stage and adapt it so it fits into the Pipe

        factory = TaggerFactory(model_path)
        factory.set_decorator(decorator)
        stage = factory.toStage(src_field, dst_field)
        return stage

    def getChunkerModelStage(self,
                             model_path,
                             src_field='tagged',
                             dst_field='chunked',
                             decorator=None):

        # build the chunk-recognition stage based on a trained model, adapted to the pipe.

        factory = ModelChunkerFactory(model_path)
        factory.set_decorator(decorator)

        stage = factory.toStage(src_field, dst_field)
        return stage

    def getPersistenceStage(self,
                            iFilter,
                            save_to,
                            corpus_size=-1,
                            f_ext=".tcs",
                            src_field='chunked',
                            dst_field='chunked'):

        # Optional: keep this as one more stage in the pipe in case you want to build a corpus from the pipe's output.

        factory = PersistenceFactory(save_to, f_ext, corpus_size, iFilter)
        stage = factory.toStage(src_field, dst_field)
        return stage

    def createPipeline(self, stage_list):

        # assemble the pipe with all the stages given in stage_list

        for s in stage_list:
            if isinstance(s, list):
                for s_i in s:
                    self.pipeline.addStage(s_i)
            else:
                self.pipeline.addStage(s)

        return self.pipeline

    def add_finalStage(self, stages):

        # set the final stage of the pipe. Remember the Pipe is an Observer, but also a Subject, and can emit notifications when its processing finishes.

        if isinstance(stages, list):
            for stage in stages:
                self.pipeline.addFinalStage(stage)
        else:
            self.pipeline.addFinalStage(stages)

    def pipe(self):
        return self.pipeline

    def getDataLineConnection(self, filename):

        # instantiate a connection for line-by-line files

        return DataLineConnection(filename)

    def getFileConnection(self, filename):

        # instantiate a connection to JSON-formatted files

        return FileConnection(filename)

    def getTwitterConnection(self, hashtags=[], timeLine=True):

        # connect to Twitter: either use hashtags or process the timeline

        return TwitterConnection(hashtags=hashtags, timeLine=timeLine)

    def getChunkerStrategy(self,
                           grammar_location=locations,
                           grammar_events=events,
                           grammar_words=words,
                           grammar_clean=clean):

        # returns just a strategy. This is more of a test....

        locations = PostPatternStrategy(grammar_location)  #loop=3

        location_words = PostPatternStrategy(grammar_words)  #loop=1
        locations = SequentialStrategy(locations, location_words)

        iobFixer = IOBFixerStrategy()
        locations_words_strategy = SequentialStrategy(locations, iobFixer)
        events = PostPatternStrategy(grammar_events)  #loop=1
        events = SequentialStrategy(locations_words_strategy, events)
        events = SequentialStrategy(events, iobFixer)

        return events

    def getChunkerDecorator(self,
                            grammar_location=locations,
                            grammar_events=events,
                            grammar_words=words,
                            grammar_clean=clean):

        # a decorator (which detects noun phrases, adverbial phrases, etc.) extended with functionality to detect addresses and events

        locations = PostPatternStrategy(grammar_location)

        location_words = PostPatternStrategy(grammar_words)
        locations = SequentialStrategy(locations, location_words)

        iobFixer = IOBFixerStrategy()
        locations_words_strategy = SequentialStrategy(locations, iobFixer)

        # look for events inside the chunk tree
        events = PostPatternStrategy(grammar_events)
        events = SequentialStrategy(locations_words_strategy, events)

        events = SequentialStrategy(events, iobFixer)

        # decorator
        decorator = WrappedStrategyTagger()
        decorator.set_strategy(events)

        return decorator

    def getChunkerStage(self,
                        grammar_chunks=chunks,
                        src_field='tagged',
                        dst_field='chunked',
                        decorator=None):

        factory = RegexpChunkerFactory(grammar_chunks)
        factory.set_decorator(decorator)

        stage = factory.toStage(src_field, dst_field)

        return stage

    def getChunkerWrappedStage(self,
                               model_path,
                               grammar_chunks=chunks,
                               src_field='tagged',
                               dst_field='chunked',
                               strategy=None):

        # This is mainly useful for testing

        # build a chunker that detects phrases using a corpus and then the grammars (from scratch: noun phrases, prepositional phrases, etc.)
        # builds a chunker by combining two decorators and applying them in sequence.

        factory = ModelChunkerFactory(model_path)
        modelChunker = factory.createInstance()

        factory = RegexpChunkerFactory(grammar_chunks)
        regexpChunker = factory.createInstance()

        wrappedRegexpChunker = WrappedStrategyTagger(regexpChunker)
        # strategy to recognize addresses on top of the standard phrases
        wrappedRegexpChunker.set_strategy(strategy)

        compositeTagger = CompositeWrapperTagger(modelChunker,
                                                 wrappedRegexpChunker)
        chunkerStage = TaggerAdapter(compositeTagger, src_field, dst_field)

        return chunkerStage
def shape_net_test():
    std_out_path1 = "/home/zgong8/PerceptualMetric/examples/data_out/sd1_{}.obj"
    std_out_path2 = "/home/zgong8/PerceptualMetric/examples/data_out/sd2_{}.obj"
    std_out_path3 = "/home/zgong8/PerceptualMetric/examples/data_out/sd3_{}.obj"
    for i in range(10):
        in_path = random_shape_net_object()

        print(in_path)

        out_path1 = std_out_path1.format(i)
        out_path2 = std_out_path2.format(i)
        out_path3 = std_out_path3.format(i)

        p = Pipeline(1)
        df = SubdivisionDeformer({STEPS: 1})
        p.plug(df)

        p.process_shape_file(in_path, out_path1)

        p = Pipeline(1)
        df = SubdivisionDeformer({STEPS: 2})
        p.plug(df)

        p.process_shape_file(in_path, out_path2)

        p = Pipeline(1)
        df = SubdivisionDeformer({STEPS: 3})
        p.plug(df)

        p.process_shape_file(in_path, out_path3)
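# The three blocks above differ only in the STEPS value and the output path; an
# equivalent, more compact sketch (the helper name is an assumption, the paths
# mirror std_out_path1..3 above):
def subdivision_sweep(in_path, i):
    for steps in (1, 2, 3):
        p = Pipeline(1)
        p.plug(SubdivisionDeformer({STEPS: steps}))
        p.process_shape_file(
            in_path,
            "/home/zgong8/PerceptualMetric/examples/data_out/sd{}_{}.obj".format(steps, i))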