Example #1
def run(input_):
    operator = ''
    a = Stack()
    # Stack execution test
    for i in input_:
        if i[0] == 'L':
            a.Loader(i[1])
            a.StackViewer()

        elif i[0] == 'T':
            temp = []
            temp.append(operator)
            temp.append(a.getStack())
            temp.append(i[1])

            print(Mapper.Mapper(temp))
            a.ClearStack()
        else:
            a.StackViewer()
            print(i)
            if len(a.stack) == 0:
                print(Mapper.Mapper(i))
            else:
                a.StackViewer()
                print(i)
                operator = i[0]

    a.ClearStack()
    a.StackViewer()
Example #2
class Earcon(object):
    mapper = Mapper.Mapper()

    def __init__(self, mapped_values):
        scale = self.retrieve_scale()
        self.earcon = self.create_motive(scale, mapped_values)

    def retrieve_scale(self):
        scale = Scale.Scale(self.mapper.degree, self.mapper.octave,
                            self.mapper.type).scale
        return scale

    def create_motive(self, scale, mapped_values):

        for item in mapped_values:
            for key, value in item.items():
                if key == "frequency":
                    if value is not None:
                        new_value = scale[value]
                    else:
                        new_value = scale[0]
                else:
                    if value is not None:
                        new_value = self.mapper.durations[value]
                    else:
                        new_value = self.mapper.durations[-1]

                item[key] = new_value

        return mapped_values
Example #3
    def __init__(self):

        self.provider = DocumentProvider.DocumentProvider()
        self.jsonMod = JsonModifier.JsonModifier()
        self.mngCollCreator = MongoCollectionCreator.MongoCollectionCreator().create_collections()
        self.mapper = Mapper.Mapper()
        tc = TailableCursor.TailableCursor(self).watch_collection_changes()
Example #4
def main():
    """Funcion principal, donde interactuamos con las 
       demas clases pasandoles los ficheros necesarios e 
       instanciando las clases del MapReduce"""

    num_cores = 4
    files = getArgs()

    file_manager = FileManager(files)
    lines_files = file_manager.split_in_lines()

    num_lines = len(lines_files)
    partialPart = num_lines // num_cores
    difference = num_lines - (partialPart * num_cores)

    mapper = Mapper("")
    threads = []
    for i in range(partialPart, (num_lines - partialPart) + 1, partialPart):
        # Pass the callable to Thread; calling mapper.mapping(...) directly
        # would run it synchronously before the thread even starts.
        t = threading.Thread(target=mapper.mapping,
                             args=(lines_files[i - partialPart:i],))
        t.start()
        threads.append(t)

    t = threading.Thread(target=mapper.mapping,
                         args=(lines_files[num_lines -
                                           (partialPart + difference):num_lines],))
    t.start()
    threads.append(t)
    # Wait for all mapping threads before shuffling their results
    for t in threads:
        t.join()

    shuffleDict = mapper.shuffle(mapper.wordsMap)

    reducer = Reducer()

    result = reducer.reduce(shuffleDict)

    dirToTxt(result)
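Splitting the input into per-core chunks, as Example #4 does, can also be expressed with concurrent.futures, which handles thread start-up and joining for you. A hedged sketch of the same idea; the stand-alone mapping stub below is an assumption standing in for the real Mapper.mapping:

from concurrent.futures import ThreadPoolExecutor

def mapping(chunk):
    # Stub standing in for Mapper.mapping: count words in a chunk of lines
    counts = {}
    for line in chunk:
        for word in line.split():
            counts[word] = counts.get(word, 0) + 1
    return counts

lines = ["a b a", "b c", "c c d"]
num_cores = 2
chunk_size = max(1, len(lines) // num_cores)
chunks = [lines[i:i + chunk_size] for i in range(0, len(lines), chunk_size)]

with ThreadPoolExecutor(max_workers=num_cores) as pool:
    # pool.map dispatches one chunk per worker; leaving the `with` block
    # implicitly joins all workers before the partial results are used
    partials = list(pool.map(mapping, chunks))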
Example #5
    def calculate_aggregate_loans(self):
        # Mapper object
        aggregator_map = Mapper.Mapper(self.input_path)
        data = aggregator_map.create_data_frame()

        # Reducer object
        reducer = Reducer.Reducer(data)
        reducer.reduce()
        reducer.print_to_csv(self.output_path)
Example #6
    def __init__(self, sg=None, Grange=None, org='mmu', name='SGPlot.eps'):

        if sg is not None and not isinstance(
                sg, SpliceGraph.SpliceGraph) and os.path.exists(sg):
            sg = SpliceGraph.SpliceGraph(filename=sg)
            if not isinstance(sg, SpliceGraph.SpliceGraph):
                sys.stderr.write(
                    "ERROR: not a splice graph received in SpliceGraphPlot.")
                sys.exit(1)
        elif Grange is not None:
            # get splice graphs within genomic range
            pass
        else:
            sys.exit(1)

        self.sg = sg
        self.file_name = name
        self.target_type = name.split('.')[1]

        # load a splice graph mapper
        self.mapper = Mapper.Mapper(verbose=0)
        self.initCanvas()
Example #7
    def analyze_tables(self):
        for table in self.tables_list:
            print("Section:" + str(table['section_title']) +
                  " ... Trying to analyze this table: " + str(table))
            self.set_table(table)
            self.find_rows()
            if self.table_rows_list:
                logging.info("1. rows found")
                print("1. Rows FOUND")
                self.find_headers()
                if self.table_headers:
                    logging.info("2. headers found")
                    print("2. Headers FOUND")
                    self.extract_data()
                    self.refine_data()
                    if self.data_rows:
                        logging.info("3. Data extracted")
                        logging.debug("DATA: " + str(self.data_rows))
                        print("3. Data extracted")
                        map_tool = Mapper.Mapper(self.chapter, self.graph,
                                                 self.topic, self.resource,
                                                 self.data_rows, 'json',
                                                 table['section_title'])
                        map_tool.map()
                    else:
                        logging.debug(
                            "e3 - UNABLE TO EXTRACT DATA - resource: " +
                            str(self.resource) + " table: " + str(table))
                        print("e3 UNABLE TO EXTRACT DATA")
                else:
                    logging.debug(
                        "e2 Unable to find headers - resource: " +
                        str(self.resource) + " under section: " +
                        str(table['section_title']))
                    print("e2 UNABLE TO FIND HEADERS")
            else:
                logging.debug("e1 Unable to find rows - resource: " +
                              str(self.resource) + " table: " + str(table))
                print("e1 UNABLE TO FIND ROWS")
            self.erase_table_data()
Example #8
Equalize_bol = True
BINS_int = 4
OVERLAP_flt = 0.9
Clust_str = 'CompleteLinkage'
ClusterArguments_array = [1]  # The epsilon value

RightWrong_npArray = np.load('../../Mapper_Data_Files/filter_files/\
Synonyms/rightwrongfilter.npy')

while BINS_int < 21:

    TestObject_ma = ma.Mapper(Cloud_npArray,
                              MetricName_str,
                              LensName_str,
                              LensArguments_array,
                              Equalize_bol,
                              BINS_int,
                              OVERLAP_flt,
                              Clust_str,
                              ClusterArguments_array,
                              DebugMode_bol=False)
    #TestObject_ma.save_filter_values('../../Mapper_Data_Files/filter_files/',
    # 'easygoing_neighborsfilters')

    TestObject_ma.add_mean_properties('RightWrong', RightWrong_npArray)

    TestObject_ma.load_filter_values(
        '../../Mapper_Data_Files/filter_files/Synonyms/',
        'SynonymsAverageFilter')

    TestObject_ma.add_filter_to_graph()
    TestObject_ma.add_labels('Labels', LabelData_npArray)
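    # Assumed continuation (the snippet is truncated here): advance the bin
    # sweep, otherwise the `while BINS_int < 21` loop never terminates.
    BINS_int += 1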
Example #9
    def analyze_tables(self):
        """
        Method used to analyze table structure, compose a simple data structure containing data cells and the
         corresponding headers, and hand it to a Mapper object.

        Principal method of a HtmlTableParser object: it finds headers and data, refines them, and finally joins
         them so they can be mapped by a Mapper object.

        :return: nothing
        """
        # Iterates over tables list
        for html_table in self.tables:

            # Update the analyzed table count
            self.tables_analyzed += 1

            # Instantiating statistics for this table
            self.headers_found_num = 0
            self.rows_extracted_num = 0
            self.data_extracted_num = 0

            # set as a class attribute the current html table
            self.current_html_table = html_table

            # create a Table object to contain data structures and statistics for a table
            tab = Table.Table()
            # set tab.n_rows as a result of count_rows()
            tab.n_rows = self.count_rows()
            # set other table properties, such as table attributes
            tab.table_attributes = html_table.attrib
            # find out the table section using find_table_section()
            tab.table_section = self.find_table_section()
            self.logging.info("Table under section: %s" % tab.table_section)

            # find headers for this table
            self.find_headers(tab)

            # Chose not to use this method: it resolves user-made errors but creates problems with correctly
            #  written tables
            # self.check_miss_subheaders(tab)

            # if headers have been found
            if tab.headers:
                self.logging.info("Headers Found")

                # Refine headers found
                self.refine_headers(tab)

                # Once the headers are refined, start to extract data cells
                self.extract_data(tab)

                # Refine data cells found
                self.refine_data(tab)

                # If data are correctly refined
                if tab.data_refined:
                    # Count data cells and data rows using table.count_data_cells_and_rows()
                    tab.count_data_cells_and_rows()
                    self.logging.info("Rows extracted: %d" % tab.data_refined_rows)
                    self.logging.info("Data extracted for this table: %d" % tab.cells_refined)

                    # update data cells extracted in order to make a final report
                    self.utils.data_extracted += tab.cells_refined
                    self.utils.rows_extracted += tab.data_refined_rows

                    # Create a MAPPER object in order to map data extracted
                    mapper = Mapper.Mapper(self.chapter, self.graph, self.topic, self.resource, tab.data_refined,
                                           'html', self.utils, tab.table_section)

                    # Start the mapping process
                    mapper.map()

                    # Compose headers not mapped for this resource
                    for header in mapper.headers_not_mapped:
                        if header not in self.headers_not_mapped:
                            # Compose a list with the name of the current resource in [0] and the values in [1]
                            support_list = [self.resource, mapper.headers_not_mapped[header]]
                            '''
                            result Eg in self.headers_not_mapped = {'header1': ['resource1',[value0,value1...]], \
                                                                'header2': ['resource2',[value0, value1, value2...]]...}
                            '''
                            self.headers_not_mapped[header] = support_list

                # If no data have been found for this table report this condition with tag E3
                else:
                    self.logging.debug("E3 - UNABLE TO EXTRACT DATA - resource: %s" % self.resource)
                    print("E3 UNABLE TO EXTRACT DATA")
                    # Update the count of tables with this condition (no data found)
                    self.no_data += 1

            # if no headers have been found report this critical condition with the tag E2
            else:
                self.logging.debug(
                    " E2 Unable to find headers - resource: " + str(self.resource))
                print("E2 UNABLE TO FIND HEADERS")
                # Update the count of tables with this condition (no headers found)
                self.no_headers += 1

        # Adding statistics values and errors to the extraction errors, in order to print a final report
        self.utils.tot_tables_analyzed += self.tables_analyzed
        self.utils.headers_errors += self.no_headers
        self.utils.not_resolved_header_errors += self.headers_not_resolved
        self.utils.data_extraction_errors += self.no_data
        exoutFile6.write("#exon match mode: bothSS\n")
        exoutFile6.write("#gnId\tgnId\texId\tensemblGnId\tensemblExId\n")

        exoutFile7.write(
            "#vega exons for SpliceGraph exons, generated by do_gene_associations.py, indir: %s\n"
            % options.indir)
        exoutFile7.write("#exon match mode: singleSS\n")
        exoutFile7.write("#gnId\tgnId\texId\tensemblGnId\tensemblExId\n")

        exoutFile8.write(
            "#vega exons for SpliceGraph exons, generated by do_gene_associations.py, indir: %s\n"
            % options.indir)
        exoutFile8.write("#exon match mode: overlap\n")
        exoutFile8.write("#gnId\tgnId\texId\tensemblGnId\tensemblExId\n")

        mapper = Mapper.Mapper(verbose=1)

        sg = sg_iter.next_sg()
        while (sg):

            print >> sys.stderr, "doing %s (%i of %i)\r" % (
                sg.name, sg_iter.current_index(), sg_iter.number()),

            # ensembl, exon
            (gnIds, exIds) = mapper.map2gene(sg, 'ensembl', 'exon')
            gnInfo = {}
            maxNbEx = 0
            for k in gnIds.iterkeys():
                gnInfo[k] = mapper.getGeneInfo(k, 'ensembl', 'biotype')
                if gnIds[k] > maxNbEx:
                    maxNbEx = gnIds[k]
Example #11
import time  # used by time.clock() below

import mygame
from pygame.locals import *
from Player import *
import Players
import Mapper
import Obj
from sys import exit

mygame.init()
screen = mygame.screen

player = Player()
player.moveto((3, 89))
players = Players.Players(1, [player])

mp = Mapper.Mapper()
mp.load("./data/map/jungle.tmx")
mp.set_viewer(player)
Obj.Obj.mapper = mp

screen.set_mapper(mp)
screen.set_viewer(player)

default_font = "arial"
font = mygame.font.SysFont(default_font, 20)


cntClock = 0
cntFrame = 0
cntNetwork = 0
lastClock = time.clock() * 1000
Example #12
    def test_data_frame_with_invalid_mode(self):
        dictionary = self.test_dict
        test_mapper = Mapper.Mapper(dictionary)

        with self.assertRaises(ValueError):
            test_mapper.create_data_frame('csv')
Example #13
    def test_data_frame(self):
        dictionary = self.test_dict
        test_mapper = Mapper.Mapper(dictionary)
        generated_data_frame = test_mapper.create_data_frame('dict')
        expected_data_frame = pd.DataFrame(self.test_dict)
        assert generated_data_frame.equals(expected_data_frame)
Example #14
    def analyze_tables(self):
        """
        Method used to analyze table structure, compose a simple data structure containing data cells and the
         corresponding headers, and hand it to a Mapper object.

        Principal method of a HtmlTableParser object: it finds headers and data, refines them, and finally joins
         them so they can be mapped by a Mapper object.

        :return: nothing
        """
        # Iterates over tables list
        for html_table in self.tables:

            # Update the analyzed table count
            self.tables_analyzed += 1

            # Instantiating statistics for this table
            self.headers_found_num = 0
            self.rows_extracted_num = 0
            self.data_extracted_num = 0

            # set as a class attribute the current html table
            self.current_html_table = html_table

            # create a Table object to contain data structures and statistics for a table
            tab = Table.Table()
            # set tab.n_rows as a result of count_rows()
            tab.n_rows = self.count_rows()
            # set other table properties, such as table attributes
            tab.table_attributes = html_table.attrib
            # find out the table section using find_table_section()
            tab.table_section = self.find_table_section()

            print "Section found: ", tab.table_section
            self.logging.info("Table under section: %s" % tab.table_section)

            # find headers for this table
            self.find_headers(tab)

            # if headers have been found
            if tab.headers:
                self.logging.info("Headers Found")

                # Refine headers found
                self.refine_headers(tab)

                if self.mapping:
                    # Once the headers are refined, start to extract data cells
                    self.extract_data(tab)

                    # Refine data cells found
                    self.refine_data(tab)

                    # If data are correctly refined
                    if tab.data_refined:
                        # Count data cells and data rows using table.count_data_cells_and_rows()
                        tab.count_data_cells_and_rows()
                        self.logging.info("Rows extracted: %d" %
                                          tab.data_refined_rows)
                        self.logging.info("Data extracted for this table: %d" %
                                          tab.cells_refined)

                        # update data cells extracted in order to make a final report
                        self.utils.data_extracted += tab.cells_refined
                        self.utils.data_extracted_to_map += tab.cells_refined
                        self.utils.rows_extracted += tab.data_refined_rows
                        self.all_tables.append(tab)

                        # If the tables found have to be mapped (HtmlTableParser can be called even by
                        #  pyDomainExplorer), create a Mapper object in order to map the data extracted
                        mapper = Mapper.Mapper(self.chapter, self.graph,
                                               self.topic, self.resource,
                                               tab.data_refined, self.utils,
                                               self.reification_index,
                                               tab.table_section)
                        mapper.map()
                        # update reification index of this resource
                        self.reification_index = mapper.reification_index
                    # If no data have been found for this table report this condition with tag E3
                    else:
                        self.logging.debug(
                            "E3 - UNABLE TO EXTRACT DATA - resource: %s" %
                            self.resource)
                        print("E3 UNABLE TO EXTRACT DATA")
                        # Update the count of tables with this condition (no data found)
                        self.no_data += 1
                else:
                    self.all_tables.append(tab)
            # if no headers have been found report this critical condition with the tag E2
            else:
                self.logging.debug(" E2 Unable to find headers - resource: " +
                                   str(self.resource))
                print("E2 UNABLE TO FIND HEADERS")
                # Update the count of tables with this condition (no headers found)
                self.no_headers += 1

        print "Resource analysis completed \n\n"
        # Adding statistics values and errors to the extraction errors, in order to print a final report
        self.utils.tot_tables_analyzed += self.tables_analyzed
        self.utils.headers_errors += self.no_headers
        self.utils.not_resolved_header_errors += self.headers_not_resolved
        self.utils.data_extraction_errors += self.no_data
Example #15
    @classmethod
    def create_outra_coisa(cls, mongo_uri):
        from .mapper import Mapper
        return cls(mongo_uri, Mapper(), 'outra_coisa')
Example #16
    @classmethod
    def create_stack(cls, mongo_uri, cache_dao, google_dao):
        from .mapper import Mapper
        return cls(mongo_uri, Mapper(), 'stacks')
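Examples #15 and #16 use the same alternative-constructor pattern: a classmethod builds the Mapper dependency and injects it into the object it returns. A minimal self-contained sketch of that wiring; StackDao, its constructor signature, and the stub Mapper are assumptions for illustration, only the classmethod-factory shape mirrors the two examples:

class Mapper(object):
    """Stand-in for the real Mapper dependency."""
    pass

class StackDao(object):
    def __init__(self, mongo_uri, mapper, collection_name):
        self.mongo_uri = mongo_uri
        self.mapper = mapper
        self.collection_name = collection_name

    @classmethod
    def create_stack(cls, mongo_uri):
        # Build the Mapper dependency here so callers need only a URI
        return cls(mongo_uri, Mapper(), 'stacks')

dao = StackDao.create_stack('mongodb://localhost:27017')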
Example #17
    def test_data_frame_with_invalid_csv_file(self):
        test_mapper = Mapper.Mapper('te3st_loans.csv')

        with self.assertRaises(ValueError):
            test_mapper.create_data_frame('csv')
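Taken together, the tests in Examples #12, #13, and #17 pin down the create_data_frame contract: mode 'dict' wraps the stored dictionary in a pandas DataFrame, while mode 'csv' raises ValueError when the input is not a readable CSV file. A hedged sketch of a Mapper that would satisfy them; the attribute names and error messages are assumptions, not the tested library's actual code:

import os
import pandas as pd

class Mapper(object):
    # Sketch only: satisfies the three tests above under assumed naming
    def __init__(self, input_):
        self.input_ = input_

    def create_data_frame(self, mode):
        if mode == 'dict':
            if not isinstance(self.input_, dict):
                raise ValueError("input is not a dictionary")
            return pd.DataFrame(self.input_)
        if mode == 'csv':
            if not isinstance(self.input_, str) or not os.path.isfile(self.input_):
                raise ValueError("input is not a readable CSV file")
            return pd.read_csv(self.input_)
        raise ValueError("unsupported mode: %s" % mode)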
Example #18
                                 help='method for mapping [default: overlap]'),                               \
                     make_option('-p','--inprefix', dest='inprefix', default='SpliceGraphsFiltered_',         \
                                 help='iterate over dirs with this prefix [default: SpliceGraphsFiltered_]'), \
                     make_option('-s','--species', dest='species', default='hsa',                             \
                                 help='species [default: hsa]'),                                              \
                     ])
(options, args) = opts.parse_args()

if not os.path.isdir(options.inDir):
    print >> sys.stderr, "ERROR: input directory not found.\n"
    opts.print_help(sys.stderr)
    sys.exit(0)

else:
    mapper = Mapper.Mapper(
        configFile='/r100/burge/xiao/Tools/splice_graphs/configuration.txt',
        verbose=1,
        species=options.species)
    #mapper  = Mapper.Mapper(verbose=1)

    inDirs = [
        d for d in os.listdir(options.inDir) if d.startswith(options.inprefix)
    ]

    for inDir in inDirs:
        chr = inDir.split(options.inprefix)[-1]
        mapper.mapAllInDir(indir="%s/%s" % (options.inDir, inDir),
                           outfile="%s/Mappings_%s_%s.txt" %
                           (options.inDir, options.type, chr),
                           type=options.type,
                           method=options.method)