def _init_imgs_data(self):
    # Wrap every image in the dataset (index + label) into a Subset.
    result = Subset()
    dataset_count = len(self._dataset)
    for img_idx in range(dataset_count):
        current_data = ImageDataRep(img_idx, self._labels[img_idx])
        result.add_element(current_data)
    return result
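The method above relies only on `Subset()` plus `add_element` and on an `ImageDataRep` record, neither of which is shown here. A minimal sketch of stubs that would satisfy those calls could look like this; both classes are assumptions for illustration, not the original project's definitions.

class ImageDataRep:
    # Hypothetical stand-in pairing an image index with its label.
    def __init__(self, index, label):
        self.index = index
        self.label = label

class Subset:
    # Assumed minimal container exposing only the calls used above.
    def __init__(self):
        self._elements = []

    def add_element(self, element):
        self._elements.append(element)

    def __len__(self):
        return len(self._elements)

With stubs like these, `_init_imgs_data` can be exercised on any object that carries `_dataset` and `_labels` of equal length.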
Example #2
import sys

def main():
    # alp - alpha: rotating start index for distributing residual subsets
    # n, S, Subset, evalFunction and printResult come from the enclosing module
    alp = 1
    ID = int(sys.argv[1])
    subsets = Subset(n)
    for s in S:
        print('\nProcessing {0}-element subsets...'.format(s))

        # number of subsets of size s
        Ns = subsets.subsetSize(s)
        if Ns >= n:
            # each of the n workers handles an equal share of the subsets
            myShare = subsets.subsetSize(s) // n
            index = ID * myShare
            toPrint = (myShare, index)
            print('Start from {1} and process upper {0} elts'.format(*toPrint))
            result = evalFunction(index, myShare)
            printResult(result, "Normal")
            NPrim = Ns - (n * myShare)
        else:
            NPrim = Ns

        if NPrim > 0:
            # leftover subsets are assigned round-robin starting at alp
            APrim = list(range(alp, alp + NPrim)) if alp + NPrim - 1 <= n else \
                    list(range(alp, n + 1)) + list(range(1, alp + NPrim - n))
            print("Residual indexes: \n" + str(APrim))
            if ID in APrim:
                printResult(evalFunction(APrim.index(ID), 1), "Residual")
            alp += NPrim if alp + NPrim <= n else NPrim - n
Example #3
def makeSet(self, subsets, vetor):
    # union-find make-set: every element of vetor starts as its own singleton subset
    # (pai = parent, numero = number)
    for i in range(len(vetor)):
        subsets[vetor[i]] = Subset()
        subsets[vetor[i]].pai = vetor[i]
        subsets[vetor[i]].rank = vetor[i]
        subsets[vetor[i]].numero = vetor[i]
        print(i, vetor[i])
    return subsets
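makeSet only initializes the structure; the matching find and union operations are not part of this example. The sketch below is an assumption for illustration, reusing the same `pai` (parent) and `rank` attributes to show how the singleton subsets created above would typically be queried and merged.

def find(subsets, x):
    # Follow parent (pai) pointers to the root, compressing the path as we go.
    if subsets[x].pai != x:
        subsets[x].pai = find(subsets, subsets[x].pai)
    return subsets[x].pai

def union(subsets, a, b):
    # Attach the root of the lower-rank tree under the root of the higher-rank tree.
    ra, rb = find(subsets, a), find(subsets, b)
    if ra == rb:
        return
    if subsets[ra].rank < subsets[rb].rank:
        ra, rb = rb, ra
    subsets[rb].pai = ra
    if subsets[ra].rank == subsets[rb].rank:
        subsets[ra].rank += 1

Path compression in find plus union by rank is the usual reason a union-find Subset stores both a parent pointer and a rank.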
Example #4
    def create(self, dataset, nhyperpipes, percentageatt, percentageinst):
        # Build an ensemble of nhyperpipes subsets drawn from the dataset.
        s = Subset()
        print(dataset, nhyperpipes, percentageatt, percentageinst)
        ensemble = []

        for i in range(nhyperpipes):
            ensemble.append(
                s.createsubset(dataset, percentageatt, percentageinst))
        return ensemble
Example #5
def func_base(percent):
    # source_dbc, destination_dbc, temp_schema and all_tables are module-level globals
    database = DatabaseCreator(source_dbc, destination_dbc, temp_schema, False)

    database.teardown()
    database.create()
    database.validate_database_create()

    s = Subset(source_dbc, destination_dbc, temp_schema, all_tables)

    s.run_downward(percent)

    database.add_constraints()
    database.validate_constraints()

    norm = SubsetResultNorm(source_dbc, destination_dbc).norm()

    print(percent, norm)
    return norm
Example #6
def main():
    # twelve numbered backends shared by every client
    backends = []
    for i in range(12):
        backends.append(i)

    # each client id gets its own Subset of the backends
    result = {}
    subset_size = 3
    for client_id in range(10):
        result[client_id] = Subset(backends=backends,
                                   client_id=client_id,
                                   subset_size=subset_size)

    for client_id, backend in result.items():
        print("{}: {}".format(client_id, backend))
Example #7
    def reassign_invariant_sites(self, subsets):

        #TODO add a skip:
        #if(len(subsets)==1):
        #   return(subsets)

        # get entropies for whole alignment for this subset
        onesub = subset_ops.merge_subsets(subsets)
        entropies = entropy.sitewise_entropies(
            SubsetAlignment(self.alignment, onesub))

        # find nearest site for each invariant site
        # replacements is a dict of: key: invariant col; value: replacement col,
        # e.g.
        # {512: 513, 514: 513, 515: 513, 516: 517}
        replacements = entropy.get_replacement_sites(entropies, onesub.columns)

        # now make a dict of the current subsets: key: site; value: subset
        sch_dict = {}
        for i, sub in enumerate(subsets):
            for site in sub.columns:
                sch_dict[site] = i

        # then reassign the sites as necessary based on replacements
        for r in replacements:
            sch_dict[r] = sch_dict[replacements[r]]

        # now build subsets according to the new sites
        sub_dict = {}  # this gives us the subsets to build
        for k, v in sch_dict.items():
            sub_dict.setdefault(v, []).append(k)

        new_subsets = []
        for s in sub_dict:
            n = Subset(the_config, set(sub_dict[s]))
            new_subsets.append(n)

        return new_subsets
Example #8
def test_subset():
    ITEMS = ['77']
    assert Subset(2015, 'test1').include(ITEMS).inc == ITEMS
    assert Subset(2015, 'test1').exclude(ITEMS).exc == ITEMS
Example #9
# -*- coding: utf-8 -*-
"""
"""

from subset import Subset

# bank projects
Subset(2015, 'temp').to_csv(force=True)

# Exclusions 1 (values do not change between 2015 and 2013):
ex1 = [
    '7733574312', '7703200101', '7726725828', '7116503278', '7720531939',
    '7802084784', '5261047820', '7717716979', '7707635583', '7707635618',
    '7707631814', '7707631780', '7707631807', '7707631797', '7707669230',
    '7707627840', '7710662717', '7707622136', '7707620724', '7707627776',
    '7710658157', '5906001179'
]
# Exclusions 2 (values not present in 2013, unit eq 385):
ex2 = [
    '6950089368', '7611020211', '7610052884', '8905049712', '2815015806',
    '6213008693', '1121012228', '1118005125', '1323126443', '7325134560',
    '6518008962', '7206025330', '2130035819', '7718000240', '5709000962',
    '2304030498', '1648030286', '3702549950', '2815014640', '2315097141',
    '3905069565', '2815006255', '6901077218', '4027094050', '6950086060',
    '7716239258', '7303018465', '7206045664', '1637001364', '7702845690',
    '7725261651', '6164029105', '6168079234', '6168084403', '7717795579',
    '7717536373', '7715801523', '4205301359', '4813012497', '5014010858',
    '4501092289', '4253006177', '5001101970', '5047117229', '3327846679',
    '2466134701', '2463256170', '7722347452', '7721160109', '7717045174',
    '7719592060', '6452117213', '6732039141', '5014011210', '1841013578',
    '2805005661', '6027165441', '2805003992', '7701681463', '3123322170',
Example #10
                           config_reader.get_source_db_connection_info())
    destination_dbc = DbConnect(
        db_type, config_reader.get_destination_db_connection_info())

    database = db_creator(db_type, source_dbc, destination_dbc)
    database.teardown()
    database.create()

    # Get list of tables to operate on
    db_helper = database_helper.get_specific_helper()
    all_tables = db_helper.list_all_tables(source_dbc)
    all_tables = [
        x for x in all_tables if x not in config_reader.get_excluded_tables()
    ]

    subsetter = Subset(source_dbc, destination_dbc, all_tables)

    try:
        subsetter.prep_temp_dbs()
        subsetter.run_middle_out()

        if "--no-constraints" not in sys.argv:
            database.add_constraints()

        print("Beginning post subset SQL calls")
        start_time = time.time()
        for idx, sql in enumerate(config_reader.get_post_subset_sql()):
            print_progress(sql, idx + 1,
                           len(config_reader.get_post_subset_sql()))
            db_helper.run_query(sql, destination_dbc.get_db_connection())
        print("Completed post subset SQL calls in {}s".format(time.time() -
Example #11
    if "--stdin" in sys.argv:
        config_reader.initialize(sys.stdin)
    else:
        config_reader.initialize()

    source_dbc = DbConnect(config_reader.get_source_db_connection_info())
    destination_dbc = DbConnect(
        config_reader.get_destination_db_connection_info())

    temp_schema = 'subset_' + str(uuid.uuid4()).replace('-', '')

    database = DatabaseCreator(source_dbc, destination_dbc, temp_schema, False)
    database.teardown()
    database.create()
    database.validate_database_create()

    # Get list of tables to operate on
    all_tables = list_all_tables(source_dbc.get_db_connection())
    all_tables = [
        x for x in all_tables if x not in config_reader.get_excluded_tables()
    ]

    s = Subset(source_dbc, destination_dbc, temp_schema, all_tables)
    s.run_middle_out()

    if "--no-constraints" not in sys.argv:
        database.add_constraints()
        database.validate_constraints()

    result_tabulator.tabulate(source_dbc, destination_dbc, all_tables)