def get_tecs_for_Meredith2002_fig11():
    """ Returns the expected TECs for Figure 11 of [Meredith2002].

        Each TEC is built from a (pattern, translators) pair of raw
        coordinate lists; all pattern-index lists are empty. """

    specs = [
        ([[1, 3]],
         [[0, -2], [0, 0], [1, -2], [1, -1], [1, 0], [2, -1]]),
        ([[2, 1], [2, 2]],
         [[0, 0], [0, 1]]),
        ([[1, 1], [2, 1]],
         [[0, 0], [0, 2], [1, 1]]),
        ([[1, 1], [1, 3], [2, 2]],
         [[0, 0], [1, 0]]),
    ]

    return [TEC([Vector(point) for point in pattern], [],
                [Vector(translator) for translator in translators])
            for pattern, translators in specs]
def get_conj(tec, sorted_dataset):
    """ Computes the conjugate of a TEC as defined in equations
        7-9 of [Meredith2013].

        The conjugate's pattern is the first pattern point translated by
        every non-zero translator; its translators are the differences of
        the original pattern's points from its first point. """

    first_point = tec.get_pattern()[0]
    first_index = tec.get_pattern_indices()[0]

    conj_pattern = [first_point]
    conj_indices = [first_index]

    for translator in tec.get_translators():
        if translator.is_zero():
            continue

        translated = first_point + translator
        conj_pattern.append(translated)

        # Locate the translated point in the sorted dataset, scanning
        # backwards from the first point's index when the translated point
        # precedes it, forwards otherwise.
        if translated < first_point:
            search_indices = range(first_index, -1, -1)
        else:
            search_indices = range(first_index, len(sorted_dataset))

        for i in search_indices:
            if sorted_dataset[i] == translated:
                conj_indices.append(i)
                break

    # The zero vector is always a translator of the conjugate.
    conj_translators = [Vector.zero_vector(first_point.dimensionality())]
    for point in tec.get_pattern():
        diff = point - first_point
        if not diff.is_zero():
            conj_translators.append(diff)

    return TEC(conj_pattern, conj_indices, conj_translators)
def compute_encoding(tecs, d):
    """ Implements algorithm in Figure 7 of [Meredith2013].

        Greedily selects TECs that cover more new points than their
        encoding size; any points of d left uncovered are collected
        into a final translator-less TEC. """

    selected = []
    covered = []

    for candidate in tecs:
        coverage = candidate.coverage()

        # Remove one occurrence of each already-covered point from a deep
        # copy of the candidate's coverage.
        new_points = deepcopy(coverage)
        for point in covered:
            if point in new_points:
                new_points.remove(point)

        encoding_size = (len(candidate.get_pattern())
                         + len(candidate.get_translators()) - 1)
        if len(new_points) > encoding_size:
            selected.append(candidate)
            covered += coverage

            # Stop as soon as the whole dataset is covered.
            if all(point in covered for point in d):
                break

    remaining = set(d) - set(covered)
    if remaining:
        selected.append(TEC(list(remaining), [], []))

    return selected
 def test_bounding_box(self):
     """Bounding-box compactness of a known TEC in the test data is 4/9."""
     sorted_data = Dataset.sort_ascending(
         Dataset('unittest_data/heuristics_test.csv'))
     tec = TEC(
         [Vector([1, 3]), Vector([3, 5]), Vector([4, 1]), Vector([5, 3])],
         [0, 3, 4, 6],
         [Vector([0, 0])])
     self.assertEqual(
         heuristics.bounding_box_compactness(tec, sorted_data), 4 / 9)
def get_tec_for_mtp(pattern_indices, w, p):
    """ Find the TEC for the given MTP. This function is not described in
        [Meredith2013] but is said to use the logic of SIATEC, so this function
        uses the logic of finding translators for the pattern used in SIATEC. """

    translators = find_translators(pattern_indices, w, len(p))
    pattern = [p[index] for index in pattern_indices]
    return TEC(pattern, pattern_indices, translators)
def siatechf(d, min_cr):
    """ SIATECH that only returns TECs that have compression ratio of at least min_cr. """
    d = Dataset.sort_ascending(d)

    # Group (start index, end index) pairs by the difference vector between
    # the corresponding points.
    mtp_map = {}
    for i in range(len(d)):
        for j in range(i + 1, len(d)):
            mtp_map.setdefault(d[j] - d[i], []).append((i, j))

    tecs = []
    handled_patterns = set()

    for index_pairs in mtp_map.values():
        pattern_indices = [pair[0] for pair in index_pairs]
        pattern = [d[index] for index in pattern_indices]
        vectorized_pattern = Pattern(vec(pattern))

        # Each translationally distinct pattern is handled only once.
        if vectorized_pattern in handled_patterns:
            continue
        handled_patterns.add(vectorized_pattern)

        # Cheap upper bound first: skip the expensive translator search when
        # the pattern cannot possibly reach min_cr.
        if cr_upper_bound(pattern, mtp_map, d) < min_cr:
            continue

        if len(pattern) == 1:
            # A single point translates to every point of the dataset.
            translators = [point - pattern[0] for point in d]
        else:
            translators = find_translators_h(pattern, vectorized_pattern,
                                             mtp_map, d)

        tec = TEC(pattern, pattern_indices, translators)
        if heuristics.compression_ratio(tec) >= min_cr:
            tecs.append(tec)

    return tecs
def siatech(d):
    """ Computes the TECs of dataset d using the SIATECH algorithm. """
    d = Dataset.sort_ascending(d)

    # Group (start index, end index) pairs by the difference vector between
    # the corresponding points.
    mtp_map = {}
    for i in range(len(d)):
        for j in range(i + 1, len(d)):
            mtp_map.setdefault(d[j] - d[i], []).append((i, j))

    tecs = []
    handled_patterns = set()

    for index_pairs in mtp_map.values():
        pattern_indices = [pair[0] for pair in index_pairs]
        pattern = [d[index] for index in pattern_indices]
        vectorized_pattern = Pattern(vec(pattern))

        # Each translationally distinct pattern produces only one TEC.
        if vectorized_pattern in handled_patterns:
            continue
        handled_patterns.add(vectorized_pattern)

        if len(pattern) == 1:
            # A single point translates to every point of the dataset.
            translators = [point - pattern[0] for point in d]
        else:
            translators = find_translators_h(pattern, vectorized_pattern,
                                             mtp_map, d)

        tecs.append(TEC(pattern, pattern_indices, translators))

    return tecs
def siatech_compress(d):
    """ Implements SIATECCompress as defined in [Meredith2016] but uses SIATECH. """

    all_tecs = siatech(d)
    sort_tecs_by_quality(all_tecs, d)

    covered = set()
    chosen = []
    for tec in all_tecs:
        # Keep the TEC only if it covers more new points than its encoding
        # size (pattern points plus translators).
        newly_covered = tec.coverage() - covered
        encoding_size = len(tec.get_pattern()) + len(tec.get_translators())
        if len(newly_covered) > encoding_size:
            chosen.append(tec)
            covered |= tec.coverage()
            if len(covered) == len(d):
                break

    # Points never covered are emitted as a final translator-less TEC.
    leftover = set(d) - covered
    if leftover:
        chosen.append(TEC(list(leftover), [], []))

    return chosen
def compute_tecs(y, v, w, d):
    """ Implements algorithm of fig. 23 in [Meredith2002].

        y, v and w are the index tables produced by the earlier stages of
        the SIATEC algorithm (their exact construction is outside this
        block -- presumably v pairs each dataset point index with its
        difference-vector group; confirm against the caller).
        d is the sorted dataset. Returns the list of computed TECs. """

    i = 0
    tecs = []

    while i < len(y):
        # Collect the pattern: walk forward through v from position y[i][0],
        # taking every entry whose first component matches v[y[i][0]][0],
        # and record each entry's second component as a pattern index.
        j = y[i][0]
        pattern_indices = []
        while j < len(v) and v[j][0] == v[y[i][0]][0]:
            pattern_indices.append(v[j][1])
            j += 1

        pattern = collect_pattern(pattern_indices, d)
        translators = find_translators(pattern_indices, w, len(d))
        tecs.append(TEC(pattern, pattern_indices, translators))

        # Skip consecutive entries of y that share the same second
        # component, so each such run produces only one TEC.
        i += 1
        while i < len(y) and y[i][1] == y[i - 1][1]:
            i += 1

    return tecs
 def test_pattern_volume(self):
     """Volume of a 3-D pattern spanning a 3x3x3 bounding box is 27."""
     tec = TEC(
         [Vector([2, -1, 0]), Vector([-1, 2, -1]), Vector([0, 1, 2])],
         [0, 1, 2],
         [Vector([0, 0, 0])])
     self.assertEqual(heuristics.pattern_volume(tec), 27)
 def test_pattern_width(self):
     """Width of a pattern whose first coordinates span 1..5 is 4."""
     tec = TEC(
         [Vector([1, 3, 4]), Vector([1, 1, 5]), Vector([5, 1, 2])],
         [0, 1, 2],
         [Vector([0, 0, 0])])
     self.assertEqual(heuristics.pattern_width(tec), 4)
Example #12
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from tec import TEC
import pickle
import copy

# Load the reference TEC input data and compute the forward current density
# for each case.  The pickle is opened in binary mode with a context manager:
# the original opened it in text mode ("r"), which fails under Python 3
# because pickle requires a binary stream, and it never closed the handle.
# SECURITY NOTE(review): pickle.load executes arbitrary code from the file;
# only use it on trusted data.
with open("test/TEC_temporary_STANDARD.dat", "rb") as pickle_file:
    std = pickle.load(pickle_file)

output_current_array = []
motives = []

for data in std:
    tec = TEC(data)

    # Pair the stored output current density (scaled by 1e4 -- presumably a
    # unit conversion such as A/cm^2 to A/m^2; confirm against the data
    # source) with the freshly computed forward current density.
    output_current_array.append(
        [1e4 * data["output_current_density"],
         tec.calc_forward_current_density()])

#input_params = copy.deepcopy(std[0])
#input_params["Collector"]["voltage"] = 0.5

#tec = TEC_Langmuir(input_params)
#print tec["motive_data"].keys()