示例#1
0
 def getScopFamily(self, id, chainid=None, resnum=None):
     """Return the SCOP families for a 4-character PDB id as a
     ';'-joined string of unique, sorted family names.

     Any other id length yields an empty string.  NOTE: the
     chainid/resnum filters were no-op placeholders in the original
     code and are still not applied.
     """
     if len(id) != 4:
         return ''
     found = []
     for entry in self.entriesByPdbid.get(id, []):
         # TODO: filtering by chainid / resnum is not implemented;
         # every entry's family is collected regardless.
         found.append(entry.scopfamily)
     return ';'.join(sort(uniq(found)))
示例#2
0
def rank_succession(population, k):
    """Roulette-wheel style succession: draw k entries from *population*.

    Each (individual, algoscore) pair contributes its fitness to the
    total; a uniform pick in [0, total) selects the first entry whose
    running fitness sum exceeds the pick.  Entries are taken from the
    population ordered by the project sort() helper.
    """
    total_fitness = sum(score.to_fitness() for _, score in population)
    ordered = sort(population)

    chosen = list()
    for _ in xrange(k):
        threshold = random.uniform(0, total_fitness)
        running = 0
        for candidate in ordered:
            running += _tuple_to_score(candidate)
            if running > threshold:
                chosen.append(candidate)
                break

    return chosen
示例#3
0
def splitGoodPolygonByZonesOldVersion(goodPolygon, polygon, zones):
    """Split *goodPolygon* by the five *zones* derived from a 12-vertex
    *polygon*, returning a list of (vertexList, zoneIndex) pairs for
    every zone that ends up with at least 3 vertices.

    NOTE(review): relies on project helpers (GeneralizedSegment,
    segmentIntersection, isInStarPolygon, deleteThreePointsOnOneLine,
    utils.sort) whose exact contracts are not visible in this file.
    """
    assert (len(polygon) == 12 and len(zones) == 5)
    newPolygon = []
    rays = []

    # Rays spanned by polygon edges 3..6; the (False, True) flags make
    # each one a half-infinite generalized segment.
    for i in range(3, 7):
        rays.append(GeneralizedSegment(polygon[i], polygon[i - 1], False,
                                       True))
    #print('LEN: ', len(goodPolygon))
    # Walk every edge of goodPolygon (wrapping at the end) and insert
    # the edge's intersections with the rays, ordered along the edge.
    for i in range(0, len(goodPolygon)):
        p = goodPolygon[i]
        np = goodPolygon[i + 1 if (i + 1 < len(goodPolygon)) else 0]
        seg = GeneralizedSegment(p, np, False, False)
        ps = [p]
        for j in range(0, len(rays)):
            inter = segmentIntersection(seg, rays[j])
            if (inter is None):
                continue
            ps.append(inter)

        # Order inserted points by Manhattan distance from the edge start
        # so they appear in traversal order along the edge.
        ps = utils.sort(ps, ByManhattenDistanceComparator(p))
        #print('PS: ', ps)
        newPolygon.extend(ps)

    # Bucket the densified vertex list per zone membership.
    anss = []
    for i in range(0, 5):
        anss.append([])
    for i in range(0, len(newPolygon)):
        for j in range(0, 5):
            if (isInStarPolygon(newPolygon[i], zones[j])):
                anss[j].append(newPolygon[i])
    anssp = []

    # Clean each zone's vertex list of collinear runs; keep only zones
    # that still form a polygon (>= 3 vertices).
    for i in range(0, 5):
        #print('WorkWith12Gon: splitGoodPolygonsByZones:', i)
        #for j in range(len(anss[i])):
        #    print(j, anss[i][j])
        anss[i] = deleteThreePointsOnOneLine(anss[i])
        #print('WorkWith12Gon: splitGoodPolygonsByZones after:', i)
        #for j in range(len(anss[i])):
        #    print(j, anss[i][j])
        checkIsNotThreeOnOneLine(anss[i])
        if (len(anss[i]) >= 3):
            anssp.append((anss[i], i))

    return anssp
示例#4
0
def createTable(gameteA, gameteB):
    """Print a Punnett-square style table crossing gameteA x gameteB
    and return the cells as a list of rows (Python 2 print syntax).

    Each cell is sort(x + y) -- NOTE(review): sort here appears to be a
    project helper normalising the combined allele string; confirm its
    behaviour against utils.
    """
    results = []
    i = 0
    # NOTE(review): j is assigned but never used below.
    j = 0
    # Header row: one column label per gamete in gameteB.
    print "       ",
    for item in gameteB:
        print "  {}  ".format(item),
    print
    print "--------------------------------------"
    # One table row per gamete in gameteA.
    for x in gameteA:
        results.append([])
        print " {} | ".format(x),
        for y in gameteB:
            tmp = sort(x + y)
            results[i].append(tmp)
            print " {} ".format(tmp),
        i += 1
        print
    return results
示例#5
0
def isSegmentInPolygon(p1, p2, polygonB, T=None):
    """Return True when the whole segment p1-p2 lies inside polygonB.

    Strategy: both endpoints must be inside; then the segment is cut at
    every intersection with polygonB's edges and the midpoint of each
    resulting piece is tested for membership.
    T is the coordinate type used to build the midpoint divisor
    (defaults to the type of p1's x coordinate).
    """
    if T is None:
        T = type(p1.GetX())

    if not (isPointInPolygon(p1, polygonB) and isPointInPolygon(p2, polygonB)):
        return False
    ps = [p1, p2]
    # Collect intersections with every polygon edge (i-1 wraps to the
    # last vertex for i == 0).
    for i in range(len(polygonB)):
        p = intersectSegments(p1, p2, polygonB[i], polygonB[i - 1])
        if not (p is None):
            ps.append(p)

    # Order cut points along the segment, then deduplicate.
    ps = utils.sort(ps, ByPolarAngleAroundCenterComparator(p1))
    ps = utils.unique(ps)
    # NOTE(review): at i == 0 this pairs ps[0] with ps[-1], i.e. the two
    # extreme points of the segment -- presumably intentional since
    # their midpoint also lies on p1-p2; confirm.
    for i in range(len(ps)):
        #print(ps[i].GetX(), ps[i].GetY(), ps[i-1].GetX(), ps[i-1].GetY())
        if not isPointInPolygon((ps[i] + ps[i - 1]) / T(2), polygonB):
            #print(i)
            return False
    return True
示例#6
0
def list_users(message):
    """Reply to *message* with a formatted listing of all users,
    ordered by the project sort() helper.

    Any failure is reported back to the chat instead of propagating.
    """
    try:
        users = session.query(User).all()
        msg = u"".join([user_template.format(
            user.id, user.fullname, user.age, user.gender.value, user.city, user.country) for user in sort(users)])
        bot.reply_to(message, msg)

    except Exception as e:
        # BUG FIX: Exception.message was removed in Python 3 (and was
        # unreliable even in 2.x); str(e) works for every exception type.
        bot.reply_to(message, str(e))
示例#7
0
import utils
# TODO: Use the same input and compare execution time

@utils.measure_time
def bubble_sort(input):
  """Sort *input* in place with bubble sort and return it.

  Returns None for a None input.  BUG FIX: the original printed a
  message for None but then fell through to len(None) and crashed;
  it also compared with '== None' instead of 'is None'.
  """
  # Precondition check.
  if input is None:
    print("You must provide a list.")
    return None
  if len(input) <= 1:
    return input

  swapped = True
  while swapped:
    swapped = False  # A full pass without swaps means we are done.
    for i in range(0, len(input) - 1):
      if input[i] > input[i + 1]:
        # Swap adjacent out-of-order elements.
        input[i], input[i + 1] = input[i + 1], input[i]
        swapped = True
  return input


if __name__ == '__main__':
  # utils.sort presumably feeds benchmark input to the given sort
  # function (it is decorated with utils.measure_time) -- TODO confirm.
  utils.sort(bubble_sort)
示例#8
0
parser.add_argument(
    "path",
    default=PATH,
    type=str,
    help="path to activation values .hdf5 file",
    nargs="?",
)
parser.add_argument(
    "title", default=TITLE, type=str, help="title of the image", nargs="?"
)
args = parser.parse_args()

if __name__ == "__main__":
    f = h5py.File(args.path, "r")
    variance = list()
    keys = sort([x for x in f.keys() if "Pretrain" in x])
    pretrain = len(keys)
    keys += sort([x for x in f.keys() if "Iteration" in x])

    for i in keys:
        variance.append(np.var(f[i], axis=1))
    variance = np.array(variance).T

    for i in range(len(variance)):
        ax = sns.lineplot(
            x=range(len(variance[0])), y=variance[i], label="Layer {}".format(i + 1)
        )

    x_pos = (pretrain - min(ax.get_xlim())) / (max(ax.get_xlim()) - min(ax.get_xlim()))

    if pretrain:
示例#9
0
 def get_modus(self):
     """Return the first entry of the sorted percentage sums.

     NOTE(review): sort() is a project helper operating on dict items;
     whether 'first' means lowest key or highest percentage depends on
     its ordering -- confirm against the utils implementation.
     """
     return sort(self.detector.percentage_sum.items())[0]
示例#10
0
    def find_updates(self, before, after):
        """Derive the update statements that transform the state
        described by *before* into the state described by *after*.

        Returns the list of (tiled) update expressions, or None when
        the invariant transition is rejected: either a part computed in
        *before* does not appear in *after*, or reaching *after* would
        require undoing work already done.

        NOTE(review): semantics of replace/replace_all/matchq/tile_expr
        and the Wrap* node types are project-defined and not visible in
        this file.
        """
        # If a part is (partially) computed in the before and
        #   does not appear in the after or
        # going from before to after requires undoing some computation
        # it is potentially unstable, and more expensive: ignore
        try:
            before_finputs = self.express_in_terms_of_input(before)
            after_finputs = self.express_in_terms_of_input(after)
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            # [TODO] In LU's variant 5, parts of A appear as lhs's
            return None
        #
        dict_bef = dict([(str(u.get_children()[0]), u)
                         for u in before_finputs])
        dict_aft = dict([(str(u.get_children()[0]), u) for u in after_finputs])
        same = []
        ignore = False
        for k, v in dict_bef.items():
            if k in dict_aft and matchq(v, dict_aft[k]):
                same.extend(v.children[0].children)
            if k not in dict_aft:
                ignore = True
                reason = "%s not in %s" % (k, dict_aft.keys())
                break
            else:
                rules = self.expr_to_rule_rhs_lhs([v])
                rules = list(itertools.chain(*rules))
                expr_copy = copy.deepcopy(dict_aft[k])
                t = replace(expr_copy, rules)
                #if v == replace( expr_copy, rules ):
                if dict_aft[k] == t:
                    ignore = True
                    reason = "%s would require undoing job" % k
                    break
        if ignore:
            print("[INFO] Skipping invariant: %s" % reason)
            return None
        #
        # Wrap outputs for before and after
        WrapBefOut = WrapOutBef
        lhss = []
        for u in before:
            lhss.extend(u.children[0])
            u.children[0] = NList([WrapBefOut(l) for l in u.children[0]])
        for u in before:
            u.children[1] = replace(
                u.children[1],
                [RewriteRule(l, Replacement(WrapBefOut(l))) for l in lhss])
        #
        lhss = []
        for u in after:
            lhss.extend(u.children[0])
            u.children[0] = NList([WrapOutAft(l) for l in u.children[0]])
        wrap_rules_after = \
                [
                    RewriteRule(l, Replacement(WrapBefOut(l))) if l in same else
                    RewriteRule(l, Replacement(WrapOutAft(l))) for l in lhss
                ]
        for u in after:
            u.children[1] = replace(u.children[1], wrap_rules_after)
        # replace before in before
        wrap_rules_before = []
        for u in before:
            lhs, rhs = u.get_children()
            #if len(lhs.children) > 1:
            #wrap_rules_before.append([])
            #continue
            rules = self.expr_to_rule_rhs_lhs([u])
            wrap_rules_before.append(list(itertools.chain(*rules)))
        #
        # Cross-apply each before-rule set to every other rule's pattern;
        # keep only patterns that actually changed.
        new_rules = []
        for i, rules in enumerate(wrap_rules_before):
            new_rules.append([])
            for rule in rules:
                new_r = copy.deepcopy(rule)
                new_r.pattern = replace_all(
                    new_r.pattern,
                    list(
                        itertools.chain.from_iterable(wrap_rules_before[:i] +
                                                      wrap_rules_before[i +
                                                                        1:])))
                if new_r.pattern != rule.pattern:
                    new_rules[-1].append(new_r)
        for r1, r2 in zip(new_rules, wrap_rules_before):
            r2.extend(r1)
        #
        # Rewrite after's rhs with the before-rules until a fixed point.
        wrap_rules_before = list(itertools.chain(*wrap_rules_before))
        done = False
        while not done:
            after_top = [copy.deepcopy(u) for u in after]
            for i, u in enumerate(after):
                _, rhs = u.get_children()
                u.children[1] = simplify(
                    to_canonical(
                        replace_all(copy.deepcopy(rhs), wrap_rules_before)))
            done = True
            for top, bot in zip(after_top, after):
                if top != bot:
                    done = False
                    break
        # replace after in after
        done = False
        while not done:
            # replace after in after
            wrap_rules_after = []
            for u in after:
                lhs, rhs = u.get_children()
                #if len(lhs.children) > 1:
                #wrap_rules_after.append([])
                #continue
                rules = self.expr_to_rule_rhs_lhs([u])
                wrap_rules_after.append(list(itertools.chain(*rules)))
            #
            after_top = [copy.deepcopy(u) for u in after]
            for i, u in enumerate(after):
                _, rhs = u.get_children()
                rules = list(
                    itertools.chain.from_iterable(wrap_rules_after[:i] +
                                                  wrap_rules_after[i + 1:]))
                u.children[1] = simplify(
                    to_canonical(replace_all(copy.deepcopy(rhs), rules)))
            done = True
            for top, bot in zip(after_top, after):
                if top != bot:
                    done = False
                    break
        # [TODO] Multiple lhss, won't work
        # Drop statements that merely copy a before-output to an
        # after-output of the same operand (no real update).
        updates = []
        for u in after:
            lhs, rhs = u.get_children()
            if len(lhs.children) == 1:
                lhs = lhs.children[0]  # NList[op] -> op
                if isinstance(rhs, WrapBefOut) and isinstance(lhs, WrapOutAft) and \
                        matchq(lhs.children[0], rhs.children[0]):
                    continue
            elif not isinstance(rhs,
                                NList):  # multiple outputs/predicate in rhs,
                # but not complete (otherwise it would be NList)
                pass
            else:
                to_skip = True
                for l, r in zip(lhs.children, rhs.children):
                    if not( isinstance(r, WrapBefOut) and isinstance(l, WrapOutAft) and \
                            matchq(l.children[0], r.children[0]) ):
                        to_skip = False
                        break
                if to_skip:
                    continue
            updates.append(u)
        #
        tiled_updates = []
        for u in updates:
            print("*   ", u)
            tilings = list(tile_expr(u))
            if len(tilings) > 1:
                print("[WARNING] Multiple (%d) tilings for expression %s" %
                      (len(tilings), u))
                print("          Discarding all but one")
            tiled_updates.extend(tilings[0])
        tiled_updates = sort(tiled_updates)
        print("* Tiled update")
        for t in tiled_updates:
            print("*   ", t)

        # Drop WrapOutBef's
        # Drop WrapOutAft's
        s = PatternDot("s")
        updates = []
        for u in tiled_updates:
            u = replace_all(
                u, [RewriteRule(WrapOutAft(s), Replacement(lambda d: d["s"]))])
            u = replace_all(
                u, [RewriteRule(WrapOutBef(s), Replacement(lambda d: d["s"]))])
            updates.append(u)

        return updates
            f"You must create directory for {tol['name']} and populate it with data.yaml, config.yaml and renderer.py files."
        )
        continue

    data = parse_yaml(datafile / "data.yaml")
    config_local = parse_yaml(datafile / "config.yaml")

    # Section description
    description = config_local.get("description", None)
    if description is not None:
        f.write(p(description))
        newline(f)

    # Sort content of section
    sort_key = config_local.get("sort_key", None)
    data = sort(data, sort_key, config_local.get("sort_reverse", False))

    exec(f"from {datafile}.renderer import renderer")

    try:
        exec(f"from {datafile}.renderer import renderer_subdir")
        # e.g. content of Papers / README.md
        fp_sub2 = open(str(Path(tol["name"]) / "README.md"), "w")
        fp_sub2.write(h1(tol["name"]))
        fp_sub2.write(
            a(["Back to awesome edge machine learning", config["url"]]))
        newline(fp_sub2, iter=2)
    except:
        pass

    if not isinstance(data, list):
示例#12
0
    def fulltext(self):
        """Get errors from todays fulltext logs and generate a list for each
        type of error of corresponding bibcodes and source directories. These
        lists are written to files that are further processed in compute.py"""

        # types of errors with corresponding file names
        errors = conf['FULLTEXT_ERRORS']

        # get todays date
        now = datetime.strftime(datetime.now(), "%Y-%m-%d")

        # loop through types of errors messages
        for err_msg in errors.keys():

            bibs = []
            dirs = []

            # location of bibcode and directory in message field
            """example log:
            {"asctime": "2019-08-26T11:38:34.201Z", "msecs": 201.6739845275879,
            "levelname": "ERROR", "process": 13411, "threadName": "MainThread",
            "filename": "checker.py", "lineno": 238, "message": "Bibcode '2019arXiv190105463B'
            is linked to a non-existent file '/some/directory/filename.xml'",
            "timestamp": "2019-08-26T11:38:34.201Z", "hostname": "adsvm05"}"""
            # Indices into the single-quote-split log line where the
            # bibcode and directory appear; they differ per message shape.
            loc_bib = 1
            loc_dir = 3

            if (err_msg == "No such file or directory"):
                loc_bib = 3
                loc_dir = 11
            elif (err_msg == "format not currently supported for extraction"):
                loc_bib = 7
                loc_dir = 23

            # loop through files
            for name in glob.glob(errors[err_msg]):

                # NOTE(review): err_msg is interpolated straight into the
                # awk program -- safe only while the configured messages
                # contain no awk/regex metacharacters; confirm.
                command = "awk -F\: '/" + err_msg + "/ && /" + now + "/ && /ERROR/ {print $0}' " + name
                args = shlex.split(command)

                x = Popen(args, stdout=PIPE, stderr=STDOUT)

                # get bibcodes/directories from todays errors
                # NOTE(review): .split("\n") on communicate()[0] implies
                # Python 2 (str output); under Python 3 this would need
                # text=True or bytes handling.
                resp = x.communicate()[0].split("\n")

                for r in resp:
                    if r:
                        # Split on single quotes; quoted fields hold the
                        # bibcode and the file path.
                        r = r.split("'")
                        bibs.append(r[loc_bib])
                        dirs.append(r[loc_dir])

            # create filename based on error message and date
            fname = Filename.get(
                self.date,
                FileType.FULLTEXT,
                adjective=None,
                msg="_" + ("_".join(err_msg.split())).replace('-', '_') + "_")

            # write bibcodes and directories for each error type to file
            with open(fname, 'w') as f:
                writer = csv.writer(f, delimiter='\t')
                writer.writerows(zip(bibs, dirs))

            # Post-process the written file with the project sort helper.
            sort(fname)
示例#13
0
def splitPolygonByLine(polygon, line, verbose=False):
    """Split *polygon* by *line* into two groups of polygons.

    Returns a pair (negativeSide, positiveSide) of lists of polygons:
    pieces whose starting vertex lies on the negative side of the line
    go into ans[0], the rest into ans[1].  If the line does not cross
    the polygon, one of the two lists contains the whole polygon.

    NOTE(review): depends on project helpers (lineSign,
    segmentIntersection, GeneralizedSegment, utils.sort,
    ByDirectionPolAngleComparator, deleteThreePointsOnOneLine).
    """
    polygon = polygon.copy()
    T = type(polygon[0].GetX())
    # Ensure counter-clockwise orientation (positive doubled area).
    square = getDoubledOrientedSquare(polygon)
    if verbose:
        print('SQUARE: ', square)
    if square < T(0):
        if verbose:
            print('REVERSE')
        polygon.reverse()

    # Classify each vertex: 0 on the line, +1/-1 on either side.
    signs = [lineSign(point, line) for point in polygon]
    numSigns = [signs.count(0), signs.count(1), signs.count(-1)]
    if numSigns[-1] == 0:
        return ([], [polygon])
    if numSigns[1] == 0:
        return ([polygon], [])

    # Insert an intersection vertex on every edge that crosses the line.
    newPolygon = []
    for i in range(len(polygon)):
        if signs[i] * signs[i - 1] == -1:
            newPolygon.append(
                segmentIntersection(
                    line,
                    GeneralizedSegment(polygon[i], polygon[i - 1], False,
                                       False)))
        newPolygon.append(polygon[i])

    polygon = newPolygon
    signs = [lineSign(point, line) for point in polygon]
    n = len(polygon)
    # Build the boundary cycle as a directed graph i -> i+1 (mod n).
    graph = []
    for i in range(n):
        graph.append([])
        if i:
            graph[i - 1].append(i)
    graph[n - 1].append(0)

    def segmentIntersectionCoordinateComparator(line):
        # Orders on-line vertices by their (cross-product) coordinate
        # along the line's direction.
        def coordinate(point):
            return (line.getSecondPoint() -
                    line.getFirstPoint()) ^ (point - line.getFirstPoint())

        def less(num1, num2):
            return coordinate(polygon[num1]) < coordinate(polygon[num2])

        return less

    if verbose:
        print('SIGNS AFTER:', signs)
    # Connect consecutive on-line vertices (both directions) so each
    # piece can be closed along the cutting line.
    pointsOnLine = [i for i in range(n) if signs[i] == 0]
    pointsOnLine = utils.sort(
        pointsOnLine, less=segmentIntersectionCoordinateComparator(line))
    for i in range(1, len(pointsOnLine)):
        graph[pointsOnLine[i]].append(pointsOnLine[i - 1])
        graph[pointsOnLine[i - 1]].append(pointsOnLine[i])

    if verbose:
        for i in range(n):
            for j in range(len(graph[i])):
                print('EDGE: ', i, graph[i][j])

    # Trace each unvisited off-line vertex around its face, choosing at
    # branch points the most clockwise continuation by polar angle.
    used = [0] * n
    ans = ([], [])
    for vertex in range(n):
        if signs[vertex] != 0 and used[vertex] == 0:
            assert len(graph[vertex]) == 1
            if verbose:
                print('VERTEX: ', vertex)
            currentVertex = graph[vertex][0]
            previousVertex = vertex
            newPolygon = [polygon[currentVertex]]
            used[currentVertex] = 1
            while currentVertex != vertex:
                if len(graph[currentVertex]) == 1:
                    nextVertex = graph[currentVertex][0]
                else:
                    less = ByDirectionPolAngleComparator(
                        polygon[currentVertex], polygon[previousVertex])
                    nextVertex = None
                    for v in graph[currentVertex]:
                        if v != previousVertex:
                            if nextVertex == None or less(
                                    polygon[nextVertex], polygon[v]):
                                nextVertex = v
                used[nextVertex] = 1
                #sys.stdout.write(str(nextVertex))
                #sys.stdout.write(' ')
                newPolygon.append(polygon[nextVertex])
                previousVertex = currentVertex
                currentVertex = nextVertex
            #sys.stdout.write('\n')
            newPolygon = deleteThreePointsOnOneLine(newPolygon)
            # The sign of the seed vertex decides which side this piece
            # belongs to.
            if signs[vertex] < 0:
                ans[0].append(newPolygon)
            else:
                ans[1].append(newPolygon)
    return ans
示例#14
0
"""

from os import path
import json
from collections import OrderedDict

from utils import sort, error

ABS_PATH = path.dirname(path.abspath(__file__))
DB_FILE = path.realpath(path.join(ABS_PATH, "../data.json"))

# Load the database preserving key order so it can be compared against
# the canonical ordering produced by the project sort() helper.
with open(DB_FILE, 'r') as db_obj:
    data = json.load(db_obj, object_pairs_hook=OrderedDict)

keys = list(data.keys())
ordreded_keys = sort(keys)
has_errors = False

# Report every key that is out of place relative to the sorted order.
for i in range(len(keys)):
    if keys[i] != ordreded_keys[i]:
        correct_position = ordreded_keys.index(keys[i])
        has_errors = True
        error("Database entry {} not correctly " "ordred".format(keys[i]))
        print("Should be placed at {} instead "
              "of {}".format(correct_position, i))

for key, value in data.items():
    if value.get("linux"):
        symlinks = value["linux"].get("symlinks")
        if symlinks:
            ordered_symlinks = sort(symlinks)
示例#15
0
文件: sort.py 项目: 18bytes/whoopee
import insertion_sort, bubble_sort
import utils

if __name__ == '__main__':
  # Benchmark both sort implementations through the shared utils
  # harness (each is decorated with utils.measure_time).
  utils.sort(insertion_sort.insertion_sort)
  utils.sort(bubble_sort.bubble_sort)
示例#16
0
from utils import sort
#sort('data/ppi/atpin.ppi.2evidence.no-weight.txt', 'data/arabi_gdic.txt', out='test/test.ppi.txt')
# Build the PPI matrices for the 2-evidence ATPIN networks (unweighted
# and weighted), mapping identifiers through the Arabidopsis gene dict.
sort('data/ppi/atpin.ppi.2evidence.no-weight.txt',
     'data/arabi_gdic.txt',
     out='data/ppi_mat/atpin.2.no-weight.ppi.txt')
sort('data/ppi/atpin.ppi.2evidence.weight.txt',
     'data/arabi_gdic.txt',
     out='data/ppi_mat/atpin.2.weight.ppi.txt')
示例#17
0
import utils

@utils.measure_time
def insertion_sort(input):
  """Sort *input* in place with insertion sort and return it.

  BUG FIXES: the scan previously started at index 2, so the element at
  index 1 was never inserted (e.g. [3, 1, 2] stayed wrong); a None
  input printed a message and then crashed on len(None).  Now also
  returns the list, consistent with the sibling bubble_sort.
  """
  # Precondition check.
  if input is None:
    print("Input cannot be None.")
    return None
  # Start at 1: everything left of i is already sorted.
  for i in range(1, len(input)):
    num = input[i]
    j = i
    # Shift larger elements right until num's slot is found.
    while j > 0 and num < input[j - 1]:
      input[j] = input[j - 1]
      j = (j - 1)
    input[j] = num
  return input

if __name__ == '__main__':
  # utils.sort presumably runs the decorated sort function against
  # benchmark input -- TODO confirm against utils.
  utils.sort(insertion_sort)
示例#18
0
                                                index=False)
    tb[tb['m'] > 300][['n1', 'n2', 'm']].to_csv(out_f5,
                                                sep=' ',
                                                header=None,
                                                index=False)
    tb[tb['m'] > 500][['n1', 'n2', 'm']].to_csv(out_f6,
                                                sep=' ',
                                                header=None,
                                                index=False)
    tb[tb['m'] > 700][['n1', 'n2', 'm']].to_csv(out_f7,
                                                sep=' ',
                                                header=None,
                                                index=False)
    tb[tb['m'] > 900][['n1', 'n2', 'm']].to_csv(out_f8,
                                                sep=' ',
                                                header=None,
                                                index=False)


if __name__ == '__main__':
    filt_atpin(ppi_f1)
    #	filt_string(ppi_f2)
    d = 'data/ppi'
    out_d = 'data/ppi_mult'
    # Re-sort every ATPIN PPI text file into the ppi_mult output
    # directory, mapping ids through the Arabidopsis gene dictionary.
    for i in os.listdir(d):
        if i.startswith('atpin.ppi') and i.endswith('txt'):
            mat_f = os.path.join(d, i)
            out = os.path.join(out_d, i)
            sort(mat_f, 'data/arabi_gdic.txt', out=out, rm=None)
        #	sort('data/atpin.ppi.4evidence.no-weight.txt', 'data/gdic.txt', out='data/test_sort.txt', rm=None)
示例#19
0
from os import path
import json
from collections import OrderedDict

from utils import sort, error


ABS_PATH = path.dirname(path.abspath(__file__))
DB_FILE = path.realpath(path.join(ABS_PATH, "../data.json"))


# Load the database preserving key order so it can be compared against
# the canonical ordering produced by the project sort() helper.
with open(DB_FILE, 'r') as db_obj:
    data = json.load(db_obj, object_pairs_hook=OrderedDict)

keys = list(data.keys())
ordreded_keys = sort(keys)
has_errors = False

# Report every key that is out of place relative to the sorted order.
for i in range(len(keys)):
    if keys[i] != ordreded_keys[i]:
        correct_position = ordreded_keys.index(keys[i])
        has_errors = True
        error("Database entry {} not correctly "
              "ordred".format(keys[i]))
        print("Should be placed at {} instead "
              "of {}".format(correct_position, i))


for key, value in data.items():
    if value.get("linux"):
        symlinks = value["linux"].get("symlinks")
示例#20
0
 def predict(self, image):
     """Run the detector on one *image*, record the percentages, and
     return the predictions ordered by the project sort() helper.

     NOTE(review): predictions are dict items from the detector; the
     exact ordering criterion depends on sort() -- confirm.
     """
     predictions = self.detector.predict([image]).items()
     self.push_percentage(predictions)
     return sort(predictions)
示例#21
0
    def find_updates_v2(self, before, after):
        """Variant of find_updates operating directly on *before*/*after*
        (without first expressing them in terms of the input).

        Returns the list of (tiled) update expressions, or None when the
        invariant transition is rejected.

        BUG FIX: the ignore path printed the variable `reason`, which
        was never assigned in this method (guaranteed NameError); it is
        now set in both ignore branches, mirroring find_updates.
        """
        # If a part is (partially) computed in the before and
        #   does not appear in the after or
        # going from before to after requires undoing some computation
        # it is potentially unstable, and more expensive: ignore
        dict_bef = dict([(str(u.get_children()[0]), u) for u in before])
        dict_aft = dict([(str(u.get_children()[0]), u) for u in after])
        ignore = False
        for k, v in dict_bef.items():
            if k not in dict_aft:
                ignore = True
                reason = "%s not in %s" % (k, dict_aft.keys())
                break
            else:
                rules = self.expr_to_rule_rhs_lhs([v])
                rules = list(itertools.chain(*rules))
                expr_copy = copy.deepcopy(dict_aft[k])
                t = replace(expr_copy, rules)
                #if v == replace( expr_copy, rules ):
                if dict_aft[k] == t:
                    ignore = True
                    reason = "%s would require undoing job" % k
                    break
        if ignore:
            print("[INFO] Skipping invariant: %s" % reason)
            return None
        #
        # Wrap outputs for before and after
        WrapBefOut = WrapOutBef
        for u in before:
            u.children[0] = NList([WrapBefOut(l) for l in u.children[0]])
        #
        wrap_rules_after = []
        for u in after:
            u.children[0] = NList([WrapOutAft(l) for l in u.children[0]])
        # replace before in after
        wrap_rules_before = []
        for u in before:
            print(u)
            lhs, rhs = u.get_children()
            if len(lhs.children) > 1:
                continue
            rules = self.expr_to_rule_rhs_lhs([u])
            wrap_rules_before.append(list(itertools.chain(*rules)))
        #
        # Normalize each rule's pattern against all earlier rule sets.
        for i, rule in enumerate(reversed(wrap_rules_before)):
            idx = len(wrap_rules_before) - i - 1
            for j in range(idx - 1, -1, -1):
                for _rule in rule:
                    _rule.pattern = replace_all(_rule.pattern,
                                                wrap_rules_before[j])
        wrap_rules_before = list(itertools.chain(*wrap_rules_before))
        #
        for u in after:
            _, rhs = u.get_children()
            u.children[1] = simplify(
                to_canonical(replace_all(copy.deepcopy(rhs),
                                         wrap_rules_before)))
        # replace after in after
        done = False
        while not done:
            # replace after in after
            wrap_rules_after = []
            for u in after:
                lhs, rhs = u.get_children()
                if len(lhs.children) > 1:
                    wrap_rules_after.append([])
                    continue
                rules = self.expr_to_rule_rhs_lhs([u])
                wrap_rules_after.append(list(itertools.chain(*rules)))
            #
            after_top = [copy.deepcopy(u) for u in after]
            for i, u in enumerate(after):
                _, rhs = u.get_children()
                rules = list(
                    itertools.chain.from_iterable(wrap_rules_after[:i] +
                                                  wrap_rules_after[i + 1:]))
                u.children[1] = simplify(
                    to_canonical(replace_all(copy.deepcopy(rhs), rules)))
            done = True
            for top, bot in zip(after_top, after):
                if top != bot:
                    done = False
                    break
        # [TODO] Multiple lhss, won't work
        # Drop statements that merely copy a before-output to the same
        # after-output (no real update).
        updates = []
        for u in after:
            lhs, rhs = u.get_children()
            lhs = lhs.children[0]  # NList[op] -> op
            if isinstance(rhs, WrapBefOut) and isinstance(lhs, WrapOutAft) and \
                    matchq(lhs.children[0], rhs.children[0]):
                continue
            updates.append(u)
        #
        tiled_updates = []
        for u in updates:
            print("*   ", u)
            tilings = list(tile_expr(u))
            if len(tilings) > 1:
                print("[WARNING] Multiple (%d) tilings for expression %s" %
                      (len(tilings), u))
                print("          Discarding all but one")
            tiled_updates.extend(tilings[0])
        tiled_updates = sort(tiled_updates)
        print("* Tiled update")
        for t in tiled_updates:
            print("*   ", t)

        # Drop WrapOutBef's
        # Drop WrapOutAft's
        s = PatternDot("s")
        updates = []
        for u in tiled_updates:
            u = replace_all(
                u, [RewriteRule(WrapOutAft(s), Replacement(lambda d: d["s"]))])
            u = replace_all(
                u, [RewriteRule(WrapOutBef(s), Replacement(lambda d: d["s"]))])
            updates.append(u)

        return updates
示例#22
0
def drawGeneralizedSegment(surface, segment, color, fat=1):
    """Clip a generalized segment (possibly half-infinite on either
    end) to the surface rectangle and draw the visible part.

    Draws nothing when the clipped result is empty, a point when it
    degenerates to one point, otherwise the clipped segment.
    """
    T = type(segment.getFirstPoint().GetX())
    rect = surface.get_rect()

    # Candidate clip points: the two endpoints (if finite and visible)
    # plus intersections with the four window borders.
    a = [None] * 6
    
    p1 = segment.getFirstPoint()
    p2 = segment.getSecondPoint()
    #print('drawGeneralizedSegment arguments: ', p1, p2)

    if not(segment.isContinuedForFirst()) and IsValidPoint(surface, p1):
        a[0] = p1
        #a[0] = Point2D(float(p1.x), float(p1.y))
    if not(segment.isContinuedForSecond()) and IsValidPoint(surface, p2):
        a[1] = p2
        #a[1] = Point2D(float(p2.x), float(p2.y))

    
    leftUpCorner = Point2D(T(rect.x), T(rect.y))
    rightUpCorner = Point2D(T(rect.x + rect.w), T(rect.y))
    rightDownCorner = Point2D(T(rect.x + rect.w), T(rect.y + rect.h))
    leftDownCorner = Point2D(T(rect.x), T(rect.y + rect.h))

    #print('leftUpCorner ', leftUpCorner)
    #print('rightUpCorner ', leftUpCorner)
    #print('leftDownCorner ', leftDownCorner)
    #print('rightDownCorner ', rightDownCorner)

    a[2] = segmentIntersection(segment, GeneralizedSegment(leftUpCorner, rightUpCorner, False, False))
    a[3] = segmentIntersection(segment, GeneralizedSegment(rightUpCorner, rightDownCorner, False, False))
    a[4] = segmentIntersection(segment, GeneralizedSegment(rightDownCorner, leftDownCorner, False, False))
    a[5] = segmentIntersection(segment, GeneralizedSegment(leftDownCorner, leftUpCorner, False, False))
    
    # Null out duplicate candidate points.
    #print('lstBefore: ', a[0], a[1], a[2], a[3], a[4], a[5])
    for i in range(0, 5):
        if a[i]:
            for j in range(i+1, 6):
                if a[j] and (a[i] == a[j]):
                    a[j] = None
    #print('lstPre: ', a[0], a[1], a[2], a[3], a[4], a[5])

    # Compact the list: move all non-None entries to the front,
    # leaving l as the count of valid points.
    r = 0
    l = 0
    while (l < 6):
        if not a[l]:
            r = max(l, r)
            while (r < 6 and not a[r]):
                r += 1
            if r == 6:
                break
            t = a[l]
            a[l] = a[r]
            a[r] = t
        l += 1
    #print('lst: ', a[0], a[1], a[2], a[3], a[4], a[5])
    #if l <= 2:
    #    sys.exit()
    while len(a) > l:
        a.pop()
    if l >= 3:
        # All surviving points must be collinear (cross product zero).
        for i in range(2, l):
            if a[i]:
                assert (a[1] - a[0])*(a[i]-a[0]) == T(0), 'ERROR: point ' + str(i) + ' is not on line...'

        # Keep only the two extreme points along the line.
        # NOTE(review): the sort is invoked twice with identical
        # arguments; the second call appears redundant -- confirm
        # utils.sort has no relied-upon side effect.
        a = utils.sort(a, ByManhattenDistanceComparator(a[0]))
        a = utils.sort(a, ByManhattenDistanceComparator(a[0]))
        a[1] = a[-1]
        while len(a) > 2:
            a.pop()
    
    l = len(a)
    assert l <= 2, 'ERROR: l is ' + str(l) + 'which is more than 2\n'
    if l == 0:
        return
    if l == 1:
        DrawPoint(surface, a[0], color, fat)
    else:
        #print('real segment: ', a[0], a[1])
        drawSegment(surface, a[0], a[1], color, fat)