Example #1
from anytree import RenderTree
from anytree.exporter import DotExporter
import KernelPHash as kph
import re

# read a kernel (text file)
with open('test_cases/test.txt','r') as f:
    k0_txt = f.read()
    
# generate AST (the first element of ast0 is the root node)
ast0 = kph.ASTGen(k0_txt)

# plot the AST to an image AST.png
DotExporter( ast0[0] ).to_picture('AST.png')

# print the tree in terminal
for pre, fill, node in RenderTree( ast0[0] ):
    node_name = re.sub(r'\{[\d]+\}','',node.name) #just to remove the label {n}
    print("%s%s" % (pre,node_name))
    
# print DFS pre-order traversal
text_d = kph.TextGenDFS(ast0, order='pre')
print('\nDFS pre-order:',text_d,'\n')

# show text after changing variable/array names
text_du = kph.UnifyNaming(text_d)
print('After renaming:',text_du,'\n')

# print DFS post-order traversal
text_dpo = kph.TextGenDFS(ast0, order='post')
print('DFS post-order:',text_dpo,'\n')
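
# For reference, a minimal sketch of similar pre-/post-order walks using only
# anytree's built-in iterators (tiny made-up tree; not KernelPHash's TextGenDFS)
from anytree import Node, PreOrderIter, PostOrderIter

demo = Node('add')
Node('a', parent=demo)
Node('b', parent=demo)
print('pre-order :', [n.name for n in PreOrderIter(demo)])   # ['add', 'a', 'b']
print('post-order:', [n.name for n in PostOrderIter(demo)])  # ['a', 'b', 'add']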
Example #2
                        'Total Premium']].groupby(location_merge[var]).agg({
                            'Claims': ['sum'],
                            'Total Premium': ['sum', "size"]
                        })
        roll_up.columns = [
            ' '.join(col).strip() for col in roll_up.columns.values
        ]

        roll_up["Loss Fraction"] = np.round(
            roll_up['Claims sum'] * 100 / roll_up['Total Premium sum'], 0)

        roll_up = roll_up.astype(int)
        levels = np.asarray(roll_up.index)
        print(levels)
        for level in levels:
            print("Node is")
            print(node)
            new_node = Node(str(level) + " " +
                            str(roll_up.loc[level]["Loss Fraction"]) + "% " +
                            str(roll_up.loc[level]["Total Premium size"]),
                            parent=node)
            level_data = data[data[var] == level]
            split(level_data, j - 1, new_node)


split(location_merge, total_cols)

# The tree displays each segment with two numbers: (1) the Loss Fraction and (2) the count of policies in the segment
for pre, fill, node in RenderTree(Tree):
    print("%s%s" % (pre, node.name))
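
# A self-contained sketch of the same roll-up-and-label pattern on a toy frame
# (hypothetical column names; the real split() above also recurses over the
# remaining variables to build deeper segments):
import numpy as np
import pandas as pd
from anytree import Node, RenderTree

toy = pd.DataFrame({'Region': ['N', 'N', 'S'],
                    'Claims': [10, 20, 30],
                    'Total Premium': [100, 100, 200]})
toy_root = Node('portfolio')
roll = toy.groupby('Region').agg(Claims=('Claims', 'sum'),
                                 Premium=('Total Premium', 'sum'),
                                 Count=('Total Premium', 'size'))
for level, row in roll.iterrows():
    loss_fraction = int(np.round(row['Claims'] * 100 / row['Premium']))
    Node("%s %d%% %d" % (level, loss_fraction, row['Count']), parent=toy_root)
for pre, _, node in RenderTree(toy_root):
    print("%s%s" % (pre, node.name))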
Example #3
def print_tree(tree):
    for pre, fill, node in RenderTree(tree):
        print("%s%s" % (pre, node.name))
def run_AutoML(trial,
               X_train=None,
               X_test=None,
               y_train=None,
               y_test=None,
               categorical_indicator=None):
    space = None
    search_time = None
    if not 'space' in trial.user_attrs:
        # which hyperparameters to use
        gen = SpaceGenerator()
        space = gen.generate_params()
        space.sample_parameters(trial)

        trial.set_user_attr('space', copy.deepcopy(space))

        # which constraints to use
        search_time = trial.suggest_int('global_search_time_constraint',
                                        10,
                                        total_search_time,
                                        log=False)

        # how much time for each evaluation
        evaluation_time = trial.suggest_int(
            'global_evaluation_time_constraint', 10, search_time, log=False)

        # how much memory is allowed
        memory_limit = trial.suggest_uniform('global_memory_constraint', 1.5,
                                             4)

        # how many cvs should be used
        cv = trial.suggest_int(
            'global_cv', 2, 20,
            log=False)  #todo: calculate minimum number of splits based on y

        number_of_cvs = trial.suggest_int('global_number_cv', 1, 10, log=False)

        dataset_id = trial.suggest_categorical('dataset_id',
                                               my_openml_datasets)

    else:
        space = trial.user_attrs['space']

        print(trial.params)

        #make this a hyperparameter
        search_time = trial.params['global_search_time_constraint']
        evaluation_time = trial.params['global_evaluation_time_constraint']
        memory_limit = trial.params['global_memory_constraint']
        cv = trial.params['global_cv']
        number_of_cvs = trial.params['global_number_cv']

        if 'dataset_id' in trial.params:
            dataset_id = trial.params['dataset_id']  #get same random seed
        else:
            dataset_id = 31

    for pre, _, node in RenderTree(space.parameter_tree):
        if node.status == True:
            print("%s%s" % (pre, node.name))

    if type(X_train) == type(None):

        my_random_seed = int(time.time())
        if 'data_random_seed' in trial.user_attrs:
            my_random_seed = trial.user_attrs['data_random_seed']

        X_train, X_test, y_train, y_test, categorical_indicator, attribute_names = get_data(
            dataset_id, randomstate=my_random_seed)

        if not isinstance(trial, FrozenTrial):
            my_list_constraints_values = [
                search_time, evaluation_time, memory_limit, cv, number_of_cvs
            ]

            metafeature_values = data2features(X_train, y_train,
                                               categorical_indicator)
            features = space2features(space, my_list_constraints_values,
                                      metafeature_values)
            trial.set_user_attr('features', features)

    search = MyAutoML(cv=cv,
                      number_of_cvs=number_of_cvs,
                      n_jobs=1,
                      evaluation_budget=evaluation_time,
                      time_search_budget=search_time,
                      space=space,
                      main_memory_budget_gb=memory_limit)
    search.fit(X_train,
               y_train,
               categorical_indicator=categorical_indicator,
               scorer=auc)

    best_pipeline = search.get_best_pipeline()

    test_score = 0.0
    if type(best_pipeline) != type(None):
        test_score = auc(search.get_best_pipeline(), X_test, y_test)

    return test_score, search
Example #5
# case 3.2 -- works!!
ex1 = np.array([1,3,4,5,6])
ex2 = np.array([2,6,5,4,3])

# case 4 -- works!!
# ex1 = np.array([1,2,3,4,5,6])
# ex2 = np.array([4,3,2,1,6,5])

# case 4.2 -- works!!
# ex1 = np.array([1,2,3,4,5,6])
# ex2 = np.array([2,1,6,5,4,3])


print("ex1: " + str(ex1))
print("ex2: " + str(ex2))

global andDict

while len(ex1) != 2:
    ex1, ex2, andNodes, orNodes = mainAlg(ex1, ex2)
    
andNodes = andNodes.flatten()               # 'flattens' array, collapses it into one dimension
tree = reconstruct(andNodes, orNodes, ex1)
print("\n\nRECONSTRUCTED TREE: \n")
print(RenderTree(tree))                     # anytree feature (Node is too)

Example #6
                          input_lines).group()
    for leaf in re.findall(r'(\d) (\w+ \w+) bag', node_line):
        build_tree(Node(leaf[1], parent=node, v=int(leaf[0])))


def sum_childern(node):
    running_total = 0
    for child in node.children:
        running_total += (child.v + (child.v * sum_childern(child)))
    return running_total


root = Node('shiny gold', v=1)
build_tree(root)

for pre, fill, node in RenderTree(root):
    print("%s%s:  %d" % (pre, node.name, node.v))

print(f"PART2: {sum_childern(root)}")

# (venv) *[main][~/dev/aoc2020]$ python day7/part2.py
# shiny gold:  1
# ├── light black:  5
# │   ├── posh coral:  1
# │   │   └── dark tomato:  3
# │   │       ├── light gray:  3
# │   │       ├── dull cyan:  2
# │   │       ├── striped silver:  4
# │   │       └── dark fuchsia:  5
# │   ├── dotted black:  4
# │   │   └── vibrant white:  2
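
# Sanity check of the recursion on a tiny hand-built tree (made-up bag names):
# the root holds 2 'dark red' bags, each holding 3 'dark blue' bags, so
# sum_childern should return 2 + 2*3 = 8.
from anytree import Node
demo_root = Node('shiny gold', v=1)
demo_red = Node('dark red', parent=demo_root, v=2)
Node('dark blue', parent=demo_red, v=3)
print(sum_childern(demo_root))  # 8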
Example #7
def plot_tree(tree, metric: str = "probability", pic_path: str = ""):
    try:
        from anytree import Node, RenderTree
    except:
        raise ImportError(
            "The anytree module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it."
        )
    check_types([(
        "metric",
        metric,
        [str],
    ), (
        "pic_path",
        pic_path,
        [str],
    )])
    try:
        import shutil

        screen_columns = shutil.get_terminal_size().columns
    except:
        import os

        screen_rows, screen_columns = os.popen("stty size", "r").read().split()
    tree_id, nb_nodes, tree_depth, tree_breadth = (
        tree["tree_id"][0],
        len(tree["node_id"]),
        max(tree["node_depth"]),
        sum([1 if item else 0 for item in tree["is_leaf"]]),
    )
    print("-" * int(screen_columns))
    print("Tree Id: {}".format(tree_id))
    print("Number of Nodes: {}".format(nb_nodes))
    print("Tree Depth: {}".format(tree_depth))
    print("Tree Breadth: {}".format(tree_breadth))
    print("-" * int(screen_columns))
    tree_nodes = {}
    for idx in range(nb_nodes):
        op = "<" if not (tree["is_categorical_split"][idx]) else "="
        if tree["is_leaf"][idx]:
            tree_nodes[tree["node_id"][idx]] = Node(
                "[{}] => {} ({} = {})".format(
                    tree["node_id"][idx],
                    tree["prediction"][idx],
                    metric,
                    tree["probability/variance"][idx],
                ))
        else:
            tree_nodes[tree["node_id"][idx]] = Node("[{}] ({} {} {} ?)".format(
                tree["node_id"][idx],
                tree["split_predictor"][idx],
                op,
                tree["split_value"][idx],
            ))
    for idx, node_id in enumerate(tree["node_id"]):
        if not (tree["is_leaf"][idx]):
            tree_nodes[node_id].children = [
                tree_nodes[tree["left_child_id"][idx]],
                tree_nodes[tree["right_child_id"][idx]],
            ]
    for pre, fill, node in RenderTree(tree_nodes[1]):
        print("%s%s" % (pre, node.name))
    if pic_path:
        from anytree.dotexport import RenderTreeGraph

        RenderTreeGraph(tree_nodes[1]).to_picture(pic_path)
        if isnotebook():
            from IPython.core.display import HTML, display

            display(HTML("<img src='{}'>".format(pic_path)))
Example #8
#test_node.py

from anytree import NodeMixin, RenderTree, Node

udo = Node("Udo")
marc = Node("Marc", parent=udo)
lian = Node("Lian", parent=marc)

print(udo)

for pre, fill, node in RenderTree(udo):
    print("%s%s" % (pre, node.name))


class MyBaseClass(object):
    foo = 4


class MyClass(MyBaseClass, NodeMixin):
    def __init__(self, name, length, parent=None, children=None):
        super(MyClass, self).__init__()
        self.name = name
        self.length = length
        self.parent = parent
        if children:
            self.children = children

    def addChild(self, pos):
        print("added child")
        if isinstance(pos, list):
            new_node = (Node(pos))
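
# A short usage sketch in the spirit of the anytree docs: MyClass instances act
# as tree nodes, so RenderTree can walk them while the extra `length` attribute
# stays available on every node (the names below are made up).
my0 = MyClass('my0', 0)
my1 = MyClass('my1', 1, parent=my0)
my2 = MyClass('my2', 2, parent=my0)
for pre, fill, node in RenderTree(my0):
    print("%s%s length=%s" % (pre, node.name, node.length))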
Example #9
                    trial=trial,
                    metafeature_values_hold=metafeature_values_hold,
                    search_time=search_time_frozen,
                    model_compare=model_compare,
                    model_success=model_success,
                    memory_limit=memory_budget,
                    privacy_limit=privacy,
                    #evaluation_time=int(0.1*search_time_frozen),
                    #hold_out_fraction=0.33
                ),
                n_trials=500,
                n_jobs=4)

            space = study_prune.best_trial.user_attrs['space']

            for pre, _, node in RenderTree(space.parameter_tree):
                if node.status == True:
                    print("%s%s" % (pre, node.name))

            try:
                result, search = utils_run_AutoML(
                    study_prune.best_trial,
                    X_train=X_train_hold,
                    X_test=X_test_hold,
                    y_train=y_train_hold,
                    y_test=y_test_hold,
                    categorical_indicator=categorical_indicator_hold,
                    my_scorer=my_scorer,
                    search_time=search_time_frozen,
                    memory_limit=memory_budget,
                    privacy_limit=privacy)
Example #10
def main(stacksFile, railcarFile, debug):
    # Read in "Stacks" and "Railcar" files
    stacks_df = stacksPreprocessing(stacksFile)
    railcar_df = railcarPreprocessing(railcarFile)

    # We move one cart per step k, so k is also the number of carts moved
    # N is the total number of carts
    N = railcar_df.shape[0]
    # Initialize Y as having the railcar column names with no data. i.e. The railcar is empty
    # Initialize Z as having the stacks column names with all data. i.e. The stacks are full
    Y = railcar_df.drop(railcar_df.index[0:])
    Z = stacks_df

    # Begin a tree to store the different paths and costs
    rootNode = AnyNode(id='root', cost=0, set=set(), Z=Z, Y=Y)

    for k in range(N):
        if debug:
            print('\n\n --------------------------------- Stage k=' + str(k) +
                  ' ---------------------------------')
        leaves = rootNode.leaves
        for leaf in leaves:

            validChoices = validContainers(Z, Y, railcar_df)
            validNodes = [0] * len(validChoices)
            if debug:
                print(leaf.id)
                print("Valid choice: " + str(validChoices))

            # v is a containerID - string
            for i, v in enumerate(validChoices):
                node_set = set(leaf.set)
                node_set.add(v)
                validNodes[i] = AnyNode(id=v,
                                        parent=leaf,
                                        cost=depth(v, Z) + 1,
                                        set=node_set)
        # Since we only have two possible costs 1 and 2, we reduce the DP problem to Cases
        # HEURISTIC: Take the lowest cost. If multiple exist, select one at random
            costs = [u_k.cost for u_k in leaf.children]
            argmin_costs = costs.index(min(costs))
            u_k = leaf.children[argmin_costs]  # Greedy choice u_k
            leaf.children = [u_k]
            Z, Y = move(u_k.id, Z, Y, railcar_df)

        if debug:
            for pre, fill, node in RenderTree(rootNode):
                print("%s%s, %s, set=%s" % (pre, node.id, node.cost, node.set))

    final_move = rootNode.leaves[0]
    for pre, fill, node in RenderTree(rootNode):
        print("%s%s, %s, set=%s" % (pre, node.id, node.cost, node.set))
    if len(final_move.set) == N:
        print('All containers have been placed\n')
        print('The containers in order are as follows.\n')
        costs = [np.inf] * (N + 1)
        k = 0
        print('containerID \t cost')
        for pre, fill, node in RenderTree(rootNode):

            if node.id != 'root':
                print('%s \t %s' % (node.id, node.cost))
            costs[k] = node.cost
            k += 1
        sumC = sum(costs)
        print('Optimal cost: ' + str(sumC))

    else:
        sys.exit('An error occurred with the algorithm.')
Example #11
 def display_tree_in_shell(self):
     for pre, fill, node in RenderTree(self.graph):
         print("%s%s" % (pre, node.display_name))
Example #12
 def _print(self):
     print(RenderTree(self._root).by_attr("_id"))
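
# by_attr() renders a chosen node attribute instead of node.name. A minimal
# standalone sketch (the `_id` attribute here is made up):
from anytree import Node, RenderTree
demo = Node('root', _id=0)
Node('child', parent=demo, _id=1)
print(RenderTree(demo).by_attr('_id'))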
Example #13
    def is_complete(response):
        entities = response["entities"]

        if len(entities):

            # Sort entities based on creation time
            sorted_entities = sorted(
                entities, key=lambda x: int(x["metadata"]["creation_time"])
            )

            # Create nodes of runlog tree and a map based on uuid
            root = RunlogNode(
                {
                    "metadata": {"uuid": app_id},
                    "status": {"type": "app", "state": "", "name": app_name},
                }
            )
            nodes = {}
            nodes[app_id] = root
            for runlog in sorted_entities:
                uuid = runlog["metadata"]["uuid"]
                nodes[str(uuid)] = RunlogNode(runlog, parent=root)

            # Attach parent to nodes
            for runlog in sorted_entities:
                uuid = runlog["metadata"]["uuid"]
                parent_uuid = runlog["status"]["application_reference"]["uuid"]
                node = nodes[str(uuid)]
                node.parent = nodes[str(parent_uuid)]

            # Show Progress
            # TODO - Draw progress bar
            total_tasks = 0
            completed_tasks = 0
            for runlog in sorted_entities:
                runlog_type = runlog["status"]["type"]
                if runlog_type == "action_runlog":
                    total_tasks += 1
                    state = runlog["status"]["state"]
                    if state in RUNLOG.STATUS.SUCCESS:
                        completed_tasks += 1

            if not is_app_describe and total_tasks:
                screen.clear()
                progress = "{0:.2f}".format(completed_tasks / total_tasks * 100)
                screen.print_at("Progress: {}%".format(progress), 0, 0)

            # Render Tree on next line
            line = 1
            for pre, _, node in RenderTree(root):
                lines = json.dumps(node, cls=RunlogJSONEncoder).split("\\n")
                for linestr in lines:
                    tabcount = linestr.count("\\t")
                    if not tabcount:
                        screen.print_at("{}{}".format(pre, linestr), 0, line)
                    else:
                        screen.print_at(
                            "{}{}".format("", linestr.replace("\\t", "")),
                            len(pre) + 2 + tabcount * 2,
                            line,
                        )
                    line += 1
            screen.refresh()

            msg = ""
            is_complete = True
            if not is_app_describe:
                for runlog in sorted_entities:
                    state = runlog["status"]["state"]
                    if state in RUNLOG.FAILURE_STATES:
                        msg = "Action failed. Exit screen? (y)"
                        is_complete = True
                    if state not in RUNLOG.TERMINAL_STATES:
                        is_complete = False

            if not msg:
                msg = "Action ran successfully. Exit screen? (y)"
            if not is_app_describe:
                screen.print_at(msg, 0, line)
                screen.refresh()
                time.sleep(10)
            return (is_complete, msg)
        return (False, "")
Example #14
parser = yacc.yacc()

s = """
int a = 5;
int max(int num1, int num2)
{
int i;
if( num1 < 20 ) {
      for( i = 0; i <= num2; i=i-1 ) { num1 = num1-1;break;}
   } else {
      return num1;
   }
   return num1;}
    """

# Give the lexer some input
# lexer.input(s)

# Tokenize
# while True:
#     tok = lexer.token()
#     if not tok: break  # No more input
#     print(tok)

ast_tree = parser.parse(s, lexer=lexer)
visitor = NoteVisitor()

print(visitor.visit(ast_tree))
print(RenderTree(ast_tree))
Example #15
 def pretty_print(self, thread):
     for pre, fill, node in RenderTree(thread):
         message = self.mbox.get_messages(node.name)[0]
         print("%.20s\t\t%s%s" % (message['From'], pre, node.name))
Example #16
    def isCharLiteral(self, token):
        return self.getType(token) == "char"

    def isStringLiteral(self, token):
        return self.getType(token) == "string"

    
g = Grammar()
#print(g.tokens)

g.syntaxProgram()
#print(RenderTree(g.final_tree))
tofile = g.newtokens

for pre, fill, node in RenderTree(g.final_tree):
    print("%s%s" % (pre, node.name))

with open("../semantic check/token.txt", "w") as outF:
    for line in tofile:
        outF.write(str(line))
        outF.write("\n")

with open("../irt/token.txt", "w") as outF:
    for line in tofile:
        outF.write(str(line))
        outF.write("\n")

Example #17
def renderTreeToFile(filename, tree):
    with open(filename + "_out.txt", "w") as f:
        for pre, _, node in RenderTree(tree, style=AsciiStyle()):
            f.write("%s%s\r" % (pre, node.name))
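
# AsciiStyle above avoids Unicode branch characters; anytree also ships the
# default ContStyle and a DoubleStyle. A minimal comparison sketch:
from anytree import Node, RenderTree, AsciiStyle, DoubleStyle
demo = Node('a')
Node('b', parent=demo)
print(RenderTree(demo, style=AsciiStyle()).by_attr())
print(RenderTree(demo, style=DoubleStyle()).by_attr())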
Example #18
        string2 = string2 + "[" + key + "]"

    if key_types[i] == 'reg_key':
        exec(string1 + string2 + " = {}")
    elif key_types[i] == 'last_key':
        exec(string1 + string2 + " = " + keys[i + 1])
    elif key_types[i] == 'terminal_value':
        pass
    else:
        raise ValueError('There was a misspecification')

pprint.pprint(main_dict)
print(main_dict['CRBP']['drugs'].keys())
print(main_dict['CRBP']['drugs']['Lanabasum'].keys())

# create a tree diagram of the data using the anytree module

CRBP = Node("CRBPPPP")
company_name = Node('company_name', parent=CRBP)
Corbus_Pharmaceuticals = Node('Corbus_Pharmaceuticals', parent=company_name)
drugs = Node('drugssss', parent=CRBP)
Anabasum = Node('Anabasum', parent=drugs)
Lanabasum = Node('Lanabasum', parent=drugs)
print(CRBP)
print(Lanabasum)

for pre, fill, node in RenderTree(CRBP):
    print("%s%s" % (pre, node.name))

#DotExporter(CRBP).to_dotfile("/home/paul/Environments/CRBP.dot")
Example #19
    # print(np.max(data, axis=0))

    # N_VALUES = [7, 4, 8, 6, 7, 4, 3, 9, 12, 2, 2, 2, 2, 2, 2, 10, 2, 2, 2, 2, 9, 4, 3, 5, 3, 3, 6]
    # for female
    # N_VALUES = [7, 4, 8, 6, 7, 4, 3, 9, 12, 2, 2, 2, 2, 2, 2, 4, 3, 5, 3, 3, 6]
    # for male
    N_VALUES = [
        7, 2, 6, 4, 3, 2, 2, 2, 2, 2, 2, 10, 2, 2, 2, 2, 9, 4, 3, 5, 3, 3, 6
    ]
    lca = LCA(max_nclass=root_nclass, n_values=N_VALUES)
    lca.fit(data)
    root = HLCA(lca, data, Y, 0, 0.35)

    expand(root, node_nclass, N_VALUES, 0.89, min_split, max_level=max_level)

    for pre, _, node in RenderTree(root):
        print(pre, end='')
        print("{} {} {} {}".format(node.upperClass, len(node.data), node.ratio,
                                   [len(sd) for sd in node.subdata]))

    opred = predict(data, root)
    pred = np.round(opred)
    print(sum(pred == tt.Survived.values) / float(len(data)))

    print("train accurate: ")
    for _ in range(5):
        print(sampling(opred, Y))

    test['Age'] = pd.cut(test.Age,
                         bins=[-np.inf, 16, 25, 48, 65, 80, np.inf],
                         labels=range(6))
Example #20
def idbi():
    global makeTree, mtree, h1_bits, h1, leaves
    print("makeTree", makeTree)
    if makeTree == 1:  #Change it to 1
        print("making Merkle tree")
        stmt = "SELECT * FROM std"
        cur.execute(stmt)
        tuples = cur.fetchall()
        k = 3  # No. of children of a node in a Merkle Tree. Change as per your choice
        ntuples = len(tuples)
        if ntuples < k:
            n = 1
        else:
            n = math.floor(math.log(ntuples, k))
        h1_bits = n
        digest_size = 4
        h1.clear()
        for i in tuples:
            dgst = hashlib.blake2b((i[0] + " " + i[1]).encode(),
                                   digest_size=digest_size).hexdigest()
            dgst_int = int(dgst, 16)
            h1[base_k(dgst_int, k)].append(" ".join(i))
        # print(h1)
        st = "".join(map(str, range(k)))
        mtree = {'root': Node('root')}
        node_ids = [
            "".join(seq) for i in range(1, n + 1)
            for seq in product(st, repeat=i)
        ]
        for i in node_ids:
            if len(i) == 1:
                mtree[i] = Node(i, parent=mtree['root'])
            else:
                mtree[i] = Node(i, parent=mtree[i[:-1]])
        for i in reversed(node_ids):  # Find hash values for leaves
            if len(i) == n:
                leaf_hash = hashlib.sha1("".join(
                    sorted([
                        hashlib.sha1(j.encode()).hexdigest() for j in h1[i]
                    ])).encode()).hexdigest()
                mtree[i].hash = leaf_hash
                leaves[leaf_hash] = i
            else:
                break
        for i in range(len(node_ids), 0, -k):
            node = node_ids[i - 1]
            if len(node) != 1:
                nodeid = node_ids[i - 1][:-1]
            else:
                nodeid = 'root'
            mtree[nodeid].hash = hashlib.sha1("".join(
                sorted([mtree[j].hash
                        for j in node_ids[i - k:i]])).encode()).hexdigest()
        makeTree = 0
        print(RenderTree(mtree['root']))
    postdata = request.json
    children = []
    for nodeitem in postdata['data']:
        if nodeitem in ['GOD', 'god', 'God']:
            children = [mtree['root'].hash]
        else:
            children += [
                node.hash for node in LevelOrderIter(mtree['root'])
                if node.parent != None and node.parent.hash == nodeitem
            ]
    if children == []:
        # print([j for i in postdata["data"] for j in h1[leaves[i]]])
        return jsonify({
            "children": [j for i in postdata["data"] for j in h1[leaves[i]]],
            "leaf":
            "1"
        })

    return jsonify({'children': children, "leaf": "0"})
Example #21
    factor_nodes_list = []
    origin_node = Node(ROOT_NODE_NAME)
    for level, factor_values in enumerate(factor_values_list):
        factor_nodes = []
        for factor_value in factor_values:
            if level > 0:
                parent_factor_nodes = factor_nodes_list[level - 1]
                for parent_factor_node in parent_factor_nodes:
                    factor_node = Node(factor_value, parent=parent_factor_node)
                    factor_nodes.append(factor_node)
            else:
                factor_node = Node(factor_value, parent=origin_node)
                factor_nodes.append(factor_node)
        factor_nodes_list.append(factor_nodes)
    for pre, fill, node in RenderTree(origin_node):
        print("%s%s" % (pre, node.name))

    # prepare variable list
    df_variable_list = []
    for variable_name in variable_name_list:
        df_variable_list.append(pd.DataFrame())

    # Iterate over all leaves
    children_factor_nodes = factor_nodes_list[-1]
    for children_factor_node in children_factor_nodes:
        path_to_factor = str(children_factor_node).split("'")[1]
        child_factor_values = path_to_factor.split('/')[2::]
        child_name = '-'.join(child_factor_values)
        df_child = df_quant[eval(" & ".join([
            "(df_quant['{0}'] == '{1}')".format(
Example #22
def render_tree(root, data=False):
    for pre, fill, node in RenderTree(root):
        if hasattr(node, 'values') and data:
            print("%s%s: %s" % (pre, node.name, list(node.values)[:5]))
        else:
            print("%s%s" % (pre, node.name))
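
# A short usage sketch (made-up nodes; `values` is just an extra attribute the
# caller attaches, printed only when data=True):
from anytree import Node
demo_root = Node('root')
Node('leaf', parent=demo_root, values=[1, 2, 3, 4, 5, 6])
render_tree(demo_root, data=True)  # the leaf line shows its first five values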
Example #23
        for i in pi:
            # only grab the floats for now
            if isinstance(i[1], float):
                d[i] = i[1]
            if isinstance(i[1], int):
                d[i] = i[1]
        info = t.info()
        info['params'] = d
        #if isinstance(t.inst,cqparts.Assembly):
        #    info['tree'] = t.inst.tree_str()
        return info


#d = directory(cqparts_bucket._namespace,'export')
d = directory('cqparts', 'export')
print(RenderTree(d.root))


@app.route('/')
def base():
    return render_template('list.html', items=d.root.dir())


@app.route('/list')
def list():
    return jsonify(d.items())


@app.route('/list/<path:modelname>')
def subcat(modelname):
    return render_template('list.html', items=d.prefix(modelname))
Example #24
    def create(self):
        ret = {}
        block = sorted(
            set([
                self.start_date + datetime.timedelta(days=x)
                for x in range(0, (self.end_date - self.start_date).days + 1)
            ]))
        for date in block:
            ret[date] = []
            for junior_resident in self.get_junior_residents():
                if date >= junior_resident.start_date and date <= junior_resident.end_date:
                    # - resident cannot be post call the first day of vacation
                    post_call_date = date + datetime.timedelta(days=1)
                    if date not in junior_resident.time_off and post_call_date not in junior_resident.get_no_post_calls(
                    ):
                        if junior_resident.allowed_solo_call:
                            ret[date].append(
                                NodeSub(date, None, junior_resident))

                        for senior_resident in self.get_senior_residents():
                            if date >= senior_resident.start_date and date <= senior_resident.end_date:
                                if date not in senior_resident.time_off and post_call_date not in senior_resident.get_no_post_calls(
                                ):
                                    ret[date].append(
                                        NodeSub(date, senior_resident,
                                                junior_resident))

        keys = list(sorted(ret.keys()))
        root = NodeRoot()
        root.add_children(ret[keys[0]])

        for key in keys:
            print(r'key: {} - num_values:{}'.format(key, len(ret[key])))

        for index in range(0, len(keys) - 1):
            print(keys[index])
            print(r'  num parent nodes: {}'.format(len(ret[keys[index]])))
            print(r'  num child candidates: {}'.format(
                len(ret[keys[index + 1]])))
            tmp = []
            child_key = keys[index + 1]
            children = ret[child_key]
            for parent in ret[keys[index]]:
                children_copy = []

                # - Friday and sundays are handled by the same pair
                if child_key.weekday() == 6:
                    gp_senior_id = parent.parent.get_senior_resident_id()
                    gp_junior_id = parent.parent.get_junior_resident_id()

                    f = filter(
                        lambda node: node.get_senior_resident_id(
                        ) == gp_senior_id and node.get_junior_resident_id() ==
                        gp_junior_id, children)
                    found_child = next(f, None)  # avoid StopIteration when no matching pair exists
                    if found_child:
                        children_copy.append(copy.deepcopy(found_child))
                else:
                    children_copy = copy.deepcopy(children)

                added_children = parent.add_children(children_copy)
                if len(added_children) > 0:
                    tmp.extend(added_children)

            ret[keys[index + 1]] = tmp
            print(r'  num leaf nodes: {}'.format(len(ret[keys[index + 1]])))

        print(RenderTree(root))
Example #25
def printTableau(tree):
    for pre, _, node in RenderTree(tree):
        print("%s%s  %s" % (pre, node.name, node.sign))
Example #26
def run_AutoML(trial, X_train=None, X_test=None, y_train=None, y_test=None, categorical_indicator=None):
    space = None
    search_time = None
    if not 'space' in trial.user_attrs:
        # which hyperparameters to use
        gen = SpaceGenerator()
        space = gen.generate_params()
        space.sample_parameters(trial)

        trial.set_user_attr('space', copy.deepcopy(space))

        search_time, evaluation_time, memory_limit, privacy_limit, training_time_limit, inference_time_limit, pipeline_size_limit, cv, number_of_cvs, hold_out_fraction, sample_fraction, dataset_id = generate_parameters(trial, total_search_time, my_openml_datasets)

    else:
        space = trial.user_attrs['space']

        print(trial.params)

        #make this a hyperparameter
        search_time = trial.params['global_search_time_constraint']

        evaluation_time = search_time
        if 'global_evaluation_time_constraint' in trial.params:
            evaluation_time = trial.params['global_evaluation_time_constraint']

        memory_limit = 10
        if 'global_memory_constraint' in trial.params:
            memory_limit = trial.params['global_memory_constraint']

        privacy_limit = None
        if 'privacy_constraint' in trial.params:
            privacy_limit = trial.params['privacy_constraint']

        training_time_limit = search_time
        if 'training_time_constraint' in trial.params:
            training_time_limit = trial.params['training_time_constraint']

        inference_time_limit = 60
        if 'inference_time_constraint' in trial.params:
            inference_time_limit = trial.params['inference_time_constraint']

        pipeline_size_limit = 350000000
        if 'pipeline_size_constraint' in trial.params:
            pipeline_size_limit = trial.params['pipeline_size_constraint']

        cv = 1
        number_of_cvs = 1
        hold_out_fraction = None
        if 'global_cv' in trial.params:
            cv = trial.params['global_cv']
            if 'global_number_cv' in trial.params:
                number_of_cvs = trial.params['global_number_cv']
        else:
            hold_out_fraction = trial.params['hold_out_fraction']

        sample_fraction = 1.0
        if 'sample_fraction' in trial.params:
            sample_fraction = trial.params['sample_fraction']

        if 'dataset_id' in trial.params:
            dataset_id = trial.params['dataset_id']
        else:
            dataset_id = trial.user_attrs['dataset_id']

    for pre, _, node in RenderTree(space.parameter_tree):
        if node.status == True:
            print("%s%s" % (pre, node.name))

    if type(X_train) == type(None):

        my_random_seed = int(time.time())
        if 'data_random_seed' in trial.user_attrs:
            my_random_seed = trial.user_attrs['data_random_seed']

        X_train, X_test, y_train, y_test, categorical_indicator, attribute_names = get_data(dataset_id, randomstate=my_random_seed)

        if not isinstance(trial, FrozenTrial):
            my_list_constraints_values = [search_time,
                                          evaluation_time,
                                          memory_limit, cv,
                                          number_of_cvs,
                                          ifNull(privacy_limit, constant_value=1000),
                                          ifNull(hold_out_fraction),
                                          sample_fraction,
                                          training_time_limit,
                                          inference_time_limit,
                                          pipeline_size_limit]

            metafeature_values = data2features(X_train, y_train, categorical_indicator)
            features = space2features(space, my_list_constraints_values, metafeature_values)
            features = FeatureTransformations().fit(features).transform(features, feature_names=feature_names)
            trial.set_user_attr('features', features)


    dynamic_params = []
    static_params = []
    for random_i in range(5): #5
        search = MyAutoML(cv=cv,
                          number_of_cvs=number_of_cvs,
                          n_jobs=1,
                          evaluation_budget=evaluation_time,
                          time_search_budget=search_time,
                          space=space,
                          main_memory_budget_gb=memory_limit,
                          differential_privacy_epsilon=privacy_limit,
                          hold_out_fraction=hold_out_fraction,
                          sample_fraction=sample_fraction,
                          training_time_limit=training_time_limit,
                          inference_time_limit=inference_time_limit,
                          pipeline_size_limit=pipeline_size_limit)

        test_score = 0.0
        try:
            search.fit(X_train, y_train, categorical_indicator=categorical_indicator, scorer=my_scorer)

            best_pipeline = search.get_best_pipeline()
            if type(best_pipeline) != type(None):
                test_score = my_scorer(search.get_best_pipeline(), X_test, y_test)
        except:
            pass
        dynamic_params.append(test_score)

        # default params
        gen_new = SpaceGenerator()
        space_new = gen_new.generate_params()
        for pre, _, node in RenderTree(space_new.parameter_tree):
            if node.status == True:
                print("%s%s" % (pre, node.name))

        search_static = MyAutoML(n_jobs=1,
                          time_search_budget=search_time,
                          space=space_new,
                          evaluation_budget=int(0.1 * search_time),
                          main_memory_budget_gb=memory_limit,
                          differential_privacy_epsilon=privacy_limit,
                          hold_out_fraction=0.33,
                          training_time_limit=training_time_limit,
                          inference_time_limit=inference_time_limit,
                          pipeline_size_limit=pipeline_size_limit
                          )

        try:
            best_result = search_static.fit(X_train, y_train, categorical_indicator=categorical_indicator, scorer=my_scorer)
            test_score_default = my_scorer(search_static.get_best_pipeline(), X_test, y_test)
        except:
            test_score_default = 0.0
        static_params.append(test_score_default)

    comparison = np.mean(dynamic_params) - np.mean(static_params)

    return comparison, search
Example #27
def printDerivation(d):
    root = importer.import_(d)
    print(RenderTree(root))
Example #28
 def display(self):
     if self.root is None:
         raise ValueError('Your root node must be defined; use build() to create a root node')
     print(chalk.yellow(self.root.name))
     for pre, fill, node in RenderTree(self.root, style=DoubleStyle):
         print("%s%s" % (chalk.blue(pre), chalk.blue(node.name)))
Example #29
def write_tree(f, tree):
    for pre, fill, node in RenderTree(tree):
        f.write("%s%s\n" % (pre, node.name))
Example #30
def print_node_tree(root_node: Node):
    for pre, fill, node in RenderTree(root_node):
        print("{}{} ({})".format(pre, node.name, node.page))
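
# A short usage sketch (the `page` attribute is assumed to be attached by the
# caller, e.g. an outline entry's page number):
from anytree import Node
toc = Node('Contents', page=1)
Node('Chapter 1', parent=toc, page=3)
Node('Chapter 2', parent=toc, page=17)
print_node_tree(toc)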