Example 1
    def to_representation(self, *args, **kwargs):
        ret = super(FieldsMixin, self).to_representation(*args, **kwargs)

        try:
            fields = self.context["request"].query_params.get("fields")
            if fields:
                fields_to_keep = fields.split(",")

                all_fields = get_field_keys(self.fields, "")

                remove_these_fields = []
                for field in all_fields:
                    for subpath in subpaths(field):
                        if subpath in remove_these_fields:
                            break

                        if not any([
                                f == subpath or f.startswith(subpath + ".")
                                for f in fields_to_keep
                        ]):
                            remove_these_fields.append(subpath)
                            break

                prune(ret, remove_these_fields)
        except Exception as e:
            print(e)

        return ret
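This mixin relies on helpers the excerpt does not show: `get_field_keys`, `subpaths`, and `prune`. A minimal sketch of what `subpaths` is assumed to do, namely yield the dotted prefixes of a field path from shortest to longest:

def subpaths(path):
    # Assumed helper, not from the original source: "a.b.c" yields
    # "a", then "a.b", then "a.b.c", so the loop above can drop the
    # highest-level ancestor that no requested field falls under.
    parts = path.split(".")
    for i in range(1, len(parts) + 1):
        yield ".".join(parts[:i])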
Example 2
def main():
    args = parse_arguments()

    # Dataset
    dataset = Dataset(**vars(args))

    # Reset the default graph and set a graph-level seed
    tf.reset_default_graph()
    tf.set_random_seed(9)

    # Model
    model = Model(num_classes=dataset.num_classes, **vars(args))
    model.construct_model()

    # Session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # Prune
    prune.prune(args, model, sess, dataset)

    # Train and test
    train.train(args, model, sess, dataset)
    test.test(args, model, sess, dataset)

    sess.close()
    sys.exit()
Example 3
def split(root):

    # prepare the root directory
    print('preparing root')
    pgndb.prepare_root(root)
    
    old_nodes = 0
    new_nodes = 1
    i = 1
    while new_nodes > old_nodes: # the last cycle generated new children somewhere so keep going
        print('iteration ' + str(i))
        i += 1
        old_nodes = tree.count_nodes(root)
        print(str(old_nodes) + ' nodes so far')
        
        print('splitting one level...')
        split_one_level(root)
        
        print('pruning the super-tiny nodes before transposition check...')
        prune.prune(root, (threshold / 100))  # 'threshold' is assumed to be a module-level setting
        
        print('fixing transpositions...')
        transpositions.find_and_merge(root, threshold)
        
        print('pruning below threshold...')
        prune.prune(root, threshold)
        
        print('updating node count...')
        new_nodes = tree.count_nodes(root)
        
    print(str(new_nodes) + ' nodes created.')
Example 4
def trans_load(filenames, feature_adder=None, should_prune=False, weights=None, lm=None):
    for file in filenames:
        for fore in forest.Forest.load(file, True, lm=lm):
            if feature_adder:
                feature_adder.add_features(fore)

            if should_prune:
                prune.prune(fore, weights, None, 10)
            yield fore
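`trans_load` is a generator, so each forest is loaded, featurized, and pruned lazily. A hypothetical driver loop (the file name, `weights`, and `decode` are placeholders, not from the original code):

# Forests stream through feature-adding and pruning one at a time,
# so memory stays bounded even across many input files.
for fore in trans_load(["dev.forests"], should_prune=True, weights=weights):
    decode(fore)  # stand-in for whatever consumes the pruned forest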
Example 5
def main():
    args = parse_arguments()

    # Multiple runs
    for run in range(args.nruns):

        # Start
        print('--\nStart run ({})'.format(run))

        # Set paths
        path_save = 'run-{}'.format(run)
        path_keys = ['model', 'log', 'assess']
        args.path = {key: os.path.join(path_save, key) for key in path_keys}

        # Reset the default graph and set a graph-level seed
        tf.reset_default_graph()
        tf.set_random_seed(seed=run)

        # Dataset
        dataset = Dataset(**vars(args))
        if args.transfer_pruning:
            dataset_pruning = Dataset(args.datasource_pruning, args.path_data)

        # Model
        model = Model(**vars(args))
        model.construct_model()

        # Session
        sess = tf.InteractiveSession()

        # Initialization
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        _ = sess.run([model.weights_init], {model.init: True})
        if args.check_jsv:
            check.jacobian_singular_value(args, model, sess, dataset, 'after-init')

        # Prune
        prune.prune(args, model, sess, dataset_pruning if args.transfer_pruning else dataset)
        if args.check_jsv:
            check.jacobian_singular_value(args, model, sess, dataset, 'after-prune')

        # Enforce approximate dynamical isometry in the sparse network
        if args.enforce_isometry:
            approximate_isometry.optimize(args, model, sess, dataset)
            if args.check_jsv:
                check.jacobian_singular_value(args, model, sess, dataset, 'after-isometry')

        # Train and test
        train.train(args, model, sess, dataset)
        test.test(args, model, sess, dataset)

        # Closing
        sess.close()
        print('--\nFinish run ({})'.format(run))

    sys.exit()
Example 6
def check_prune(defconfig):
    msg = git.commit_message('HEAD')

    restriction = None
    data = None
    for line in msg.split('\n'):
        # handle RFC 822 style folded lines
        if data and line[0].isspace():
            data += line[1:]
            continue
        else:
            data = line

        kv = data.split('=', 1)
        data = ''
        if len(kv) != 2:
            continue
        k, v = kv
        if k == 'restriction':
            restriction = v
    if restriction != 'nopublic':
        return 0

    # if we want the top-most patch to have no public changes,
    # remember where we were
    commit = git.rev_parse()
    # first reset to the previous patch:
    git.reset(["-q", "--hard", "HEAD~1"])
    git.clean(['-fdxq'])
    # then prune, commit and remember the state
    prune(defconfig, False, '.')
    git.commit_all("prune test",
                   env={
                       'GIT_COMMITTER_NAME': 'prune tester',
                       'GIT_COMMITTER_EMAIL': '',
                       'GIT_AUTHOR_NAME': 'prune tester',
                       'GIT_AUTHOR_EMAIL': '',
                   })
    pruned_prev = git.rev_parse()
    # go back to the top commit
    git.reset(["-q", "--hard", commit])
    git.clean(['-fdxq'])
    # prune this one again
    prune(defconfig, False, '.')
    # reset to the previous prune
    git.reset([pruned_prev])
    # and check if anything is left:
    if git.is_modified():
        print("FAILURE: restriction=nopublic patch modifies prune tree")
        subprocess.call(['git', 'diff'])
        ret = 1
    else:
        ret = 0
    # eases development:
    git.reset(["-q", "--hard", commit])
    return ret
Example 7
def create_tree(commit, output, readme=True, main={}):
    '''
    Create the tree structure for the commit
    Return the list of full file paths for the commit, from the output dir
    '''
    paths_added = []
    paths_removed = []

    for item in commit[1]:
        parent_path = item["path"].strip()
        if not exists(join(output, parent_path)):
            os.makedirs(join(output, parent_path))
        if item["leaf"]:
            fullpath = join(parent_path, "%s.md" % item["name"])
            with open(join(output, fullpath), "w") as file:
                write_item_file(item, file)
            paths_added.append(fullpath)
        elif readme:
            fullpath = join(parent_path, "README.md")
            with open(join(output, fullpath), "w") as file:
                write_item_file(item, file)
            paths_added.append(fullpath)

    existing = list_files(output, "")

#    base = join(output, os.listdir(output)[0])
    base = output
    print("Base is: %s" % base)
    ## Whatever happens, there'll be a README at the root
    # paths_added.append(join(basename(base), "README.md"))
    paths_added.append("README.md")

    paths_removed = [path for path in existing if path not in paths_added]
    for p in paths_removed:
        os.remove(join(output,p))
    prune(output)

    if readme:
        toc.create_toc(base)
    else:
        print("Writing TOC")
        # toc_md = toc.compute_toc(base, commit[1])
        toc_md = toc2.compute_toc(commit[1])
        with open(join(base, "README.md"), "w") as file:
            if "name" in main:
                file.write("%s\n" % main["name"])
                file.write("=" * len(main["name"]))
                file.write("\n\n")
                file.write(main["title"] + "\n")
                file.write("\n\n".join(main["content"].split("\n")))
            else:
                file.write("%s\n" % basename(base))
                file.write("=" * len(basename(base)))
            file.write("\n\n")
            file.write(toc_md)
    return paths_added, paths_removed
Example 8
def run_prune_vote(year):
    file_name = 'voting_results_' + str(year) + '.json'
    if not path.exists(file_name):
        #print('run_prune_vote', year)
        year = int(year)
        OFFICIAL_AWARDS = []
        if year >= 2018:
            OFFICIAL_AWARDS = OFFICIAL_AWARDS_1819
        else:
            OFFICIAL_AWARDS = OFFICIAL_AWARDS_1315
        prune.prune(year, OFFICIAL_AWARDS)
        voter(year, OFFICIAL_AWARDS)
Example 9
def unflatten(input,
              parse,
              tokenization=None,
              output=None,
              perfect=False,
              pruneInput=True):
    """
    Convenience wrapper that first prunes the graph and then unflattens.
    This function processes the whole corpus.

    @type input: string
    @param input: input file
    @type parse: string
    @param parse: parse to be used
    @type tokenization: string
    @param tokenization: tokenization to be used
    @type output: string
    @param output: output file
    @type perfect: boolean
    @param perfect: modify only perfectly-resolvable events?
    @type pruneInput: boolean
    @param pruneInput: prune the input graph before unflattening?
    @rtype: cElementTree.Element
    @return: corpus node
    """
    if pruneInput:
        xml = prune.prune(input)
    else:
        xml = input
    return unflattenPruned(xml, parse, tokenization, output, perfect)
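A hypothetical call, with placeholder file and parse names (none of these values come from the original source):

# Prune the graph first, then unflatten; returns the corpus root element.
corpus = unflatten("events.xml", parse="mccc", output="events-unflattened.xml")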
Example 10
def pickle_trans_load(filenames, feature_adder=None, should_prune=False, weights=None, lm=None):
    for file in filenames:
        try:
            print file
            f = open(file, "rb")
            fore = cPickle.load(f)

            while fore:  # forest.Forest.load(file, True, lm=lm):
                if feature_adder:
                    feature_adder.add_features(fore)

                if should_prune:
                    prune.prune(fore, weights, None, 10)
                yield fore
                fore = cPickle.load(f)
        except EOFError:
            pass
Example 11
def train_subset(percent):
	percent = max(min(percent, 1.0), 0.0)
	n = min(int(percent * count), count)
	print "Training... %0.2f%% (%d)" % (100.0 * percent, n,)
	subset = training[np.random.choice(count, size=n, replace=False), :]
	start = time.time()
	if args.profile:
		profiler.enable()
	tree = train.train(subset, meta, n_uniques, classification_coln)
	if args.profile:
		profiler.disable()
	print "Done. (%.2fs)" % (time.time() - start)
	if args.prune is not None:
		print "Pruning..."
		start = time.time()
		prune.prune(tree, lambda t: validate(args.prune, t))
		print "Done. (%.2fs)" % (time.time() - start)
	return tree
Example 12
    def forward(self, x):
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)

        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.avgpool(x)
        y = x.view(x.size(0), x.size(1), x.size(2))
        return y


if __name__ == "__main__":
    from prune import prune

    # debug model structure
    net = ft_net(702)
    #     net = ft_net_dense(751)
    #print(net)

    net = prune(net, 0.1)

    input = Variable(torch.FloatTensor(8, 3, 224, 224))
    output = net(input)
    print('net output size:', output.shape)

#     print(net)
Example 13
def train(net, epochs=100, batch_size=128, lr=0.01, reg=5e-4, prn=False):
    """
    Training a network
    :param net:
    :param epochs:
    :param batch_size:
    """
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),

    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),

    ])
    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=16)

    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=reg, nesterov=False)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(epochs*0.5), int(epochs*0.75)], gamma=0.1)

    global_steps = 0
    start = time.time()

    for epoch in range(start_epoch, epochs):
        """
        Start the training code.
        """
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()

            optimizer.step()
            if prn:
                prune(net, method='percentage', q=85.0, s=0.75)
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            global_steps += 1

            if global_steps % 16 == 0:
                end = time.time()
                num_examples_per_second = 16 * batch_size / (end - start)
                print("[Step=%d]\tLoss=%.4f\tacc=%.4f\t%.1f examples/second"
                      % (global_steps, train_loss / (batch_idx + 1), (correct / total), num_examples_per_second))
                start = time.time()

        scheduler.step()

        """
        Start the testing code.
        """
        net.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = net(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
        num_val_steps = len(testloader)
        val_acc = correct / total
        print("Test Loss=%.4f, Test acc=%.4f" % (test_loss / (num_val_steps), val_acc))

        if val_acc > best_acc:
            best_acc = val_acc
            if prn:
                print("Saving to net_after_pruning")
                torch.save(net.state_dict(), "net_after_pruning.pt")
            else:
                print("Saving to net_before_pruning")
                torch.save(net.state_dict(), "net_before_pruning.pt")
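A sketch of how this function might be driven end to end, following the prune-then-finetune pattern in the later examples; the model class and hyperparameters here are assumptions, not taken from this file:

net = VGG16_half().to(device)              # model class assumed from the surrounding project
train(net, epochs=100, prn=False)          # dense baseline, saved as net_before_pruning.pt
prune(net, method='percentage', q=85.0, s=0.75)
train(net, epochs=50, lr=0.001, prn=True)  # fine-tune; prn=True re-prunes after every step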
Example 14
        else:
            maxDepth = int(r.strip())
        gt.configure(dotFileName, logFileName, maxDepth)
        try:
            gt.run()
        except Exception as e:
            print(e)
            gt.painter.close()
            gt.log.close()
            
            
        subprocess.Popen('dot -Tsvg %s -o %s' %(dotFileName, svgFileName), shell=True)
        if PRUNED:
            prunedDotFileName = dotFileName.split('.')[0] + '_pruned.dot'
            prunedSvgFileName = svgFileName.split('.')[0] + '_pruned.svg'
            prune(dotFileName, prunedDotFileName)
            subprocess.Popen('dot -Tsvg %s -o %s' %(prunedDotFileName, prunedSvgFileName), shell=True)

        print('Finish plotting %s' % svgFileName)
        print('Continue and plot another graph on %s? (y/n)' % funcName)
        r = input()
        if r.lower() != 'y':
            break

        logCnt += 1
    
    os.killpg(os.getpgid(g.pid), signal.SIGTERM)
    os.killpg(os.getpgid(qemu_p.pid), signal.SIGTERM)


Example 15
                x: batch_xs,
                y_: batch_ys,
                keep_prob1: kp1,
                keep_prob2: kp2
            }
            train_loss = sess.run(loss, feed_dict=feed_dict)
            print "step %d training loss %g" % (i, train_loss)
        if i % 1000 == 0:
            print_validation_accuracy()
        sess.run(train_step,
                 feed_dict={
                     x: batch_xs,
                     y_: batch_ys,
                     keep_prob1: kp1,
                     keep_prob2: kp2
                 })
    print_test_accuracy()


if __name__ == '__main__':
    # train(10000)
    # save_weights()
    # save_pb(False)
    kp1 = 0.5
    kp2 = 0.5
    for iter in range(0, 5):
        kp1, kp2 = prune(iter + 1, kp1, kp2)
        load_weights(iter + 1)
        train(10000, kp1, kp2)
        save_weights(iter + 1)
        save_pb(True, iter)
Example 16
from prune import prune
from time import time
outputbase="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm1/output"
prbase="/Users/mengqizhou/Desktop/datamining/assignment5/algorithm1/prunedrules"
start = time()
for i in range(1,5):
    prune(outputbase+str(i),prbase+str(i))
end=time()
print "total time: "+str(end-start)+" sec"
Example 17
def forest_oracle(forest, goldtree, del_puncs=False, prune_results=False):
    """ returns best_score, best_parseval, best_tree, edgelist
           now non-recursive topol-sort-style
    """

    if hasattr(forest.root, "oracle_edge"):
        return extract_oracle(forest)

    ## modifies forest also!!
    if del_puncs:
        idx_mapping, newforest = check_puncs(forest, goldtree.tag_seq)
    else:
        idx_mapping, newforest = lambda x: x, forest

    goldspans = merge_labels(goldtree.all_label_spans(), idx_mapping)
    goldbrs = set(goldspans)  ## including TOP

    for node in newforest:
        if node.is_terminal():
            results = Oracles.unit("(%s %s)" % (node.label, node.word))  ## multiplication unit

        else:
            a, b = (
                (0, 0)
                if node.is_spurious()
                else ((1, 1) if (merge_label((node.label, node.span), idx_mapping) in goldbrs) else (1, 0))
            )

            label = "" if node.is_spurious() else node.label
            results = Oracles()  ## addition unit
            for edge in node.edges:
                edgeres = Oracles.unit()  ## multiplication unit

                for sub in edge.subs:
                    assert hasattr(sub, "oracles"), "%s ; %s ; %s" % (node, sub, edge)
                    edgeres = edgeres * sub.oracles

                ##                nodehead = (a, RES((b, -edge.fvector[0], label, [edge])))   ## originally there is label
                assert 0 in edge.fvector, edge
                nodehead = (a, RES((b, -edge.fvector[0], [edge])))
                results += nodehead * edgeres  ## mul

        if prune_results:
            prune(results)
        node.oracles = results
        if debug:
            print >> logs, node.labelspan(), "\n", results, "----------"

    res = (-1, RES((-1, 0, []))) * newforest.root.oracles  ## scale, remove TOP match

    num_gold = len(goldspans) - 1  ## omit TOP.  N.B. goldspans, not brackets! (NP (NP ...))

    best_parseval = None
    for num_test in res:
        ##        num_matched, score, tree_str, edgelist = res[num_test]
        num_matched, score, edgelist = res[num_test]
        this = Parseval.get_parseval(num_matched, num_test, num_gold)
        if best_parseval is None or this < best_parseval:
            best_parseval = this
            best_score = score
            ##            best_tree = tree_str
            best_edgelist = edgelist

    best_tree = Hyperedge.deriv2tree(best_edgelist)

    ## annotate the forest for oracle so that next-time you can preload oracle
    for edge in best_edgelist:
        edge.head.oracle_edge = edge

    ## very careful here: desymbol !
    ##    return -best_score, best_parseval, Tree.parse(desymbol(best_tree)), best_edgelist
    return -best_score, best_parseval, best_tree, best_edgelist
Example 18
from huffman_coding import huffman_coding
from summary import summary
import torch
import numpy as np
from prune import prune

import copy

device = 'cuda' if torch.cuda.is_available() else 'cpu'

net = VGG16_half()
net = net.to(device)

net.load_state_dict(torch.load("net_after_pruning.pt"))
acc = test(net)

while acc > 0.9:
    # Load the best weight parameters
    net.load_state_dict(torch.load("net_after_pruning.pt"))
    test(net)

    # Test accuracy before fine-tuning
    prune(net, method='std', q=45.0, s=1.25)
    test(net)

    finetune_after_prune(net, epochs=50, batch_size=128, lr=0.001, reg=5e-4)
    net.load_state_dict(torch.load("net_after_pruning.pt"))
    acc = test(net)
    spar = summary(net)
    torch.save(net.state_dict(), "net_after_pruning%.2f_%.2f.pt" % (acc, spar))
Example 19
    def prune(self):
        """ Prune unfeasible values from domain """
        return prune(self)
Example 20
#!/usr/bin/env python

from __future__ import print_function
from make_machine import make_machine
from render import render
from views import views
from prune import prune
from set_machine import set_machine

machines   = ["dibond", "dibond_E3D", "sturdy", "mendel", "huxley", "sturdy_E3D"]
has_manual = ["dibond"]
has_views  = ["dibond", "sturdy", "huxley"]

for machine in machines:
    make_machine(machine)

    if machine in has_manual:
        render(machine)

    if machine in has_views:
        views(machine, False)

    if '_' in machine and machine.split('_')[0] in machines:
        prune(machine)
set_machine("dibond")
Example 21
        x = self.model.avgpool(x)
        x = torch.squeeze(x)

        x = self.classifier(x)
        return x


if __name__ == "__main__":

    import torch
    from torch.autograd import Variable
    from prune import prune

    net = ft_resnet(751)
    print(net.model.cfg)

    final_net = ft_resnet(751)
    final_net.model = prune(net.model, 0.5, False)
    print(final_net)

    input = Variable(torch.FloatTensor(4, 3, 128, 256)).cuda()

    net.cuda()
    final_net.cuda()

    print('\n******** validating pruned forward path ********')
    print('final output size:', final_net.model(input).shape)

    print(
        len([l for l in final_net.state_dict().keys() if 'conv' in l.lower()]))
Example 22
    def test_simple_tree_with_two_keys(self):
        tree = {"a": 1, "b": 2}
        prune(tree, ["a"])
        self.assertEqual(json.dumps(tree), '{"b": 2}')
Example 23
    def test_simple_tree_with_nested_key(self):
        tree = {"a": 1, "b": {"c": "d", "e": "f"}}
        prune(tree, ["b.c"])
        self.assertEqual(json.dumps(tree), '{"a": 1, "b": {"e": "f"}}')
Example 24
    def test_simple_tree_with_single_key(self):
        tree = {"a": 1}
        prune(tree, ["a"])
        self.assertEqual(json.dumps(tree), "{}")
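Together, the three tests above pin down this `prune`'s contract: each list entry is a dotted path, and pruning deletes that key from the nested dict in place. A minimal sketch that satisfies all three (an illustration, not the original implementation):

def prune(tree, paths):
    # Walk each dotted path down the nested dict and delete the
    # final key if present; missing intermediate keys are skipped.
    for path in paths:
        node = tree
        parts = path.split(".")
        for part in parts[:-1]:
            node = node.get(part, {})
        node.pop(parts[-1], None)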
Example 25
def run(graph, inline=True, prune=True, cse=True):
    constants.remove(graph)

    # _cse and _prune are module aliases; the bare names are taken by the keyword flags
    if cse:    _cse.eliminate(graph)
    if prune:  _prune.prune(graph)
    if inline: autoinline.inline(graph, 2)
Example 26
print("-----Summary before pruning-----")
summary(model)
print("-------------------------------")

pickle.dump(model, open("foo.pkl", "wb"))

if not args.prune:
    print("Option to prune and finetune not chosen. Exiting")
    sys.exit(0)

# --------------------------------------- #
# --- Pruning and finetune -------------- #
# --------------------------------------- #

# Test accuracy before fine-tuning
prune(model, method=args.prune_type, q=args.q)
if args.dataset == "CIFAR10":
    utils.eval_cifar10(model, batch_size=128)
    #test(args.dataset, model)
elif args.dataset == "ImageNet":
    #utils.val_imagenet(model, lmdb=True, amp=True)
    print("Val_imagenet not working!")
else:
    print("Dataset {} not suported!".format(args.dataset))
    sys.exit(0)

print("-----Summary After pruning-----")
summary(model)
print("-------------------------------")

# Uncomment to load pretrained weights
Example 27
import torch
import numpy as np
from prune import prune

import copy

device = 'cuda' if torch.cuda.is_available() else 'cpu'

net = VGG16_half()
net = net.to(device)

# Load the best weight parameters
net.load_state_dict(torch.load("net_before_pruning.pt"))
test(net)
print("-----Summary before pruning-----")
summary(net)
print("-------------------------------")

# ### Pruning & Finetune with pruned connections
# # Test accuracy before fine-tuning
prune(net, method='std', q=0.45, s=0.75)

# print("-----Summary after pruning-----")
summary(net)
# print("-------------------------------")


Example 28
        real_parseval = Parseval(best_tree, f.goldtree)
        all_real_parseval += real_parseval
        ##assert real_parseval == best_parseval
        ## N.B.: can't make this comparison work, so keep it separate.

        all_parseval += best_parseval

        if prange is None:
            ## dump oracle-annotated forest
            if opts.suffix is not None:
                f.dump("%d.%s" % (i + 1, opts.suffix))

        else:
            for p in prange:

                prune(f, p)
                sc, parseval, tr = forest_oracle(f, f.goldtree)
                pruned_parseval[p] += parseval
                pruned_real_parseval[p] += Parseval(tr, f.goldtree)

                if opts.suffix is not None:
                    f.dump("%d.%s%d" % (i + 1, opts.suffix, p))

    print "1-best (real)", onebest_parseval
    if not implicit_oracle:
        print "forest (punc)", all_parseval
    print "forest (real)", all_real_parseval

    total_time = time.time() - start_time
    print >> logs, "%d forests oracles computed in %.2lf secs (avg %.2lf secs per sent)" % (
        i + 1,
Example 29
program = parser.parse_file(sys.argv[1])
print program.repr()
dump = builder.build(program, global_module)

print dump.repr()
print

analysis.variable_flow(dump)
analysis.dominance_frontiers(dump)
#for block in dump:
#    print 'analysis', block, '->', ', '.join(map(repr,block.succ))
#    print '  prec     ', block.prec
#    print '  idom     ', block.idom
#    print '  frontiers', block.frontiers
#    print '  phi      ', block.phi
#    print '  provide', block.provides
#    print '  need   ', block.needs
#    print '  sustain', block.sustains
#print

result = prune.prune(dump)
registeralloc.allocate(result)

print 'after pruning'
print result.repr()

print "run the program"
script = objects.Closure(interpret.run, result, None)
script.call(())