Example #1
    def __init__(self, filename, deontic=False):
        """ Initialize object and load *filename*. """

        #: The ORM model (:class:`lib.Model.Model`) loaded from the .orm file.
        self.model = Model()
        self._elements = {}  # Dictionary of {id, element} pairs

        # Items in the .orm file omitted by NormaLoader
        self.omissions = []  #: Intentionally omitted model elements
        self.unexpected = set()  #: Unexpected nodes in the XML file

        # Find root of XML tree
        self._model_root = self._parse_norma_file(filename)

        # Load file
        self._load_data_types()
        self._load_object_types()
        self._load_fact_types()  # Also loads subtypes
        self._load_constraints(deontic)

        # Post-processing
        self._fix_nested_fact_type_refs()

        # Report any issues to the user
        self._log_issues(filename, self.omissions, "model element", "ignored")
        self._log_issues(filename, self.unexpected, "XML node", "unexpected")
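
A note on usage: the class docstring in Example #18 shows the intended entry point for this constructor. A minimal sketch, assuming the module layout implied there (lib.NormaLoader and lib.Model are the repository's own modules):

    from lib.NormaLoader import NormaLoader  # assumed import path

    loader = NormaLoader("/path/to/file/example.orm")
    model = loader.model              # the populated lib.Model.Model
    for item in loader.omissions:     # elements the loader intentionally skipped
        print("ignored:", item)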
Example #2
    def test_display_nonempty(self):
        """ Test Display of non-empty model. """
        model = Model()
        model.object_types.add(ObjectType(name="O1"))
        model.fact_types.add(FactType(name="F1"))
        model.fact_types.add(FactType(name="F2"))
        model.constraints.add(Constraint(name="C1"))
        model.display()

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, "Object Types:\n    O1\nFact Types:\n    F1\n    F2\nConstraints:\n    C1")
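
This test reads sys.stdout.getvalue(), which only exists when the runner has swapped stdout for an in-memory buffer. A sketch of how such a suite is typically driven (an assumption about the harness, not part of the snippet):

    import unittest

    if __name__ == "__main__":
        # buffer=True makes unittest replace sys.stdout during each test,
        # which is what gives sys.stdout a getvalue() method above
        unittest.main(buffer=True)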
Example #3
    def __init__(self, input_shape, pool_shape, num_classes, name=None):
        self.input_shape = input_shape
        self.batch_size, self.h, self.w, self.fin = self.input_shape
        self.pool_shape = pool_shape
        self.num_classes = num_classes
        self.name = name

        l1 = AvgPool(size=self.input_shape, ksize=self.pool_shape, strides=self.pool_shape, padding='SAME')

        l2_input_shape = l1.output_shape()
        l2 = ConvToFullyConnected(input_shape=l2_input_shape)
        
        l3_input_shape = l2.output_shape()
        l3 = FullyConnected(input_shape=l3_input_shape, size=self.num_classes, init='alexnet', activation=Linear(), bias=0., name=self.name)
        
        self.B = Model(layers=[l1, l2, l3])
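
The constructor wires the layers together by asking each one for its output_shape(). A rough sketch of the shape arithmetic for the pooling stage, assuming NHWC input, 'SAME' padding, and stride equal to the pool size (the real logic lives in the AvgPool class; this is only an illustration):

    import math

    def same_pool_output_shape(input_shape, pool_shape):
        # Spatial dims shrink by the pool factor; batch and channels unchanged
        n, h, w, c = input_shape
        _, ph, pw, _ = pool_shape
        return (n, math.ceil(h / ph), math.ceil(w / pw), c)

    print(same_pool_output_shape((128, 32, 32, 96), (1, 2, 2, 1)))  # (128, 16, 16, 96)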
Example #4
    def test_get_elements(self):
        """ Test get() method. """
        model = Model()
        obj = ObjectType(name="O1")
        cons = Constraint(name="O1")
        fact = FactType(name="F1")
    
        model.add(obj)
        model.add(cons)
        model.add(fact)

        self.assertEquals(model.get("ObjectTypes.O1"), obj)
        self.assertEquals(model.get("Constraints.O1"), cons)
        self.assertEquals(model.get("FactTypes.F1"), fact)
        self.assertEquals(model.get("ObjectTypes."), None)
        self.assertEquals(model.get("F1"), None)       
Example #5
    def test_create_directory_exception(self):
        """ Test exception case in _create_directory. """
        model = Model()
        pop = Population(ORMMinusModel(model))

        handle, filename = tempfile.mkstemp()
        try:
            with self.assertRaises(OSError):  # filename is a file, not a dir
                LogiQL(model, pop, filename, make=False)
        finally:
            os.close(handle)  # close the descriptor mkstemp leaves open
            os.remove(filename)
Example #6
    def test_add_remove_constraint_with_side_effects(self):
        """ Test adding and removing a generic constraint from the model. """
        model = Model()
        obj1 = ObjectType(name="O1")        
        cons1 = Constraint(name="C1", covers=[obj1])
        model.add(obj1)
        model.add(cons1)

        self.assertEqual(model.constraints.count(), 1)
        self.assertEqual(model.constraints.get("C1"), cons1)
        self.assertEqual(model.constraints.get("C1").covers[0], obj1)
        self.assertEqual(model.object_types.get("O1").covered_by[0], cons1)

        model.remove(cons1)
        self.assertEqual(model.constraints.count(), 0)
        self.assertEqual(model.constraints.get("C1"), None)
        self.assertEqual(model.object_types.get("O1").covered_by, [])
Example #7
    def test_create_directories(self):
        """ Test that appropriate directories get created. """
        model = Model()
        pop = Population(ORMMinusModel(model))

        tempdir = os.path.join(self.tempdir, "test_create_directories")

        # Directory doesn't exist before call
        self.assertFalse(os.path.isdir(tempdir))

        logiql = LogiQL(model, pop, tempdir, make=False)

        # Directory and expected sub-directories now exist
        self.assertTrue(os.path.isdir(tempdir))
        self.assertEqual(tempdir, logiql.rootdir)
        self.assertTrue(os.path.isdir(logiql.importdir))
        self.assertTrue(os.path.isdir(logiql.logicdir))
Example #8
    def test_commit_and_rollback(self):
        """ Test committing and rolling back constraints on a model. """
        model = Model()

        obj1 = ObjectType(name="O1")
        obj2 = ObjectType(name="O2")

        fact = FactType(name="F1")
        role1 = fact.add_role(player=obj1, name="R1")
        role2 = fact.add_role(player=obj2, name="R2")

        cons1 = Constraint.MandatoryConstraint(name="M1", covers=[role1])
        cons2 = Constraint.UniquenessConstraint(name="U1",
                                                covers=[role1, role2])
        cons3 = Constraint.ValueConstraint(name="V1", covers=[obj1])

        for element in [obj1, obj2, fact, cons1, cons2, cons3]:
            model.add(element)

        self.assertEquals(model.constraints.get("M1").covers, [role1])
        self.assertEquals(model.constraints.get("U1").covers, [role1, role2])
        self.assertEquals(model.constraints.get("V1").covers, [obj1])

        self.assertEquals(role1.covered_by, [cons1, cons2])
        self.assertEquals(role2.covered_by, [cons2])
        self.assertEquals(obj1.covered_by, [cons3])

        model.remove(cons2)
        model.remove(cons3)

        self.assertEquals(model.constraints.get("M1"), cons1)
        self.assertEquals(model.constraints.get("U1"), None)
        self.assertEquals(model.constraints.get("V1"), None)

        self.assertEquals(role1.covered_by, [cons1])
        self.assertEquals(role2.covered_by, [])
        self.assertEquals(obj1.covered_by, [])

        # Test that additional rollback has no effect
        model.remove(cons3)
        self.assertEquals(model.constraints.get("M1"), cons1)
        self.assertEquals(model.constraints.get("V1"), None)
        self.assertEquals(obj1.covered_by, [])
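
The expectations above imply a two-way link between a constraint and the elements it covers: add() appends the constraint to each covered element's covered_by list, and remove() undoes that and is a no-op when repeated. A sketch of that bookkeeping as the test sees it (assumed, not taken from lib.Model):

    def add_constraint(model, cons):
        model.constraints.add(cons)
        for element in cons.covers:
            element.covered_by.append(cons)

    def remove_constraint(model, cons):
        if model.constraints.get(cons.name) is None:
            return  # already removed; repeat removal has no effect
        model.constraints.remove(cons)
        for element in cons.covers:
            element.covered_by.remove(cons)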
Example #9
    def test_add_remove_generic_constraint(self):
        """ Test adding and removing a generic constraint from the model. """
        model = Model()
        cons1 = Constraint(name="C1")

        model.add(cons1)
        self.assertEqual(model.constraints.count(), 1)
        self.assertEqual(model.constraints.get("C1"), cons1)

        model.remove(cons1)
        self.assertEqual(model.constraints.count(), 0)
        self.assertEqual(model.constraints.get("C1"), None)
Example #10
    def test_add_remove_object_type(self):
        """ Test adding and removing an object type from the model. """
        model = Model()
        obj1 = ObjectType(name="O1")
        model.add(obj1)

        self.assertEqual(model.object_types.count(), 1)
        self.assertEqual(model.object_types.get("O1"), obj1)

        with self.assertRaises(NotImplementedError):
            model.remove(obj1)

        # I decided for now to just raise a NotImplementedError for rollback,
        # since I'm not sure what the right behavior should be.
Example #11
    def test_add_remove_fact_type(self):
        """ Test adding and removing a fact type from the model. """
        model = Model()
        fact = FactType(name="F1")
        fact.add_role(player=ObjectType(name="O1"))
        model.add(fact)

        self.assertEqual(model.fact_types.count(), 1)
        self.assertEqual(model.fact_types.get("F1"), fact)

        with self.assertRaises(NotImplementedError):
            model.remove(fact)

        # I decided for now to just raise a NotImplementedError for rollback,
        # since I'm not sure what the right behavior should be.
Example #12
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--alpha', type=float, default=1e-4)
    parser.add_argument('--beta', type=float,
                        default=1e-4)  # learning rate for feedback weights B
    parser.add_argument('--sigma', type=float,
                        default=0.1)  # node perturbation standard deviation
    parser.add_argument('--l2', type=float, default=0.)
    parser.add_argument('--decay', type=float, default=1.)
    parser.add_argument('--eps', type=float, default=1e-5)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--act', type=str, default='tanh')
    parser.add_argument('--bias', type=float, default=0.1)
    parser.add_argument('--gpu', type=int, default=1)
    parser.add_argument('--dfa', type=int, default=1)
    parser.add_argument('--feedbacklearning', type=int,
                        default=1)  #Whether or not to learn feedback weights
    parser.add_argument('--sparse', type=int, default=0)
    parser.add_argument('--rank', type=int, default=0)
    parser.add_argument('--init', type=str, default="sqrt_fan_in")
    parser.add_argument('--opt', type=str, default="adam")
    parser.add_argument('--N', type=int, default=50)
    parser.add_argument('--save', type=int, default=0)
    parser.add_argument('--name', type=str, default="cifar10_conv_np")
    parser.add_argument('--load', type=str, default=None)
    args = parser.parse_args()

    if args.gpu >= 0:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

    cifar10 = tf.keras.datasets.cifar10.load_data()

    ##############################################

    EPOCHS = args.epochs
    TRAIN_EXAMPLES = 50000
    TEST_EXAMPLES = 10000
    BATCH_SIZE = args.batch_size

    if args.act == 'tanh':
        act = Tanh()
    elif args.act == 'relu':
        act = Relu()
    else:
        raise ValueError("unsupported --act value: %s" % args.act)

    train_fc = True
    if args.load:
        train_conv = False
    else:
        train_conv = True

    weights_fc = None
    weights_conv = args.load

    #Setup the parameters
    attrs = ['sigma', 'alpha', 'beta']
    log_scale = [True, True, True]
    ranges = [[-4, -1], [-6, -3], [-6, -3]]
    params = []
    isnan = []
    train_accs = []
    test_accs = []

    #Here we run a bunch of times for different parameters...
    for idx in range(args.N):

        #Choose some random parameters...
        param = set_random_hyperparameters(args, attrs, ranges, log_scale)
        params.append(param)

        if args.feedbacklearning == 0:
            args.beta = 0

        #Tell me the params....
        print('Alpha, beta, sigma are: ', args.alpha, args.beta, args.sigma)

        tf.reset_default_graph()
        tf.set_random_seed(0)  # set after the reset; the seed is graph-level

        batch_size = tf.placeholder(tf.int32, shape=())
        dropout_rate = tf.placeholder(tf.float32, shape=())
        learning_rate = tf.placeholder(tf.float32, shape=())
        sigma = tf.placeholder(tf.float32, shape=(), name="Sigma")
        X = tf.placeholder(tf.float32, [None, 32, 32, 3])
        # Standardize under a new name: rebinding X would make the feed_dicts
        # below feed the map_fn output and silently skip standardization
        X_std = tf.map_fn(tf.image.per_image_standardization, X)
        Y = tf.placeholder(tf.float32, [None, 10])

        l0 = Convolution(input_sizes=[batch_size, 32, 32, 3],
                         filter_sizes=[5, 5, 3, 96],
                         num_classes=10,
                         init_filters=args.init,
                         strides=[1, 1, 1, 1],
                         padding="SAME",
                         alpha=learning_rate,
                         activation=act,
                         bias=args.bias,
                         last_layer=False,
                         name='conv1',
                         load=weights_conv,
                         train=train_conv)
        l1 = MaxPool(size=[batch_size, 32, 32, 96],
                     ksize=[1, 3, 3, 1],
                     strides=[1, 2, 2, 1],
                     padding="SAME")

        # Perturb the activity; the perturbed output is used to train the feedback weights
        l2p = NodePert(size=[batch_size, 16, 16, 96], sigma=sigma)
        l2 = FeedbackConv(size=[batch_size, 16, 16, 96],
                          num_classes=10,
                          sparse=args.sparse,
                          rank=args.rank,
                          name='conv1_fb')

        l3 = Convolution(input_sizes=[batch_size, 16, 16, 96],
                         filter_sizes=[5, 5, 96, 128],
                         num_classes=10,
                         init_filters=args.init,
                         strides=[1, 1, 1, 1],
                         padding="SAME",
                         alpha=learning_rate,
                         activation=act,
                         bias=args.bias,
                         last_layer=False,
                         name='conv2',
                         load=weights_conv,
                         train=train_conv)
        l4 = MaxPool(size=[batch_size, 16, 16, 128],
                     ksize=[1, 3, 3, 1],
                     strides=[1, 2, 2, 1],
                     padding="SAME")
        l5p = NodePert(size=[batch_size, 8, 8, 128], sigma=sigma)
        l5 = FeedbackConv(size=[batch_size, 8, 8, 128],
                          num_classes=10,
                          sparse=args.sparse,
                          rank=args.rank,
                          name='conv2_fb')

        l6 = Convolution(input_sizes=[batch_size, 8, 8, 128],
                         filter_sizes=[5, 5, 128, 256],
                         num_classes=10,
                         init_filters=args.init,
                         strides=[1, 1, 1, 1],
                         padding="SAME",
                         alpha=learning_rate,
                         activation=act,
                         bias=args.bias,
                         last_layer=False,
                         name='conv3',
                         load=weights_conv,
                         train=train_conv)
        l7 = MaxPool(size=[batch_size, 8, 8, 256],
                     ksize=[1, 3, 3, 1],
                     strides=[1, 2, 2, 1],
                     padding="SAME")
        l8p = NodePert(size=[batch_size, 4, 4, 256], sigma=sigma)
        l8 = FeedbackConv(size=[batch_size, 4, 4, 256],
                          num_classes=10,
                          sparse=args.sparse,
                          rank=args.rank,
                          name='conv3_fb')

        l9 = ConvToFullyConnected(shape=[4, 4, 256])

        l10p = NodePert(size=[batch_size, 4 * 4 * 256], sigma=sigma)
        l10 = FullyConnected(size=[4 * 4 * 256, 2048],
                             num_classes=10,
                             init_weights=args.init,
                             alpha=learning_rate,
                             activation=act,
                             bias=args.bias,
                             last_layer=False,
                             name='fc1',
                             load=weights_fc,
                             train=train_fc)
        l11 = Dropout(rate=dropout_rate)
        l12 = FeedbackFC(size=[4 * 4 * 256, 2048],
                         num_classes=10,
                         sparse=args.sparse,
                         rank=args.rank,
                         name='fc1_fb')

        l13p = NodePert(size=[batch_size, 2048], sigma=sigma)
        l13 = FullyConnected(size=[2048, 2048],
                             num_classes=10,
                             init_weights=args.init,
                             alpha=learning_rate,
                             activation=act,
                             bias=args.bias,
                             last_layer=False,
                             name='fc2',
                             load=weights_fc,
                             train=train_fc)
        l14 = Dropout(rate=dropout_rate)
        l15 = FeedbackFC(size=[2048, 2048],
                         num_classes=10,
                         sparse=args.sparse,
                         rank=args.rank,
                         name='fc2_fb')

        l16 = FullyConnected(size=[2048, 10],
                             num_classes=10,
                             init_weights=args.init,
                             alpha=learning_rate,
                             activation=Linear(),
                             bias=args.bias,
                             last_layer=True,
                             name='fc3',
                             load=weights_fc,
                             train=train_fc)

        ##############################################

        model = Model(layers=[
            l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14,
            l15, l16
        ])
        model_perturbed = Model(layers=[
            l0, l1, l2p, l2, l3, l4, l5p, l5, l6, l7, l8p, l8, l9, l10p, l10,
            l11, l12, l13p, l13, l14, l15, l16
        ])

        predict = model.predict(X=X_std)
        predict_perturbed = model_perturbed.predict(X=X_std)

        #######
        #Pairs of perturbations and feedback weights
        #feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l10p, l12], [l13p, l15]]

        #Test one at a time... this works, so it must be l10p, 12 pair that fails
        feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l13p, l15]]

        # Gather the noise, feedback matrices, and the perturbed and unperturbed losses to build the feedback-weight update rule
        loss = tf.reduce_sum(tf.pow(tf.nn.softmax(predict) - Y, 2), 1) / 2
        loss_perturbed = tf.reduce_sum(
            tf.pow(tf.nn.softmax(predict_perturbed) - Y, 2), 1) / 2

        train_B = []
        E = tf.nn.softmax(predict) - Y
        for idx, (noise, feedback) in enumerate(feedbackpairs):
            print(idx, batch_size, feedback.output_size)
            xi = tf.reshape(noise.get_noise(),
                            (batch_size, feedback.output_size))
            B = feedback.B
            lambd = tf.matmul(
                tf.diag(loss_perturbed - loss) / args.sigma / args.sigma, xi)
            np_error = tf.matmul(E, B) - lambd
            grad_B = tf.matmul(tf.transpose(E), np_error)
            new_B = B.assign(B - args.beta * grad_B)
            train_B.append(new_B)
        #######

        weights = model.get_weights()

        if args.opt == "adam" or args.opt == "rms" or args.opt == "decay":
            if args.dfa:
                grads_and_vars = model.dfa_gvs(X=X, Y=Y)
            else:
                grads_and_vars = model.gvs(X=X, Y=Y)

            if args.opt == "adam":
                train = tf.train.AdamOptimizer(
                    learning_rate=learning_rate,
                    beta1=0.9,
                    beta2=0.999,
                    epsilon=args.eps).apply_gradients(
                        grads_and_vars=grads_and_vars)
            elif args.opt == "rms":
                train = tf.train.RMSPropOptimizer(
                    learning_rate=learning_rate, decay=0.99,
                    epsilon=args.eps).apply_gradients(
                        grads_and_vars=grads_and_vars)
            elif args.opt == "decay":
                train = tf.train.GradientDescentOptimizer(
                    learning_rate=learning_rate).apply_gradients(
                        grads_and_vars=grads_and_vars)
            else:
                raise ValueError("unsupported --opt value: %s" % args.opt)

        else:
            if args.dfa:
                train = model.dfa(X=X, Y=Y)
            else:
                train = model.train(X=X, Y=Y)

        correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y, 1))
        total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))

        ##############################################

        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        (x_train, y_train), (x_test, y_test) = cifar10

        x_train = x_train.reshape(TRAIN_EXAMPLES, 32, 32, 3)
        y_train = keras.utils.to_categorical(y_train, 10)

        x_test = x_test.reshape(TEST_EXAMPLES, 32, 32, 3)
        y_test = keras.utils.to_categorical(y_test, 10)

        ##############################################

        filename = args.name + '.results'
        with open(filename, "w") as f:
            f.write(filename + "\n")
            f.write("total params: " + str(model.num_params()) + "\n")

        ##############################################

        for ii in range(EPOCHS):
            if args.opt in ('decay', 'gd'):
                decay = np.power(args.decay, ii)
                lr = args.alpha * decay
            else:
                lr = args.alpha

            print(ii)

            #############################

            _count = 0
            _total_correct = 0

            #The training loop... here we add something to also update the feedback weights with the node pert
            for jj in range(int(TRAIN_EXAMPLES / BATCH_SIZE)):
                xs = x_train[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                ys = y_train[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                _correct, _ = sess.run(
                    [total_correct, train],
                    feed_dict={
                        sigma: 0.0,
                        batch_size: BATCH_SIZE,
                        dropout_rate: args.dropout,
                        learning_rate: lr,
                        X: xs,
                        Y: ys
                    })

                #Add step to update B......
                _ = sess.run(
                    [train_B],
                    feed_dict={
                        sigma: args.sigma,
                        batch_size: BATCH_SIZE,
                        dropout_rate: args.dropout,
                        learning_rate: lr,
                        X: xs,
                        Y: ys
                    })

                _total_correct += _correct
                _count += BATCH_SIZE

            train_acc = 1.0 * _total_correct / _count
            train_accs.append(train_acc)

            #############################

            _count = 0
            _total_correct = 0

            for jj in range(int(TEST_EXAMPLES / BATCH_SIZE)):
                xs = x_test[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                ys = y_test[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                _correct = sess.run(total_correct,
                                    feed_dict={
                                        sigma: 0.0,
                                        batch_size: BATCH_SIZE,
                                        dropout_rate: 0.0,
                                        learning_rate: 0.0,
                                        X: xs,
                                        Y: ys
                                    })

                _total_correct += _correct
                _count += BATCH_SIZE

            test_acc = 1.0 * _total_correct / _count
            test_accs.append(test_acc)

            isnan.append(None)

            #try:
            #    trainer.train()
            #except ValueError:
            #    print("Method fails to converge for these parameters")
            #    isnan[n,m] = 1

            #Save results...
            #############################

            print("train acc: %f test acc: %f" % (train_acc, test_acc))

            f = open(filename, "a")
            f.write("train acc: %f test acc: %f\n" % (train_acc, test_acc))
            f.close()

        #Save params after each run
        fn = "./cifar10_conv_np_hyperparam_search_varalpha_septsearch_2_dfa_%d_fblearning_%d.npz" % (
            args.dfa, args.feedbacklearning)
        to_save = {
            'attr': attrs,
            'params': params,
            'train_accs': train_accs,
            'test_accs': test_accs,
            'isnan': isnan
        }
        pickle.dump(to_save, open(fn, "wb"))
Example #13
l10p = NodePert(size=[batch_size, 4*4*256], sigma=sigma)
l10 = FullyConnected(size=[4*4*256, 2048], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='fc1', load=weights_fc, train=train_fc)
l11 = Dropout(rate=dropout_rate)
l12 = FeedbackFC(size=[4*4*256, 2048], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc1_fb')

l13p = NodePert(size=[batch_size, 2048], sigma=sigma)
l13 = FullyConnected(size=[2048, 2048], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='fc2', load=weights_fc, train=train_fc)
l14 = Dropout(rate=dropout_rate)
l15 = FeedbackFC(size=[2048, 2048], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc2_fb')

l16 = FullyConnected(size=[2048, 10], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Linear(), bias=args.bias, last_layer=True, name='fc3', load=weights_fc, train=train_fc)

##############################################

model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16])
model_perturbed = Model(layers=[l0, l1, l2p, l2, l3, l4, l5p, l5, l6, l7, l8p, l8, l9, l10p, l10, l11, l12, l13p, l13, l14, l15, l16])

predict = model.predict(X=X)
predict_perturbed = model_perturbed.predict(X=X)

#######
#Pairs of perturbations and feedback weights
#feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l10p, l12], [l13p, l15]]

#Test one at a time... this works, so it must be l10p, 12 pair that fails
feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l13p, l15]]

# Gather the noise, feedback matrices, and the perturbed and unperturbed losses to build the feedback-weight update rule
loss = tf.reduce_sum(tf.pow(tf.nn.softmax(predict) - Y, 2), 1)/2
loss_perturbed = tf.reduce_sum(tf.pow(tf.nn.softmax(predict_perturbed) - Y, 2), 1)/2
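
In math terms, the loss defined here is the per-example halved squared error L_i = ||softmax(p_i) - y_i||^2 / 2; the reduce_sum over axis 1 keeps one scalar per example, which is what the diag(loss_perturbed - loss) construction in Example #12 relies on.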
Example #14
    def __init__(self):

        self.model = None
        self.registry = None
        self.mode = None
        self._loaded = False

        DModule.__init__(self)
        QtWidgets.QMainWindow.__init__(self)

        self.model = Model(self)

        self.setWindowTitle("CeraMatch")
        self.setWindowIcon(QtGui.QIcon("res/cm_icon.svg"))  # forward slash avoids the stray backslash escape
        self.setStyleSheet("QPushButton {padding: 5px; min-width: 100px;}")

        self.central_widget = QtWidgets.QWidget(self)
        self.central_widget.setLayout(QtWidgets.QVBoxLayout())
        self.central_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.setCentralWidget(self.central_widget)

        self.splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)

        self.central_widget.layout().addWidget(self.splitter)

        self.registry = Registry("Deposit")
        self.image_view = ImageView(self)
        self.footer_frame = FooterFrame(self)
        self.descriptor_group = DescriptorGroup(self)
        self.sort_group = SortGroup(self)
        self.cluster_group = ClusterGroup(self)
        self.menu = Menu(self)
        self.toolbar = ToolBar(self)
        self.statusbar = StatusBar(self)

        self.calculate_button = Button("Calculate Distances",
                                       self.on_calculate)
        self.calculate_button.setEnabled(False)

        self.left_frame = QtWidgets.QFrame(self)
        self.left_frame.setLayout(QtWidgets.QVBoxLayout())
        self.left_frame.layout().setContentsMargins(10, 10, 0, 10)

        self.right_frame = QtWidgets.QFrame(self)
        self.right_frame.setLayout(QtWidgets.QVBoxLayout())
        self.right_frame.layout().setContentsMargins(0, 0, 0, 0)

        self.splitter.addWidget(self.left_frame)
        self.splitter.addWidget(self.right_frame)

        self.left_frame.layout().addWidget(self.descriptor_group)
        group = QtWidgets.QGroupBox("Calculate")
        group.setLayout(QtWidgets.QVBoxLayout())
        group.layout().addWidget(self.calculate_button)
        self.left_frame.layout().addWidget(group)
        self.left_frame.layout().addWidget(self.sort_group)
        self.left_frame.layout().addWidget(self.cluster_group)
        self.left_frame.layout().addStretch()

        self.right_frame.layout().addWidget(self.image_view)
        self.right_frame.layout().addWidget(self.footer_frame)

        self.setStatusBar(self.statusbar)

        self._loaded = True

        self.setGeometry(100, 100, 1024, 768)

        self.on_samples()

        self.footer_frame.slider_zoom.setValue(100)

        self.connect_broadcast(Broadcasts.VIEW_ACTION, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_LOCAL_FOLDER_CHANGED,
                               self.on_update)
        self.connect_broadcast(Broadcasts.STORE_SAVED, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_LOADED, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_DATA_SOURCE_CHANGED,
                               self.on_update)
        self.connect_broadcast(Broadcasts.STORE_DATA_CHANGED, self.on_update)
Example #15
class View(DModule, QtWidgets.QMainWindow):

    MODE_IDS = 0x00000001
    MODE_DISTANCE_MIN = 0x00000002
    MODE_DISTANCE_MAX = 0x00000004
    MODE_CLUSTER = 0x00000008

    def __init__(self):

        self.model = None
        self.registry = None
        self.mode = None
        self._loaded = False

        DModule.__init__(self)
        QtWidgets.QMainWindow.__init__(self)

        self.model = Model(self)

        self.setWindowTitle("CeraMatch")
        self.setWindowIcon(QtGui.QIcon("res/cm_icon.svg"))  # forward slash avoids the stray backslash escape
        self.setStyleSheet("QPushButton {padding: 5px; min-width: 100px;}")

        self.central_widget = QtWidgets.QWidget(self)
        self.central_widget.setLayout(QtWidgets.QVBoxLayout())
        self.central_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.setCentralWidget(self.central_widget)

        self.splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)

        self.central_widget.layout().addWidget(self.splitter)

        self.registry = Registry("Deposit")
        self.image_view = ImageView(self)
        self.footer_frame = FooterFrame(self)
        self.descriptor_group = DescriptorGroup(self)
        self.sort_group = SortGroup(self)
        self.cluster_group = ClusterGroup(self)
        self.menu = Menu(self)
        self.toolbar = ToolBar(self)
        self.statusbar = StatusBar(self)

        self.calculate_button = Button("Calculate Distances",
                                       self.on_calculate)
        self.calculate_button.setEnabled(False)

        self.left_frame = QtWidgets.QFrame(self)
        self.left_frame.setLayout(QtWidgets.QVBoxLayout())
        self.left_frame.layout().setContentsMargins(10, 10, 0, 10)

        self.right_frame = QtWidgets.QFrame(self)
        self.right_frame.setLayout(QtWidgets.QVBoxLayout())
        self.right_frame.layout().setContentsMargins(0, 0, 0, 0)

        self.splitter.addWidget(self.left_frame)
        self.splitter.addWidget(self.right_frame)

        self.left_frame.layout().addWidget(self.descriptor_group)
        group = QtWidgets.QGroupBox("Calculate")
        group.setLayout(QtWidgets.QVBoxLayout())
        group.layout().addWidget(self.calculate_button)
        self.left_frame.layout().addWidget(group)
        self.left_frame.layout().addWidget(self.sort_group)
        self.left_frame.layout().addWidget(self.cluster_group)
        self.left_frame.layout().addStretch()

        self.right_frame.layout().addWidget(self.image_view)
        self.right_frame.layout().addWidget(self.footer_frame)

        self.setStatusBar(self.statusbar)

        self._loaded = True

        self.setGeometry(100, 100, 1024, 768)

        self.on_samples()

        self.footer_frame.slider_zoom.setValue(100)

        self.connect_broadcast(Broadcasts.VIEW_ACTION, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_LOCAL_FOLDER_CHANGED,
                               self.on_update)
        self.connect_broadcast(Broadcasts.STORE_SAVED, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_LOADED, self.on_update)
        self.connect_broadcast(Broadcasts.STORE_DATA_SOURCE_CHANGED,
                               self.on_update)
        self.connect_broadcast(Broadcasts.STORE_DATA_CHANGED, self.on_update)

    def get_selected(self):
        # returns [[sample_id, DResource, label, value, index], ...]

        return self.image_view.get_selected()

    def update(self):

        if not hasattr(self, "descriptor_group"):
            return

        self.descriptor_group.update()
        self.sort_group.update()
        self.cluster_group.update()
        self.footer_frame.update()
        self.toolbar.update()
        self.image_view.update_()

        selected = self.get_selected()

        self.calculate_button.setEnabled(self.model.is_connected()
                                         and not self.model.has_distances())

        if selected:
            if self.mode == self.MODE_DISTANCE_MIN:
                text = "Distance: %s, Sample ID: %s" % (selected[0].value,
                                                        selected[0].id)
            else:
                cluster = selected[0].cluster
                levels = self.image_view.get_selected_level()
                if levels:
                    cluster = ".".join(cluster.split(".")[:levels[0]])
                if cluster:
                    text = "Cluster: %s, Leaf: %s, Sample ID: %s" % (
                        cluster, selected[0].value, selected[0].id)
                else:
                    text = "Sample ID: %s" % (selected[0].id)
            self.statusbar.message(text)

    def reload_samples(self):

        self.model.load_samples()
        self.update()
        self.on_samples()

    def on_update(self, *args):

        self.update()

    def on_set_datasource(self, *args):

        self.reload_samples()

    def on_calculate(self, *args):

        self.model.calc_distances()

    def on_samples(self, *args):

        self.mode = self.MODE_IDS
        self.model.sort_by_ids()

    def on_distance_max(self, *args):

        self.mode = self.MODE_DISTANCE_MAX
        self.model.sort_by_distmax()

    def on_distance_min(self, *args):

        selected = self.get_selected()
        if selected:
            sample_id = selected[0].id
        elif isinstance(self.model.samples[0].value, float):
            sample_id = self.model.samples[0].id
        else:
            return
        self.mode = self.MODE_DISTANCE_MIN
        self.model.sort_by_distance(sample_id)

    def on_cluster(self, *args):

        self.mode = self.MODE_CLUSTER

        if self.model.has_clusters():
            self.model.update_clusters()

    def on_auto_cluster(self, *args):

        self.mode = self.MODE_CLUSTER
        self.model.auto_cluster()

    def on_split_cluster(self, *args):

        selected = self.image_view.get_selected()
        if len(selected) != 1:
            return
        cluster = selected[0].cluster
        if cluster is None:
            return
        self.model.split_cluster(cluster, selected[0])

    def on_join_parent(self, *args):

        selected = self.image_view.get_selected()
        if not selected:
            return
        self.mode = self.MODE_CLUSTER

        clusters = set()
        for sample in selected:
            if sample.cluster is None:
                continue
            clusters.add(sample.cluster)
        if clusters:
            for cluster in clusters:
                self.model.join_cluster_to_parent(cluster, selected[0])

    def on_join_children(self, *args):

        selected = self.image_view.get_selected()
        if not selected:
            return
        self.mode = self.MODE_CLUSTER

        clusters = set()
        for sample in selected:
            if sample.cluster is None:
                continue
            clusters.add(sample.cluster)
        if not clusters:
            return
        levels = self.image_view.get_selected_level()
        if not levels:
            return
        level = max(levels)
        for cluster in clusters:
            self.model.join_children_to_cluster(cluster, level,
                                                selected[0])

    def on_manual_cluster(self, *args):

        selected = self.image_view.get_selected()
        if not selected:
            return
        self.model.manual_cluster(selected, selected[0])

    def on_clear_clusters(self, *args):

        self.model.clear_clusters()
        self.model.sort_by_ids()

    def on_reload(self, *args):

        if self.mode is None:
            return
        if self.mode == self.MODE_IDS:
            self.on_samples()
        elif self.mode == self.MODE_DISTANCE_MIN:
            self.on_distance_min()
        elif self.mode == self.MODE_DISTANCE_MAX:
            self.on_distance_max()
        elif self.mode == self.MODE_CLUSTER:
            self.on_cluster()

    def on_prev(self, *args):

        self.model.browse_distmax(-1)

    def on_next(self, *args):

        self.model.browse_distmax(1)

    def on_zoom(self, value):

        self.image_view.set_thumbnail_size(value)

    def on_drop(self, src_ids, tgt_id):

        self.model.add_to_cluster(src_ids, tgt_id)

    def closeEvent(self, event):

        if not self.model.is_saved():
            reply = QtWidgets.QMessageBox.question(
                self, "Exit", "Save changes to database?",
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
                | QtWidgets.QMessageBox.Cancel)
            if reply == QtWidgets.QMessageBox.Yes:
                self.toolbar.on_save()
            elif reply == QtWidgets.QMessageBox.No:
                pass
            else:
                event.ignore()
                return

        self.model.on_close()
Example #16
l9 = Dropout(rate=dropout_rate)
l10 = FeedbackFC(size=[1000, 1000],
                 num_classes=10,
                 sparse=args.sparse,
                 rank=args.rank,
                 name='fc3_fb')

l11 = FullyConnected(input_shape=1000,
                     size=10,
                     init=args.init,
                     bias=args.bias,
                     name='fc4')

##############################################

model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11])
predict = model.predict(X=X)
weights = model.get_weights()

if args.dfa:
    grads_and_vars = model.dfa_gvs(X=X, Y=Y)
else:
    grads_and_vars = model.gvs(X=X, Y=Y)

train = tf.train.AdamOptimizer(
    learning_rate=lr,
    epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)

correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y, 1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
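
The last two ops compute batch accuracy. The NumPy equivalent, assuming predict and Y are [batch, 10] arrays of logits and one-hot labels:

    import numpy as np

    def total_correct_np(predict, Y):
        # count rows where the predicted class matches the label
        return int(np.sum(np.argmax(predict, axis=1) == np.argmax(Y, axis=1)))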
Example #17
    def __init__(self):

        self.model = None

        DModule.__init__(self)
        QtWidgets.QMainWindow.__init__(self)

        self.model = Model(self)

        self.dialogs = Dialogs(self)
        self.registry = Registry("Deposit")
        self.graph_view = GraphView(self)
        self.descriptor_group = DescriptorGroup(self)
        self.distance_group = DistanceGroup(self)
        self.cluster_group = ClusterGroup(self)
        self.toolbar = Toolbar(self)
        self.menu = Menu(self)
        self.statusbar = StatusBar(self)
        self.progress = Progress(self)

        self.setWindowIcon(self.get_icon("cm_icon.svg"))
        self.setStyleSheet("QPushButton {padding: 5px; min-width: 100px;}")

        central_widget = QtWidgets.QWidget(self)
        central_widget.setLayout(QtWidgets.QHBoxLayout())
        central_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.setCentralWidget(central_widget)

        control_frame = QtWidgets.QFrame(self)
        control_frame.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
                                    QtWidgets.QSizePolicy.Minimum)
        control_frame.setLayout(QtWidgets.QVBoxLayout())
        control_frame.layout().setContentsMargins(10, 10, 0, 10)

        graph_view_frame = QtWidgets.QFrame(self)
        graph_view_frame.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                       QtWidgets.QSizePolicy.Expanding)
        graph_view_frame.setLayout(QtWidgets.QVBoxLayout())
        graph_view_frame.layout().setContentsMargins(0, 0, 0, 0)

        central_widget.layout().addWidget(control_frame)
        central_widget.layout().addWidget(graph_view_frame)

        control_frame.layout().addWidget(self.descriptor_group)
        control_frame.layout().addWidget(self.distance_group)
        control_frame.layout().addWidget(self.cluster_group)
        control_frame.layout().addStretch()

        graph_view_frame.layout().addWidget(self.graph_view)

        self.setStatusBar(self.statusbar)

        self.menu.load_recent()

        self.set_title()
        self.setGeometry(100, 100, 1024, 768)
        # self.setGeometry(500, 100, 1024, 768)  # DEBUG

        self.descriptor_group.load_data.connect(self.on_load_data)
        self.descriptor_group.cluster_classes_changed.connect(
            self.on_cluster_classes_changed)
        self.distance_group.calculate.connect(self.on_calculate)
        self.distance_group.delete.connect(self.on_delete_distance)
        self.cluster_group.cluster.connect(self.on_cluster)
        self.cluster_group.update_tree.connect(self.on_update_tree)
        self.cluster_group.add_cluster.connect(self.on_add_cluster)
        self.cluster_group.delete.connect(self.on_delete_clusters)

        self.connect_broadcast(Broadcasts.VIEW_ACTION, self.on_view_action)
        self.connect_broadcast(Broadcasts.STORE_LOADED,
                               self.on_data_source_changed)
        self.connect_broadcast(Broadcasts.STORE_DATA_SOURCE_CHANGED,
                               self.on_data_source_changed)
        self.connect_broadcast(Broadcasts.STORE_DATA_CHANGED,
                               self.on_data_changed)
        self.connect_broadcast(Broadcasts.STORE_SAVED, self.on_saved)
        self.connect_broadcast(Broadcasts.STORE_SAVE_FAILED,
                               self.on_save_failed)
        self.set_on_broadcast(self.on_broadcast)

        self.model.broadcast_timer.setSingleShot(True)
        self.model.broadcast_timer.timeout.connect(self.on_broadcast_timer)

        self.update()

        self.dialogs.open("Connect")
Example #18
class NormaLoader(object):
    """ Loads an .orm file produced by the NORMA modeling tool into a
        :class:`lib.Model.Model`.  There are no public methods in this class.
        An .orm file can be loaded via the constructor.  For example: ::

            loader = NormaLoader("/path/to/file/example.orm")
            model = loader.model
    """

    ###########################################################################
    # Constructor: Only public method!
    ###########################################################################
    def __init__(self, filename, deontic=False):
        """ Initialize object and load *filename*. """

        #: The ORM model (:class:`lib.Model.Model`) loaded from the .orm file.
        self.model = Model()
        self._elements = {}  # Dictionary of {id, element} pairs

        # Items in the .orm file omitted by NormaLoader
        self.omissions = []  #: Intentionally omitted model elements
        self.unexpected = set()  #: Unexpected nodes in the XML file

        # Find root of XML tree
        self._model_root = self._parse_norma_file(filename)

        # Load file
        self._load_data_types()
        self._load_object_types()
        self._load_fact_types()  # Also loads subtypes
        self._load_constraints(deontic)

        # Post-processing
        self._fix_nested_fact_type_refs()

        # Report any issues to the user
        self._log_issues(filename, self.omissions, "model element", "ignored")
        self._log_issues(filename, self.unexpected, "XML node", "unexpected")

    ###########################################################################
    # Private Utility Functions
    ###########################################################################
    def _add(self, model_element):
        """ Add model element to the model. """
        self._elements[model_element.uid] = model_element
        self.model.add(model_element)

    @staticmethod
    def _construct(xml_node, model_element_type, **kwargs):
        """ Construct a new model element from the XML node. """
        uid = xml_node.get("id")
        name = xml_node.get("Name") or xml_node.get("_Name")
        return model_element_type(uid=uid, name=name, **kwargs)

    def _parse_norma_file(self, filename):
        """ Parse a NORMA File and return the ORMModel node. """
        if filename.split(".")[-1].upper() != "ORM":
            raise Exception("Input filename must have .orm extension.")

        tree = xml.parse(filename)
        root = tree.getroot()

        if root.tag != NS_ROOT + "ORM2":
            raise Exception("Root of input file must be <ormRoot:ORM2>.")

        model_node = find(root, "ORMModel")

        if model_node is None:
            raise Exception("Cannot find <orm:ORMModel> in input file.")
        else:
            return model_node

    def _log_issues(self, filename, issue_list, subject, issue_type):
        """ Log issues reported in an issues list. """
        logger = logging.getLogger(__name__)
        size = len(issue_list)

        if size > 0:
            subject = ("{0}s were" if size > 1 else "{0} was").format(subject)
            filename = os.path.basename(filename)
            template = "%d %s %s while loading %s."

            logger.warning(template, size, subject, issue_type, filename)

            for issue in issue_list:
                logger.info("%s %s", issue_type.capitalize(), issue)

    def _call_loader(self, loader, node, *args):
        """ Call the loader method listed in the loader map for a given node."""
        tag = local_tag(node)
        try:
            return loader[tag](node, *args)
        except KeyError:  # No loading function defined.
            self.unexpected.add(tag)
            return None

    def _move_node_to_constraints(self, node, parent):
        """ Make an xml node a subelement of the Constraints sequence node. """

        tag = local_tag(node)

        # Special handling for ValueRestriction and CardinalityRestriction:
        # we move the node that is 1 level down instead (e.g. ValueConstraint)
        special = {
            'ValueRestriction': 'value constraint',
            'CardinalityRestriction': 'cardinality constraint'
        }

        if tag in special:
            if len(node) != 1:
                raise ValueError("Unexpected {0} format".format(special[tag]))
            node = node[0]  # Move 1 level down

        root = self._model_root
        constraints_node = find(root, "Constraints")

        if constraints_node is None:  # Create a Constraints node if needed
            constraints_node = xml.SubElement(root, NS_CORE + "Constraints")

        node.set('_covered_element', parent.uid)
        constraints_node.append(node)

    ##########################################################################
    # Private Functions to Load Conceptual Data Types
    ##########################################################################
    def _load_data_types(self):
        """ Load the data types in the model so that we can assign the
            conceptual data type to each value type. """
        for child in node_collection(self._model_root, "DataTypes"):
            data_type = local_tag(child)  # Data type node tag
            data_id = child.get("id")

            # Look-up Domain subclass corresponding to data type
            try:
                domain = DATA_TYPES[data_type]
            except KeyError:
                domain = None  # Leave default Domain in place

            # Store type by ID for later retrieval
            self._elements[data_id] = domain

    ##########################################################################
    # Private Functions to Load Object Types
    ##########################################################################
    def _load_object_types(self):
        """ Load the collection of object types. """
        type_of = {
            'EntityType': ObjectType.EntityType,
            'ValueType': ObjectType.ValueType,
            'ObjectifiedType': ObjectType.ObjectifiedType,
        }
        for node in node_collection(self._model_root, "Objects"):
            tag = local_tag(node)
            self._load_object_type(node, type_of[tag])

    def _load_object_type(self, xml_node, target_type):
        """ Loads object type rooted at xml_node into target type. """
        loader = {
            'NestedPredicate': self._load_nested_fact_type,
            'SubtypeDerivationRule': self._load_subtype_derivation,
            'PreferredIdentifier': self._load_preferred_identifier,
            'ConceptualDataType': self._load_conceptual_data_type,
            'ValueRestriction': self._move_node_to_constraints,
            'CardinalityRestriction': self._move_node_to_constraints,
            'Definitions': noop,
            'Notes': noop,
            'Abbreviations': noop,
            'PlayedRoles': noop,  # Captured when loading <Roles>
            'Instances': noop,
            'Extensions': noop
        }

        # Construct object type of appropriate underlying type
        object_type = self._construct(xml_node, target_type)

        object_type.independent = (xml_node.get("IsIndependent") == "true")
        object_type.implicit = (
            xml_node.get("IsImplicitBooleanValue") == "true")

        # Load inner xml nodes.  Note, some of these may also set implicit=true
        for node in xml_node:
            self._call_loader(loader, node, object_type)

        # Add the object type to the model, unless it is an implicit object type
        if not object_type.implicit:
            self._add(object_type)

    @staticmethod
    def _load_nested_fact_type(xml_node, object_type):
        """ Loads NestedPredicate xml_node into object_type. """
        if xml_node.get("IsImplied") == "true":
            object_type.implicit = True
        object_type.nested_fact_type = xml_node.get("ref")  # GUID of fact type

    def _fix_nested_fact_type_refs(self):
        """ Updates objectified type's nested_fact_type attribute to point to
            the actual fact type and not just the GUID.  MUST be called after
            both object types and fact types are loaded. """
        for object_type in self.model.object_types:
            if isinstance(object_type, ObjectType.ObjectifiedType):
                guid = object_type.nested_fact_type
                object_type.nested_fact_type = self._elements[guid]

    def _load_subtype_derivation(self, xml_node, object_type):
        """ Loads SubtypeDerivationRule into object_type. """
        self.omissions.append("Subtype derivation rule for " +
                              object_type.name)

    @staticmethod
    def _load_preferred_identifier(xml_node, object_type):
        """ Loads PreferredIdentifier into object_type. """
        # GUID for uniq constraint corresponding to preferred reference scheme
        object_type.identifying_constraint = xml_node.get("ref")

    def _load_conceptual_data_type(self, xml_node, object_type):
        """ Load ConceptualDataType for a ValueType. """
        ref = xml_node.get("ref")  # GUID for data type
        domain = self._elements.get(ref)
        if domain:
            object_type._data_type = domain()
            object_type.domain = object_type.data_type

    ##########################################################################
    # Private Functions to Load Fact Types
    ##########################################################################
    def _load_fact_types(self):
        """ Load the collection of fact types. """
        loader = {
            'Fact': self._load_fact_type,
            'SubtypeFact': self._load_subtype_fact,
            'ImpliedFact': noop
        }
        for node in node_collection(self._model_root, "Facts"):
            self._call_loader(loader, node)

    def _load_fact_type(self, xml_node):
        """ Load a fact type node into a fact type in the model. """
        loader = {
            'FactRoles': self._load_roles,
            'DerivationRule': self._load_facttype_derivation,
            'Definitions': noop,
            'Notes': noop,
            'ReadingOrders': noop,
            'InternalConstraints': noop,  # Captured when loading <Constraints>
            'Instances': noop,
            'Extensions': noop
        }
        fact_type = self._construct(xml_node, FactType.FactType)

        for node in xml_node:
            self._call_loader(loader, node, fact_type)

        # If arity == 0, all roles were played by implicit object types
        if fact_type.arity() > 0:
            self._add(fact_type)

    def _load_facttype_derivation(self, xml_node, fact_type):
        """ Load a fact type derivation rule. """
        self.omissions.append("Fact type derivation rule for " +
                              fact_type.name)

    def _load_roles(self, xml_node, fact_type):
        """ Load a list of roles of a fact type. """
        loader = {'Role': self._load_role}
        for node in xml_node:
            self._call_loader(loader, node, fact_type)

    def _load_role(self, xml_node, fact_type):
        """ Load a role in a fact type. """
        loader = {
            'RolePlayer': noop,  # We call _load_role_player directly
            'DerivationSource': self._load_role_derivation,
            'ValueRestriction': self._move_node_to_constraints,
            'CardinalityRestriction': self._move_node_to_constraints,
            'RoleInstances': noop,
            'Extensions': noop
        }
        attribs, name = get_basic_attribs(xml_node)
        uid = attribs['uid']
        player = self._load_role_player(xml_node)

        # Add the role if the role player exists (i.e. we do not want a role
        # played by an implicit object type).  For example, NORMA binarizes
        # unary roles; this check reverts the fact type to unary.
        if player is not None:
            role = fact_type.add_role(player, name, uid)
            self._elements[role.uid] = role

            for node in xml_node:  # Process any remaining child nodes
                self._call_loader(loader, node, role)

    def _load_role_player(self, xml_node):
        """ Return the player of the role. """
        uid = find(xml_node, 'RolePlayer').get("ref")
        return self._elements.get(uid)

    def _load_role_derivation(self, xml_node, role):
        """ Load a role derivation rule. """
        name = role.fact_type.name
        self.omissions.append("Role derivation rule within " + name)

    def _load_subtype_fact(self, xml_node):
        """ Load a subtype fact, which indicates a subtype constraint.  Note,
            we chose not to move this node under <Constraints>, because it must
            be loaded prior to any associated XOR/IOR constraints. """
        attribs, name = get_basic_attribs(xml_node)

        # Get super and sub type XML nodes
        factroles = find(xml_node, "FactRoles")
        super_node = find(factroles, "SupertypeMetaRole")
        sub_node = find(factroles, "SubtypeMetaRole")
        supertype_node = find(super_node, "RolePlayer")
        subtype_node = find(sub_node, "RolePlayer")

        # Look-up the corresponding object types
        try:
            supertype = self._elements[supertype_node.get("ref")]
            subtype = self._elements[subtype_node.get("ref")]
        except KeyError:
            raise Exception("Cannot load subtype constraint.")

        # Does this subtype constraint provide a path to the preferred ID?
        path = (xml_node.get("PreferredIdentificationPath") == "true")

        # Create constraint
        cons = Constraint.SubtypeConstraint(subtype, supertype, path,
                                            **attribs)

        # If there are additional constraints on the subtype (e.g. XOR or IOR),
        # their role sequence will consist of the subtype fact's roles. We will
        # redirect the id for those roles to this constraint, so that the covers
        # attribute is a list of SubtypeConstraints for constraints on subtypes.
        self._elements[super_node.get("id")] = cons
        self._elements[sub_node.get("id")] = cons

        self._add(cons)

    ##########################################################################
    # Private Functions to Load Constraints
    ##########################################################################
    def _load_constraints(self, deontic=False):
        """ Load the collection of contraints. """
        loader = {
            'EqualityConstraint': self._load_equality_constraint,
            'ExclusionConstraint': self._load_exclusion_constraint,
            'SubsetConstraint': self._load_subset_constraint,
            'FrequencyConstraint': self._load_frequency_constraint,
            'MandatoryConstraint': self._load_mandatory_constraint,
            'UniquenessConstraint': self._load_uniqueness_constraint,
            'RingConstraint': self._load_ring_constraint,
            'ValueComparisonConstraint': self._load_value_comp_constraint,
            'ValueConstraint': self._load_value_constraint,
            'RoleValueConstraint': self._load_value_constraint,
            'CardinalityConstraint': self._load_cardinality_constraint,
            'UnaryRoleCardinalityConstraint': self._load_cardinality_constraint
        }
        for node in node_collection(self._model_root, "Constraints"):
            if not deontic and node.get("Modality") == "Deontic":
                continue

            result = self._call_loader(loader, node)

            if not isinstance(result, list):
                result = [result]

            for cons in result:
                if cons is not None and cons.covers is not None:
                    self._add(cons)

    def _load_exclusion_constraint(self, xml_node):
        """ Load exclusion constraint. """
        attribs, name = get_basic_attribs(xml_node)
        kind = "Exclusion constraint"

        seq_node = find(xml_node, "RoleSequences")
        first_seq = self._load_role_sequence(seq_node[0], kind + " " + name)
        if first_seq and isinstance(first_seq[0], Constraint.SubtypeConstraint):
            kind = "Subtype " + kind.lower()

        self.omissions.append(kind + " " + name)
        return None

    def _load_subset_constraint(self, xml_node):
        """ Load subset constraint. """
        attribs, name = get_basic_attribs(xml_node)
        name = "Subset constraint " + name

        sequences_node = find(xml_node, "RoleSequences")

        if len(sequences_node) != 2:
            msg = "{0} does not have exactly two role sequences"
            raise Exception(msg.format(name))

        # Load subset and superset role sequences
        attribs['subset'] = self._load_role_sequence(sequences_node[0], name)
        attribs['superset'] = self._load_role_sequence(sequences_node[1], name)

        return Constraint.SubsetConstraint(**attribs)

    def _load_equality_constraint(self, xml_node):
        """ Load equality constraint. """
        attribs, name = get_basic_attribs(xml_node)
        name = "Equality constraint " + name

        sequences_node = find(xml_node, "RoleSequences")

        # If there are > 2 role sequences, we split the equality constraint
        # into multiple 2-role-sequence equality constraints.  Each of them
        # uses sequences_node[0] as the superset sequence and one of the
        # subsequent role sequences as its subset sequence.
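        # (For example, with three sequences [S0, S1, S2], we create the two
        # constraints S0 == S1 and S0 == S2, which together imply the
        # original three-way equality.)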

        cons_list = []
        superset_seq = sequences_node[0]
        attribs['superset'] = self._load_role_sequence(superset_seq, name)
        sequences_node.remove(superset_seq)

        for sequence in sequences_node:
            attribs['subset'] = self._load_role_sequence(sequence, name)
            cons_list.append(Constraint.EqualityConstraint(**attribs))

        return cons_list

    def _load_frequency_constraint(self, xml_node):
        """ Load frequency constraint. """
        attribs, name = get_basic_attribs(xml_node)
        name = "Frequency constraint " + name

        # Parse frequency attributes
        min_freq = int(xml_node.get("MinFrequency"))
        max_freq = int(xml_node.get("MaxFrequency"))

        # Build attribute dictionary
        attribs['min_freq'] = min_freq
        attribs['max_freq'] = max_freq if max_freq > 0 else float('inf')
        attribs['covers'] = self._load_role_sequence(xml_node, name)

        return Constraint.FrequencyConstraint(**attribs)

    def _load_mandatory_constraint(self, xml_node):
        """ Load mandatory constraint. """
        attribs, name = get_basic_attribs(xml_node)

        implied = (xml_node.get("IsImplied") == "true")
        covers = self._load_role_sequence(xml_node,
                                          "Mandatory constraint " + name)

        # Lambda function to decide if constraint covers a subtype
        subtype = lambda x: x and isinstance(x[0], Constraint.SubtypeConstraint)

        if implied:
            return None
        elif subtype(covers):
            if len(covers) > 1:  # If len == 1, it's on the implicit subtype fact
                self.omissions.append("Subtype inclusive-or constraint " +
                                      name)
            return None
        else:
            return Constraint.MandatoryConstraint(covers=covers, **attribs)

    def _load_uniqueness_constraint(self, xml_node):
        """ Load uniqueness constraint. """
        attribs, name = get_basic_attribs(xml_node)
        name = "Uniqueness constraint " + name

        # Get object type that this constraint is a preferred id for
        pref_node = find(xml_node, "PreferredIdentifierFor")
        if pref_node is not None:
            uid = pref_node.get("ref")
            attribs['identifier_for'] = self._elements.get(uid)

        # Get sequence of covered roles
        covers = self._load_role_sequence(xml_node, name)

        if covers and isinstance(covers[0], Constraint.SubtypeConstraint):
            return None  # Covers a role in an implicit subtype fact
        else:
            return Constraint.UniquenessConstraint(covers=covers, **attribs)

    def _load_ring_constraint(self, xml_node):
        """ Load ring constraint. """
        self.omissions.append("Ring constraint " + xml_node.get("Name"))
        return None

    def _load_value_comp_constraint(self, xml_node):
        """ Load value comparison constraint. """
        name = xml_node.get("Name")
        self.omissions.append("Value comparison constraint " + name)
        return None

    def _load_value_constraint(self, node):
        """ Load value constraint. """
        attribs, name = get_basic_attribs(node)
        attribs['covers'] = covers = self._get_covered_element(node)

        data_type = covers[0].data_type if covers else None

        try:
            domain = Constraint.ValueDomain()
            for value_range in node_collection(node, "ValueRanges"):
                domain.add_range(
                    min_value=value_range.get("MinValue"),
                    max_value=value_range.get("MaxValue"),
                    min_open=(value_range.get("MinInclusion") == "Open"),
                    max_open=(value_range.get("MaxInclusion") == "Open"),
                    data_type=data_type)
        except Constraint.ValueConstraintError as ex:
            reason = ex.message.lower()
            mesg = "Value constraint {0} because {1}".format(name, reason)
            self.omissions.append(mesg)
            return None

        return Constraint.ValueConstraint(domain, **attribs)

    def _load_cardinality_constraint(self, node):
        """ Load cardinality constraint. """
        attribs, name = get_basic_attribs(node)
        attribs['covers'] = self._get_covered_element(node)
        attribs['ranges'] = self._load_cardinality_ranges(node)

        return Constraint.CardinalityConstraint(**attribs)

    def _load_cardinality_ranges(self, parent_node):
        """ Load a list of cardinality ranges. """
        ranges = []
        isrange = lambda x: local_tag(x) == 'CardinalityRange'

        for node in filter(isrange, node_collection(parent_node, "Ranges")):
            lower = int(node.get("From"))  # "From" attribute is mandatory
            upper = node.get("To")  # "To" attribute is optional
            upper = int(upper) if upper else None
            ranges.append(Constraint.CardinalityRange(lower, upper))
        return ranges

    def _get_covered_element(self, node):
        """ Returns element covered by a constraint. Used by ValueConstraint
            and CardinalityConstraint, which have been moved from their parent
            nodes to the Constraints node. """
        try:
            # _covered_element is added via _move_node_to_constraints()
            uid = node.get("_covered_element")
            return [self._elements[uid]]
        except KeyError:
            return None

    def _load_role_sequence(self, xml_node, constraint_name):
        """ Returns a sequence of roles covered by a constraint.
            xml_node points to the RoleSequence node or its parent node. """

        if local_tag(xml_node) != 'RoleSequence':
            xml_node = find(xml_node, 'RoleSequence')

        name = constraint_name
        role_sequence = FactType.RoleSequence()
        implied_roles = 0  # Number of implied roles in the sequence
        total_roles = 0  # Total number of roles in the sequence

        for node in xml_node:
            if local_tag(node) == "Role":
                role = self._load_constraint_role(node, name)
                implied_roles += (role is None)
                total_roles += 1
                role_sequence.append(role)
            elif local_tag(node) == "JoinRule":
                try:
                    role_sequence.join_path = self._load_join_rule(node)
                except JoinPathException as ex:
                    msg = "{0} because its join path {1}."
                    self.omissions.append(msg.format(name, ex.message))
                    return None
            else:
                msg = "{0} has unexpected role sequence."
                raise Exception(msg.format(name))

        if 0 < implied_roles < total_roles:
            msg = "{0} because it covers implied and explicit roles"
            self.omissions.append(msg.format(name))
            return None
        elif implied_roles == total_roles:  # Implied constraint
            return None
        else:
            return role_sequence

    def _load_constraint_role(self, xml_node, constraint_name):
        """ Returns a Role element within the RoleSequence of a constraint. """

        # Confirm deprecated path data is not present
        if find(xml_node, "ProjectedFrom") is not None:
            msg = constraint_name + " has deprecated join rule."
            raise Exception(msg)

        uid = xml_node.get("ref")
        return self._elements.get(uid)

    ###########################################################################
    # Note to future maintainers: the next four methods (_load_join_rule,
    # _load_join_path, _load_linear_path, _load_branches) are my attempt to
    # parse the very complex <JoinRule> node and its children.  Join rules in
    # NORMA support many features (subqueries, calculations, negations, etc.)
    # that we haven't observed in industry ORM models.  Thus, we only load the
    # most common types of join rules and raise JoinPathExceptions for the rest.
    ###########################################################################

    def _load_join_rule(self, node):
        """ Loads a join rule (i.e. a <JoinRule> node and its children). """
        join_path = JoinPath()

        if not (len(node) == 1 and local_tag(node[0]) == 'JoinPath'):
            raise JoinPathException("does not have exactly one JoinPath node")

        for child in node[0]:
            if local_tag(child) in ['PathComponents', 'PathComponent']:
                if len(child) == 1 and local_tag(child[0]) == 'RolePath':
                    self._load_join_path(child[0], join_path)
                else:
                    msg = "does not have exactly one RolePath node"
                    raise JoinPathException(msg)
            elif local_tag(child) == 'JoinPathProjections':
                # NOTE: I am ignoring this node for now, because it's not needed
                # to determine the join path itself.  The only reason to do any
                # processing here would be to confirm that there are no
                # unexpected children of this node (e.g. a CalculatedValue) and
                # to double-check that the ProjectedFrom nodes match the roles
                # covered by the constraint.
                pass
            else:
                raise JoinPathException(unsupported_node(child, node[0]))

        return join_path

    def _load_join_path(self, node, join_path, root_role=None):
        """ Load <RolePath> or <SubPath> node of a <JoinRule> into join_path.
            `node` must point to a <RolePath> or <Subpath> node.  If root_role
            is not None, then the first role of paths along this branch will 
            join with root_role (which must be on a previous branch of 
            Join_path).  Returns first role on this branch of the path. """

        # We do not support negated splits
        split_neg = node.get("SplitIsNegated")
        if split_neg and split_neg.upper() == "TRUE":
            raise JoinPathException("has a negated path split")

        # We do not support subpath combinations other than AND
        split_op = node.get("SplitCombinationOperator")
        if split_op and split_op.upper() != "AND":
            msg = "combines paths with an operator other than AND"
            raise JoinPathException(msg)

        first = None

        for child in node:
            if local_tag(child) == 'RootObjectType':
                # NOTE: I am ignoring this node for now, because it is not
                # needed to determine the join path structure.  The only reason
                # to check this node would be to confirm there is no "Negated"
                # or "ValueRestriction" attribute.
                pass
            elif local_tag(child) == 'PathedRoles':  # Linear Path
                # The root_role for any sub paths that follow this linear path
                # is the last role on the linear path.
                first, root_role = self._load_linear_path(
                    child, join_path, root_role)
            elif local_tag(child) == 'SubPaths':  # Branching
                _first = self._load_branches(child, join_path, root_role)
                first = first or _first  # Don't overwrite first if not None
            else:
                raise JoinPathException(unsupported_node(child, node))

        return first

    def _load_branches(self, node, join_path, root_role=None):
        """ Load <SubPaths> node of a <JoinRule> into join_path. Returns first
            role along any subpath. """

        first = None

        for child in node:
            if local_tag(child) == 'SubPath':
                _first = self._load_join_path(child, join_path, root_role)
                first = first or _first  # Ensure we keep the very first role

                # If root_role is None, then subsequent subpaths join with
                # the first role of the first subpath.
                root_role = root_role or first
            else:
                raise JoinPathException(unsupported_node(child, node))

        return first

    def _load_linear_path(self, node, join_path, prev_role=None):
        """ Load <PathedRoles> node of a <JoinRule> into join_path. If prev_role
            is not None, joins the first role of this linear path with 
            prev_role.  Returns the first and last roles along this path. """

        first_role = None  # First role of this branch of the join path

        for child in node:
            if local_tag(child) != 'PathedRole':
                raise JoinPathException(unsupported_node(child, node))

            purpose = child.get("Purpose")
            isnegated = child.get("IsNegated")

            ref = child.get("ref")
            role = self._elements.get(ref)

            if role is None:
                raise JoinPathException("includes an implicit role")
            elif len(child) != 0:
                raise JoinPathException(unsupported_node(child[0], child))
            elif purpose == "PostOuterJoin":
                raise JoinPathException("includes an outer join")
            elif isnegated and isnegated.upper() == "TRUE":
                raise JoinPathException("includes a negated role")

            # On the first iteration, prev_role is either a role passed by the
            # caller (i.e. from an earlier branch of the join path) or None.
            # On subsequent iterations, it is the previous role on this branch.
            if purpose == 'PostInnerJoin' and prev_role is not None:
                join_path.add_join(prev_role, role)

            if first_role is None:  # First role in the path
                first_role = role

            prev_role = role

        return first_role, prev_role  # Permits joining with subsequent branches.
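
A minimal usage sketch for the loader above. This is not code from the source:
the import path and the .orm file name are assumptions for illustration.

# Hypothetical usage -- module path and file name are assumptions.
from lib.NormaLoader import NormaLoader

loader = NormaLoader("hospital.orm", deontic=False)
loader.model.display()           # print the loaded model's contents
for item in loader.omissions:    # review intentionally skipped elements
    print("Ignored: " + item)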
Example #19
    x_train_ = train_images.reshape(
        train_images.shape[0],
        train_images.shape[1] * train_images.shape[2]).astype('float32')
    x_train_ = x_train_ / 255
    y_train_ = np.eye(num_classes)[train_labels]

    # flatten 28x28 images to 784-length vectors; [10000, 784] for the test set
    x_test_ = test_images.reshape(test_images.shape[0], test_images.shape[1] *
                                  test_images.shape[2]).astype('float32')
    x_test_ = x_test_ / 255
    y_test_ = np.eye(num_classes)[test_labels]
    return x_train_, y_train_, x_test_, y_test_
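
# The np.eye(num_classes)[labels] idiom above one-hot encodes integer labels
# by indexing rows of the identity matrix.  A tiny illustration (the _demo_*
# names are hypothetical, for demonstration only):
_demo_labels = np.array([2, 0, 1])
_demo_onehot = np.eye(3)[_demo_labels]
# _demo_onehot == [[0, 0, 1], [1, 0, 0], [0, 1, 0]]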


if __name__ == '__main__':
    x_train, y_train, x_test, y_test = prepare_data()
    model = Model([[784], [30, "relu"], [10, "softmax"]],
                  loss_function="cross_entropy",
                  learning_rate=0.001)

    model.train("gradient_descent",
                x_train,
                y_train,
                epochs=5,
                shuffle_data=True,
                batch_size=10)

    print(model.calculate_accuracy(inputs=x_test, labels=y_test))

    # model.save("../../saved/hw_digits/", "saved.pck")
Example #20
        slope = d[10]
        ca = d[11]
        thal = d[12]
        label = d[13]
        target_array = np.array([1, 0]) if label == 1 else np.array([0, 1])
        input_array = np.array([age, sex, cp, fbs, trestbps, chol, restecg, exang, oldpeak,
                                slope, ca, thal, thalach])
        x.append(input_array)
        y.append(target_array)

    return x, y


if __name__ == '__main__':
    df_read = pandas.read_csv("./heart_disease.csv")
    x_train, y_train = prepare_data(df_read)
    total = len(x_train)
    test_num = total // 5  # hold out 20% of the samples for testing
    train_num = total - test_num

    # Split: first train_num samples for training, the rest for testing
    x_test = x_train[train_num:]
    y_test = y_train[train_num:]
    x_train = x_train[:train_num]
    y_train = y_train[:train_num]

    model = Model([[13], [6, "relu"], [2, "softmax"]], loss_function="cross_entropy", learning_rate=0.001)

    model.train("gradient_descent", np.array(x_train), np.array(y_train), epochs=50, shuffle_data=True, batch_size=1)

    print(model.calculate_accuracy(inputs=x_test, labels=y_test))

    # model.save("../../saved/heart_disease/", "saved.pck")
Example #21
tf.reset_default_graph()

batch_size = tf.placeholder(tf.int32, shape=())
dropout_rate = tf.placeholder(tf.float32, shape=())
learning_rate = tf.placeholder(tf.float32, shape=())

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

l0 = FullyConnected(size=[784, 400], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Tanh(), bias=args.bias, l2=args.l2, last_layer=False, name="fc1")
l1 = Dropout(rate=dropout_rate)
l2 = FeedbackFC(size=[784, 400], num_classes=10, sparse=args.sparse, rank=args.rank, name="fc1_fb")

l3 = FullyConnected(size=[400, 10], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Linear(), bias=args.bias, l2=args.l2, last_layer=True, name="fc2")

model = Model(layers=[l0, l1, l2, l3])

##############################################

predict = model.predict(X=X)

weights = model.get_weights()

if args.opt == "adam" or args.opt == "rms" or args.opt == "decay":
    if args.dfa:
        grads_and_vars = model.dfa_gvs(X=X, Y=Y)
    else:
        grads_and_vars = model.gvs(X=X, Y=Y)
        
    if args.opt == "adam":
        train = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
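
# A hypothetical TF1-style training step for the graph above (sketch only;
# xs/ys stand for a data batch and are not defined in this snippet):
#
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train, feed_dict={X: xs, Y: ys, batch_size: 50,
#                                  dropout_rate: 0.5, learning_rate: 1e-4})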
Example #22
class View(DModule, QtWidgets.QMainWindow):
    def __init__(self):

        self.model = None

        DModule.__init__(self)
        QtWidgets.QMainWindow.__init__(self)

        self.model = Model(self)

        self.dialogs = Dialogs(self)
        self.registry = Registry("Deposit")
        self.graph_view = GraphView(self)
        self.descriptor_group = DescriptorGroup(self)
        self.distance_group = DistanceGroup(self)
        self.cluster_group = ClusterGroup(self)
        self.toolbar = Toolbar(self)
        self.menu = Menu(self)
        self.statusbar = StatusBar(self)
        self.progress = Progress(self)

        self.setWindowIcon(self.get_icon("cm_icon.svg"))
        self.setStyleSheet("QPushButton {padding: 5px; min-width: 100px;}")

        central_widget = QtWidgets.QWidget(self)
        central_widget.setLayout(QtWidgets.QHBoxLayout())
        central_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.setCentralWidget(central_widget)

        control_frame = QtWidgets.QFrame(self)
        control_frame.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
                                    QtWidgets.QSizePolicy.Minimum)
        control_frame.setLayout(QtWidgets.QVBoxLayout())
        control_frame.layout().setContentsMargins(10, 10, 0, 10)

        graph_view_frame = QtWidgets.QFrame(self)
        graph_view_frame.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                       QtWidgets.QSizePolicy.Expanding)
        graph_view_frame.setLayout(QtWidgets.QVBoxLayout())
        graph_view_frame.layout().setContentsMargins(0, 0, 0, 0)

        central_widget.layout().addWidget(control_frame)
        central_widget.layout().addWidget(graph_view_frame)

        control_frame.layout().addWidget(self.descriptor_group)
        control_frame.layout().addWidget(self.distance_group)
        control_frame.layout().addWidget(self.cluster_group)
        control_frame.layout().addStretch()

        graph_view_frame.layout().addWidget(self.graph_view)

        self.setStatusBar(self.statusbar)

        self.menu.load_recent()

        self.set_title()
        self.setGeometry(100, 100, 1024, 768)
        #		self.setGeometry(500, 100, 1024, 768)  # DEBUG

        self.descriptor_group.load_data.connect(self.on_load_data)
        self.descriptor_group.cluster_classes_changed.connect(
            self.on_cluster_classes_changed)
        self.distance_group.calculate.connect(self.on_calculate)
        self.distance_group.delete.connect(self.on_delete_distance)
        self.cluster_group.cluster.connect(self.on_cluster)
        self.cluster_group.update_tree.connect(self.on_update_tree)
        self.cluster_group.add_cluster.connect(self.on_add_cluster)
        self.cluster_group.delete.connect(self.on_delete_clusters)

        self.connect_broadcast(Broadcasts.VIEW_ACTION, self.on_view_action)
        self.connect_broadcast(Broadcasts.STORE_LOADED,
                               self.on_data_source_changed)
        self.connect_broadcast(Broadcasts.STORE_DATA_SOURCE_CHANGED,
                               self.on_data_source_changed)
        self.connect_broadcast(Broadcasts.STORE_DATA_CHANGED,
                               self.on_data_changed)
        self.connect_broadcast(Broadcasts.STORE_SAVED, self.on_saved)
        self.connect_broadcast(Broadcasts.STORE_SAVE_FAILED,
                               self.on_save_failed)
        self.set_on_broadcast(self.on_broadcast)

        self.model.broadcast_timer.setSingleShot(True)
        self.model.broadcast_timer.timeout.connect(self.on_broadcast_timer)

        self.update()

        self.dialogs.open("Connect")

    def set_title(self, name=None):

        title = "CeraMatch"
        if name is None:
            self.setWindowTitle(title)
        else:
            self.setWindowTitle("%s - %s" % (name, title))

    def update_mrud(self):

        if self.model.data_source is None:
            return
        if self.model.data_source.connstr is None:
            self.menu.add_recent_url(self.model.data_source.url)
        else:
            self.menu.add_recent_db(self.model.data_source.identifier,
                                    self.model.data_source.connstr)

    def update(self):

        if self.model.is_connected():
            self.set_title(
                os.path.split(str(self.model.identifier))[-1].strip("#"))
        self.descriptor_group.update()
        self.distance_group.update()
        self.cluster_group.update()

    def save(self):

        if self.model.data_source is None:
            self.dialogs.open("Connect")

        else:
            self.progress.show("Saving...")
            self.model.save()
            self.progress.reset()

    def set_clusters(self, clusters, nodes, edges, labels, positions=None):

        # Avoid a mutable default argument; fall back to an empty dict.
        positions = positions if positions is not None else {}
        if edges:
            self.graph_view.set_data(self.model.sample_data, clusters, nodes,
                                     edges, labels, positions)
        else:
            self.graph_view.set_data(self.model.sample_data)
        self.graph_view.reset_scene()
        self.cluster_group.update_clusters_found(
            len(clusters) if clusters else 0)

    def get_icon(self, name):

        path = os.path.join(os.path.dirname(res.__file__), name)
        if os.path.isfile(path):
            return QtGui.QIcon(path)
        path = os.path.join(os.path.dirname(deposit.__file__), "res", name)
        if os.path.isfile(path):
            return QtGui.QIcon(path)
        raise Exception("Could not load icon", name)

    def save_dendrogram(self, path):

        self.graph_view.save_pdf(path)

    def save_catalog(self, path, scale=1 / 3, dpi=600, line_width=0.5):

        clusters, _, _, labels, _ = self.model.clustering.get_data()
        clusters = dict([(labels[cluster_node_id], [
            labels[sample_node_id]
            for sample_node_id in clusters[cluster_node_id]
        ]) for cluster_node_id in clusters])
        save_catalog(path,
                     self.model.sample_data,
                     clusters,
                     scale,
                     dpi,
                     line_width,
                     progress=self.progress)

    @QtCore.Slot()
    def on_load_data(self):

        if self.model.lap_descriptors is not None:
            self.set_clusters(*self.model.load_samples())

        self.descriptor_group.update()
        self.distance_group.update()
        self.cluster_group.update()
        self.cluster_group.update_n_clusters()

        self.update()

    @QtCore.Slot()
    def on_cluster_classes_changed(self):

        self.cluster_group.update()

    @QtCore.Slot()
    def on_cluster(self):

        self.cluster_group.update_clusters_found(None)
        max_clusters, limit = self.cluster_group.get_limits()
        self.set_clusters(*self.model.clustering.make(max_clusters, limit))
        self.update()

    @QtCore.Slot()
    def on_update_tree(self):

        self.set_clusters(*self.model.clustering.update())
        self.update()

    @QtCore.Slot()
    def on_add_cluster(self):

        self.graph_view.add_cluster()

    @QtCore.Slot()
    def on_calculate(self):

        self.model.calc_distance()
        self.update()

    @QtCore.Slot()
    def on_delete_distance(self):

        reply = QtWidgets.QMessageBox.question(
            self, "Delete Distances", "Delete distances from database?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            self.model.delete_distance()
            self.update()

    @QtCore.Slot()
    def on_delete_clusters(self):

        reply = QtWidgets.QMessageBox.question(
            self, "Delete Clusters", "Delete clusters from database?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            self.model.clustering.delete()
            self.graph_view.set_data(self.model.sample_data)
            self.graph_view.reset_scene()
            self.update()

    def on_view_action(self, *args):

        pass

    def on_broadcast(self, signals):

        if (Broadcasts.STORE_SAVED in signals) or (Broadcasts.STORE_SAVE_FAILED
                                                   in signals):
            self.process_broadcasts()
        else:
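            # Debounce: restarting the single-shot 100 ms timer coalesces a
            # burst of broadcasts into one process_broadcasts() call.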
            self.model.broadcast_timer.start(100)

    def on_broadcast_timer(self):

        self.process_broadcasts()

    def on_data_source_changed(self, *args):

        self.set_title(
            os.path.split(str(self.model.identifier))[-1].strip("#"))
        self.statusbar.message("")
        self.update_mrud()

    def on_data_changed(self, *args):

        self.statusbar.message("")

    def on_saved(self, *args):

        self.statusbar.message("Database saved.")

    def on_save_failed(self, *args):

        self.statusbar.message("Saving failed!")

    def closeEvent(self, event):

        if not self.model.is_saved():
            reply = QtWidgets.QMessageBox.question(
                self, "Exit", "Save changes to database?",
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
                | QtWidgets.QMessageBox.Cancel)
            if reply == QtWidgets.QMessageBox.Yes:
                self.save()
            elif reply == QtWidgets.QMessageBox.No:
                pass
            else:
                event.ignore()
                return

        self.model.on_close()
        QtWidgets.QMainWindow.closeEvent(self, event)
Example #23
class LELConv(Layer):
    def __init__(self, input_shape, pool_shape, num_classes, name=None):
        self.input_shape = input_shape
        self.batch_size, self.h, self.w, self.fin = self.input_shape
        self.pool_shape = pool_shape
        self.num_classes = num_classes
        self.name = name

        l1 = AvgPool(size=self.input_shape,
                     ksize=self.pool_shape,
                     strides=self.pool_shape,
                     padding='SAME')

        l2_input_shape = l1.output_shape()
        l2 = ConvToFullyConnected(input_shape=l2_input_shape)

        l3_input_shape = l2.output_shape()
        l3 = FullyConnected(input_shape=l3_input_shape,
                            size=self.num_classes,
                            init='alexnet',
                            activation=Linear(),
                            bias=0.,
                            name=self.name)

        self.B = Model(layers=[l1, l2, l3])

    ###################################################################

    def get_weights(self):
        return []

    def get_feedback(self):
        assert False

    def output_shape(self):
        return self.input_shape

    def num_params(self):
        return 0

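    # forward/backward are identity pass-throughs: this layer leaves the main
    # path untouched.  The auxiliary model self.B only supplies a local error
    # signal via lel_backward/lel_gv below.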
    def forward(self, X):
        return X

    ###################################################################

    def backward(self, AI, AO, DO):
        return DO

    def gv(self, AI, AO, DO):
        return []

    def train(self, AI, AO, DO):
        return []

    ###################################################################

    def dfa_backward(self, AI, AO, E, DO):
        return DO

    def dfa_gv(self, AI, AO, E, DO):
        return []

    def dfa(self, AI, AO, E, DO):
        return []

    ###################################################################

    def lel_backward(self, AI, AO, E, DO, Y):
        DO = self.B.backwards(AI, Y)
        return DO

    def lel_gv(self, AI, AO, E, DO, Y):
        gvs = self.B.gvs(AI, Y)
        return gvs

    def lel(self, AI, AO, E, DO, Y):
        return []
Example #24
    def test_display_empty(self):
        """ Test Display of an empty model. """
        model = Model()
        model.display()
        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, "Object Types:\nFact Types:\nConstraints:")
Example #25
l16 = FullyConnected(size=[2048, 10],
                     num_classes=10,
                     init_weights=args.init,
                     alpha=learning_rate,
                     activation=Linear(),
                     bias=args.bias,
                     last_layer=True,
                     name='fc3',
                     load=weights_fc,
                     train=train_fc)

##############################################

model = Model(layers=[
    l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16
])

model_normal = Model(layers=[
    l0, l1, l2p0, l3, l4, l5p0, l6, l7, l8p0, l9, l10p0, l10, l11, l13p0, l13,
    l14, l16
])

predict = model.predict(X=X)
predict_normal = model_normal.predict(X=X)

weights = model.get_weights()

if args.opt in ("adam", "rms", "decay"):
    if args.dfa:
        grads_and_vars = model.dfa_gvs(X=X, Y=Y)
Example #26
    def test_add_invalid_element(self):
        """ Test adding something unexpected to the model. """
        model = Model()
        with self.assertRaises(ValueError) as ex:
            model.add("Invalid")
        self.assertEquals(ex.exception.message, "Unexpected model element type")
Example #27
l13 = ConvToFullyConnected(input_shape=[6, 6, 256])

l14 = FullyConnected(input_shape=6*6*256, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc1', train=train_fc)
l15 = Dropout(rate=dropout_rate)
l16 = FeedbackFC(size=[6*6*256, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc1_fb')

l17 = FullyConnected(input_shape=4096, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc2', train=train_fc)
l18 = Dropout(rate=dropout_rate)
l19 = FeedbackFC(size=[4096, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc2_fb')

l20 = FullyConnected(input_shape=4096, size=1000, init=args.init, bias=args.bias, load=weights_fc, name='fc3', train=train_fc)

###############################################################

model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20])
predict = tf.nn.softmax(model.predict(X=features))
weights = model.get_weights()

if args.dfa:
    grads_and_vars = model.dfa_gvs(X=features, Y=labels)
else:
    grads_and_vars = model.gvs(X=features, Y=labels)
        
train = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)

correct = tf.equal(tf.argmax(predict,1), tf.argmax(labels,1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
top5 = in_top_k(predict, tf.argmax(labels,1), k=5)
total_top5 = tf.reduce_sum(tf.cast(top5, tf.float32))
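# correct/top5 are per-example booleans; casting to float32 and summing gives
# the number of top-1 and top-5 hits in the batch.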
Example #28

# Create layers
params = {}
params['init_scale'] = 0.001
params['learning_rate'] = LEARNING_RATE
params['w_h'] = {'affine1': 2, 'affine2': HIDDEN_UNIT}
params['w_w'] = {'affine1': HIDDEN_UNIT, 'affine2': 1}
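# w_h/w_w give each affine layer's weight shape: affine1 maps 2 inputs to
# HIDDEN_UNIT units, affine2 maps HIDDEN_UNIT units to a single output.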

affine1 = Affine('affine1', params)
affine2 = Affine('affine2', params)
tanh = Tanh()
sigmoid = Sigmoid()
cost_layer = Cost()

# Create Network
layers = [affine1, tanh, affine2, sigmoid]
two_layers_net = Model(layers=layers, cost_layer=cost_layer)

# Training
loss = model_train(model=two_layers_net,
                   data=data,
                   epoch=TRAIN_EPOCH,
                   batch_size=BATCH_SIZE,
                   grad_check=GRAD_CHK)

# Plot loss chart
plt.figure('Loss')
plt.plot(loss)

# Plot trained model
plot_model(model=two_layers_net, data=data)