Example #1
def main():

    capsule = Capsule("127.0.0.1", 5000)
    # reset Name Service if necessary
    # capsule.resetNS()
    teste = Teste()

    capsule.registerRemoteObject(uuid.uuid1(), "Teste", teste, "127.0.0.1", 5000)

    while True:
        time.sleep(1)
Example #2
    def re_encryption(self, capsule):
        """
        Run re-encryption for the given capsule and return the re-encrypted capsule.

        :param capsule: capsule object
        :return: re-encrypted capsule
        """

        recapsule = Capsule(ProxyLib())
        capsule_pointer = proxylib.proxylib_get_re_encryption_capsule(
            self.cm.get_pointer(), capsule.get_pointer(), self.get_pointer())
        recapsule.set_pointer(capsule_pointer)
        return recapsule
Example #3
    def encapsulation(self):
        """
        Perform encapsulation and return a Capsule together with the symmetric key.

        :return: capsule and symmetric key
        """
        capsule = Capsule(ProxyLib())
        capsule_pointer, symmetric_key = proxylib.proxylib_encapsulate(
            self.cm.get_pointer(), self.get_pointer())
        capsule.set_pointer(capsule_pointer)
        return capsule, binascii.hexlify(symmetric_key)
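
Note that binascii.hexlify returns bytes on Python 3, not str, so callers that want a printable hex string still need to decode it. A quick self-contained illustration (the key bytes here are a made-up stand-in):

import binascii

raw_key = b'\x00\x01\xfe\xff'          # stand-in for the raw symmetric key bytes
hex_key = binascii.hexlify(raw_key)    # b'0001feff' -- bytes, not str
print(hex_key.decode('ascii'))         # 0001feff
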
Example #4
    def __init__(self, dim_input, dim_hidden, n_layers, n_label, n_vocab,
                 embed_dropout_rate, cell_dropout_rate, final_dropout_rate,
                 embed_list, bidirectional, rnn_type, use_cuda):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.dim_input = dim_input
        self.dim_hidden = dim_hidden
        self.n_label = n_label
        self.bidirectional = bidirectional
        self.rnn_type = rnn_type
        self.use_cuda = use_cuda

        self.add_module('embed', nn.Embedding(n_vocab, dim_input))
        self.add_module('embed_dropout', nn.Dropout(embed_dropout_rate))
        self.add_module(
            'rnn',
            getattr(nn, self.rnn_type)(
                dim_input,
                dim_hidden,
                n_layers,
                batch_first=True,
                dropout=cell_dropout_rate,
                bidirectional=bidirectional,
            ))

        for i in range(self.n_label):
            self.add_module(
                'capsule_%s' % i,
                Capsule(dim_hidden * (2 if self.bidirectional else 1),
                        final_dropout_rate, self.use_cuda))

        self.init_weights(embed_list)
        ignored_params = list(map(id, self.embed.parameters()))
        self.base_params = filter(lambda p: id(p) not in ignored_params,
                                  self.parameters())
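
The per-label capsules above are registered through add_module under generated names ('capsule_0', 'capsule_1', ...), which is how PyTorch tracks dynamically named submodules; they can later be fetched back with getattr(self, 'capsule_%s' % i). A minimal self-contained sketch of that pattern, with nn.Linear standing in for the Capsule module:

import torch
import torch.nn as nn

class PerLabelHeads(nn.Module):
    def __init__(self, dim, n_label):
        super(PerLabelHeads, self).__init__()
        self.n_label = n_label
        for i in range(n_label):
            # add_module registers the submodule so its parameters are tracked
            self.add_module('capsule_%s' % i, nn.Linear(dim, 1))

    def forward(self, x):
        # fetch each dynamically named submodule back by the same name
        return [getattr(self, 'capsule_%s' % i)(x) for i in range(self.n_label)]

heads = PerLabelHeads(8, 3)
print([name for name, _ in heads.named_children()])  # ['capsule_0', 'capsule_1', 'capsule_2']
print(len(heads(torch.randn(4, 8))))                 # 3 outputs, one per label
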
Example #5
def CapsNet():
    # A common Conv2D model
    input_image = Input(shape=(None, None, 3))
    x = Conv2D(64, (3, 3), activation='relu')(input_image)
    x = Dropout(0.5)(x)
    x = AveragePooling2D((2, 2))(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Dropout(0.5)(x)
    x = AveragePooling2D((2, 2))(x)
    x = Conv2D(128, (3, 3), activation='relu')(x)
    x = AveragePooling2D((2, 2))(x)
    x = Conv2D(128, (3, 3), activation='relu')(x)
    """now we reshape it as (batch_size, input_num_capsule, input_dim_capsule)
    then connect a Capsule layer.

    the output of final model is the lengths of 120 Capsule, whose dim=16.

    the length of Capsule is the proba,
    so the problem becomes a 120 two-classification problem.
    """

    x = Reshape((-1, 128))(x)
    capsule = Capsule(120, 16, 3, True)(x)
    output = Lambda(lambda z: K.sqrt(K.sum(K.square(z), 2)))(capsule)
    return Model(inputs=input_image, outputs=output)
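
The closing Lambda computes each capsule's length, ||v|| = sqrt(sum_i v_i^2); per the comment above, that length serves as the per-class probability. A self-contained numpy check of the reduction the Lambda performs:

import numpy as np

caps = np.random.rand(4, 120, 16)              # (batch, num_capsule, dim_capsule)
lengths = np.sqrt(np.sum(np.square(caps), 2))  # same reduction as the Lambda above
print(lengths.shape)                           # (4, 120): one score per capsule/class
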
Example #6
def GRUCapsule(params):

    embed_dropout_rate = 0.1
    routings = 5
    num_capsule = 10
    dim_capsule = 16

    Embedding_layer = Embedding(params['nb_words'],
                                params['embedding_dim'],
                                weights=[params['embedding_matrix']],
                                input_length=params['sequence_length'],
                                trainable=False)

    input_ = Input(shape=(params['sequence_length'], ))
    embed_input_ = Embedding_layer(input_)
    x = SpatialDropout1D(embed_dropout_rate, name='embed_drop')(embed_input_)

    x = Bidirectional(CuDNNGRU(params['lstm_units'], return_sequences=True),
                      name="bi_gru0")(x)
    capsule = Capsule(num_capsule=num_capsule,
                      dim_capsule=dim_capsule,
                      routings=routings,
                      share_weights=True)(x)
    x = Flatten()(capsule)

    x = Dropout(params['dropout_rate'])(x)
    x = Dense(6, activation='sigmoid')(x)

    model = Model(inputs=input_, outputs=x)
    model.compile(loss=params['loss'],
                  optimizer=params['optimizer'],
                  metrics=['accuracy'])
    return model
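
GRUCapsule reads everything from the params dict; the keys below are exactly the ones the function accesses, with illustrative stand-in values (a random matrix replaces the pretrained embeddings). Note that CuDNNGRU only runs on a CUDA-enabled TensorFlow build:

import numpy as np

params = {
    'nb_words': 20000,
    'embedding_dim': 300,
    'embedding_matrix': np.random.normal(size=(20000, 300)),
    'sequence_length': 100,
    'lstm_units': 64,
    'dropout_rate': 0.3,
    'loss': 'binary_crossentropy',
    'optimizer': 'adam',
}
model = GRUCapsule(params)
model.summary()
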
Example #7
    def __init__(self,
            dim_input,
            dim_hidden,
            dim_label,
            dim_caps,
            n_layers,
            n_label,
            n_vocab,
            embed_dropout_rate,
            cell_dropout_rate,
            final_dropout_rate,
            embed_list,
            bidirectional,
            rnn_type,
            use_cuda,
            model_name):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.dim_input = dim_input
        self.dim_hidden = dim_hidden
        self.dim_caps = dim_caps
        self.n_label = n_label
        self.bidirectional = bidirectional
        self.rnn_type = rnn_type
        self.use_cuda = use_cuda
        self.model_name = model_name

        self.add_module('embed', nn.Embedding(n_vocab, dim_input))
        self.add_module('embed_dropout', nn.Dropout(embed_dropout_rate))
        self.add_module(
            'rnn',
            getattr(nn, self.rnn_type)(
                dim_input,
                dim_hidden,
                n_layers,
                batch_first=True,
                dropout=cell_dropout_rate,
                bidirectional=bidirectional,
            ))

        if self.model_name == 'Attention':
            for i in range(self.n_label):
                self.add_module('capsule_%s' % i, Capsule_Att(dim_hidden * (2 if self.bidirectional else 1), final_dropout_rate, self.use_cuda))
        else:
            dim_middle = 64
            # self.add_module('capslayer', Capsule(dim_hidden * (2 if self.bidirectional else 1), n_label, dim_label, iters=3, leaky=True, use_cuda=self.use_cuda))
            self.add_module('capslayer_0', Capsule(dim_hidden * (2 if self.bidirectional else 1), 8, dim_middle, iters=3, leaky=True, use_cuda=self.use_cuda))
            self.add_module('capslayer_1', Capsule(dim_middle, n_label, dim_label, iters=3, leaky=True, use_cuda=self.use_cuda))

        self.add_module('reconstruct', nn.Linear(dim_label, dim_hidden * (2 if self.bidirectional else 1)))
        # self.add_module('reconstruct', nn.Linear(dim_label, dim_hidden))

        self.init_weights(embed_list)
        ignored_params = list(map(id, self.embed.parameters()))
        self.base_params = filter(lambda p: id(p) not in ignored_params,
                                  self.parameters())
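
The ignored_params/base_params split filters the embedding's weights out of self.parameters(), presumably so the pretrained embedding can be frozen or given its own learning rate; note that on Python 3, filter returns a one-shot iterator, so a list is safer. A typical way to consume such a split, sketched on a toy model:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Embedding(100, 8), nn.Linear(8, 2))
embed = model[0]

ignored_params = set(map(id, embed.parameters()))
base_params = [p for p in model.parameters() if id(p) not in ignored_params]

# e.g. a smaller learning rate for the pretrained embedding than for the rest
optimizer = torch.optim.SGD([
    {'params': base_params, 'lr': 1e-2},
    {'params': embed.parameters(), 'lr': 1e-4},
])
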
Example #8
    def forward_pass(self, X_in, extra_in):
        
        X_reshaped = tf.reshape(X_in, (self.batch_size, self.shape, self.shape))
        tf.summary.image('original', tf.expand_dims(X_reshaped, -1))
        
        capsules_out = []
        for i in range(self.num_capsules):
            with tf.variable_scope('capsule_%d' % (i)):
                capsule = Capsule(self.in_dimen, self.r_dimen, self.g_dimen)
                capsule_out = capsule.build(X_in, extra_in)
                capsules_out.append(capsule_out)

        all_caps_out = tf.add_n(capsules_out)
        X_prediction = tf.sigmoid(all_caps_out)

        X_prediction_reshaped = tf.reshape(X_prediction, (self.batch_size, self.shape, self.shape))
        tf.summary.image('prediction', tf.expand_dims(X_prediction_reshaped, -1))
        
        return X_prediction
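
Each capsule emits a full-size reconstruction and tf.add_n sums them before the sigmoid, i.e. X_prediction = sigmoid(sum_i capsule_i(X)). A numpy rendering of just that combination step:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

capsules_out = [np.random.randn(2, 784) for _ in range(3)]  # 3 capsules, batch of 2
x_prediction = sigmoid(np.sum(capsules_out, axis=0))        # tf.add_n + tf.sigmoid analogue
print(x_prediction.shape)                                   # (2, 784)
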
Example #9
    def __init__(self, num_input_conv_layer, num_output_conv_layer,
                 conv_kernel_dim, conv_kernel_stride, num_primary_unit,
                 primary_unit_size, num_classes, output_unit_size, num_routing,
                 cuda_enabled):

        super(Capsnet, self).__init__()

        self.cuda_enabled = cuda_enabled

        # MNIST Parameters
        self.image_width = 28
        self.image_height = 28
        self.image_channels = 1

        # Layer 1 : Convolutional Layer
        self.conv1 = nn.Conv2d(in_channels=num_input_conv_layer,
                               out_channels=num_output_conv_layer,
                               padding=0,
                               kernel_size=conv_kernel_dim,
                               stride=conv_kernel_stride)
        self.relu = nn.ReLU(inplace=True)

        # Primary Layer
        # Conv2d with squash activation
        self.primary = Capsule(in_unit=0,
                               in_channel=num_output_conv_layer,
                               num_unit=num_primary_unit,
                               unit_size=primary_unit_size,
                               use_routing=False,
                               num_routing=num_routing,
                               cuda_enabled=cuda_enabled)

        # DigitCaps layer
        # Capsule layer with dynamic routing
        self.digits = Capsule(
            in_unit=num_primary_unit,
            in_channel=primary_unit_size,
            num_unit=num_classes,
            unit_size=output_unit_size,  # 16D capsule per digit class
            use_routing=True,
            num_routing=num_routing,
            cuda_enabled=cuda_enabled)
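
The DigitCaps layer sets use_routing=True and iterates num_routing times. For orientation, here is a self-contained numpy sketch of routing-by-agreement with the squashing nonlinearity from Sabour et al. (2017); it illustrates the idea only and is not this repository's implementation:

import numpy as np

def squash(s, axis=-1, eps=1e-8):
    # v = (|s|^2 / (1 + |s|^2)) * (s / |s|)
    n2 = np.sum(np.square(s), axis=axis, keepdims=True)
    return (n2 / (1.0 + n2)) * s / np.sqrt(n2 + eps)

def route(u_hat, num_routing=3):
    # u_hat: (in_caps, out_caps, out_dim) prediction vectors
    b = np.zeros(u_hat.shape[:2])                             # routing logits
    for _ in range(num_routing):
        c = np.exp(b) / np.exp(b).sum(axis=1, keepdims=True)  # coupling coefficients
        v = squash((c[..., None] * u_hat).sum(axis=0))        # (out_caps, out_dim)
        b = b + (u_hat * v[None]).sum(axis=-1)                # agreement update
    return v

v = route(np.random.randn(1152, 10, 16))
print(v.shape)  # (10, 16): one 16-D capsule per digit class
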
Example #10
    def capsule_from_bytes(self, data):
        """
        Build a capsule from the given byte array.

        :param data: byte array
        :return: capsule
        """
        cs = Capsule(ProxyLib())
        cs.set_pointer(self.get_pointer())
        cs.from_bytes(data)
        return cs
Example #11
    def __init__(self,
                 input_caps,
                 hidden_caps,
                 caps_dim,
                 biases=True,
                 iters=3,
                 leaky=True,
                 use_cuda=True):
        super(CRUCell, self).__init__()
        self.caps_dim = caps_dim
        self.input_caps = input_caps
        self.hidden_caps = hidden_caps

        self.add_module(
            'capsule',
            Capsule(caps_dim,
                    self.hidden_caps,
                    caps_dim,
                    iters=iters,
                    leaky=leaky,
                    use_cuda=use_cuda))
Example #12
    def __init__(self,
                 num_capsules,
                 in_dim,
                 recog_dim,
                 gener_dim,
                 activation,
                 rng=None):
        if rng is None:
            rng = numpy.random.RandomState(numpy.random.randint(2015))
        self.num_capsules = num_capsules
        self.params = []
        self.capsules = []
        for i in xrange(num_capsules):
            cap = Capsule(in_dim, recog_dim, gener_dim, activation)
            self.capsules.append(cap)
            self.params += cap.params

        self.b_out = theano.shared(value=numpy.zeros(
            shape=(in_dim, ), dtype=theano.config.floatX),
                                   name='b_out',
                                   borrow=True)
        self.params.append(self.b_out)
Example #13
def getModel():
    Routings = 6
    Num_capsule = 10
    Dim_capsule = 16
    rate_drop_dense = 0.35
    
    #def root_mean_squared_error(y_true, y_pred):
    #    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) 
    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true))) 
    sequence_input = Input(shape=(maxlen, ))
    x = Embedding(max_features, embed_size, weights=[embedding_matrix],
                  trainable=False)(sequence_input)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(GRU(32, return_sequences=True, dropout=0.1,
                          recurrent_dropout=0.1))(x)
    capsule = Capsule(num_capsule=Num_capsule, dim_capsule=Dim_capsule,
                      routings=Routings, share_weights=True)(x)
    capsule = Flatten()(capsule)
    capsule = Dropout(rate_drop_dense)(capsule)
    preds = Dense(1, activation="sigmoid")(capsule)
    model = Model(sequence_input, preds)
    model.compile(loss='mse', optimizer=Adam(lr=1e-3),
                  metrics=['accuracy', root_mean_squared_error])
    
    return model
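getModel() relies on globals defined elsewhere in the original script (maxlen, max_features, embed_size, embedding_matrix). A hedged usage sketch with stand-in values, substituting a random matrix for the pretrained embeddings:

import numpy as np

maxlen, max_features, embed_size = 100, 20000, 300
embedding_matrix = np.random.normal(size=(max_features, embed_size))

model = getModel()
model.summary()
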
Example #14
def main(_):

    data_list = full_data()
    char_embedding = KeyedVectors.load_word2vec_format(
        FLAGS.char_embedding_model_path, binary=False)
    word_embedding = KeyedVectors.load_word2vec_format(
        FLAGS.word_embedding_model_path, binary=False)
    schemas_vocab = build_vocab(FLAGS, 'schemas')
    char_vocab = build_vocab(FLAGS, 'char', data_list, char_embedding)
    word_vocab = build_vocab(FLAGS, 'word', data_list, word_embedding)
    postag_vocab = build_vocab(FLAGS, 'postag', data_list)
    pos_map = pos_mapping(FLAGS)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    if FLAGS.use_dev:
        file_dev = get_config_values('dataset', 'dev')
        data_dev = load_json(file_dev)
        data_list = Process([data_dev], mode='dev')

    if FLAGS.use_test:
        file_test = get_config_values('dataset', 'test')
        data_test = load_json(file_test)
        data_list = Process([data_test], mode='test')

    ensemble_logits = 0.0  # weighted sum of the per-checkpoint logits across folds
    for K in range(FLAGS.k_fold):
        with tf.Session(config=config) as sess:
            # instantiate model
            Model = Capsule(
                FLAGS.is_training, FLAGS.num_classes, vocab_size,
                FLAGS.batch_size, FLAGS.embed_size, FLAGS.embed_size_p,
                FLAGS.learning_rate, FLAGS.decay_step, FLAGS.decay_rate,
                FLAGS.entity_window, FLAGS.sequence_length, filter_sizes,
                feature_map, FLAGS.use_highway_flag, FLAGS.highway_layers,
                FLAGS.sentence_size, FLAGS.use_ranking_loss, FLAGS.lm,
                FLAGS.margin_plus, FLAGS.margin_minus, FLAGS.first_decay_steps,
                FLAGS.t_mul, FLAGS.m_mul, FLAGS.alpha)
            # initialize saver
            saver = tf.train.Saver()
            if os.path.exists(FLAGS.ckpt_dir + "Model{}/checkpoint".format(K)):
                logger.info("Restoring Variables from Checkpoint.")
                save_path = FLAGS.ckpt_dir + "Model{0}/Model{0}-5F.ckpt-{1}".format(
                    K, FLAGS.num_epochs - 1)
                saver.restore(sess, save_path)
            else:
                logger.info("Can't load model checkpoint...stoping...")
                return

            data = Batch(data_list, char_vocab, word_vocab, schemas_vocab,
                         pos_map, postag_vocab, FLAGS)
            f1_score, p_score, r_score, confusion_matrix, logits, _ = do_eval(
                sess, data, Model)
            print(
                "Model%d\tf1_score:%.4f\tprecision_score:%.4f\t recall_score:%.4f"
                % (K, f1_score, p_score, r_score))
            print("Model %d\tConfusion matrix:" % (K))
            pprint(confusion_matrix)

        ensemble_logits = ensemble_logits + logits * 0.6
        del Model
        gc.collect()
        tf.reset_default_graph()

        with tf.Session(config=config) as sess:
            # instantiate model
            Model = Capsule(
                FLAGS.is_training, FLAGS.num_classes, vocab_size,
                FLAGS.batch_size, FLAGS.embed_size, FLAGS.embed_size_p,
                FLAGS.learning_rate, FLAGS.decay_step, FLAGS.decay_rate,
                FLAGS.entity_window, FLAGS.sequence_length, filter_sizes,
                feature_map, FLAGS.use_highway_flag, FLAGS.highway_layers,
                FLAGS.sentence_size, FLAGS.use_ranking_loss, FLAGS.lm,
                FLAGS.margin_plus, FLAGS.margin_minus, FLAGS.first_decay_steps,
                FLAGS.t_mul, FLAGS.m_mul, FLAGS.alpha)
            # initialize saver
            saver = tf.train.Saver()
            if os.path.exists(FLAGS.ckpt_dir + "Model{}/checkpoint".format(K)):
                logger.info("Restoring Variables from Checkpoint.")
                save_path = FLAGS.ckpt_dir + "Model{0}/Model{0}-5F.ckpt-{1}".format(
                    K, FLAGS.num_epochs - 2)
                saver.restore(sess, save_path)
            else:
                logger.info("Can't load model checkpoint...stoping...")
                return

            data = Batch(data_list, char_vocab, word_vocab, schemas_vocab,
                         pos_map, postag_vocab, FLAGS)
            f1_score, p_score, r_score, confusion_matrix, logits, labels = do_eval(
                sess, data, Model)
            print(
                "Model%d\tf1_score:%.4f\tprecision_score:%.4f\t recall_score:%.4f"
                % (K, f1_score, p_score, r_score))
            print("Model %d\tConfusion matrix:" % (K))
            pprint(confusion_matrix)

        ensemble_logits = ensemble_logits + logits * 0.4
        del Model
        gc.collect()
        tf.reset_default_graph()

    logits = ensemble_logits / FLAGS.k_fold
    threshold, _ = best_threshold(logits, labels)
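
best_threshold presumably scans candidate cutoffs over the averaged logits and keeps the best-scoring one against the held-out labels; its implementation is not shown here, so the following is only a self-contained sketch of that idea using F1 as the criterion:

import numpy as np

def best_threshold(logits, labels, grid=np.linspace(0.1, 0.9, 17)):
    # pick the cutoff that maximizes F1 on the held-out labels
    scores = []
    for t in grid:
        preds = (logits >= t).astype(int)
        tp = np.sum((preds == 1) & (labels == 1))
        precision = tp / max(preds.sum(), 1)
        recall = tp / max(labels.sum(), 1)
        scores.append(2 * precision * recall / max(precision + recall, 1e-8))
    best = int(np.argmax(scores))
    return grid[best], scores[best]

t, f1 = best_threshold(np.random.rand(100), np.random.randint(0, 2, 100))
print(t, f1)
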
Example #15
from keras.preprocessing import sequence

from capsule import Capsule

max_features = 20000
maxlen = 80  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

print("Build model...")
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(SpatialDropout1D(0.2))
model.add(GRU(128, dropout=0.2, return_sequences=True))
model.add(Capsule(num_capsule=10, dim_capsule=16, routings=5, share_weights=True))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation("sigmoid"))

# try using different optimizers and different optimizer configs
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
print(len(X_train), "train sequences")
print(len(X_test), "test sequences")

print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
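
The snippet stops after padding; a plausible continuation (not part of the original, epoch count chosen arbitrarily) trains and evaluates the model built above:

model.fit(X_train, y_train, batch_size=batch_size, epochs=5,  # epochs: illustrative only
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print("Test accuracy:", acc)
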
Example #16
                    x.getparent().remove(x)
            except:
                pass

    view_tree(t)

    county = Counter([(str(x.tag), str(x.attrib)) for x in t.iter()])
    for x in county:
        print(x, county[x])

    county = Counter([(str(x.tag), str(x.attrib)) for x in c2.tree.iter()])
    for x in county:
        print(x, county[x])

    c1 = Capsule(
        'http://www.lazygamer.net/e3-2015/bethesda-reveals-hearthstone-competitor-elder-scrolls-legend/'
    )
    c2 = Capsule(
        'http://www.lazygamer.net/playstation-4-2/call-of-duty-switches-allegiances-from-xbox-to-playstation/'
    )

    c1 = Capsule(
        'http://www.infoworld.com/article/2935436/machine-learning/ibm-spark-bluemix-machine-learning.html'
    )
    c2 = Capsule(
        'http://www.infoworld.com/article/2897287/big-data/5-reasons-to-turn-to-spark-for-big-data-analytics.html'
    )

    c1 = Capsule(
        'http://bankinnovation.net/2014/01/housing-continues-to-rebound/')
    c2 = Capsule(
Example #17
def main(_):

    data_list = full_data()
    char_embedding = KeyedVectors.load_word2vec_format(FLAGS.char_embedding_model_path, binary=False)
    word_embedding = KeyedVectors.load_word2vec_format(FLAGS.word_embedding_model_path, binary=False)
    schemas_vocab = build_vocab(FLAGS, 'schemas')
    char_vocab = build_vocab(FLAGS, 'char', data_list, char_embedding)
    word_vocab = build_vocab(FLAGS, 'word', data_list, word_embedding)
    postag_vocab = build_vocab(FLAGS, 'postag', data_list)
    pos_map = pos_mapping(FLAGS)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf.set_random_seed(2019)

    file_train = get_config_values('dataset', 'train')
    data_train = load_json(file_train)
    data_list = Process([data_train], mode='train')

    partition = int(len(data_list) * (1.0 / FLAGS.k_fold))
    LEFT = 0
    RIGHT = partition

    print("len total:{0}\tpartition:{1}\t".format(len(data_list),partition))
    for K in range(FLAGS.k_fold):
        with tf.Session(config=config) as sess:
            # instantiate model
            Model = Capsule(
                FLAGS.is_training, FLAGS.num_classes, vocab_size,
                FLAGS.batch_size, FLAGS.embed_size, FLAGS.embed_size_p,
                FLAGS.learning_rate, FLAGS.decay_step, FLAGS.decay_rate,
                FLAGS.entity_window, FLAGS.sequence_length, filter_sizes,
                feature_map, FLAGS.use_highway_flag, FLAGS.highway_layers,
                FLAGS.sentence_size, FLAGS.use_ranking_loss, FLAGS.lm,
                FLAGS.margin_plus, FLAGS.margin_minus, FLAGS.first_decay_steps,
                FLAGS.t_mul, FLAGS.m_mul, FLAGS.alpha)
            # initialize saver
            saver = tf.train.Saver(max_to_keep=2)
            logger.info('Initializing Variables')
            sess.run(tf.global_variables_initializer())
            summary_writer = tf.summary.FileWriter(
                logdir=FLAGS.log_path + 'Model{}/'.format(K), graph=sess.graph)
            assign_pretrained_embedding(sess, Model, char_vocab, vocab_size[0],
                                        char_embedding, 'char', FLAGS)
            assign_pretrained_embedding(sess, Model, word_vocab, vocab_size[1],
                                        word_embedding, 'word', FLAGS)
            assign_position_embedding(sess, Model, vocab_size[2], FLAGS)
            assign_objects_embedding(sess, Model, vocab_size[3], FLAGS)
            assign_postags_embedding(sess, Model, vocab_size[4], FLAGS)

            curr_epoch = sess.run(Model.epoch_step)

            iteration = 0
            for epoch in range(curr_epoch, FLAGS.num_epochs):
                train_data = Batch(data_list[:LEFT] + data_list[RIGHT:], char_vocab,
                                   word_vocab, schemas_vocab, pos_map, postag_vocab,
                                   FLAGS)
                valid_data = Batch(data_list[LEFT:RIGHT], char_vocab, word_vocab,
                                   schemas_vocab, pos_map, postag_vocab, FLAGS)
                loss, counter = 0.0, 0
                for batch in tqdm(train_data):
                    iteration = iteration + 1
                    feed_dict = {}

                    feed_dict[Model.dropout_keep_prob] = 0.5
                    feed_dict[Model.dropout_keep_prob_spatial] = 0.1
                    feed_dict[Model.input_x_c] = batch['char_sentence']
                    feed_dict[Model.input_x_w] = batch['mix_sentence']
                    feed_dict[Model.input_x_t] = batch['postag_sentence']
                    feed_dict[Model.input_x_p] = batch['relative_position']
                    feed_dict[Model.input_x_p_l1] = batch['entitys_position'][:, 0, :]
                    feed_dict[Model.input_x_p_l2] = batch['entitys_position'][:, 1, :]
                    feed_dict[Model.input_x_o] = batch['objects_ids']
                    feed_dict[Model.input_x_c_l1] = batch['lexical'][:, 0, :]
                    feed_dict[Model.input_x_c_l2] = batch['lexical'][:, 1, :]
                    feed_dict[Model.input_x_w_l1] = batch['lexical'][:, 2, :]
                    feed_dict[Model.input_x_w_l2] = batch['lexical'][:, 3, :]
                    feed_dict[Model.input_y] = batch['label_sentence']
                    feed_dict[Model.input_y_class] = loss_class
                    feed_dict[Model.tst] = FLAGS.is_training

                    train_op = (Model.train_op_frozen
                                if FLAGS.is_frozen_step > iteration and epoch == 0
                                else Model.train_op)
                    curr_loss, lr, _, _, summary, logits = sess.run(
                        [Model.loss_val, Model.learning_rate, train_op,
                         Model.global_increment, Model.merge, Model.logits],
                        feed_dict)
                    summary_writer.add_summary(summary, global_step=iteration)
                    loss, counter = loss + curr_loss, counter + 1

                    if counter % 50 == 0:
                        print("Model %d\tEpoch %d\tBatch %d\tTrain Loss:%.4f\tLearning rate:%.6f"
                              % (K, epoch, counter, loss / float(counter), lr))

                # epoch increment
                logger.info("going to increment epoch counter....")
                sess.run(Model.epoch_increment)
                if epoch % FLAGS.validate_every == 0:
                    eval_loss, f1_score, p_score, r_score, confusion_matrix = do_eval(
                        sess, valid_data, Model)
                    print("Model %d\tEpoch %d\tValidation Loss:%.4f\t F1_score:%.4f"
                          % (K, epoch, eval_loss, f1_score))
                    print("Model %d\tEpoch %d\tValidation precision_score:%.4f\t recall_score:%.4f"
                          % (K, epoch, p_score, r_score))
                    # save model to checkpoint
                    save_path = FLAGS.ckpt_dir + "Model{0}/Model{0}-5F.ckpt".format(K)
                    saver.save(sess, save_path, global_step=epoch)

            summary_writer.close()

        LEFT += partition
        RIGHT += partition

        # clear the model and reset the graph
        del Model
        gc.collect()
        tf.reset_default_graph()
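
The sliding LEFT/RIGHT window above implements a manual k-fold split: fold K validates on data_list[LEFT:RIGHT] and trains on everything else. A tiny self-contained check of that index arithmetic:

data_list = list(range(10))
k_fold = 5
partition = int(len(data_list) * (1.0 / k_fold))

LEFT, RIGHT = 0, partition
for K in range(k_fold):
    train = data_list[:LEFT] + data_list[RIGHT:]
    valid = data_list[LEFT:RIGHT]
    print(K, valid, len(train))  # each fold holds out a distinct slice
    LEFT += partition
    RIGHT += partition
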
Example #18
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Define layers
input_image = Input(shape=(None, None, 1))
define_layers = Conv2D(64, (3, 3), activation='relu')(input_image)
define_layers = Conv2D(64, (3, 3), activation='relu')(define_layers)
define_layers = AveragePooling2D((2, 2))(define_layers)
define_layers = Conv2D(128, (3, 3), activation='relu')(define_layers)
define_layers = Conv2D(128, (3, 3), activation='relu')(define_layers)
define_layers = Reshape((-1, 128))(define_layers)

defined_capsule = Capsule(10, 16, 1, True)(define_layers)
output = Lambda(lambda z: K.sqrt(K.sum(K.square(z), 2)))(defined_capsule)

# Define model
model = Model(inputs=input_image, outputs=output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
Example #19
def main():
    capsule = Capsule("127.0.0.1", 5001)

    print(capsule.invoke("Teste", "method", [2, 5]).result)