Example #1
def focused_rnns(arg1_ids):
    """One RNN decides focus weights for other RNNs."""

    # embed arg1 input sequence
    arg1_emb = shared_emb(arg1_ids)
    # shape: (samples, arg1_len, words_dim)

    # focus weights for all RNNs
    focus_weights = GRU(focus_dim, return_sequences=True, dropout_U=focus_dropout_U, dropout_W=focus_dropout_W)(arg1_emb)
    # shape: (samples, arg1_len, focus_dim)

    # individual RNNs with focus
    rnns = []
    for i in range(focus_dim):
        # focus weights for current RNN
        select_repeat = Lambda(lambda x: K.repeat_elements(x[:, i:i + 1], words_dim, axis=-1), output_shape=lambda s: s[:1] + (words_dim,))
        rnn_focus = TimeDistributed(select_repeat)(focus_weights)
        # shape: (samples, arg1_len, words_dim)
        # weighted input sequence
        rnn_in = merge([arg1_emb, rnn_focus], mode='mul')
        # shape: (samples, arg1_len, words_dim)

        # individual RNN
        rnn = GRU(rnn_dim, return_sequences=False, dropout_U=rnn_dropout_U, dropout_W=rnn_dropout_W)(rnn_in)
        rnns.append(rnn)
        # shape: (samples, rnn_dim)

    return rnns
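A minimal wiring sketch for trying the function above, written against the same Keras 1.x API the snippet uses (`merge`, `dropout_U`/`dropout_W`). Every module-level name the snippet assumes (`shared_emb`, `words_dim`, `focus_dim`, `rnn_dim`, and the dropout rates) is given an illustrative placeholder value here:

# Hypothetical setup; vocabulary size and dimensions are illustrative.
from keras.layers import Input, Embedding, Dense, merge
from keras.models import Model

vocab_size, seq_len = 10000, 20
words_dim, focus_dim, rnn_dim = 100, 4, 64
focus_dropout_U = focus_dropout_W = rnn_dropout_U = rnn_dropout_W = 0.0
shared_emb = Embedding(vocab_size, words_dim)

arg1_ids = Input(shape=(seq_len,), dtype='int32')
rnns = focused_rnns(arg1_ids)
# concatenate the per-focus RNN outputs and classify
merged = merge(rnns, mode='concat')
preds = Dense(2, activation='softmax')(merged)
model = Model(input=arg1_ids, output=preds)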
Example #2
    def build_variational_architecture(self):
        e1 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='valid',
                           name='e1')(self.autoencoder_input)
        e3 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='same',
                           name='e3')(e1)
        e4 = Convolution2D(64,
                           6,
                           6,
                           subsample=(2, 2),
                           activation='relu',
                           border_mode='same',
                           name='e4')(e3)

        e5 = Dense(512, activation='relu')(flatten(e4))
        self.z_mean = Dense(self.latent_shape, activation='linear')(e5)
        self.z_log_sigma = Dense(self.latent_shape, activation='linear')(e5)

        batch_size = tf.shape(self.autoencoder_input)[0]

        def sample_z(args):
            z_m, z_l_s = args
            eps = K.random_normal(shape=(batch_size, self.latent_shape),
                                  mean=0.,
                                  std=1.)
            return z_m + K.exp(z_l_s / 2) * eps

        # Sample z
        z = Lambda(sample_z)([self.z_mean, self.z_log_sigma])

        # Decoder layers
        d1 = Dense(6400, activation='relu', name='d1')
        d2 = Reshape((10, 10, 64), name='d2')
        d3 = Deconvolution2D(64,
                             6,
                             6,
                             output_shape=(None, 20, 20, 64),
                             subsample=(2, 2),
                             activation='relu',
                             border_mode='same',
                             name='d3')
        d4 = Deconvolution2D(64,
                             6,
                             6,
                             output_shape=(None, 40, 40, 64),
                             subsample=(2, 2),
                             activation='relu',
                             border_mode='same',
                             name='d4')
        d5 = Deconvolution2D(1,
                             6,
                             6,
                             output_shape=(None, 84, 84, 1),
                             subsample=(2, 2),
                             activation='sigmoid',
                             border_mode='valid',
                             name='d5')

        # Full autoencoder
        d1_full = d1(z)
        d2_full = d2(d1_full)
        d3_full = d3(d2_full)
        d4_full = d4(d3_full)
        d5_full = d5(d4_full)
        d7_full = Reshape((7056, ))(d5_full)

        # Only decoding
        d1_decoder = d1(self.decoder_input)
        d2_decoder = d2(d1_decoder)
        d3_decoder = d3(d2_decoder)
        d4_decoder = d4(d3_decoder)
        d5_decoder = d5(d4_decoder)
        d7_decoder = Reshape((7056, ))(d5_decoder)

        self.decoder_output = d7_decoder
        self.autoencoder_output = d7_full
        self.encoder_output = self.z_mean

        self.emulator_reconstruction_loss = K.sum(K.binary_crossentropy(
            self.autoencoder_output, flatten(self.autoencoder_input)),
                                                  axis=1)
        kl_loss = -0.5 * K.sum(1 + self.z_log_sigma - K.square(self.z_mean) -
                               K.exp(self.z_log_sigma),
                               axis=-1)
        self.autoencoder_loss = tf.add(self.emulator_reconstruction_loss,
                                       kl_loss)
Example #3
def cos_distance(y_true, y_pred):
    y_true = K.l2_normalize(y_true, axis=-1)
    y_pred = K.l2_normalize(y_pred, axis=-1)
    return K.mean(1 - K.sum((y_true * y_pred), axis=-1))
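Since both tensors are L2-normalized first, this returns one minus the mean cosine similarity, so it can be passed straight to `compile` as a custom loss. A usage sketch, assuming a built `model`:

model.compile(optimizer='adam', loss=cos_distance)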
Example #4
        fr_model = build_fr_model(input_shape=fr_image_shape)
        fr_model.compile(loss=['binary_crossentropy'], optimizer="adam")

        # Make the face recognition network non-trainable
        fr_model.trainable = False

        # Input layers
        input_image = Input(shape=(64, 64, 3))
        input_label = Input(shape=(6,))

        # Use the encoder and the generator network
        latent0 = encoder(input_image)
        gen_images = generator([latent0, input_label])

        # Resize images to the desired shape
        resized_images = Lambda(lambda x: K.resize_images(x, height_factor=3, width_factor=3,
                                                          data_format='channels_last'))(gen_images)
        embeddings = fr_model(resized_images)

        # Create a Keras model and specify the inputs and outputs for the network
        fr_adversarial_model = Model(inputs=[input_image, input_label], outputs=[embeddings])

        # Compile the model
        fr_adversarial_model.compile(loss=euclidean_distance_loss, optimizer=adversarial_optimizer)

        for epoch in range(epochs):
            print("Epoch:", epoch)

            reconstruction_losses = []

            number_of_batches = int(len(loaded_images) / batch_size)
            print("Number of batches:", number_of_batches)
Example #5
 def call(self, x, mask=None):
     # attention scores over timesteps
     eij = K.tanh(K.dot(x, self.W) + self.b)
     ai = K.exp(eij)
     # normalize to attention weights; backend ops replace the Theano-only
     # `dimshuffle` so this also runs on the TensorFlow backend
     weights = ai / K.sum(ai, axis=1, keepdims=True)
     # weighted sum of the input over time
     weighted_input = x * K.expand_dims(weights)
     return K.sum(weighted_input, axis=1)
Example #6
    def bottleneck_encoder(self,
                           tensor,
                           nfilters,
                           downsampling=False,
                           dilated=False,
                           asymmetric=False,
                           normal=False,
                           drate=0.1,
                           name=''):
        """
        Bottleneck encoder block.

        :param tensor: input tensor
        :param nfilters: number of filters
        :param downsampling: downsample the feature map (strided conv plus a max-pooled, zero-padded skip path)
        :param dilated: dilation rate; when set, a dilated 3x3 convolution is applied
        :param asymmetric: applies a 5x5 convolution decomposed into asymmetric 5x1 and 1x5 layers
        :param normal: applies a regular 3x3 convolution on the feature map
        :param drate: spatial dropout rate
        :param name: name suffix for the block's layers
        :return: encoder output
        """
        y = tensor
        skip = tensor
        stride = 1
        ksize = 1

        # Filters operating on downsampled images have a bigger receptive field and hence gather more context.
        if downsampling:
            stride = 2
            ksize = 2
            skip = MaxPooling2D(pool_size=(2, 2),
                                name=f'max_pool_{name}')(skip)
            skip = Permute(
                (1, 3, 2),
                name=f'permute_1_{name}')(skip)  # (B, H, W, C) -> (B, H, C, W)
            ch_pad = nfilters - K.int_shape(tensor)[-1]
            skip = ZeroPadding2D(padding=((0, 0), (0, ch_pad)),
                                 name=f'zeropadding_{name}')(skip)
            skip = Permute(
                (1, 3, 2),
                name=f'permute_2_{name}')(skip)  # (B, H, C, W) -> (B, H, W, C)

        y = Conv2D(filters=nfilters // 4,
                   kernel_size=(ksize, ksize),
                   kernel_initializer='he_normal',
                   strides=(stride, stride),
                   padding='same',
                   use_bias=False,
                   name=f'1x1_conv_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_1x1_{name}')(y)
        y = PReLU(shared_axes=[1, 2], name=f'prelu_1x1_{name}')(y)

        if normal:
            # regular convolution with a 3x3 filter
            y = Conv2D(filters=nfilters // 4,
                       kernel_size=(3, 3),
                       kernel_initializer='he_normal',
                       padding='same',
                       name=f'3x3_conv_{name}')(y)
        elif asymmetric:
            # decompose 5x5 convolution to two asymmetric layers as 5x1 and 1x5
            y = Conv2D(filters=nfilters // 4,
                       kernel_size=(5, 1),
                       kernel_initializer='he_normal',
                       padding='same',
                       use_bias=False,
                       name=f'5x1_conv_{name}')(y)
            y = Conv2D(filters=nfilters // 4,
                       kernel_size=(1, 5),
                       kernel_initializer='he_normal',
                       padding='same',
                       name=f'1x5_conv_{name}')(y)
        elif dilated:
            y = Conv2D(filters=nfilters // 4,
                       kernel_size=(3, 3),
                       kernel_initializer='he_normal',
                       dilation_rate=(dilated, dilated),
                       padding='same',
                       name=f'dilated_conv_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_main_{name}')(y)
        y = PReLU(shared_axes=[1, 2], name=f'prelu_{name}')(y)

        y = Conv2D(filters=nfilters,
                   kernel_size=(1, 1),
                   kernel_initializer='he_normal',
                   use_bias=False,
                   name=f'final_1x1_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_final_{name}')(y)
        y = SpatialDropout2D(rate=drate,
                             name=f'spatial_dropout_final_{name}')(y)

        y = Add(name=f'add_{name}')([y, skip])
        y = PReLU(shared_axes=[1, 2], name=f'prelu_out_{name}')(y)

        return y
Example #7
 def dropped_inputs():
     return K.dropout(ones, self.recurrent_dropout)
Example #8
 def call(self, inputs, **kwargs):
     return K.sum(inputs, axis=1)
Example #9
def binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
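This reimplements the stock Keras binary-accuracy metric: round the sigmoid outputs and compare elementwise. A usage sketch, assuming a built `model` with sigmoid outputs:

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[binary_accuracy])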
Example #10
# to many more layers, this will keep our code
# readable
import os, sys
local_path = os.path.dirname(__file__)
root = os.path.join(local_path, '..', "..")
sys.path.append(root)

from keras import backend as K
import tensorflow as tf

config = tf.ConfigProto(intra_op_parallelism_threads=30,
                        inter_op_parallelism_threads=30,
                        allow_soft_placement=True,
                        device_count={'CPU': 30})
session = tf.Session(config=config)
K.set_session(session)


def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
Example #11
def Sum(x):
    return K.sum(x, axis=1)
Example #12
 def __call__(self, w):
     return K.clip(w, 0., 1.)
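The signature matches a Keras weight constraint's `__call__`. A minimal sketch of the enclosing class and how it would be attached in the Keras 1.x API used elsewhere in these examples (class and layer names are illustrative):

from keras import backend as K
from keras.constraints import Constraint
from keras.layers import Dense

class ClipToUnitInterval(Constraint):
    """Clip every weight into [0, 1] after each gradient update."""
    def __call__(self, w):
        return K.clip(w, 0., 1.)

hidden = Dense(10, W_constraint=ClipToUnitInterval())  # Keras 1.x keyword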
Example #13
 def broadcast_pose(self, pose, height, width):
     pose = Reshape((1, 1, 2))(pose)
     pose = Lambda(lambda pose: K.tile(pose, [1, height, width, 1]))(pose)
     return pose
Example #14
def learn_cnn(x_train, y_train, x_test, y_test):
    num_classes = 10

    # input image dimensions
    img_rows, img_cols = 28, 28

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(),
                  metrics=['acc'])
    plot_model(model, to_file='cnn_model.png')
    m = model.fit(x_train,
                  y_train,
                  validation_data=(x_test, y_test),
                  epochs=10,
                  batch_size=64,
                  verbose=2)
    plt.plot(m.history['acc'])
    plt.plot(m.history['val_acc'])
    plt.title('learn cnn MNIST accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig("learn_cnn_MNIST_accuracy.png")
    plt.show()

    plt.plot(m.history['loss'])
    plt.plot(m.history['val_loss'])
    plt.title('learn cnn MNIST loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig("learn_cnn_MNIST_loss.png")
    plt.show()
Example #15
def binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
Example #16
            multi_channel.append(model_word2vec_input)

    multi_channel_embedding = merge(multi_channel, mode="concat", concat_axis=1)

    conv1_output = cnn_model([multi_channel_embedding, multi_channel_embedding, multi_channel_embedding])

    full_connected_layers = Dense(output_dim=len(label_to_index), init="glorot_uniform", activation="relu")(
        conv1_output
    )

    dropout_layers = Dropout(p=0.5)(full_connected_layers)

    softmax_output = Activation("softmax")(dropout_layers)

    model = Model(input=[model_onehot_input, model_word2vec_input], output=[softmax_output])
    model_output = K.function([model_onehot_input, model_word2vec_input, K.learning_phase()], [softmax_output])
    if verbose > 1:
        print model.summary()

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

    logging.debug("Starting training...")
    print "Starting training..."
    model.compile(loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"])

    logging.debug("Starting training, epochs: %s" % (config["cnn_nb_epoch"]))
    logging.debug("Starting training, EarlyStopping patience: %d" % (config["earlyStoping_patience"]))

    early_stop = EarlyStopping(patience=config["earlyStoping_patience"], verbose=1)
    # print train_data_features.shape[2]
    model.fit(
Example #17
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(latent_dim, ), mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
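To splice this reparameterization step into a model graph, the usual pattern (and presumably what the surrounding code does) wraps it in a `Lambda` layer; `z_mean`, `z_log_var`, and `latent_dim` are assumed to come from the encoder:

z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])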
Example #18
 def dropped_inputs():
     return K.dropout(ones, self.dropout)
Example #19
def vae_loss(x, x_decoded_mean):
    xent_loss = input_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return xent_loss + kl_loss
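Because the loss closes over `z_mean`, `z_log_var`, and `input_dim` from the enclosing scope, it is passed to `compile` by name like any custom Keras loss; a sketch assuming the assembled `vae` model:

vae.compile(optimizer='rmsprop', loss=vae_loss)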
Example #20
    def step(self, inputs, states):
        h_tm1 = states[0]
        c_tm1 = states[1]
        dp_mask = states[2]
        rec_dp_mask = states[3]
        x_input = states[4]

        # alignment model
        h_att = K.repeat(h_tm1, self.timestep_dim)
        att = _time_distributed_dense(x_input,
                                      self.attention_weights,
                                      self.attention_bias,
                                      output_dim=K.int_shape(
                                          self.attention_weights)[1])
        attention_ = self.attention_activation(
            K.dot(h_att, self.attention_recurrent_weights) + att)
        attention_ = K.squeeze(
            K.dot(attention_, self.attention_recurrent_bias), 2)

        alpha = K.exp(attention_)

        if dp_mask is not None:
            alpha *= dp_mask[0]

        alpha /= K.sum(alpha, axis=1, keepdims=True)
        alpha_r = K.repeat(alpha, self.input_dim)
        alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))

        # make context vector (soft attention after Bahdanau et al.)
        z_hat = x_input * alpha_r
        context_sequence = z_hat
        z_hat = K.sum(z_hat, axis=1)

        if self.implementation == 2:
            z = K.dot(inputs * dp_mask[0], self.kernel)
            z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
            z += K.dot(z_hat, self.attention_kernel)

            if self.use_bias:
                z = K.bias_add(z, self.bias)

            z0 = z[:, :self.units]
            z1 = z[:, self.units:2 * self.units]
            z2 = z[:, 2 * self.units:3 * self.units]
            z3 = z[:, 3 * self.units:]

            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)
        else:
            if self.implementation == 0:
                x_i = inputs[:, :self.units]
                x_f = inputs[:, self.units:2 * self.units]
                x_c = inputs[:, 2 * self.units:3 * self.units]
                x_o = inputs[:, 3 * self.units:]
            elif self.implementation == 1:
                x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
                x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
                x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
                x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
            else:
                raise ValueError('Unknown `implementation` mode.')

            i = self.recurrent_activation(
                x_i + K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i) +
                K.dot(z_hat, self.attention_i))
            f = self.recurrent_activation(
                x_f + K.dot(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f) +
                K.dot(z_hat, self.attention_f))
            c = f * c_tm1 + i * self.activation(
                x_c + K.dot(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c) +
                K.dot(z_hat, self.attention_c))
            o = self.recurrent_activation(
                x_o + K.dot(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o) +
                K.dot(z_hat, self.attention_o))
        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True

        if self.return_attention:
            return context_sequence, [h, c]
        else:
            return h, [h, c]
Example #21
    def train(self,
              epochs,
              batch_size=32,
              sample_interval=50,
              save_interval=1500):
        # # Load the dataset
        # (X_train, _), (_, _) = mnist.load_data()

        loss = []

        # Load the images
        X_train = self.load_images()

        # image_size = X_train.shape[1]
        # original_dim = image_size * image_size

        # Normalize
        X_train = X_train / 255

        # Reshape
        # X_train = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))

        # VAE loss = mse_loss or xent_loss + kl_loss
        reconstruction_loss = K.mean(mse(self.inputs, self.outputs))
        reconstruction_loss *= self.img_rows * self.img_cols
        # reconstruction_loss = np.mean(reconstruction_loss, axis=(1, 2))
        kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(
            self.z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        print(reconstruction_loss.shape, kl_loss.shape)
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self.vae.add_loss(vae_loss)
        self.vae.compile(optimizer='adam')
        self.vae.summary()
        # plot_model(self.vae,
        #            to_file='vae_mlp.png',
        #            show_shapes=True)

        try:
            for i in range(1, int(epochs / sample_interval) + 1):
                print("True Epoch: " + str(i * sample_interval))
                # train the autoencoder
                history = self.vae.fit(
                    X_train,
                    shuffle=True,
                    epochs=int(epochs / sample_interval),
                    batch_size=batch_size,
                    validation_data=(X_train, None))  # TODO: make test
                self.vae.save_weights('vae_mlp_fruit.h5')

                self.sample_images(X_train, i * sample_interval, noise=False)
                self.sample_images(X_train, i * sample_interval)

                loss.append(history.history['loss'])
        except KeyboardInterrupt:
            pass

        loss = np.stack(loss).flatten()

        with open("histories/%d-history.pkl" % time.time(), "wb") as history_file:
            pickle.dump(history, history_file)

        plt.clf()
        plt.plot(loss, label="loss")
        plt.legend()
        plt.title(label='VAE-GAN Loss')
        plt.savefig("images/plots/%d-vae-gan_loss.png" % time.time())
        plt.show()
Example #22
def smoothL1(y_true, y_pred):
    x = K.abs(y_true - y_pred)
    x = tf.where(x < HUBER_DELTA, 0.5 * x**2,
                 HUBER_DELTA * (x - 0.5 * HUBER_DELTA))
    return K.sum(x)
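`HUBER_DELTA` is a module-level constant the snippet assumes but does not define; 1.0 is the conventional choice for the smooth-L1/Huber transition point. A usage sketch:

HUBER_DELTA = 1.0  # assumed value; not defined in the snippet
model.compile(optimizer='adam', loss=smoothL1)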
Example #23
def sampleLoss(true_y, pred_y):
    z_mean = tf.expand_dims(pred_y[:, :, :, :, 0], -1)
    z_log_sigma = tf.expand_dims(pred_y[:, :, :, :, 1], -1)
    return -0.5 * K.mean(
        1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
Example #24
def generateAvgFromVolumes(vol_center, volumes, model):
    session = tf.Session()

    model_config = {
        'batchsize': 1,
        'split': 0.9,
        'validation': 0.1,
        'half_res': True,
        'epochs': 200,
        'groupnorm': True,
        'GN_groups': 32,
        'atlas': 'atlas.nii.gz',
        'model_output': 'model.pkl',
        'exponentialSteps': 7,
    }

    atlas, itk_atlas = DataGenerator.loadAtlas(model_config)

    m = DiffeomorphicRegistrationNet.create_model(model_config)
    m.load_weights(model)
    shapes = atlas.squeeze().shape

    print("First is : {}".format(vol_center))
    vol_first = vol_center
    np_vol_center = readNormalizedVolumeByPath(vol_first, itk_atlas).reshape(
        1, *shapes).astype(np.float32)

    velocities = []
    for vol in volumes:
        #np_atlas = atlas.reshape(1,*shapes).astype(np.float32)
        np_vol = readNormalizedVolumeByPath(vol, itk_atlas).reshape(
            1, *shapes).astype(np.float32)

        np_stack = np.empty(1 * shapes[0] * shapes[1] * shapes[2] * 2,
                            dtype=np.float32).reshape(1, *shapes, 2)
        np_stack[:, :, :, :, 0] = np_vol
        np_stack[:, :, :, :, 1] = np_vol_center

        #tf_stack = tf.convert_to_tensor(np_stack)
        predictions = m.predict(np_stack)
        velocity = predictions[2][0, :, :, :, :]
        velocities.append(velocity)

    # compute avg velocities
    avg_velocity = np.zeros(
        int(1 * shapes[0] / 2 * shapes[1] / 2 * shapes[2] / 2 * 3),
        dtype=np.float32).reshape(1, *[int(s / 2) for s in shapes], 3)
    for v in velocities:
        avg_velocity += v
    avg_velocity /= float(len(velocities))

    # apply scaling and squaring
    steps = model_config['exponentialSteps']
    tf_velo = tf.convert_to_tensor(
        avg_velocity.reshape(1, *[int(s / 2) for s in shapes], 3))
    tf_vol_center = tf.convert_to_tensor(np_vol_center.reshape(1, *shapes, 1))

    x, y, z = K.int_shape(tf_velo)[1:4]

    # clip too large values:
    v_max = 0.5 * (2**steps)
    v_min = -v_max
    velo = tf.clip_by_value(tf_velo, v_min, v_max)

    # ij indexing doesn't change (x,y,z) to (y,x,z)
    grid = tf.expand_dims(
        tf.stack(
            tf.meshgrid(tf.linspace(0., x - 1., x),
                        tf.linspace(0., y - 1., y),
                        tf.linspace(0., z - 1., z),
                        indexing='ij'), -1), 0)

    # replicate along batch size
    stacked_grids = tf.tile(grid, (tf.shape(velo)[0], 1, 1, 1, 1))

    displacement = tfVectorFieldExpHalf(velo, stacked_grids, n_steps=steps)
    displacement_highres = toUpscaleResampled(displacement)
    # warp center volume
    new_warped = remap3d(tf_vol_center, displacement_highres)
    with session.as_default():
        new_volume = new_warped.eval(session=session).reshape(*shapes)

    vol_dirs = np.array(itk_atlas.GetDirection()).reshape(3, 3)
    # reapply directions
    warp_np = np.flip(new_volume,
                      [a for a in range(3) if vol_dirs[a, a] == -1.])
    # prepare axes swap from xyz to zyx
    warp_np = np.transpose(warp_np, (2, 1, 0))
    # write image
    warp_img = sitk.GetImageFromArray(warp_np)
    warp_img.SetOrigin(itk_atlas.GetOrigin())
    warp_img.SetDirection(itk_atlas.GetDirection())
    sitk.WriteImage(warp_img, "new_volume.nii.gz")
Example #25
 def call(self, inputs, **kwargs):
     return K.sqrt(K.sum(K.square(inputs), -1))
Example #26
def expand_label_input(x):
    x = K.expand_dims(x, axis=1)
    x = K.expand_dims(x, axis=1)
    x = K.tile(x, [1, 32, 32, 1])
    return x
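A sketch of the typical use: broadcast a per-sample label vector over a 32x32 feature map so it can be concatenated channel-wise with image features (tensor names are illustrative, `merge` as in the other Keras 1.x examples here):

label_map = Lambda(expand_label_input)(input_label)        # (batch, 32, 32, label_dim)
x = merge([feature_map, label_map], mode='concat', concat_axis=-1)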
Example #27
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim

        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
Example #28
    def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        # routing logits; shape = [None, num_capsule, input_num_capsule]
        b = K.zeros_like(u_hat_vecs[:, :, :, 0])
        for i in range(self.routings):
            # softmax over the capsule axis, then restore the original layout
            b = K.permute_dimensions(b, (0, 2, 1))  # [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs
Example #29
def hdnn_(input_shape=(140, 140, 3),
          input_tensor=None,
          weights=None,
          activation_fn='tanh',
          init='glorot_uniform',
          l1=0.01,
          l2=0.01,
          dropout=0.5):
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = conv2d_block(input_tensor=img_input,
                     nb_filter=84,
                     kernel_size=(7, 7),
                     stage=1,
                     block=1,
                     activation_fn=activation_fn,
                     stride=(1, 1),
                     init=init,
                     l1=l1,
                     l2=l2)
    x = conv2d_block(input_tensor=x,
                     nb_filter=84,
                     kernel_size=(4, 4),
                     stage=2,
                     block=1,
                     activation_fn=activation_fn,
                     stride=(1, 1),
                     init=init,
                     l1=l1,
                     l2=l2)
    x = conv2d_block(input_tensor=x,
                     nb_filter=54,
                     kernel_size=(4, 4),
                     stage=3,
                     block=1,
                     activation_fn=activation_fn,
                     pool_size=(3, 3),
                     stride=(1, 1),
                     init=init,
                     l1=l1,
                     l2=l2)

    # Hybrid block parameters
    hybrid_block_params = dict(n_blocks=3,
                               input_tensor=x,
                               input_shape=(14, 14, 54),
                               nb_filters=[54, 20, 10],
                               kernel_sizes=[(4, 4), (4, 4), (6, 6)],
                               stage=3,
                               activation_fns=activation_fn,
                               strides=(2, 2),
                               pool_sizes=[(2, 2), (2, 2), (1, 1)])

    # x = hybrid_conv2d_block(**hybrid_block_params)
    #
    # x = Flatten()(x)
    # x = Dense(1024, activation=activation_fn)(x)
    # x = Dense(1024, activation=activation_fn)(x)
    # x = Dense(1024, activation=activation_fn)(x)
    # x = Dense(output_dim=2, activation=activation_fn)(x)
    # x = Activation('softmax')(x)

    model = Model(input=img_input, output=x, name='hdnn')

    if weights is not None:
        # TODO Handling pre-trained model
        pass

    return model
Example #30
# to many more layers, this will keep our code
# readable
import os, sys
local_path = os.path.dirname(__file__)
root = os.path.join(local_path, '..', "..")
sys.path.append(root)

from keras import backend as K
import tensorflow as tf

config = tf.ConfigProto(intra_op_parallelism_threads=30,
                        inter_op_parallelism_threads=30,
                        allow_soft_placement=True,
                        device_count={'CPU': 30})
session = tf.Session(config=config)
K.set_session(session)

def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

class cnn(BaseClassifier):
    def __init__(self, batch_size=100, nb_epoch=256, num_filt_1=16, num_filt_2=14, num_fc_1=40, verbose=1):
        model = Sequential()
        self.classifier = model
        self.batch_size = batch_size
Example #31
def euclidean_distance_loss(y_true, y_pred):
    return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1))
Example #32
    # print cnn_model.summary()

    model_input = Input(shape=(1, input_length, word_embedding_length))

    conv1_output = cnn_model([model_input, model_input, model_input])

    full_connected_layers = Dense(output_dim=len(label_to_index), init="glorot_uniform", activation='relu')(conv1_output)

    dropout_layers = Dropout(p=0.5)(full_connected_layers)

    softmax_output = Activation("softmax")(dropout_layers)

    model = Model(input=[model_input], output=[softmax_output])
    model_output = K.function([model_input, K.learning_phase()],
                              [softmax_output])
    if verbose > 1:
        print model.summary()

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

    logging.debug('Starting training...')
    print 'Starting training...'

    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

    logging.debug('Starting training, epochs: %s' % (config['cnn_nb_epoch']))
    logging.debug('Starting training, EarlyStopping patience: %d' % (config['earlyStoping_patience']))

    early_stop = EarlyStopping(patience=config['earlyStoping_patience'], verbose=1)
    # print train_data_features.shape[2]
Example #33
def _time_distributed_dense(x,
                            w,
                            b=None,
                            dropout=None,
                            input_dim=None,
                            output_dim=None,
                            timesteps=None,
                            training=None):
    """Apply `y . w + b` for every temporal slice y of x.
    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.
    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.int_shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x
Example #34
    def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [
                K.in_train_phase(dropped_inputs, ones, training=training)
                for _ in range(4)
            ]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [
                K.in_train_phase(dropped_inputs, ones, training=training)
                for _ in range(4)
            ]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # append the input as well for use later
        constants.append(inputs)
        return constants
Example #35
def dot_product(x, kernel):
    """Backend-agnostic dot product between a 3D input and a 1D kernel.

    The TensorFlow backend cannot `K.dot` a (batch, timesteps, features)
    tensor with a (features,) kernel directly, so the kernel is expanded
    to (features, 1) and the trailing axis squeezed away afterwards.
    """
    if K.backend() == 'tensorflow':
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    else:
        return K.dot(x, kernel)