Example #1
        y.append(new_angle)
    return X, y

def generate_next_batch(batch_size=64):
    data = get_all_image_files_and_angle()
    n = len(data)
    while True:
        data = shuffle(data)
        X_batch, y_batch = read_images(data[:batch_size])
        yield np.array(X_batch), np.array(y_batch)
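A minimal usage sketch for this generator, assuming the model defined below is compiled first and using the Keras 1.x generator API seen throughout these examples; the sample count is a placeholder:

train_gen = generate_next_batch(batch_size=64)
model.fit_generator(train_gen,
                    samples_per_epoch=20000,  # placeholder; set to the dataset size
                    nb_epoch=5)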

# The model is based on:
# https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=RESIZED_IMAGE_DIM))

# Five convolutional layers
model.add(Convolution2D(24, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(36, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(48, 5, 5, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(MaxPooling2D())
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
Example #2
def get_siamese_model(input_shape):
    """
        Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
    """

    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional Neural Network
    model = Sequential()
    model.add(
        Conv2D(64, (10, 10),
               activation='relu',
               input_shape=input_shape,
               kernel_initializer=initialize_weights,
               kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(
        Conv2D(128, (7, 7),
               activation='relu',
               kernel_initializer=initialize_weights,
               bias_initializer=initialize_bias,
               kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(
        Conv2D(128, (4, 4),
               activation='relu',
               kernel_initializer=initialize_weights,
               bias_initializer=initialize_bias,
               kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(
        Conv2D(256, (4, 4),
               activation='relu',
               kernel_initializer=initialize_weights,
               bias_initializer=initialize_bias,
               kernel_regularizer=l2(2e-4)))
    model.add(Flatten())
    model.add(
        Dense(4096,
              activation='sigmoid',
              kernel_regularizer=l2(1e-3),
              kernel_initializer=initialize_weights,
              bias_initializer=initialize_bias))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(1,
                       activation='sigmoid',
                       bias_initializer=initialize_bias)(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)

    return siamese_net
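A hedged compile-and-train sketch for the returned network; the input shape follows the Omniglot setup in the referenced thesis, and the optimizer settings are assumptions:

from keras.optimizers import Adam

siamese_net = get_siamese_model((105, 105, 1))
siamese_net.compile(loss='binary_crossentropy',
                    optimizer=Adam(lr=0.00006),
                    metrics=['accuracy'])
# targets are 1 for same-class pairs, 0 for different-class pairs:
# siamese_net.fit([left_images, right_images], targets, batch_size=32, epochs=20)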
def simo_hybrid_uji(
        gpu_id: int,
        dataset: str,
        frac: float,
        validation_split: float,
        preprocessor: str,
        batch_size: int,
        epochs: int,
        optimizer: str,
        dropout: float,
        corruption_level: float,
        dae_hidden_layers: list,
        sdae_hidden_layers: list,
        cache: bool,
        common_hidden_layers: list,
        floor_hidden_layers: list,
        coordinates_hidden_layers: list,
        floor_weight: float,
        coordinates_weight: float,
        verbose: int
):
    """Multi-building and multi-floor indoor localization based on hybrid
    building/floor classification and coordinates regression using a
    single-input and multi-output (SIMO) deep neural network (DNN) model and
    UJIIndoorLoc datasets.

    Keyword arguments:
    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()            # based on current time or OS-specific randomness source
    rn.seed()                   #  "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'uji':
        from ujiindoorloc import UJIIndoorLoc
        uji = UJIIndoorLoc(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical')
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = uji.floor_height
    training_df = uji.training_df
    training_data = uji.training_data
    testing_df = uji.testing_df
    testing_data = uji.testing_data

    ### build and train a SIMO model
    print(
        "Building and training a SIMO model for hybrid classification and regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=dae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # common hidden layers
    # x = BatchNormalization()(x)
    # x = Activation('relu')(x)
    # x = Dropout(dropout)(x)
    # if common_hidden_layers != '':
    #     for units in common_hidden_layers:
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    # common_hl_output = x

    # floor classification output
    # if floor_hidden_layers != '':
    #     for units in floor_hidden_layers:
    #         x = Dense(units)(x)
    #         x = BatchNormalization()(x)
    #         x = Activation('relu')(x)
    #         x = Dropout(dropout)(x)
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation(
    #     'softmax', name='floor_output')(x)  # no dropout for an output layer
    #
    # x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    #
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    #
    # x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    # floor_output = Activation('softmax', name='floor_output')(x)
    # # coordinates regression output
    # x = common_hl_output
    # for units in coordinates_hidden_layers:
    #     x = Dense(units, kernel_initializer='normal')(x)
    #     x = BatchNormalization()(x)
    #     x = Activation('relu')(x)
    #     x = Dropout(dropout)(x)
    # x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)  # 'linear' activation
    # x = common_hl_output
    # x = Lambda(lambda x:K.expand_dims(x,axis=-1))(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    #
    # x = MaxPooling1D(pool_size=5)(x)
    # x = Conv1D(filters=99, kernel_size=12, activation='relu')(x)
    # x = Dropout(dropout)(x)
    # x = Flatten()(x)
    # x = Dense(coord.shape[1],kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    # coordinates_output = Activation(
    #     'linear', name='coordinates_output')(x)



    # 1D_CNN by John

    x = Lambda(lambda x:K.expand_dims(x,axis=-1))(x)


    x = Conv1D(filters=99, kernel_size=22, activation='relu')(x)
    x = Dropout(dropout)(x)
    # x = Conv1D(filters=128, kernel_size=10, activation='relu')(x)

    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=66, kernel_size=22, activation='relu')(x)

    # x = MaxPooling1D(pool_size=2)(x)

    x = Conv1D(filters=33, kernel_size=22, activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Flatten()(x)
    # end of 1D CNN by John
    n = x  # keep the shared CNN output for the coordinates branch
    x = Dense(labels.floor.shape[1])(x)
    # x = BatchNormalization()(x)
    floor_output = Activation('softmax', name='floor_output')(x)

    common_hl_output = n
    x = common_hl_output
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    # x = BatchNormalization()(x)
    coordinates_output = Activation(
        'linear', name='coordinates_output')(x)
    #1DCNN by John
    model = Model(
        inputs=input,
        outputs=[
            floor_output,
            coordinates_output
        ])
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'mean_squared_error'
        ],
        loss_weights={
            'floor_output': floor_weight,
            'coordinates_output': coordinates_weight
        },
        metrics={
            'floor_output': 'accuracy',
            'coordinates_output': 'mean_squared_error'
        })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)

    print("- Training a hybrid floor classifier and coordinates regressor ...", end='')
    startTime = timer()
    history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'coordinates_output': coord
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates

    # calculate the classification accuracies and localization errors
    flrs_pred, coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(
        np.argmax(flrs, axis=1), np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()
    coord_est = coord_scaler.inverse_transform(coords_scaled_pred)  # inverse-scaling

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2)*np.square(flr_diff)
    dist_3d = np.sqrt(np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', ['flr_acc',
                                                             'mean_error_2d',
                                                             'median_error_2d',
                                                             'mean_error_3d',
                                                             'median_error_3d',
                                                             'elapsedTime'])
    return LocalizationResults(flr_acc=flr_acc, mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
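A hypothetical invocation of simo_hybrid_uji; every argument value below is an assumption chosen only to illustrate the expected types (the DAE/SDAE branches are disabled by passing ''):

results = simo_hybrid_uji(
    gpu_id=0, dataset='uji', frac=1.0, validation_split=0.2,
    preprocessor='standard_scaler', batch_size=64, epochs=100,
    optimizer='adam', dropout=0.2, corruption_level=0.1,
    dae_hidden_layers='', sdae_hidden_layers='', cache=True,
    common_hidden_layers='', floor_hidden_layers='',
    coordinates_hidden_layers='', floor_weight=1.0,
    coordinates_weight=1.0, verbose=1)
print(results.flr_acc, results.mean_error_2d, results.mean_error_3d)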
# Define IoU metric
def mean_iou(y_true, y_pred):
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
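This metric can be passed straight to compile; a sketch, assuming the U-Net below is eventually wrapped into a Model (its decoder half is truncated here) and that the loss/optimizer choices are placeholders:

# model = Model(inputs=[inputs], outputs=[outputs])
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mean_iou])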


# Build U-Net model
inputs = Input((im_height, im_width, im_chan))
s = Lambda(lambda x: x / 255)(inputs)

c1 = Conv2D(8, (3, 3), activation='relu', padding='same')(s)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)

c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
Example #5
input1c = Input(shape=(maxlen2, ))
input2c = Input(shape=(maxlen2, ))
embed1c = Embedding(charnum, embedsize)
# lstm1c = Bidirectional(CuDNNLSTM(6))
lstm1c = Bidirectional(LSTM(6))
att1c = Attention(10)
v1c = embed1c(input1c)  # char-level embedding; embed1c was otherwise unused
v2c = embed1c(input2c)
v11c = lstm1c(v1c)
v22c = lstm1c(v2c)
v1c = Concatenate(axis=1)([att1c(v1c), v11c])
v2c = Concatenate(axis=1)([att1c(v2c), v22c])

mul = Multiply()([v1, v2])
sub = Lambda(lambda x: K.abs(x))(Subtract()([v1, v2]))
maximum = Maximum()([Multiply()([v1, v1]), Multiply()([v2, v2])])
mulc = Multiply()([v1c, v2c])
subc = Lambda(lambda x: K.abs(x))(Subtract()([v1c, v2c]))
maximumc = Maximum()([Multiply()([v1c, v1c]), Multiply()([v2c, v2c])])
sub2 = Lambda(lambda x: K.abs(x))(Subtract()([v1ls, v2ls]))
matchlist = Concatenate(axis=1)(
    [mul, sub, mulc, subc, maximum, maximumc, sub2])
matchlist = Dropout(0.05)(matchlist)

matchlist = Concatenate(axis=1)([
    Dense(32, activation='relu')(matchlist),
    Dense(48, activation='sigmoid')(matchlist)
])
res = Dense(1, activation='sigmoid')(matchlist)
Example #6
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        generator.save(model_dir + 'generator.h5')
        discriminator.save(model_dir + 'discriminator.h5')

    # train the triple network
    single_triple_Model = build_single_triple(vgg_feature_shape,
                                              code_length=code_length)

    sat_tensor = Input(shape=(pix_gan_input_shape, ))
    grd_tensor = Input(shape=(pix_gan_input_shape, ))
    sat_output = single_triple_Model(sat_tensor)
    grd_output = single_triple_Model(grd_tensor)

    output_siams = Lambda(siams_distance,
                          output_shape=siams_dist_output_shape)(
                              [sat_output, grd_output])
    triple_Model = Model(inputs=[sat_tensor, grd_tensor],
                         outputs=[output_siams])
    triple_Model.compile(loss=siams_loss,
                         loss_weights=[1],
                         optimizer=Adam(lr=0.0001))

    train_grd_gan_feature = generator.predict(train_grd)
    train_sat_gan_feature = train_sat
    test_grd_gan_feature = generator.predict(test_grd)
    test_sat_gan_feature = test_sat

    # triple network learning starts here
    for triple_epoch in range(triple_epochs):
        print('triple network Epoch {} of {}'.format(triple_epoch + 1,
def create_model():
    myInput = Input(shape=(96, 96, 3))

    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(LRN2D, name='lrn_1')(x)
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(LRN2D, name='lrn_2')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)

    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

    inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)

    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

    inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3a)
    inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

    inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)

    # Inception3c
    inception_3c_3x3 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_3x3',
                                       cv1_out=128,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))

    inception_3c_5x5 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)

    inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

    #inception 4a
    inception_4a_3x3 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=192,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_4a_5x5 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(1, 1),
                                       padding=(2, 2))

    inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3c)
    inception_4a_pool = utils.conv2d_bn(inception_4a_pool,
                                        layer='inception_4a_pool',
                                        cv1_out=128,
                                        cv1_filter=(1, 1),
                                        padding=(2, 2))
    inception_4a_1x1 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_4a = concatenate([inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3)

    #inception4e
    inception_4e_3x3 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_3x3',
                                       cv1_out=160,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))
    inception_4e_5x5 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_5x5',
                                       cv1_out=64,
                                       cv1_filter=(1, 1),
                                       cv2_out=128,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))
    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)

    inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

    #inception5a
    inception_5a_3x3 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))

    inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4e)
    inception_5a_pool = utils.conv2d_bn(inception_5a_pool,
                                        layer='inception_5a_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1),
                                        padding=(1, 1))
    inception_5a_1x1 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))

    inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

    #inception_5b
    inception_5b_3x3 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
    inception_5b_pool = utils.conv2d_bn(inception_5b_pool,
                                        layer='inception_5b_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1))
    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda  x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)

    return Model(inputs=[myInput], outputs=norm_layer)
    return sklearn.utils.shuffle(X_train, y_train)


# compile and train the model using the mygenerator function
train_x, train_y = mygenerator(train_samples, batch_size=32)
valid_x, valid_y = mygenerator(validation_samples, batch_size=32)

# Model Architecture
model = Sequential()
model.add(
    Cropping2D(cropping=((50, 20), (0, 0)),
               data_format='channels_last',
               input_shape=(160, 320, 3)))
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x / 127.5 - 1.))
model.add(
    Conv2D(12, (5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(
    Conv2D(24, (5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dropout(0.5, noise_shape=None, seed=None))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')

model.fit(train_x,
          train_y,
          batch_size=100,
Example #9
    def _init_make_dataparallel(self, gdev_list, *args, **kwargs):
        '''Uses data-parallelism to convert a serial model to multi-gpu. Refer
        to make_parallel doc.
        '''
        gpucopy_ops = []

        def slice_batch(x, ngpus, part, dev):
            '''Divide the input batch into [ngpus] slices, and obtain slice
            no. [part]. E.g., if len(x) == 10 and ngpus == 2, then part == 1
            yields x[5:].
            '''
            sh = KB.shape(x)
            L = sh[0] // ngpus
            if part == ngpus - 1:
                xslice = x[part * L:]
            else:
                xslice = x[part * L:(part + 1) * L]

            # tf.split fails if batch size is not divisible by ngpus. Error:
            #     InvalidArgumentError (see above for traceback): Number of
            #         ways to split should evenly divide the split dimension
            # xslice = tf.split(x, ngpus)[part]

            if not self._enqueue:
                return xslice

            # Did not see any benefit.
            with tf.device(dev):
                # if self._stager is None:
                stager = data_flow_ops.StagingArea(
                    dtypes=[xslice.dtype], shapes=[xslice.shape])
                stage = stager.put([xslice])
                gpucopy_ops.append(stage)
                # xslice_stage = stager.get()
            return stager.get()

        ngpus = len(gdev_list)
        if ngpus < 2:
            raise RuntimeError('Number of gpus < 2. Require two or more GPUs '
                               'for multi-gpu model parallelization.')

        model = self._smodel
        noutputs = len(self._smodel.outputs)
        global_scope = tf.get_variable_scope()
        towers = [[] for _ in range(noutputs)]
        for idev, dev in enumerate(gdev_list):
            # TODO: The last slice could cause a gradient calculation outlier
            # when averaging gradients. Maybe insure ahead of time that the
            # batch_size is evenly divisible by number of GPUs, or maybe don't
            # use the last slice.
            with tf.device(self._ps_device):
                slices = []  # multi-input case
                for ix, x in enumerate(model.inputs):
                    slice_g = Lambda(
                        slice_batch,  # lambda shape: shape,
                        # lambda shape: x.shape.as_list(),
                        name='stage_cpuSliceIn{}_Dev{}'.format(ix, idev),
                        arguments={'ngpus': ngpus, 'part': idev,
                                   'dev': dev})(x)
                    slices.append(slice_g)
                    # print('SLICE_G: {}'.format(slice_g))  # DEBUG
                # print('SLICES: {}'.format(slices))  # DEBUG

            # with tf.variable_scope('GPU_%i' % idev), \
            # tf.variable_scope(global_scope, reuse=idev > 0), \
            # tf.variable_scope('GPU_{}'.format(idev),
            #                   reuse=idev > 0) as var_scope, \
            with tf.device(dev), \
                    tf.variable_scope(global_scope, reuse=idev > 0), \
                    tf.name_scope('tower_%i' % idev):
                # NOTE: Currently not using model_creator. Did not observe
                #     any benefit in such an implementation.
                # Instantiate model under device context. More complicated.
                # Need to use optimizer synchronization in this scenario.
                # model_ = model_creator()
                # If using NCCL without re-instantiating the model then must
                # set the colocate_gradients_with_ops to False in optimizer.
                # if idev == 0:
                #     # SET STATE: Instance of serial model for checkpointing
                #     self._smodel = model_  # for ability to checkpoint

                # Handle multi-output case
                modeltower = model(slices)
                if not isinstance(modeltower, list):
                    modeltower = [modeltower]

                for imt, mt in enumerate(modeltower):
                    towers[imt].append(mt)
                    params = mt.graph._collections['trainable_variables']

                # params = model_.trainable_weights
                # params = tf.get_collection(
                #     tf.GraphKeys.TRAINABLE_VARIABLES, scope=var_scope.name)
                # params = modeltower.graph._collections['trainable_variables']
                # print('PARAMS: {}'.format(params))  # DEBUG

                self._tower_params.append(params)

        with tf.device(self._ps_device):
            # merged = Concatenate(axis=0)(towers)
            merged = [Concatenate(axis=0)(tw) for tw in towers]

        # self._enqueue_ops.append(tf.group(*gpucopy_ops))
        self._enqueue_ops += gpucopy_ops

        kwargs['inputs'] = model.inputs
        kwargs['outputs'] = merged
        super(ModelMGPU, self).__init__(*args, **kwargs)
Example #10
def compute_accuracy(preds, labels):
    # mean true label among pairs whose predicted distance is below 0.5,
    # i.e. the precision of the 'same' predictions
    return labels[preds.ravel() < 0.5].mean()


input_shape = (224, 224, 3)
#input_shape = (320, 243, 3)
base_network = create_base_network(input_shape)

image_left = Input(shape=input_shape)
image_right = Input(shape=input_shape)

vector_left = base_network(image_left)
vector_right = base_network(image_right)

distance = Lambda(cosine_distance, output_shape=cosine_distance_output_shape)(
    [vector_left, vector_right])

# fc1 = Dense(512, kernel_initializer="glorot_uniform")(distance)
# fc1 = Dropout(0.2)(fc1)
# fc1 = Activation("relu")(fc1)

fc1 = Dense(128, kernel_initializer="glorot_uniform")(distance)
fc1 = Dropout(0.2)(fc1)
fc1 = Activation("relu")(fc1)

pred = Dense(2, kernel_initializer="glorot_uniform")(fc1)
pred = Activation("softmax")(pred)

model = Model(inputs=[image_left, image_right], outputs=pred)
# model.summary()
Example #11
doc_conv = Convolution1D(K,
                         FILTER_LENGTH,
                         border_mode="same",
                         input_shape=(None, WORD_DEPTH),
                         activation="tanh")


def kmax(masked_data):
    result = masked_data[
        T.arange(masked_data.shape[0]).dimshuffle(0, "x", "x"),
        T.sort(T.argsort(masked_data, axis=1)[:, -pooling_size:, :], axis=1),
        T.arange(masked_data.shape[2]).dimshuffle("x", "x", 0)]
    return result


doc_kmax = Lambda(kmax, output_shape=(pooling_size, K))
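For intuition, the Lambda above performs k-max pooling: it keeps the pooling_size largest activations per feature map while preserving their original temporal order. A hedged NumPy check of the same indexing (the model itself uses the Theano version above):

import numpy as np
x = np.array([[[5.], [3.], [8.], [1.], [2.]]])           # (batch, time, depth)
idx = np.sort(np.argsort(x, axis=1)[:, -2:, :], axis=1)  # top-2 per map, in order
print(np.take_along_axis(x, idx, axis=1).ravel())        # -> [5. 8.]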

# query_out = theano.function([query], query)
# query_conv_out = theano.function([query], query_conv)
# query_kmax_out = theano.function([masked_data], result)

doc_conv2 = Convolution1D(K2,
                          FILTER_LENGTH,
                          border_mode="same",
                          input_shape=(None, pooling_size),
                          activation="tanh")
doc_max = Lambda(lambda x: x.max(axis=1), output_shape=(K2, ))

# result_out2([[[2,1,4,3,4],[5,3,8,1,2], [2,1,4,3,4],[5,3,8,1,2], [5,3,8,1,2]]])
doc_sem = Dense(L, activation="tanh", input_dim=K2)
Example #12
## 2. Data Preprocessing functions
def resize_comma(image):
    import tensorflow as tf  # This import is required here otherwise the model cannot be loaded in drive.py
    return tf.image.resize_images(image, [40, 160])


## 3. Model (data preprocessing incorporated into model)
# Model adapted from Comma.ai model
model = Sequential()
# Crop 70 pixels from the top of the image and 25 from the bottom
model.add(Cropping2D(cropping=((70, 25), (0, 0)),
                     dim_ordering='tf', # default
                     input_shape=(240, 320, 3)))
# Resize the data
model.add(Lambda(resize_comma))
# Normalise the data
model.add(Lambda(lambda x: (x/255.0) - 0.5))
# Conv layer 1
model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
model.add(ELU())
# Conv layer 2
model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(ELU())
# Conv layer 3
model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(Flatten())
model.add(Dropout(.2))
model.add(ELU())
# Fully connected layer 1
model.add(Dense(512))
Example #13
# Reading data, different dataset for training and validation

lines = read_data(data_dir)
train_samples, validation_samples = train_test_split(lines, test_size=0.2)

# Using generators to be able to train on a big dataset

train_generator = generator(data_dir, train_samples, batch_size=48)
validation_generator = generator(data_dir, validation_samples, batch_size=48)

# Model definition follows the NVIDIA architecture, but with a wider fully connected part and dropout

model = Sequential()

model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: x / 255.0 - 0.5))
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
model.add(Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))
model.add(Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dense(1))

model.compile(loss="mse", optimizer="adam")
print('Test image set size: {}'.format(X_test.shape))

y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)

# SEGMENTATION
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

NUM_SAMPLES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS = t1_karray.shape

# input layer
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))

S = Lambda(lambda x: x)(inputs)  # identity Lambda; placeholder where input scaling would go

c1 = Conv2D(16, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(S)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(16, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

c2 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
def CTC(name, args):
    return Lambda(ctc_lambda_func, output_shape=(1, ), name=name)(args)
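ctc_lambda_func itself is not defined in this snippet; the definition commonly paired with this wrapper (an assumption here, following the standard Keras OCR example) simply delegates to K.ctc_batch_cost:

def ctc_lambda_func(args):
    # assumed argument ordering: (y_pred, labels, input_length, label_length)
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)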
Example #16
    def BuildInception(self):
        """
        Main function for building inceptionV2 nn
        :return: An inceptionV2 nn
        """
        # Define the input as a tensor with shape input_shape
        X_input = Input(self.input_shape)

        # First Block
        X = Conv2D(self.init_filters,
                   self.init_kernel,
                   strides=self.init_strides,
                   name='conv1',
                   padding='same')(X_input)
        X = BatchNormalization(axis=self.channel_axis, name='bn1')(X)
        X = Activation('relu')(X)

        # MAXPOOL
        if self.init_maxpooling:
            X = MaxPooling2D((3, 3), strides=2, padding='same')(X)

        # Second Block
        X = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv2')(X)
        X = BatchNormalization(axis=self.channel_axis,
                               epsilon=0.00001,
                               name='bn2')(X)
        X = Activation('relu')(X)

        # Third Block
        X = Conv2D(192, (3, 3), strides=(1, 1), padding='same',
                   name='conv3')(X)
        X = BatchNormalization(axis=self.channel_axis,
                               epsilon=0.00001,
                               name='bn3')(X)
        X = Activation('relu')(X)

        # MAXPOOL
        X = MaxPooling2D(pool_size=3, strides=2, padding='same')(X)

        # inception block: 1(a/b/c)
        X = self._inception_block(_1x1=64,
                                  _3x3r=96,
                                  _3x3=128,
                                  _5x5r=16,
                                  _5x5=32,
                                  _pool_conv=32,
                                  pooling='max',
                                  pool_stride=(2, 2),
                                  name='inception_3a')(X)
        X = self._inception_block(_1x1=64,
                                  _3x3r=96,
                                  _3x3=128,
                                  _5x5r=32,
                                  _5x5=64,
                                  _pool_conv=64,
                                  pooling='avg',
                                  pool_stride=(3, 3),
                                  name='inception_3b')(X)
        X = self._inception_block(_1x1=0,
                                  _3x3r=128,
                                  _3x3=256,
                                  _5x5r=32,
                                  _5x5=64,
                                  _pool_conv=0,
                                  pooling='max',
                                  strides=(2, 2),
                                  pool_stride=(2, 2),
                                  name='inception_3c')(X)

        # inception block: 2(a/e)
        X = self._inception_block(_1x1=256,
                                  _3x3r=96,
                                  _3x3=192,
                                  _5x5r=32,
                                  _5x5=64,
                                  _pool_conv=128,
                                  pooling='avg',
                                  pool_stride=(3, 3),
                                  name='inception_4a')(X)
        X = self._inception_block(_1x1=0,
                                  _3x3r=160,
                                  _3x3=256,
                                  _5x5r=64,
                                  _5x5=128,
                                  _pool_conv=0,
                                  pooling='max',
                                  strides=(2, 2),
                                  pool_stride=(2, 2),
                                  name='inception_4e')(X)

        # inception block: 3(a/b)
        X = self._inception_block(_1x1=256,
                                  _3x3r=96,
                                  _3x3=384,
                                  _5x5r=0,
                                  _5x5=0,
                                  _pool_conv=96,
                                  pooling='avg',
                                  pool_stride=(3, 3),
                                  name='inception_5a')(X)
        X = self._inception_block(_1x1=256,
                                  _3x3r=96,
                                  _3x3=384,
                                  _5x5r=0,
                                  _5x5=0,
                                  _pool_conv=96,
                                  pooling='max',
                                  pool_stride=(2, 2),
                                  name='inception_5b')(X)

        # Top Layer
        X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(X)
        X = Flatten()(X)
        X = Dense(128, name='dense_layer')(X)

        # L2 normalization
        X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

        # Create Model Instance
        model = Model(inputs=X_input, outputs=X, name='FaceNet')

        return model
Example #17
batch_size = 32

train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)


from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Lambda, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import Cropping2D

model = Sequential()
# Normalize
model.add(Lambda(lambda x: x/255-0.5, input_shape=(160,320,3)))
# Cut unnecessary parts of the images
model.add(Cropping2D(cropping=((70,25),(0,0))))
# Create five convolution layers
model.add(Conv2D(24,5,5, subsample=(2,2), activation='relu'))
model.add(Dropout(0.1))
model.add(Conv2D(36, 5,5, subsample=(2,2), activation='relu'))
model.add(Dropout(0.1))       
model.add(Conv2D(48,5,5, subsample=(2,2), activation='relu'))
model.add(Dropout(0.1))
model.add(Conv2D(64,3,3, activation='relu'))    
model.add(Conv2D(64,3,3, activation='relu'))
# Flatten the images
model.add(Flatten())
# Create four fully connected layers
model.add(Dense(100))   
Example #18
def InceptionModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)

    # Third Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
Example #19
def build_generator():
    # Image input
    gf = 64
    channels = 1

    def up_dim(vecs):
        img_A = vecs
        img_A = K.expand_dims(img_A, axis=1)
        img_A = K.expand_dims(img_A, axis=3)
        return img_A

    def down_dim(vec):
        img_A = vec
        img_A = K.reshape(img_A, [-1, 4096])
        return img_A

    def conv2d(layer_input, filters, f_size=(1, 2), bn=True):
        """Layers used during downsampling"""
        d = Conv2D(filters, kernel_size=f_size, strides=2,
                   padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    def deconv2d(layer_input,
                 skip_input,
                 filters,
                 f_size=(2, 1),
                 dropout_rate=0):
        """Layers used during upsampling"""
        u = UpSampling2D(size=2)(layer_input)
        u = Conv2D(filters,
                   kernel_size=f_size,
                   strides=1,
                   padding='valid',
                   activation='relu')(u)
        if dropout_rate:
            u = Dropout(dropout_rate)(u)
        u = BatchNormalization(momentum=0.8)(u)
        u = Concatenate()([u, skip_input])
        return u

    # Image input
    d0 = Input(shape=(vgg_feature_shape, ))
    d0_ = Lambda(up_dim)(d0)

    # Downsampling
    d1 = conv2d(d0_, gf, bn=False)
    d2 = conv2d(d1, gf * 2)
    d3 = conv2d(d2, gf * 4)
    d4 = conv2d(d3, gf * 8)
    d5 = conv2d(d4, gf * 8)
    d6 = conv2d(d5, gf * 8)
    d7 = conv2d(d6, gf * 8)
    d8 = conv2d(d7, gf * 8)

    # Upsampling
    u0 = deconv2d(d8, d7, gf * 8)
    u1 = deconv2d(u0, d6, gf * 8)
    u2 = deconv2d(u1, d5, gf * 8)
    u3 = deconv2d(u2, d4, gf * 8)
    u4 = deconv2d(u3, d3, gf * 4)
    u5 = deconv2d(u4, d2, gf * 2)
    u6 = deconv2d(u5, d1, gf)
    # u7 = deconv2d(u6, d1, gf)

    u8 = UpSampling2D(size=2)(u6)
    output_img = Conv2D(channels,
                        kernel_size=(2, 1),
                        strides=1,
                        padding='valid',
                        activation='tanh')(u8)
    #output_img = Flatten(name='flatten')(output_img)
    # output_img = tf.contrib.layers.flatten(output_img)
    output_img = Lambda(down_dim)(output_img)

    model = Model(d0, output_img)
    return model
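A quick instantiation sketch; vgg_feature_shape is defined outside this snippet, so the value below is an assumption (it matches the reshape to [-1, 4096] in down_dim):

vgg_feature_shape = 4096  # assumed
generator = build_generator()
generator.summary()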
Example #20
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test


X_train, Y_train, X_test, Y_test = load_mnist()

model = Sequential()
model.add(Dense(512, input_shape=(784, )))
model.add(Activation('relu'))
#model.add(Dropout(0.2))
# K.dropout inside a Lambda stays active at inference time (Monte Carlo dropout),
# unlike the Dropout layer, which is disabled during prediction
model.add(Lambda(lambda x: K.dropout(x, level=0.2)))
model.add(Dense(512))
model.add(Activation('relu'))
#model.add(Dropout(0.2))
model.add(Lambda(lambda x: K.dropout(x, level=0.2)))
model.add(Dense(10))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)

model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          show_accuracy=True,
Example #21
y = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])

# the embedding layer is shared by 'scan_input' and 'feature_input'
embedding_layer = Embedding(output_dim=embedding_size,
                            input_dim=feature_size + 1,
                            input_length=pre_len)

data = Input(shape=(pre_len,), name='scan_input')
x = embedding_layer(data)

feature_input = Input(shape=(feature_len,), name='feature_input')
temp_out = embedding_layer(feature_input)

# use Keras's Lambda layer here; applying K.mean() directly to the tensor raises
# an error like "AttributeError: 'Tensor' object has no attribute '_keras_history'"
mean_temp = Lambda(lambda x: K.mean(x, axis=1))(temp_out)
initial_state = Dense(units)(mean_temp)

# first dense to get units, then average the result
# feature_output = TimeDistributed(Dense(units))(temp_out)
# initial_state = Lambda(lambda x: K.mean(x, axis=1))(feature_output)

# for GRU unit, there is only one state
# lstm_output = GRU(units=units, return_sequences=False)(x, initial_state=initial_state)

# for LSTM unit, there are two states, one is hidden state and the other is cell state
lstm_output = LSTM(units=units, return_sequences=False)(x, initial_state=[initial_state, initial_state])

lstm_output = Dropout(0.1)(lstm_output)

output = Dense(feature_size, activation='softmax', name='scan_output')(lstm_output)
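A sketch of closing this graph into a trainable model; the compile settings are assumptions:

model = Model(inputs=[data, feature_input], outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy')
# model.fit([scan_data, feature_data], y, epochs=10)  # hypothetical input arrays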
Example #22
def Co_attention(x):

    h_level = x[0]
    V = x[1]
    k_dim = 512

    C = Lambda(Compute_c)([h_level, V])
    Vt = Lambda(Transpose)(V)
    Wv_v = Dense(k_dim, activation='tanh')(Vt)
    Wv_v = Lambda(Transpose)(Wv_v)
    print("Wv_v", Wv_v.shape)
    Wq_Q = Dense(k_dim, activation='tanh')(h_level)
    Wq_Q = Lambda(Transpose)(Wq_Q)
    print("Wq_Q", Wq_Q.shape)
    Hv = Lambda(Compute_Hv)([Wq_Q, C, Wv_v])
    Hvt = Lambda(Transpose)(Hv)
    av = Dense(1, activation='tanh')(Hvt)
    av = Lambda(Transpose)(av)
    av = Lambda(Soft_max)(av)
    print(av.shape)
    Ct = Lambda(Transpose)(C)
    Wv_V_Ct = Lambda(Compute_Wv_V_Ct)([Wv_v, Ct])
    Hq = Lambda(Compute_Hq)([Wq_Q, Wv_V_Ct])
    Hqt = Lambda(Transpose)(Hq)
    aq = Dense(1, activation='tanh')(Hqt)
    aq = Lambda(Transpose)(aq)
    aq = Lambda(Soft_max)(aq)

    v_att = Lambda(Compute_V_att)([av, Vt])
    q_att = Lambda(Compute_q_att)([aq, h_level])
    return q_att, v_att
Example #23
    def build(self, input):
        beeper = Lambda(self.call, output_shape=self.getOutputShape)
        output = beeper(input)
        self.model = Model(input=input, output=output)
def Regularize(layer,
               params,
               shared_layers=False,
               name='',
               apply_noise=True,
               apply_batch_normalization=True,
               apply_prelu=True,
               apply_dropout=True,
               apply_l1=True,
               apply_l2=True):
    """
    Apply the regularization specified in parameters to the layer
    :param layer: Layer to regularize
    :param params: Params specifying the regularizations to apply
    :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model.
    :param name: Name prepended to regularizer layer
    :param apply_noise: If False, noise won't be applied, independently of params
    :param apply_dropout: If False, dropout won't be applied, independently of params
    :param apply_prelu: If False, prelu won't be applied, independently of params
    :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params
    :param apply_l1: If False, l1 normalization won't be applied, independently of params
    :param apply_l2: If False, l2 normalization won't be applied, independently of params
    :return: Regularized layer
    """
    shared_layers_list = []

    if apply_noise and params.get('USE_NOISE', False):
        shared_layers_list.append(
            GaussianNoise(params.get('NOISE_AMOUNT', 0.01),
                          name=name + '_gaussian_noise'))

    if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION',
                                                False):
        if params.get('WEIGHT_DECAY'):
            l2_gamma_reg = l2(params['WEIGHT_DECAY'])
            l2_beta_reg = l2(params['WEIGHT_DECAY'])
        else:
            l2_gamma_reg = None
            l2_beta_reg = None

        bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0)

        shared_layers_list.append(
            BatchNormalization(mode=bn_mode,
                               gamma_regularizer=l2_gamma_reg,
                               beta_regularizer=l2_beta_reg,
                               name=name + '_batch_normalization'))

    if apply_prelu and params.get('USE_PRELU', False):
        shared_layers_list.append(PReLU(name=name + '_PReLU'))

    if apply_dropout and params.get('DROPOUT_P', 0) > 0:
        shared_layers_list.append(
            Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout'))

    if apply_l1 and params.get('USE_L1', False):
        shared_layers_list.append(Lambda(L1_norm, name=name + '_L1_norm'))

    if apply_l2 and params.get('USE_L2', False):
        shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm'))

    # Apply all the previously built shared layers
    for l in shared_layers_list:
        layer = l(layer)
    result = layer

    # Return result or shared layers too
    if shared_layers:
        return result, shared_layers_list
    return result
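A hedged usage sketch for Regularize; the params keys mirror those read inside the function, and the layer being regularized is hypothetical:

params = {'USE_NOISE': True, 'NOISE_AMOUNT': 0.01,
          'USE_BATCH_NORMALIZATION': True, 'BATCH_NORMALIZATION_MODE': 0,
          'USE_PRELU': False, 'DROPOUT_P': 0.5,
          'USE_L1': False, 'USE_L2': False}
x = Dense(256)(x)                      # hypothetical layer
x = Regularize(x, params, name='fc1')  # returns the regularized tensor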
Example #25

def slic(input_):
    return input_[:, 0]


###############################################################################
activation = 'relu'
info_number = PCAseries1.shape[1]
layer = PCAseries1.shape[1]
input1 = K.Input(shape=(x_test1.shape[1], ))  #line1 species1
input2 = K.Input(shape=(x_test2.shape[1], ))  #line1 species2
input3 = K.Input(shape=(x_test1.shape[1], ))  #line2 species1
input4 = K.Input(shape=(x_test2.shape[1], ))  #line2 species2

Data1 = Lambda(choose_info, arguments={'info_number': info_number})(input1)
Data2 = Lambda(choose_info, arguments={'info_number': info_number})(input2)
Data3 = Lambda(choose_info, arguments={'info_number': info_number})(input3)
Data4 = Lambda(choose_info, arguments={'info_number': info_number})(input4)
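
# choose_info and choose_index are used below but not defined in this excerpt;
# a plausible sketch, assuming each input row concatenates info_number PCA
# features with a one-hot sample index (an assumption, not the original code):
def choose_info(x, info_number):
    # assumed layout: the first info_number columns are the PCA features
    return x[:, :info_number]

def choose_index(x, info_number, x_samplenumber):
    # assumed layout: the next x_samplenumber columns are the one-hot index
    return x[:, info_number:info_number + x_samplenumber]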

Index1 = Lambda(choose_index,
                arguments={
                    'info_number': info_number,
                    'x_samplenumber': PCAseries1.shape[0]
                })(input1)
Index2 = Lambda(choose_index,
                arguments={
                    'info_number': info_number,
                    'x_samplenumber': PCAseries2.shape[0]
                })(input2)
Index3 = Lambda(choose_index,
Example #26
    sen1_max3 = MaxPool1D(pool_size=CONV_MAX_LAST,
                          strides=None,
                          padding='valid')(sen1_batch3)
    sen1_flatten = Flatten()(sen1_max3)
    lamb1 = Lambda(expand_dims, expand_dims_output_shape)(sen1_flatten)

    return lamb1
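
# expand_dims and expand_dims_output_shape are referenced but not defined in
# this excerpt; a minimal sketch, assuming they append a trailing channel axis:
from keras import backend as K

def expand_dims(x):
    # add a trailing axis: (batch, features) -> (batch, features, 1)
    return K.expand_dims(x, axis=-1)

def expand_dims_output_shape(input_shape):
    # Lambda output_shape helper; input_shape includes the batch dimension
    return input_shape + (1,)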


# assemble the model
input = Input(shape=(int(WINDOW_SIZE / TAU), TAU, len(SENSORS)))
win_input = Input(shape=(TAU, len(SENSORS)))

lambs = []
for i in range(len(SENSORS)):
    out = Lambda(lambda x, i=i: x[:, :, i])(win_input)  # default arg binds i per iteration
    lambs.append(ConvLayers(out))

merg = Concatenate(2)(lambs)

lamb = Lambda(expand_dims, expand_dims_output_shape)(merg)

sen_conv1 = Conv2D(CONV_NUM2,
                   kernel_size=(CONV_MERGE_LEN),
                   strides=(STRIDE_MERGE_LEN),
                   padding='valid',
                   activation='relu')(lamb)
sen_batch1 = BatchNormalization()(sen_conv1)
sen_conv2 = Conv2D(CONV_NUM2,
                   kernel_size=(CONV_MERGE_LEN2),
                   strides=(STRIDE_MERGE_LEN2),
Example #27
    def setup(self, X_train_shape):
        print('X_train shape', X_train_shape)

        # Input shape = (None,1,16,200)
        inputs = Input(shape=X_train_shape[1:])

        normal1 = BatchNormalization(axis=1, name='normal1')(inputs)
        conv1 = IntConv2D(16,
                          kernel_size=(X_train_shape[2], 5),
                          bits=self.bits,
                          padding='valid',
                          strides=(1, 2),
                          use_bias=False,
                          name='conv1')(normal1)
        relu1 = Activation('relu')(conv1)
        pool1 = MaxPooling2D(pool_size=(1, 2))(relu1)

        normal2 = BatchNormalization(axis=1, name='normal2')(pool1)

        conv2 = IntConv2D(32,
                          kernel_size=(1, 3),
                          bits=self.bits,
                          padding='valid',
                          strides=(1, 2),
                          use_bias=False,
                          name='conv2')(normal2)
        relu2 = Activation('relu')(conv2)
        pool2 = MaxPooling2D(pool_size=(1, 2))(relu2)

        normal3 = BatchNormalization(axis=1, name='normal3')(pool2)

        conv3 = IntConv2D(64,
                          kernel_size=(1, 3),
                          bits=self.bits,
                          padding='valid',
                          strides=(1, 1),
                          use_bias=False,
                          name='conv3')(normal3)
        relu3 = Activation('relu')(conv3)
        pool3 = MaxPooling2D(pool_size=(1, 2))(relu3)

        flat = Flatten()(pool3)

        drop1 = Dropout(0.5)(flat)

        dens1 = IntDense(128,
                         bits=self.bits,
                         use_bias=False,
                         activation='sigmoid',
                         name='dens1')(drop1)
        drop2 = Dropout(0.5)(dens1)

        dens2 = IntDense(self.nb_classes,
                         bits=self.bits,
                         use_bias=False,
                         name='dens2')(drop2)
        # option to include temperature in softmax
        temp = 1.0
        temperature = Lambda(lambda x: x / temp)(dens2)
        last = Activation('softmax')(temperature)

        self.model = Model(inputs=inputs, outputs=last)

        adam = Adam(lr=5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy'])
        #print (self.model.summary())
        return self
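
# The temperature Lambda above divides the logits by temp before the softmax;
# with temp = 1.0 it is a no-op, while temp > 1 flattens the distribution and
# temp < 1 sharpens it. A standalone NumPy illustration of the effect (not
# part of the original code):
import numpy as np

def softmax_with_temperature(logits, temp=1.0):
    z = np.asarray(logits, dtype=float) / temp
    z -= z.max()  # for numerical stability
    e = np.exp(z)
    return e / e.sum()

print(softmax_with_temperature([2.0, 1.0, 0.1], temp=1.0))  # sharper
print(softmax_with_temperature([2.0, 1.0, 0.1], temp=5.0))  # flatter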
    def get_unet(self):

        inputs = Input((self.n_pts, 3, 1))
        up_crop = Cropping2D(cropping=((0, 1858), (0, 0)))(inputs)
        up_shape = up_crop.shape
        up_crop = Lambda(lambda x: K.reverse(x, axes=1),
                         output_shape=(190, 3, 1))(up_crop)
        print "up_crop shape:", up_crop.shape
        down_crop = Cropping2D(cropping=((1858, 0), (0, 0)))(inputs)
        down_shape = down_crop.shape
        down_crop = Lambda(lambda x: K.reverse(x, axes=1),
                           output_shape=(190, 3, 1))(down_crop)
        print "down_crop shape:", down_crop.shape
        inputs_mirrored = merge([inputs, down_crop],
                                mode='concat',
                                concat_axis=1)
        print "inputs shape:", inputs_mirrored.shape
        inputs_mirrored = merge([up_crop, inputs_mirrored],
                                mode='concat',
                                concat_axis=1)
        print "inputs shape:", inputs_mirrored.shape

        conv1 = Conv2D(64, (3, 3),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(inputs_mirrored)
        print "conv1 shape:", conv1.shape
        conv1 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv1)
        conv1 = Activation('relu')(conv1)
        conv1 = Conv2D(64, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv1)
        print "conv1 shape:", conv1.shape
        conv1 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv1)
        conv1 = Activation('relu')(conv1)
        crop1 = Cropping2D(cropping=((184, 184), (0, 0)))(conv1)
        print "crop1 shape:", crop1.shape
        pool1 = AveragePooling2D(pool_size=(2, 1))(conv1)
        print "pool1 shape:", pool1.shape

        conv2 = Conv2D(128, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(pool1)
        print "conv2 shape:", conv2.shape
        conv2 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv2)
        conv2 = Activation('relu')(conv2)
        conv2 = Conv2D(128, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv2)
        print "conv2 shape:", conv2.shape
        conv2 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv2)
        conv2 = Activation('relu')(conv2)
        crop2 = Cropping2D(cropping=((88, 88), (0, 0)))(conv2)
        print "crop2 shape:", crop2.shape
        pool2 = MaxPooling2D(pool_size=(2, 1))(conv2)
        print "pool2 shape:", pool2.shape

        conv3 = Conv2D(256, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(pool2)
        print "conv3 shape:", conv3.shape
        conv3 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv3)
        conv3 = Activation('relu')(conv3)
        conv3 = Conv2D(256, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv3)
        print "conv3 shape:", conv3.shape
        conv3 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv3)
        conv3 = Activation('relu')(conv3)
        crop3 = Cropping2D(cropping=((40, 40), (0, 0)))(conv3)
        print "crop3 shape:", crop3.shape
        pool3 = MaxPooling2D(pool_size=(2, 1))(conv3)
        print "pool3 shape:", pool3.shape

        conv4 = Conv2D(512, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(pool3)
        print "conv4 shape:", conv4.shape
        conv4 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv4)
        conv4 = Activation('relu')(conv4)
        conv4 = Conv2D(512, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv4)
        print "conv4 shape:", conv4.shape
        conv4 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv4)
        conv4 = Activation('relu')(conv4)
        drop4 = Dropout(0.5)(conv4)
        crop4 = Cropping2D(cropping=((16, 16), (0, 0)))(drop4)
        print "crop4 shape:", crop4.shape
        pool4 = MaxPooling2D(pool_size=(2, 1))(drop4)
        print "pool4 shape:", pool4.shape

        conv5 = Conv2D(1024, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(pool4)
        print "conv5 shape:", conv5.shape
        conv5 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv5)
        conv5 = Activation('relu')(conv5)
        conv5 = Conv2D(1024, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv5)
        print "conv5 shape:", conv5.shape
        conv5 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv5)
        conv5 = Activation('relu')(conv5)
        drop5 = Dropout(0.5)(conv5)
        crop5 = Cropping2D(cropping=((4, 4), (0, 0)))(drop5)
        print "crop5 shape:", crop5.shape
        pool5 = MaxPooling2D(pool_size=(2, 1))(drop5)
        print "pool5 shape:", pool5.shape

        conv6 = Conv2D(2048, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(pool5)
        print "conv6 kerasshape:", conv6.shape
        conv6 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv6)
        conv6 = Activation('relu')(conv6)
        conv6 = Conv2D(2048, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv6)
        print "conv6 shape:", conv6.shape
        conv6 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv6)
        conv6 = Activation('relu')(conv6)
        # conv6 = Conv2D(2048, (3,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(conv6)
        # print "conv6 shape:",conv6.shape
        drop6 = Dropout(0.5)(conv6)

        # up7 = Conv2D(1024, (1,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,1))(drop6))
        up7 = Conv2DTranspose(1024, (2, 1), strides=(2, 1))(drop6)
        print "up7 shape:", up7.shape
        merge7 = merge([crop5, up7], mode='concat', concat_axis=3)
        print "merge7 shape:", merge7.shape
        conv7 = Conv2D(1024, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(merge7)
        print "conv7 shape:", conv7.shape
        conv7 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv7)
        conv7 = Activation('relu')(conv7)
        conv7 = Conv2D(1024, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv7)
        print "conv7 shape:", conv7.shape
        conv7 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv7)
        conv7 = Activation('relu')(conv7)

        # up8 = Conv2D(512, (1,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,1))(conv7))
        up8 = Conv2DTranspose(512, (2, 1), strides=(2, 1))(conv7)
        print "up8 shape:", up8.shape
        merge8 = merge([crop4, up8], mode='concat', concat_axis=3)
        print "merge8 shape:", merge8.shape
        conv8 = Conv2D(512, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(merge8)
        print "conv8 shape:", conv8.shape
        conv8 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv8)
        conv8 = Activation('relu')(conv8)
        conv8 = Conv2D(512, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv8)
        print "conv8 shape:", conv8.shape
        conv8 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv8)
        conv8 = Activation('relu')(conv8)

        # up9 = Conv2D(256, (1,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,1))(conv8))
        up9 = Conv2DTranspose(256, (2, 1), strides=(2, 1))(conv8)
        print "up9 shape:", up9.shape
        merge9 = merge([crop3, up9], mode='concat', concat_axis=3)
        print "merge9 shape:", merge9.shape
        conv9 = Conv2D(256, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(merge9)
        print "merge9 shape:", merge9.shape
        conv9 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv9)
        conv9 = Activation('relu')(conv9)
        conv9 = Conv2D(256, (3, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='glorot_normal')(conv9)
        print "merge9 shape:", merge9.shape
        conv9 = BatchNormalization(axis=-1, momentum=0.99,
                                   epsilon=0.001)(conv9)
        conv9 = Activation('relu')(conv9)

        # up10 = Conv2D(128, (1,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,1))(conv9))
        up10 = Conv2DTranspose(128, (2, 1), strides=(2, 1))(conv9)
        print "up10 shape:", up10.shape
        merge10 = merge([crop2, up10], mode='concat', concat_axis=3)
        print "merge10 shape:", merge10.shape
        conv10 = Conv2D(128, (3, 1),
                        activation='linear',
                        padding='valid',
                        kernel_initializer='glorot_normal')(merge10)
        print "conv10 shape:", conv10.shape
        conv10 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv10)
        conv10 = Activation('relu')(conv10)
        conv10 = Conv2D(128, (3, 1),
                        activation='linear',
                        padding='valid',
                        kernel_initializer='glorot_normal')(conv10)
        print "conv10 shape:", conv10.shape
        conv10 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv10)
        conv10 = Activation('relu')(conv10)

        # up11 = Conv2D(64, (1,1), activation = 'relu', padding = 'valid', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,1))(conv10))
        up11 = Conv2DTranspose(64, (2, 1), strides=(2, 1))(conv10)
        print "up11 shape:", up11.shape
        merge11 = merge([crop1, up11], mode='concat', concat_axis=3)
        print "merge11 shape:", merge11.shape
        conv11 = Conv2D(64, (3, 1),
                        activation='relu',
                        padding='valid',
                        kernel_initializer='glorot_normal')(merge11)
        print "conv11 shape:", conv11.shape
        conv11 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv11)
        conv11 = Activation('relu')(conv11)
        conv11 = Conv2D(32, (3, 1),
                        activation='relu',
                        padding='valid',
                        kernel_initializer='glorot_normal')(conv11)
        print "conv11 shape:", conv11.shape
        conv11 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv11)
        conv11 = Activation('relu')(conv11)
        conv11 = Conv2D(16, (3, 1),
                        activation='relu',
                        padding='valid',
                        kernel_initializer='glorot_normal')(conv11)
        print "conv11 shape:", conv11.shape
        conv11 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv11)
        conv11 = Activation('relu')(conv11)
        conv11 = Conv2D(self.num_parts, (3, 1),
                        padding='valid',
                        kernel_initializer='glorot_normal')(conv11)
        conv11 = BatchNormalization(axis=-1, momentum=0.99,
                                    epsilon=0.001)(conv11)
        print "conv11 shape:", conv11.shape
        conv11 = Reshape((2048, self.num_parts))(conv11)
        print "conv11 shape:", conv11.shape
        conv11 = Lambda(self.softmax_,
                        output_shape=(2048, self.num_parts))(conv11)
        print "conv11 shape:", conv11.shape
        # conv11 = up_crop = Lambda(lambda x: K.argmax(x,axis=2),output_shape=(2048,1))(conv11)

        model = Model(inputs=inputs, outputs=conv11)

        model.compile(optimizer=Adam(lr=1e-4, decay=0.0001),
                      loss=soft_dice_loss,
                      metrics=['accuracy'])
        model.summary()

        return model
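
# get_unet compiles against a soft_dice_loss that is not shown in this
# excerpt; a minimal Keras-backend sketch of a soft Dice loss, offered as an
# assumption rather than the repository's exact definition:
from keras import backend as K

def soft_dice_loss(y_true, y_pred, smooth=1.0):
    # soft Dice: 1 - 2|X n Y| / (|X| + |Y|), computed on probabilities
    axes = list(range(1, K.ndim(y_pred)))  # every axis except the batch axis
    intersection = K.sum(y_true * y_pred, axis=axes)
    denom = K.sum(y_true, axis=axes) + K.sum(y_pred, axis=axes)
    return 1.0 - K.mean((2.0 * intersection + smooth) / (denom + smooth))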
def unet_jon(inputShape=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH,
                         config.CHANNELS)):
    inputs = Input(inputShape)
    s = Lambda(lambda x: x / 255)(inputs)
    c1 = Conv2D(16, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(s)
    c1 = Dropout(0.1)(c1)
    c1 = Conv2D(16, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c1)
    p1 = MaxPooling2D((2, 2))(c1)

    c2 = Conv2D(32, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(p1)
    c2 = Dropout(0.1)(c2)
    c2 = Conv2D(32, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c2)
    p2 = MaxPooling2D((2, 2))(c2)

    c3 = Conv2D(64, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(p2)
    c3 = Dropout(0.2)(c3)
    c3 = Conv2D(64, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c3)
    p3 = MaxPooling2D((2, 2))(c3)

    c4 = Conv2D(128, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(p3)
    c4 = Dropout(0.2)(c4)
    c4 = Conv2D(128, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c4)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    c5 = Conv2D(256, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(p4)
    c5 = Dropout(0.3)(c5)
    c5 = Conv2D(256, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c5)

    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(128, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(u6)
    c6 = Dropout(0.2)(c6)
    c6 = Conv2D(128, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c6)

    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(64, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(u7)
    c7 = Dropout(0.2)(c7)
    c7 = Conv2D(64, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c7)

    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(32, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(u8)
    c8 = Dropout(0.1)(c8)
    c8 = Conv2D(32, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c8)

    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(u9)
    c9 = Dropout(0.1)(c9)
    c9 = Conv2D(16, (3, 3),
                activation='elu',
                kernel_initializer='he_normal',
                padding='same')(c9)

    c9 = Conv2D(2, (1, 1), activation='sigmoid')(c9)
    outputs = Reshape((-1, 2))(c9)
    outputs = Activation('softmax')(outputs)
    model = Model(inputs=[inputs], outputs=[outputs])
    return model
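
# unet_jon returns an uncompiled model whose output is a per-pixel two-way
# softmax over a flattened (H*W, 2) tensor. A hedged compile sketch; the loss
# and optimizer are assumptions, not the original training setup:
model = unet_jon()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the 2-way softmax
              metrics=['accuracy'])
# y_train must be one-hot and flattened to (batch, H*W, 2) to match
# the Reshape((-1, 2)) output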
Example #30
sen_conv3 = Conv1D(CONV_NUM2,
                   kernel_size=(CONV_MERGE_LEN3),
                   strides=(STRIDE_MERGE_LEN3),
                   padding='causal',
                   activation='relu')(sen_batch1)
sen_batch3 = BatchNormalization()(sen_conv3)
sen_max3 = MaxPooling1D(pool_size=MAX2_SIZE)(sen_batch3)
sen_flatten = Flatten()(sen_max3)
merged_model = Model(inputs=win_input, outputs=sen_flatten)
timed = TimeDistributed(merged_model)(input)
# convolutional structure complete
merged_model.summary()

grucell = GRU(INTER_DIM, return_sequences=True)(timed)
grucell2 = GRU(INTER_DIM, return_sequences=True)(grucell)
avg = Lambda(avg_layer)(grucell2)
if len(PERSONS) > 2:
    dens = Dense(len(PERSONS),
                 activation="softmax")(avg)  # softmax for categorical, sigmoid for binary
else:
    dens = Dense(1, activation="sigmoid")(avg)
model = Model(inputs=input, outputs=dens)
# full model
model.summary()
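
# avg_layer is not defined in this excerpt; a minimal sketch, assuming it
# averages the GRU's per-timestep outputs into a single vector:
from keras import backend as K

def avg_layer(x):
    # assumed: mean over the time axis of (batch, time, features)
    return K.mean(x, axis=1)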

if len(PERSONS) > 2:
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSprop',
                  metrics=['accuracy'])  # use binary_crossentropy when binary

else: