Example #1
# Decoder tail: fuse skip connections from the encoder and upsample
model = Add()([model, output2])
model = upsample(model, filters=24)
model = Add()([model, output1])
model = upsample(model, filters=16)
model = Add()([model, output0])
model = upsample(model, filters=12)

# 1x1 transposed convolution maps the features to per-pixel sigmoid scores
output = Conv2DTranspose(filters=OUTPUT_CHANNELS,
                         kernel_size=[1, 1],
                         activation='sigmoid',
                         padding='same')(model)

model = Model(inputs, output)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              loss=TverskyLoss(alpha=0.9835, smooth=2e4),
              metrics=['acc'])
model.load_weights('neuronalnet/saved_weights')

suns = tfds.load('sun_dataset')
PICTURE_SIZE = 224


@tf.autograph.experimental.do_not_convert
def load_images(datapoint):
    # Resize images bilinearly; use nearest-neighbour for the mask so
    # label values stay discrete.
    input_image = tf.image.resize(datapoint['image'],
                                  (PICTURE_SIZE, PICTURE_SIZE))
    input_mask = tf.image.resize(datapoint['segmentation_mask'],
                                 (PICTURE_SIZE, PICTURE_SIZE),
                                 method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return input_image, input_mask
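
Example #1 calls two helpers the snippet does not define: upsample and TverskyLoss. Below is a minimal sketch of plausible implementations, assuming a pix2pix-style transposed-convolution block for upsample and the standard Tversky index with beta = 1 - alpha for the loss; the original project's versions may differ.

import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Conv2DTranspose, ReLU


def upsample(x, filters):
    # Assumed helper: double the spatial resolution with a transposed
    # convolution, then batch-normalise and apply ReLU.
    x = Conv2DTranspose(filters, kernel_size=3, strides=2,
                        padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    return ReLU()(x)


class TverskyLoss(tf.keras.losses.Loss):
    # Assumed loss: 1 - Tversky index, where alpha weights false negatives
    # and (1 - alpha) weights false positives; smooth avoids division by zero.
    def __init__(self, alpha=0.5, smooth=1.0, name='tversky_loss'):
        super().__init__(name=name)
        self.alpha = alpha
        self.smooth = smooth

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, y_pred.dtype)
        tp = tf.reduce_sum(y_true * y_pred)
        fn = tf.reduce_sum(y_true * (1.0 - y_pred))
        fp = tf.reduce_sum((1.0 - y_true) * y_pred)
        tversky = (tp + self.smooth) / (tp + self.alpha * fn +
                                        (1.0 - self.alpha) * fp + self.smooth)
        return 1.0 - tversky

A large alpha, as in Example #1's alpha=0.9835, makes missed foreground pixels far more costly than false alarms, which suits segmentation targets that occupy a small fraction of the image.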
Example #2
# Final skip-connection fusion and upsampling step of the decoder
model = Add()([model, output0])
model = upsample(model, filters=12)

output = Conv2DTranspose(filters=OUTPUT_CHANNELS,
                         kernel_size=[1, 1],
                         activation='sigmoid',
                         padding='same')(model)

model = Model(inputs, output)
model.summary()

model.compile(optimizer='adam',
              loss=TverskyLoss(alpha=0.2, smooth=1e3),
              metrics=[
                  'acc',
                  FalseNegatives(),
                  FalsePositives(),
                  TruePositives(),
                  TrueNegatives()
              ])


def show_predictions(dataset=None, num=1):
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display([image[0], mask[0], pred_mask[0]])
    else:
        display([
            sample_image, sample_mask,
            model.predict(sample_image[tf.newaxis, ...])[0]
        ])
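
display, sample_image and sample_mask are defined elsewhere in the script. A minimal sketch of a display helper, assuming a matplotlib side-by-side layout in the style of the TensorFlow image-segmentation tutorial:

import matplotlib.pyplot as plt
import tensorflow as tf


def display(display_list):
    # Assumed helper: show input image, ground-truth mask and predicted
    # mask next to each other.
    plt.figure(figsize=(15, 5))
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    for i, item in enumerate(display_list):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(titles[i])
        plt.imshow(tf.keras.utils.array_to_img(item))
        plt.axis('off')
    plt.show()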
Example #3
import time

import numpy as np
from tensorflow.keras import Input, Model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import (Add, Conv1D, Dense, Flatten, LSTM,
                                     MaxPooling1D, ReLU, RepeatVector,
                                     TimeDistributed)


def build_model(data, config, output_length, X_test, input_length, index,
                repeats):
    print('Run:', index, '/', repeats)
    # start time
    start = time.time()
    print('Configuration:', config)
    # define parameters
    epochs, batch_size, n_nodes1, n_nodes2, filter1, filter2, kernel_size = config
    # convert data to supervised learning environment
    X_train = np.stack([
        sliding_window_input(data, input_length, i)
        for i in range(len(data) - input_length)
    ])[:-output_length]
    y_train = np.stack([
        sliding_window_output(data['Upper Stillwater'], input_length,
                              output_length, i)
        for i in range(len(data) - (input_length + output_length))
    ])
    y_train = y_train.reshape(y_train.shape[0], y_train.shape[1], 1)
    print('Feature Training Tensor (samples, timesteps, features):',
          X_train.shape)
    print('Target Training Tensor (samples, timesteps, features):',
          y_train.shape)
    # define parameters
    n_timesteps, n_features = X_train.shape[1], X_train.shape[2]
    n_outputs = y_train.shape[1]
    # define residual CNN-LSTM
    visible1 = Input(shape=(n_timesteps, n_features))

    model = Conv1D(filter1, kernel_size, padding='causal')(visible1)
    residual1 = ReLU()(model)
    model = Conv1D(filter1, kernel_size, padding='causal')(residual1)
    model = Add()([residual1, model])
    model = ReLU()(model)

    model = Conv1D(filter1, kernel_size, padding='causal')(model)
    residual2 = ReLU()(model)
    model = Conv1D(filter1, kernel_size, padding='causal')(residual2)
    model = Add()([residual2, model])
    model = ReLU()(model)
    model = MaxPooling1D()(model)

    model = Conv1D(filter2, kernel_size, padding='causal')(model)
    residual3 = ReLU()(model)
    model = Conv1D(filter2, kernel_size, padding='causal')(residual3)
    model = Add()([residual3, model])
    model = ReLU()(model)

    model = Conv1D(filter2, kernel_size, padding='causal')(model)
    residual4 = ReLU()(model)
    model = Conv1D(filter2, kernel_size, padding='causal')(residual4)
    model = Add()([residual4, model])
    model = ReLU()(model)
    model = MaxPooling1D()(model)

    model = Conv1D(n_nodes1, kernel_size, padding='causal')(model)
    residual5 = ReLU()(model)
    model = Conv1D(n_nodes1, kernel_size, padding='causal')(residual5)
    model = Add()([residual5, model])
    model = ReLU()(model)

    model = Conv1D(n_nodes1, kernel_size, padding='causal')(model)
    residual6 = ReLU()(model)
    model = Conv1D(n_nodes1, kernel_size, padding='causal')(residual6)
    model = Add()([residual6, model])
    model = ReLU()(model)
    model = MaxPooling1D()(model)

    # Collapse the convolutional features and repeat them once per
    # forecast step so the LSTM decoder emits n_outputs timesteps
    model = Flatten()(model)
    model = RepeatVector(n_outputs)(model)

    model = LSTM(n_nodes1, activation='relu', return_sequences=True)(model)
    model = LSTM(n_nodes1, activation='relu', return_sequences=True)(model)
    model = LSTM(n_nodes1, activation='relu', return_sequences=True)(model)
    model = LSTM(n_nodes1, activation='relu', return_sequences=True)(model)

    dense = TimeDistributed(Dense(n_nodes2, activation='relu'))(model)
    output = TimeDistributed(Dense(1))(dense)
    model = Model(inputs=visible1, outputs=output)
    model.compile(loss='mse', optimizer='adam')
    #model.summary()
    # fit network
    es = EarlyStopping(monitor='loss', mode='min', patience=10)
    history = model.fit(X_train,
                        y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=0,
                        validation_split=0.2,
                        callbacks=[es])
    # test model against hold-out set
    y_pred = model.predict(X_test)
    print('\n Elapsed time:', round((time.time() - start) / 60, 3), 'minutes')
    return y_pred, history
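
sliding_window_input and sliding_window_output are project helpers not shown in the snippet. A minimal sketch under the usual interpretation (each input sample is a window of input_length consecutive rows; each target is the output_length values that follow it), assuming data is a pandas DataFrame:

def sliding_window_input(data, input_length, i):
    # Assumed helper: one input window of input_length consecutive rows
    # starting at position i.
    return data.values[i:i + input_length]


def sliding_window_output(series, input_length, output_length, i):
    # Assumed helper: the output_length target values that immediately
    # follow the input window starting at position i.
    return series.values[i + input_length:i + input_length + output_length]

With these in place, build_model can be driven by a configuration tuple such as config = (50, 32, 64, 32, 16, 32, 3), matching the unpacking order (epochs, batch_size, n_nodes1, n_nodes2, filter1, filter2, kernel_size).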