Example #1
def test_separable_conv1d2d():
    in_w = 32
    in_h = 32
    in_ch = 3
    kernel = 32
    ker_w = 3
    ker_h = 3

    model = Sequential(
        SeparableConv1D(kernel, (ker_w,), padding="same", input_shape=(in_w, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == 2 * ker_w * in_w * in_ch  # depthwise conv with no bias
        + (2 * in_ch + 1) * in_w * kernel  # pointwise conv
    )

    model = Sequential(
        SeparableConv2D(
            kernel, (ker_w, ker_h), padding="same", input_shape=(in_w, in_h, in_ch)
        )
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == 2 * ker_w * ker_h * in_w * in_h * in_ch  # depthwise conv with no bias
        + (2 * in_ch + 1) * in_w * in_h * kernel  # pointwise conv
    )
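
A quick numeric check of the 2D assertion above (a sketch reusing the same constants, not part of the original test):

# Sanity check of the SeparableConv2D FLOPs expectation: depthwise pass
# plus pointwise (1x1) pass with bias.
ker_w = ker_h = 3
in_w = in_h = 32
in_ch = 3
kernel = 32
depthwise = 2 * ker_w * ker_h * in_w * in_h * in_ch  # 55,296
pointwise = (2 * in_ch + 1) * in_w * in_h * kernel   # 229,376
print(depthwise + pointwise)                         # 284,672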
Example #2
def test_conv1d2d3d():
    in_w = 32
    in_h = 32
    in_z = 32
    in_ch = 3
    kernel = 32
    ker_w = 3
    ker_h = 3
    ker_z = 3

    model = Sequential(
        Conv1D(kernel, (ker_w,), padding="same", input_shape=(in_w, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert flops == ((2 * ker_w * in_ch) + 1) * in_w * kernel

    model = Sequential(
        Conv2D(kernel, (ker_w, ker_h), padding="same", input_shape=(in_w, in_h, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert flops == ((2 * ker_w * ker_h * in_ch) + 1) * in_w * in_h * kernel

    model = Sequential(
        Conv3D(
            kernel,
            (ker_w, ker_h, ker_z),
            padding="same",
            input_shape=(in_w, in_h, in_z, in_ch),
        )
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops == ((2 * ker_w * ker_h * ker_z * in_ch) + 1) * in_w * in_h * in_z * kernel
    )
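
All three assertions follow one formula: with "same" padding and stride 1, an N-D convolution costs (2 * kernel_volume * in_ch + 1) * output_volume * filters, the +1 being the bias add. A small helper reproducing the three expected counts (a sketch, not part of the test suite):

from math import prod

# Generic conv FLOPs count, assuming "same" padding and stride 1 so the
# output volume equals the input volume.
def conv_flops(kernel_dims, in_ch, out_volume, filters):
    return (2 * prod(kernel_dims) * in_ch + 1) * out_volume * filters

print(conv_flops((3,), 3, 32, 32))            # Conv1D expectation
print(conv_flops((3, 3), 3, 32 * 32, 32))     # Conv2D expectation
print(conv_flops((3, 3, 3), 3, 32 ** 3, 32))  # Conv3D expectation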
Example #3
def test_raise():
    raised = False
    try:
        get_flops(None)
    except KeyError:
        raised = True

    assert raised
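
An equivalent formulation with pytest.raises is more direct, assuming pytest is the test runner (a sketch):

import pytest
from keras_flops import get_flops

def test_raise_pytest():
    # get_flops(None) is expected to fail with KeyError, as asserted above.
    with pytest.raises(KeyError):
        get_flops(None)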
Example #4
def test_layernormalization():
    """
    layer normalization is calculated as follows,
    fused_
    1. (2 ops * |var|) inv = rsqrt(var + eps)
    2. (1 ops * |var|) inv *= gamma (scale)
    3. (|x| + |mean| + |var| ops) x' = inv * x + beta (shift) - mean * inv
    , where |var| = |mean| = 1 in default
    Thus, 5 + input element size.

    Use nn.fused_batch_norm (gen_nn_ops.fused_batch_norm_v3) for layer normalization, above calculation.
    gen_nn_ops.fused_batch_norm_v3 support only 4D, so reshape data as 4D and input them.
    squeezed_shape (ndim ops), scale (|x| ops) and shift (not float ops) is calculated.
    NOTE: is_training = True, if make trainable attributes of tf.keras.Model instanse False. So, statistics will be incorrect.
    """
    in_w = 32
    in_h = 32
    in_ch = 3

    input_shape = (in_w, in_ch)
    model = Sequential(
        LayerNormalization(
            scale=False,
            center=False,
            input_shape=input_shape,
        ))
    flops = get_flops(model, batch_size=1)
    assert flops == len(input_shape) + 1

    input_shape = (in_w, in_h, in_ch)
    model = Sequential(
        LayerNormalization(
            scale=False,
            center=False,
            input_shape=input_shape,
        ))
    flops = get_flops(model, batch_size=1)
    assert flops == len(input_shape) + 1

    input_shape = (in_w, in_h, in_ch)
    model = Sequential(
        LayerNormalization(
            beta_initializer="ones",
            gamma_initializer="ones",
            input_shape=input_shape,
        ))
    flops = get_flops(model, batch_size=1)
    assert (flops == len(input_shape) + 1 + 5 + in_w * in_h * in_ch +
            5 * in_ch + in_w * in_h *
            in_ch), "fused is True. check gen_nn_ops.fused_batch_norm_v3"
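
Worked numbers for the final assertion, following the docstring's breakdown (a sketch):

# Expected count for the fused path with a (32, 32, 3) input:
in_w, in_h, in_ch = 32, 32, 3
reshape_ops = len((in_w, in_h, in_ch)) + 1     # squeezed_shape: 4
fused_core = 5 + in_w * in_h * in_ch           # rsqrt/scale/shift: 3,077
scale_shift = 5 * in_ch + in_w * in_h * in_ch  # gamma and beta terms: 3,087
print(reshape_ops + fused_core + scale_shift)  # 6,168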
Example #5
def evaluate_test_data():
	test_dataset = Dataset(
		x_test_dir,
		y_test_dir,
		classes=CLASSES,
		augmentation=get_validation_augmentation(),
		preprocessing=get_preprocessing(preprocess_input),
	)
	test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
	model = sm.Unet(BACKBONE, classes=1, activation='sigmoid', input_shape=(320,320,3))

	# load best weights
	model.load_weights('./best_model.h5')
        
	optim = keras.optimizers.Adam(LR)

	# segmentation_models losses can be combined with '+' and scaled by an integer or float factor
	dice_loss = sm.losses.DiceLoss()
	focal_loss = sm.losses.BinaryFocalLoss()
	total_loss = dice_loss + (0.5 * focal_loss) 

	metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]

	# compile the keras model with the defined optimizer, loss and metrics
	model.compile(optim, total_loss, metrics)

	# compute the number of FLOPs for the trained model
	flops = get_flops(model, batch_size=1)
	print(f"FLOPS: {flops / 10 ** 9:.03} G")

	# compute the inference time on the test set
	start = timer()
	scores = model.evaluate_generator(test_dataloader)  # deprecated; use evaluate() on newer Keras
	end = timer()
	print("Inference time: {:.3f} s".format(end - start))


	print("Loss: {:.5}".format(scores[0]))
	for metric, value in zip(metrics, scores[1:]):
		print("mean {}: {:.5}".format(metric.__name__, value))	
Example #6
def test_additive_attention():
    """
    Bahdanau-style attention. query (batch, Tq, dim), key (batch, Tv, dim) and value (batch, Tv, dim) are inputs.
    following computations is processed.
    1. reshape query as shape [batch, Tq, 1, dim] and value as shape [batch, 1, Tv, dim]
    2. broadcasting multiply between additive of above as output shape [batch, Tq, Tv, dim]
    3. reduce_sum above with dim axis as output shape [batch, Tq, Tv]
    4. softmax of above
    5. MatMul between 4. and value as output shape [batch, Tq, dim]
    """
    Tq = 10
    Tv = 10
    dim = 16
    q_shape = (Tq, dim)
    k_shape = (Tv, dim)
    v_shape = (Tv, dim)
    q = Input(q_shape)
    k = Input(k_shape)
    v = Input(v_shape)
    x = AdditiveAttention()([q, k, v])
    model = Model([q, k, v], x)
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == Tq * Tv * dim  # No.2 (multiply)
        + Tq * Tv * dim  # No.3 (add)
        + Tq * Tv * (dim - 1)  # No.3 (reduce_sum)
        + 5 * Tq * Tv  # No.4 (softmax)
        + 2 * Tv * Tq * dim  # No.5 (MatMul)
    )
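
Plugging in Tq = Tv = 10 and dim = 16 gives the concrete total (a sketch):

Tq = Tv = 10
dim = 16
total = (Tq * Tv * dim          # broadcast multiply: 1,600
         + Tq * Tv * dim        # broadcast add:      1,600
         + Tq * Tv * (dim - 1)  # reduce_sum:         1,500
         + 5 * Tq * Tv          # softmax:              500
         + 2 * Tv * Tq * dim)   # final MatMul:       3,200
print(total)  # 8,400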
Example #7
def test_attention():
    """
    Luong-style attention. query (batch, Tq, dim), key (batch, Tv, dim) and value (batch, Tv, dim) are inputs.
    following computations is processed.
    1. query-key dot-product as output shape [batch, Tq, Tv]
    2. softmax of above
    3. MatMul between 2. and value as output shape [batch, Tq, dim]
    """
    Tq = 10
    Tv = 10
    dim = 16
    q_shape = (Tq, dim)
    k_shape = (Tv, dim)
    v_shape = (Tv, dim)
    q = Input(q_shape)
    k = Input(k_shape)
    v = Input(v_shape)
    x = Attention()([q, k, v])
    model = Model([q, k, v], x)
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == 2 * Tq * Tv * dim  # No.1 (dot-product (MatMul))
        + 5 * Tq * Tv  # No.2 (softmax)
        + 2 * Tv * Tq * dim  # No.3 (MatMul)
    )
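
The only difference from the additive variant is the scoring step: a single dot product replaces the broadcast multiply, add, and reduce_sum. Comparing the two at Tq = Tv = 10, dim = 16 (a sketch):

Tq = Tv = 10
dim = 16
luong_score = 2 * Tq * Tv * dim           # 3,200 (one MatMul)
bahdanau_score = Tq * Tv * (3 * dim - 1)  # 4,700 (multiply + add + reduce_sum)
print(luong_score, bahdanau_score)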
Example #8
def test_maxpooling1d2d3d():
    in_w = 32
    in_h = 32
    kernel = 32
    pool_w = 2
    pool_h = 2

    model = Sequential(MaxPooling1D(pool_size=(pool_w,), input_shape=(in_w, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * kernel

    model = Sequential(
        MaxPooling2D(pool_size=(pool_w, pool_h), input_shape=(in_w, in_h, kernel))
    )
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * kernel
Example #9
def test_global_averagepooling1d2d3d():
    in_w = 32
    in_h = 32
    in_z = 32
    kernel = 32

    model = Sequential(GlobalAveragePooling1D(input_shape=(in_w, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * kernel

    model = Sequential(GlobalAveragePooling2D(input_shape=(in_w, in_h, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * kernel

    model = Sequential(GlobalAveragePooling3D(input_shape=(in_w, in_h, in_z, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * in_z * kernel
Example #10
def test_Embedding():

    sentence = 32
    onehot = 100
    emb = 32

    model = Sequential(Embedding(onehot, emb, input_length=sentence))
    flops = get_flops(model, batch_size=1)
    assert flops > 0, "not supported"
Example #11
def test_gru():
    sentence = 32
    emb = 32
    rnn_unit = 3

    model = Sequential(
        GRU(rnn_unit, use_bias=False, input_shape=(sentence, emb)))
    flops = get_flops(model, batch_size=1)
    assert (flops > 2 * emb * rnn_unit * sentence +
            2 * rnn_unit * rnn_unit * sentence), "not supported"
Example #12
def test_depthwise_conv2d():
    in_w = 32
    in_h = 32
    in_ch = 3
    ker_w = 3
    ker_h = 3
    model = Sequential(
        DepthwiseConv2D((ker_w, ker_h), padding="same", input_shape=(in_w, in_h, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert flops == ((2 * ker_w * ker_h) + 1) * in_w * in_h * in_ch
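
A depthwise convolution applies one kernel per input channel, so its cost scales with in_ch rather than in_ch * filters. Compared with a standard Conv2D of 32 filters on the same input (a sketch):

in_w = in_h = 32
in_ch = 3
ker = 3
depthwise = ((2 * ker * ker) + 1) * in_w * in_h * in_ch      # 58,368
standard = ((2 * ker * ker * in_ch) + 1) * in_w * in_h * 32  # 1,802,240
print(standard / depthwise)                                  # ~30.9x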
Example #13
def test_simpleRNN():
    # NOTE: only the first input dense is calculated.
    sentence = 32
    emb = 32
    rnn_unit = 3

    model = Sequential(
        SimpleRNN(rnn_unit, use_bias=False, input_shape=(sentence, emb)))
    flops = get_flops(model, batch_size=1)
    assert (flops == 2 * emb * rnn_unit * sentence + 2 * rnn_unit * rnn_unit *
            sentence), "not supported; only the first input matmul is calculated"
Example #14
def test_dense():
    in_dense = 8
    out_dense = 3

    def build_model1():
        inp = Input((in_dense,))
        out = Dense(out_dense)(inp)
        model = Model(inp, out)
        return model

    model = build_model1()
    flops = get_flops(model, batch_size=1)
    assert flops == 2 * in_dense * out_dense + out_dense

    def build_model2():
        return Sequential(Dense(out_dense, use_bias=False, input_shape=(in_dense,)))

    model = build_model2()
    flops = get_flops(model, batch_size=1)
    assert flops == 2 * in_dense * out_dense
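
Both assertions reduce to the usual dense-layer count: 2 * in * out for the matmul, plus out bias adds when use_bias is True (a quick numeric check):

in_dense, out_dense = 8, 3
print(2 * in_dense * out_dense + out_dense)  # 51, with bias
print(2 * in_dense * out_dense)              # 48, use_bias=False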
Example #15
def test_global_maxpooling1d2d3d():
    """
    reduct rest (Ndim) of target axis.
    compare Ndim - 1 ops.
    """
    in_w = 32
    in_h = 32
    in_z = 32
    kernel = 3

    model = Sequential(GlobalMaxPooling1D(input_shape=(in_w, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == (in_w - 1) * kernel

    model = Sequential(GlobalMaxPooling2D(input_shape=(in_w, in_h, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == (in_w * in_h - 1) * kernel

    model = Sequential(GlobalMaxPooling3D(input_shape=(in_w, in_h, in_z, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == (in_w * in_h * in_z - 1) * kernel
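
Worked numbers for the three assertions (a sketch): a max over N elements costs N - 1 comparisons per channel.

in_w = in_h = in_z = 32
kernel = 3
print((in_w - 1) * kernel)                # 93
print((in_w * in_h - 1) * kernel)         # 3,069
print((in_w * in_h * in_z - 1) * kernel)  # 98,301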
Example #16
def test_upsampling1d2d3d():
    in_w = 32
    in_h = 32
    in_z = 32
    kernel = 32
    up_w = 2
    up_h = 2
    up_z = 2

    model = Sequential(UpSampling1D(size=up_w, input_shape=(in_w, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * kernel

    model = Sequential(
        UpSampling2D(size=(up_w, up_h), input_shape=(in_w, in_h, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * kernel

    model = Sequential(
        UpSampling3D(size=(up_w, up_h, up_z),
                     input_shape=(in_w, in_h, in_z, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * in_z * kernel
Example #17
def test_averagepooling1d2d3d():
    in_w = 32
    in_h = 32
    in_z = 32
    kernel = 32
    pool_w = 2
    pool_h = 2
    pool_z = 2

    model = Sequential(
        AveragePooling3D(pool_size=(pool_w, pool_h, pool_z),
                         input_shape=(in_w, in_h, in_z, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * in_z * kernel
Example #18
def test_batchnormalization():
    """
    batch normalization is calculated as follows,
    1. (3 ops * |var|) inv = rsqrt(var + eps)
    2. (1 ops * |var|) inv *= gamma (scale)
    3. (2 * |x| + |mean| + |var| ops) x' = inv * x + beta (shift) - mean * inv
    , where |var| = |mean| = channel size in default
    Thus, tot FLOPs = 6 * channel size + 2 * input element size.
    """
    in_w = 32
    in_h = 32
    in_ch = 3

    model = Sequential(
        BatchNormalization(
            beta_initializer="ones",
            gamma_initializer="ones",
            input_shape=(in_w, in_ch),
        )
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops == 6 * in_ch + 2 * in_w * in_ch
    ), "fused is False. see nn_impl.batch_normalization"

    model = Sequential(
        BatchNormalization(
            beta_initializer="ones",
            gamma_initializer="ones",
            input_shape=(in_w, in_h, in_ch),
        )
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops == 6 * in_ch + 2 * in_w * in_h * in_ch
    ), "fused is True, see gen_nn_ops.fused_batch_norm_v3"
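
Worked numbers for both assertions, using the docstring's formula (a sketch):

in_w = in_h = 32
in_ch = 3
print(6 * in_ch + 2 * in_w * in_ch)         # 210, non-fused path (3D input)
print(6 * in_ch + 2 * in_w * in_h * in_ch)  # 6,162, fused path (4D input)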
Example #19
def test_conv2dtranspose():
    in_w = 32
    in_h = 32
    in_ch = 3
    kernel = 32
    ker_w = 3
    ker_h = 3

    model = Sequential(
        Conv2DTranspose(
            kernel, (ker_w, ker_h), padding="same", input_shape=(in_w, in_h, in_ch)
        )
    )
    flops = get_flops(model, batch_size=1)
    assert flops >= ((2 * ker_w * ker_h * in_ch) + 1) * in_w * in_h * kernel
Example #20
def test_subclass():
    class SubClass(tf.keras.Model):
        def __init__(self):
            super().__init__()
            self.dense1 = Dense(10)
            self.dense2 = Dense(3)

        def call(self, x):
            x = self.dense1(x)
            return self.dense2(x)

    inp = Input((30,))
    x = SubClass()(inp)
    model = Model(inp, x)
    flops = get_flops(model, 1)
    assert flops == (2 * 30 + 1) * 10 + (2 * 10 + 1) * 3
Example #21
def test_conv1dtranspose():
    # Conv1DTranspose requires TensorFlow >= 2.3. Compare (major, minor) as a
    # tuple; checking the parts independently misclassifies versions such as 3.0.
    major, minor = tf.version.VERSION.split(".")[:2]
    if (int(major), int(minor)) < (2, 3):
        return
    from tensorflow.keras.layers import Conv1DTranspose

    in_w = 32
    in_ch = 3
    kernel = 32
    ker_w = 3

    model = Sequential(
        Conv1DTranspose(kernel, (ker_w,), padding="same", input_shape=(in_w, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert flops == ((2 * ker_w * in_ch) + 1) * in_w * kernel + 1
Example #22
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("{}.h5".format(path))
#
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#loaded_model.compile(optimizer = sgd, loss= 'categorical_crossentropy', metrics = ['accuracy'])
print_test_accuracy(
    test_model=loaded_model,
    test_batch_size=batchSize,
    sample_test=testData,
    labels_test=testTarget,
    classes=classNames,
    show_example_errors=False,
    show_confusion_matrix=True,
)

#%%%

from keras.models import load_model
from keras_flops import get_flops

## Calculate FLOPS
loaded_model.save(path)

model = load_model(path)

flops = get_flops(model, batch_size=32)
print(f"FLOPS: {flops / 10 ** 9:.03} G")
Example #23
def compute(model, input_shape, batch_size=1):
    inp = Input(input_shape)
    out = model(inp)
    _model = Model(inp, out)
    flops = get_flops(_model, batch_size=batch_size)
    print(f"FLOPS: {flops / 10 ** 9:.03} G")
Example #24
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("{}.h5".format(path))
#
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#loaded_model.compile(optimizer = sgd, loss= 'categorical_crossentropy', metrics = ['accuracy'])
print_test_accuracy(
    test_model=loaded_model,
    test_batch_size=batchSize,
    sample_test=testData,
    labels_test=testTarget,
    classes=classNames,
    show_example_errors=False,
    show_confusion_matrix=True,
)

#%%%

from keras.models import load_model
from keras_flops import get_flops

## Calculate FLOPS
loaded_model.save(path)

model = load_model(path)

flops = get_flops(model, batch_size=batchSize)
print(f"FLOPS: {flops / 10 ** 9:.03} G")
Example #25
def test_softmax():
    kernel = 8
    model = Sequential(Activation("softmax", input_shape=(kernel,)))
    flops = get_flops(model, batch_size=1)
    assert flops == 5 * kernel
Example #26
def test_duplicated_calculation():
    model = Sequential(Dense(5, input_shape=(3,)))
    flops1 = get_flops(model)
    flops2 = get_flops(model)
    flops3 = get_flops(model)
    assert flops1 == flops2 == flops3
Example #27
def test_multi_input():
    inputs = [Input((5,)), Input((5,))]
    out = tf.keras.layers.Multiply()(inputs)
    model = Model(inputs, out)
    flops = get_flops(model, 1)
    assert flops == 5
Example #28
def test_ignore():
    model = Sequential(
        [Flatten(input_shape=(16, 16)), Activation("relu"), Dropout(0.25)]
    )
    flops = get_flops(model, 1)
    assert flops == 0
Example #29
def train_test_and_collect_data(
    model_constructor,
    model_kwargs,
    loss,
    optimizer,
    epochs,
    callbacks,
    train_ds,
    val_ds,
    test_ds,
    labels,
    false_ds=None,
    thresholds=[],
    num_timer_samples=0,
    timer_iters=100,
    load_weights='',
    model_save_path='',
):
    num_labels = len(labels)

    test_samples = [sample for sample in test_ds.take(8)]
    # draw_spectrogram_from_tensors(test_samples)

    for sample, _ in train_ds.take(1):
        # exclude batch dimension
        input_shape = sample.shape[1:]

    print('Number of classes', num_labels)
    print('Input shape', input_shape)

    model = model_constructor(input_shape, num_labels, **model_kwargs)

    trainable_count = np.sum(
        [tf.keras.backend.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum([
        tf.keras.backend.count_params(w) for w in model.non_trainable_weights
    ])
    flops = get_flops(model, batch_size=1)

    print('trainable params: ', trainable_count)
    print('non-trainable params', non_trainable_count)
    print('total params', trainable_count + non_trainable_count)
    print('flops', flops)
    model.summary()

    if load_weights:
        try:
            with open(load_weights + '.json', 'r') as f:
                progress = json.load(f)
                epochs = epochs - int(progress['epoch']) - 1
        except IOError:
            pass

        try:
            model.load_weights(load_weights)
        except tf.errors.NotFoundError:
            print('Weights not found.')

    model, history = train_model(model,
                                 train_ds,
                                 val_ds,
                                 loss=loss,
                                 optimizer=optimizer,
                                 epochs=epochs,
                                 callbacks=callbacks)

    test_loss, test_acc, test_times, confusion_matrix, thresholding_results = test_model(
        model, test_ds, false_ds, thresholds, num_timer_samples, timer_iters)

    print('Test accuracy: ', test_acc)

    if model_save_path:
        model.save(model_save_path)

    return {
        'model': model,
        'epoch': history.epoch,
        'history': history.history,
        'test_loss': test_loss,
        'test_acc': test_acc,
        'test_times': test_times,
        'confusion_matrix': confusion_matrix.tolist(),
        'thresholding_results': thresholding_results,
        'total_params': int(trainable_count + non_trainable_count),
        'trainable_params': int(trainable_count),
        'non_trainable_params': int(non_trainable_count),
        'flops': flops
    }