def getcomplexmodel(data, labels, verbose, epochs, name) -> Sequential:
    # Train a small fully connected regression network on 1-D inputs.
    # print("Data size: " + str(len(data)))
    # print("No. of classes: " + str(len(labels)))
    labels = np.asarray(labels).astype('float32')

    model = Sequential()
    model.add(Dense(32, activation='relu', input_dim=1))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mse', 'accuracy', 'mae'])

    history = model.fit(
        data,
        labels,
        verbose=verbose,
        epochs=epochs,
        # batch_size=512,
        #validation_data=(x_val, y_val)
    )
    final_accuracy = history.history['acc'][-1]  # newer Keras uses the key 'accuracy'
    final_loss = history.history['loss'][-1]
    print("MODEL: {:15} | Loss: {:20} | Accuracy: {:20}".format(
        name, final_loss, final_accuracy))
    # with open(DATA_PATH + "modelaccuracy.data", "a") as f:
    #     f.write(name + "," + str(final_accuracy) + "\n")
    kerasify.export_model(model, name + ".model")
    return model
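# A minimal usage sketch (not in the original): fit the model on synthetic
# 1-D regression data; the arrays, epoch count, and name are assumptions.
x = np.linspace(-1.0, 1.0, 256).reshape(-1, 1)
y = 2.0 * x + 0.5
toy_model = getcomplexmodel(x, y, verbose=0, epochs=10, name="toy")
print(toy_model.predict(np.array([[0.25]])))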
Example #2
def create_model(input_dict_size,
                 output_dict_size,
                 input_length=DEFAULT_INPUT_LENGTH,
                 output_length=DEFAULT_OUTPUT_LENGTH):
    # Simple encoder-decoder (seq2seq) model: the encoder LSTM summarizes
    # the input sequence and its final state seeds the decoder, which emits
    # one softmax distribution over the output vocabulary per time step.

    encoder_input = Input(shape=(input_length, ))
    decoder_input = Input(shape=(output_length, ))

    encoder = Embedding(input_dict_size,
                        64,
                        input_length=input_length,
                        mask_zero=True)(encoder_input)
    encoder = LSTM(64, return_sequences=False)(encoder)

    decoder = Embedding(output_dict_size,
                        64,
                        input_length=output_length,
                        mask_zero=True)(decoder_input)
    # The encoder's final hidden output is reused as both the hidden and
    # cell state of the decoder LSTM (a common simplification).
    decoder = LSTM(64, return_sequences=True)(decoder,
                                              initial_state=[encoder, encoder])
    decoder = TimeDistributed(Dense(output_dict_size,
                                    activation="softmax"))(decoder)

    model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder])
    model.compile(optimizer='adam', loss='categorical_crossentropy')

    return model
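# A hedged usage sketch (not in the original): build the model and train it
# with teacher forcing; the vocabulary sizes and random data are assumptions.
import numpy as np
seq2seq = create_model(input_dict_size=100, output_dict_size=80)
enc_in = np.random.randint(1, 100, size=(32, DEFAULT_INPUT_LENGTH))
dec_in = np.random.randint(1, 80, size=(32, DEFAULT_OUTPUT_LENGTH))
targets = np.eye(80)[np.random.randint(0, 80, size=(32, DEFAULT_OUTPUT_LENGTH))]
seq2seq.fit([enc_in, dec_in], targets, epochs=1)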
Example #3
def generate_inceptionResnetv2_based_model():
    # Siamese-style pair scorer: a frozen InceptionResNetV2 backbone embeds
    # two 299x299 images and a Dense layer scores the concatenated features.
    irv2 = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(
        include_top=False)
    irv2.trainable = False
    # This returns a tensor
    input1 = Input(shape=(299, 299, 3), name='input1')
    input2 = Input(shape=(299, 299, 3), name='input2')
    out1 = irv2(input1)
    out2 = irv2(input2)
    averPool = AveragePooling2D(pool_size=(8, 8))
    out1 = averPool(out1)
    out2 = averPool(out2)
    y = concatenate([out1, out2])
    dense = Dense(1)
    y = dense(y)
    activation = Activation('tanh')
    y = activation(y)
    y = Flatten()(y)
    model = Model(inputs=[input1, input2], outputs=y)
    return model
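# A hedged usage sketch (not in the original): the model scores an image
# pair with a single tanh output; the random inputs are dummy data.
import numpy as np
pair_model = generate_inceptionResnetv2_based_model()
a = np.random.rand(1, 299, 299, 3).astype('float32')
b = np.random.rand(1, 299, 299, 3).astype('float32')
print(pair_model.predict([a, b]))  # shape (1, 1), values in (-1, 1)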
Example #4
# 4th Convolutional Layer
model.add(
    Conv2D(filters=384, kernel_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(Activation('relu'))

# 5th Convolutional Layer
model.add(
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='valid'))
model.add(Activation('relu'))
# Note: no max-pooling layer follows the 5th convolution in this snippet.

# Passing it to a Fully Connected layer
model.add(Flatten())
# 1st Fully Connected Layer (Keras ignores input_shape on non-first layers)
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.4))

# 2nd Fully Connected Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))

# 3rd Fully Connected Layer
model.add(Dense(1000))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
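# A hedged completion (not in the original): the snippet stops after the
# 3rd fully connected layer, so an output layer and a compile step along
# these lines would still be needed; the class count (17) is an assumption.
model.add(Dense(17))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])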
Example #5
import tensorflow as tf
from tensorflow.keras.layers import (Input, Dense, Conv2D, MaxPool2D,
                                     Dropout, ReLU, BatchNormalization,
                                     concatenate, Flatten,
                                     GlobalAveragePooling2D)
from tensorflow.keras.models import Model

# This returns a tensor
inputs = Input(shape=(32, 32, 3))

# A layer instance is callable on a tensor and returns a tensor. Dense
# applied directly to a (32, 32, 3) tensor would act on the last axis
# only, so flatten the image first for this small classifier.
x = Flatten()(inputs)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

# This creates a model that includes
# the Input layer, a Flatten, and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
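# A hedged training sketch (not in the original): random arrays stand in
# for a real labelled dataset.
import numpy as np
data = np.random.rand(100, 32, 32, 3).astype('float32')
labels = tf.keras.utils.to_categorical(np.random.randint(0, 10, 100), 10)
model.fit(data, labels, epochs=1, batch_size=32)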
Example #6
print(img_data.shape)
img_data = np.rollaxis(img_data, 1, 0)
print(img_data.shape)
img_data = img_data[0]
print(img_data.shape)
num_classes = 6
train_X, test_X, train_Y, test_Y = train_test_split(img_data,
                                                    label_array,
                                                    test_size=0.10)
image_input = Input(shape=(224, 224, 3))
model = VGG19(input_tensor=image_input, include_top=True, weights=None)
last_layer = model.get_layer('fc2').output  #VGG
# last_layer = model.get_layer('fc1000').output  #RES
# last_layer = model.get_layer('predictions').output
last_layer = Dropout(0.5)(last_layer)
out = Dense(num_classes, activation="softmax", name="output")(last_layer)
model = Model(image_input, out)
for layer in model.layers[:-1]:
    layer.trainable = False
model.summary()

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             decay=0.0),
              metrics=['accuracy'])

hist = model.fit(train_X,
                 train_Y)  # (the call is truncated in the source snippet)
Example #7
pre_net = Conv2D(64, (7, 7), strides=(2, 2))(inputs)
pre_net = ReLU()(pre_net)
pre_net = MaxPool2D(pool_size=(3, 3), strides=(1, 1))(pre_net)
pre_net = BatchNormalization()(pre_net)

def feature_extractor(input_net):
    # Inception-style block: a 1x1 -> 3x3 convolution branch in parallel
    # with a max-pooled 1x1 branch, concatenated along the channel axis.
    net_1 = Conv2D(96, 1, 1)(input_net)
    net_1 = ReLU()(net_1)
    net_1 = Conv2D(208, 3, 1)(net_1)
    net_1 = ReLU()(net_1)

    net_2 = MaxPool2D(3, 1)(input_net)
    net_2 = Conv2D(64, 1, 1)(net_2)
    net_2 = ReLU()(net_2)

    concat = concatenate(inputs=[net_1, net_2], axis=3)
    # padding='same' keeps this pooling valid even once the feature map
    # shrinks to 2x2 (with 'valid' padding it fails on 32x32 inputs)
    pooling_out = MaxPool2D(3, 2, padding='same')(concat)

    return pooling_out

feat_ex_1 = feature_extractor(pre_net)
feat_ex_2 = feature_extractor(feat_ex_1)

net = GlobalAveragePooling2D()(feat_ex_2)
output = Dense(units=11, activation='softmax')(net)

model = Model(inputs=inputs, outputs=output)
#model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
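# A hedged smoke test (not in the original): random 32x32 images and
# one-hot labels over the model's 11 output classes.
import numpy as np
xs = np.random.rand(8, 32, 32, 3).astype('float32')
ys = tf.keras.utils.to_categorical(np.random.randint(0, 11, 8), 11)
model.fit(xs, ys, epochs=1)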
Example #8
def main(train_epochs):
    print('Hello Lenin, welcome to Transfer Learning with VGG16')
    # Reading images to form X vector
    labels_name = {'benign': 0, 'malignant': 1}
    img_data, img_labels = read_dataset('/data_roi_single/train',
                                        labels_dict=labels_name)
    print(np.unique(img_labels, return_counts=True))
    # categories_names = ['benign', 'malignant']
    num_classes = 2
    # labels = labelling_outputs(num_classes, img_data.shape[0])
    # labels = labelling_mammo(num_classes, img_data.shape[0])
    # converting class labels to one-hot encoding
    y_one_hot = to_categorical(img_labels, num_classes)
    #Shuffle data
    x, y = shuffle(img_data, y_one_hot, random_state=2)
    # Dataset split
    xtrain, xtest, ytrain, ytest = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=2)

    #########################################################################################
    # Custom_vgg_model_1
    # Training the classifier alone
    image_input = Input(shape=(224, 224, 3))

    model = VGG16(input_tensor=image_input,
                  include_top=True,
                  weights='imagenet')
    model.summary()
    last_layer = model.get_layer('fc2').output
    out = Dense(num_classes, activation='sigmoid',
                name='vgg16TL')(last_layer)  # sigmoid instead of softmax
    custom_vgg_model = Model(image_input, out)
    custom_vgg_model.summary()
    # Up to this point every layer of the custom model is trainable;
    # now freeze all layers except the last one.
    for layer in custom_vgg_model.layers[:-1]:
        layer.trainable = False
    custom_vgg_model.summary()

    # custom_vgg_model.layers[3].trainable
    # custom_vgg_model.layers[-1].trainable

    # Model compilation
    custom_vgg_model.compile(
        loss='binary_crossentropy', optimizer='rmsprop',
        metrics=['accuracy'])  # binary cross entropy instead of categorical
    print('Transfer Learning Training...')
    t = time.time()

    num_of_epochs = train_epochs  # User defines number of epochs

    hist = custom_vgg_model.fit(xtrain,
                                ytrain,
                                batch_size=64,
                                epochs=num_of_epochs,
                                verbose=1,
                                validation_data=(xtest, ytest))
    print('Training time: %s' % (time.time() - t))
    # Model saving parameters

    custom_vgg_model.save('vgg16_tf_bc.h5')

    print('Evaluation...')
    (loss, accuracy) = custom_vgg_model.evaluate(xtest,
                                                 ytest,
                                                 batch_size=10,
                                                 verbose=1)
    print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
    print("Finished")

    # Model Training Graphics
    # Visualizing losses and accuracy
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']
    val_acc = hist.history['val_acc']

    xc = range(num_of_epochs)  # this range is tied to the number of epochs

    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    plt.style.use(['classic'])  # TODO: see what other styles are available
    plt.savefig('vgg16_train_val_loss.jpg')

    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of epochs')
    plt.ylabel('accuracy')
    plt.title('train_accuracy vs val_accuracy')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    plt.style.use(['classic'])  # TODO: see what other styles are available
    plt.savefig('vgg16_train_val_acc.jpg')

    plt.show()
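# A hedged entry point (not in the original); the epoch count is an
# arbitrary example value.
if __name__ == '__main__':
    main(train_epochs=50)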
Example #9
X, y = data  # `data` is a (features, labels) pair loaded earlier (not shown)
X = X / 255.0  # scale pixel values to [0, 1]
print(len(X))

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# `tensorboard` is assumed to be a TensorBoard callback created earlier,
# e.g.:
#   from tensorflow.keras.callbacks import TensorBoard
#   tensorboard = TensorBoard(log_dir='logs')
model.fit(X,
          y,
          batch_size=32,
          epochs=1,
          validation_split=0.3,
          callbacks=[tensorboard])
Example #10
# Imports needed by this snippet (assumed; not shown in the original):
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt

(X_train, y_train), (X_test, y_test) = mnist.load_data()

# image check
plt.imshow(X_train[0])
plt.show()

print(X_train.shape)

X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# CNN
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten

model = Sequential()

model.add(Conv2D(64, kernel_size=3, activation='relu',
                 input_shape=(28, 28, 1)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3)
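# A short evaluation sketch (not in the original), reusing the held-out
# set that already served as validation data:
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print('test loss: {:.4f}, test accuracy: {:.4f}'.format(loss, acc))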
Example #11
                                            target_size=(200, 200))
# (The line above is the tail of a data-generator call that is truncated
# in the source.)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout

model = Sequential()

model.add(
    Conv2D(32, kernel_size=3, activation='relu', input_shape=(200, 200, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = valid_gen.n // valid_gen.batch_size
STEP_SIZE_TEST = ceil(test_gen.n / test_gen.batch_size)

history = model.fit_generator(generator=train_gen,
                              steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=valid_gen,
                              validation_steps=STEP_SIZE_VALID)
# (the original call is truncated in the source; the validation arguments
# are inferred from the STEP_SIZE_VALID computed above)
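# A hedged follow-up (not in the original): STEP_SIZE_TEST is computed but
# never used above, which suggests a test-set evaluation such as:
scores = model.evaluate_generator(generator=test_gen, steps=STEP_SIZE_TEST)
print(scores)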