Example #1
from keras.applications.nasnet import NASNetLarge
from keras.optimizers import SGD

img_rows, img_cols = 331, 331  # NASNetLarge's default input size; load_cifar10_data() is expected to resize CIFAR-10's 32x32 images
channel = 3
num_classes = 10
batch_size = 16
nb_epoch = 10

# Load CIFAR-10 data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

# Load our model
# model = densenet169_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)

# Load the Keras NASNetLarge model (randomly initialized weights)
model = NASNetLarge(weights=None, classes=10)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# Start training (with weights=None the network trains from scratch rather than being fine-tuned)
model.fit(
    X_train,
    Y_train,
    batch_size=batch_size,
    epochs=nb_epoch,
    shuffle=True,
    verbose=1,
    validation_data=(X_valid, Y_valid),
)

# Make predictions
predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
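The snippet depends on a load_cifar10_data() helper that is not shown. A minimal sketch, assuming it loads CIFAR-10 through keras.datasets, resizes the 32x32 images to the requested size with OpenCV, and one-hot encodes the labels (the sample-count caps nb_train/nb_valid are hypothetical, to keep the resized arrays small in memory):

import cv2
import numpy as np
from keras.datasets import cifar10
from keras.utils import to_categorical

def load_cifar10_data(img_rows, img_cols, nb_train=3000, nb_valid=1000):
    # Load the raw 32x32 CIFAR-10 arrays
    (X_train, y_train), (X_valid, y_valid) = cifar10.load_data()
    # Resize a subset of the images to the requested size
    X_train = np.array([cv2.resize(img, (img_cols, img_rows))
                        for img in X_train[:nb_train]])
    X_valid = np.array([cv2.resize(img, (img_cols, img_rows))
                        for img in X_valid[:nb_valid]])
    # One-hot encode the 10 class labels
    Y_train = to_categorical(y_train[:nb_train], 10)
    Y_valid = to_categorical(y_valid[:nb_valid], 10)
    return X_train, Y_train, X_valid, Y_valid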
Example #2
File: NasNET.py   Project: Monsets/Kaggle
from keras.applications.nasnet import NASNetLarge
from keras.layers import Input, Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

DATA_PATH = 'trainset-data.pic'
LABELS_PATH = 'trainset-data.pic'  # NOTE: same file as DATA_PATH in the original source

input_tensor = Input(shape=(32, 32, 3))

model = NASNetLarge(input_tensor=input_tensor, weights=None, include_top=False)

# add a global spatial average pooling layer
output = model.output
output = GlobalAveragePooling2D()(output)
# let's add a fully-connected layer
output = Dense(1024, activation='relu')(output)
# and a softmax layer -- here we have 2 classes
predictions = Dense(2, activation='softmax')(output)

# this is the model we will train
model = Model(inputs=model.input, outputs=predictions)

# compile the model (no layers are frozen here, so the whole network is trainable)
model.compile(optimizer='Adam', loss='categorical_crossentropy')

x, y = load_data(DATA_PATH, LABELS_PATH)
y = to_categorical(y)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)
# data augmentation using flips
idg = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)

model.fit_generator(idg.flow(x_train, y_train, batch_size=64),
                    steps_per_epoch=len(x_train) // 64,
                    epochs=100,
                    validation_data=idg.flow(x_test, y_test, batch_size=64),
                    validation_steps=len(x_test) // 64,
                    callbacks=[ModelCheckpoint('checkpoints/nasnet-cancer-{val_loss:.2f}.hdf5',
                                               save_best_only=True)])
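load_data() is not included in the project excerpt. A minimal sketch, assuming each .pic file holds a pickled array (the format is a guess from the file extension):

import pickle
import numpy as np

def load_data(data_path, labels_path):
    # Hypothetical loader: unpickle each file and coerce to a NumPy array
    with open(data_path, 'rb') as f:
        x = np.asarray(pickle.load(f))
    with open(labels_path, 'rb') as f:
        y = np.asarray(pickle.load(f))
    return x, y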
Example #3
from keras.layers import Dense
from keras.models import Model
from keras.optimizers import RMSprop

# `model` is a pre-built base network from an earlier notebook cell; take the
# output of its second-to-last layer and attach a single regression unit
x = model.get_layer(index=len(model.layers) - 2).output

print(x)
x = Dense(1)(x)

model = Model(inputs=model.input, outputs=x)
model.summary()


# **Using the RMSprop optimizer, with mean squared error as the loss and mean absolute error as the metric**

# In[ ]:


opt = RMSprop(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mae'])


# **Fitting the model**
# 
# **NOTE: The number of epochs is set to 100**

# In[11]:


network_history = model.fit(x_train, y_train, batch_size=8, epochs=100, verbose=1, validation_data= (x_val, y_val))


# ### Save the Trained Model
# 
# 
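The notebook excerpt ends before the saving cell. A minimal sketch of the missing step, using Keras's standard save API (the filename is illustrative):

# In[ ]:


# Hypothetical filename; save() writes architecture, weights, and optimizer state to one HDF5 file
model.save('regression_model.h5')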
Example #4
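The FeatureExtractor type referenced below is a project-specific enum that is not part of the excerpt; a plausible minimal reconstruction:

from enum import Enum

class FeatureExtractor(Enum):
    # Hypothetical reconstruction: one member per supported backbone
    Dense121 = "DenseNet121"
    Dense169 = "DenseNet169"
    Dense201 = "DenseNet201"
    NASNetLarge = "NASNetLarge"
    NASNetMobile = "NASNetMobile"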
from typing import Tuple

import keras
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras.applications.nasnet import NASNetLarge, NASNetMobile
from keras.layers import Input, Dense, Dropout, Flatten, BatchNormalization
from keras.models import Model
from keras.optimizers import Adam


def TransferNet(input_shape: Tuple[int, int, int],
                num_classes: int,
                feature_extractor: FeatureExtractor = None,
                dense_layers: int = 3,
                dropout_rate: float = 0.3) -> Model:
    """
    Exploits a pre-trained model as a feature extractor, feeding its output into a fully-connected NN.
    The feature extractor model is NOT fine-tuned for the specific task.
    Dropout and batch normalization are used throughout the trainable portion of the network.

    :param input_shape: Shape of the input tensor as a 3-dimensional int tuple (height, width, channels)
    :param num_classes: Number of classes for the final FC layer
    :param feature_extractor: FeatureExtractor instance selecting which pre-trained model to use as feature extractor
    :param dense_layers: Number of layers for the FC NN
    :param dropout_rate: Dropout rate

    :return: a Keras model
    """

    adam_opt = Adam(lr=0.1)
    model_input = Input(shape=input_shape)

    # load pre-trained model on ImageNet
    if feature_extractor == FeatureExtractor.Dense121:
        fe_model = DenseNet121(weights="imagenet",
                               include_top=False,
                               input_tensor=model_input)
    elif feature_extractor == FeatureExtractor.Dense169:
        fe_model = DenseNet169(weights="imagenet",
                               include_top=False,
                               input_tensor=model_input)
    elif feature_extractor == FeatureExtractor.Dense201:
        fe_model = DenseNet201(weights="imagenet",
                               include_top=False,
                               input_tensor=model_input)
    elif feature_extractor == FeatureExtractor.NASNetLarge:
        fe_model = NASNetLarge(weights="imagenet",
                               include_top=False,
                               input_tensor=model_input)
    else:
        # default: NASNetMobile
        fe_model = NASNetMobile(weights="imagenet",
                                include_top=False,
                                input_tensor=model_input)

    fe_model = Model(inputs=model_input,
                     outputs=fe_model.output,
                     name="FeatureExtractor")
    fe_model.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=adam_opt,
                     metrics=["accuracy"])

    # get handles to the model (input, output tensors)
    fe_input = fe_model.get_layer(index=0).input
    fe_output = fe_model.get_layer(index=-1).output

    # freeze the feature extractor's layers so only the FC head is trained
    for layer in fe_model.layers:
        layer.trainable = False

    # final fully-connected layers
    dense = Flatten()(fe_output)
    dense = BatchNormalization()(dense)
    dense = Dropout(rate=dropout_rate)(dense)

    num_units = 128
    for i in range(1, dense_layers + 1):
        # layer widths shrink as 128/i: 128, 64, 42, ...
        dense = Dense(units=int(num_units / i), activation="relu")(dense)
        dense = BatchNormalization()(dense)
        dense = Dropout(rate=dropout_rate)(dense)

    output_dense = Dense(units=num_classes, activation="softmax")(dense)

    model = Model(inputs=fe_input, outputs=output_dense, name="TransferNet")
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=adam_opt,
                  metrics=["accuracy"])

    return model
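A usage sketch (the input size and class count are illustrative; NASNetMobile's ImageNet weights expect 224x224 RGB input):

# Build a 5-class classifier on top of a frozen NASNetMobile backbone
model = TransferNet(input_shape=(224, 224, 3),
                    num_classes=5,
                    feature_extractor=FeatureExtractor.NASNetMobile)
model.summary()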