Example #1
from keras.applications.mobilenet import MobileNet
from keras.models import load_model


class MobileNetModel:  #(ClassificationModel):
    def __init__(self):
        #super(MobileNetModel, self).__init__(model_name='MobileNet')
        self.num_classes = 2
        self.build_model()

        return

    def build_model(self):
        # Initialize the model with random weights
        self.arch = MobileNet(weights=None,
                              input_shape=(256, 256, 3),
                              classes=self.num_classes)

        # Compile the model with the RMSprop optimizer and cross-entropy loss
        self.arch.compile(optimizer='rmsprop',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        return

    def __repr__(self):
        # summary() prints to stdout and returns None, so capture its output
        lines = []
        self.arch.summary(print_fn=lines.append)
        return '\n'.join(lines)

    def fit_data(self,
                 train_images,
                 train_labels,
                 val_images,
                 val_labels,
                 initial_epoch=0):
        # Note: steps_per_epoch/validation_steps are set to the sample count,
        # which only makes sense for a batch size of 1.
        train_history = self.arch.fit(train_images,
                                      train_labels,
                                      epochs=5,
                                      initial_epoch=initial_epoch,
                                      steps_per_epoch=train_images.shape[0],
                                      validation_steps=val_images.shape[0],
                                      validation_data=(val_images, val_labels),
                                      shuffle=True)
        return train_history

    def save_model(self, model_path):
        self.arch.save(model_path)
        return

    def load_model(self, model_path):
        self.arch = load_model(model_path)
        return
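
A minimal usage sketch for the wrapper above (assumed, not part of the original source; the file path is hypothetical):

model = MobileNetModel()
print(model)                       # prints the architecture summary via __repr__
model.save_model('mobilenet.h5')   # hypothetical output path
model.load_model('mobilenet.h5')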
Example #2
File: Project3.py  Project: wluoac/Project3
import numpy as np
from keras.applications.mobilenet import MobileNet, preprocess_input
from keras.preprocessing import image


def train(epochs):

    dataFolder = "./data"

    image_size = (224,224)
    # variables to hold features and labels
    features = []
    labels   = []

    class_count = 1000
    X_test = []
    y_test = []
    name_test = []

    trainData = np.loadtxt("./train.txt", dtype="str", delimiter='\t' );
    for k in range(len(trainData)) :
        aLine = trainData[k];
        image_path = filePrefixWith(dataFolder, aLine[0]);
        label = int(aLine[1]);
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[label] = 1;

        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        labels.append(ground_truth)
        features.append(x[0])



    trainData = np.loadtxt("./test.txt", dtype="str",  delimiter='\t' );
    for k in range(len(trainData)) :
        aLine = trainData[k];
        image_path = filePrefixWith(dataFolder, aLine);
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        X_test.append(x[0])
        name_test.append(image_path)

    X_train = features
    y_train = labels

    # Convert the data to numpy arrays for training
    X_train = np.array(X_train)
    Y_train = np.array(y_train)
    X_test = np.array(X_test)

    model = MobileNet(include_top=True, weights=None, classes=class_count)

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
 
    model.fit(X_train, Y_train, batch_size=16, epochs=epochs, verbose=1, validation_split=0.2)

    Y_pred = model.predict(X_test)
    
    # Write one predicted class index per test image
    f = open('project3.txt', 'w')
    for k in range(len(name_test)):
        thePrediction = Y_pred[k]
        predicted_class = thePrediction.argmax(axis=0)
        f.write(str(predicted_class) + '\n')

    f.close()
    del model
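
The filePrefixWith helper is project-specific and not shown in this excerpt; a minimal sketch consistent with how it is called above (joining the data folder onto a file name from train.txt/test.txt):

import os

def filePrefixWith(folder, name):
    # Prefix a relative file name with the data folder.
    return os.path.join(folder, str(name))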
Example #3
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.applications.mobilenet import MobileNet

# data, labels and processed_imgs are prepared earlier in the script
lb = LabelBinarizer()
lb.fit(np.asarray(data['primary_microconstituent']))
y = lb.transform(labels)
print('\nLabels binarized, converting array')


X_data = np.asarray(processed_imgs)  # renamed from "input" to avoid shadowing the builtin

X_train, X_test, y_train, y_test = train_test_split(
    X_data, y, test_size=0.1, random_state=42)


model = MobileNet(weights=None, classes=7)

model.summary()
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
time_callback = TimeHistory()
model.fit(X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test), callbacks=[time_callback])
name = 'results/UHCS_MobileNet_Weights'
score = model.evaluate(X_test, y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save_weights(name+'.h5')

times = time_callback.times
f = open('MobileNet.txt', 'w')
f.write('Test score: ' + str(score[0]) + '\n')
f.write('Test accuracy: ' + str(score[1]) + '\n')
f.write('\n'.join(str(t) for t in times) + '\n')  # one epoch duration per line
f.close()
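
TimeHistory is a custom callback that is not defined in this excerpt; a common minimal implementation that matches its usage above (a times attribute holding per-epoch durations):

import time
from keras.callbacks import Callback

class TimeHistory(Callback):
    # Record the wall-clock duration of every epoch in self.times.
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_start)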
Example #4
# The opening of this call is truncated in the excerpt; reconstructed assuming
# an ImageNet-pretrained MobileNet used headless as a feature extractor.
model = MobileNet(weights='imagenet',
                  include_top=False,
                  input_tensor=inputs,
                  pooling='max')

# freeze the network
#for layer in model.layers:
#    layer.trainable = False

x = model.output
pred = Dense(3, activation='softmax')(x)

model = Model(model.input, pred)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

tb_cb = keras.callbacks.TensorBoard(log_dir='tensorboard-log', batch_size=batch_size)
cbks = [tb_cb]
    
# train model
print("Start training model {}".format(model_name))

history = model.fit(x=X, y=y,
                    batch_size=batch_size, epochs=args.epochs,
                    validation_split=0.12,
                    shuffle=True,
                    callbacks=cbks)

# save model
model.save(model_name + ".h5")
print("Model saved to {}".format(model_name + ".h5"))

print(history.history)
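
This excerpt relies on several names defined elsewhere in the script; one plausible set of definitions, assumed purely for illustration:

import keras
from keras.layers import Input, Dense
from keras.models import Model
from keras.applications.mobilenet import MobileNet

inputs = Input(shape=(224, 224, 3))  # assumed input size
batch_size = 32                      # assumed
model_name = 'mobilenet_transfer'    # hypothetical name
# X, y: preprocessed images and one-hot labels with 3 classes, loaded elsewhere;
# args.epochs comes from the script's argument parser.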
Example #5
def train(dataset, architecture, task_name):
    ROOT_MODELS = '/home/dembanakh/.ml-manager/tasks-weights/'
    ROOT_DATASETS = '/home/dembanakh/.ml-manager/datasets/'
    if dataset == 'IMAGENET':
        if architecture == 'VGG16':
            from keras.applications.vgg16 import VGG16
            model = VGG16(weights='imagenet')
        elif architecture == 'VGG19':
            from keras.applications.vgg19 import VGG19
            model = VGG19(weights='imagenet')
        elif architecture == 'MobileNet':
            from keras.applications.mobilenet import MobileNet
            model = MobileNet(weights='imagenet')
        elif architecture == 'ResNet':
            from keras.applications.resnet import ResNet50, preprocess_input
            model = ResNet50(weights='imagenet')
        elif architecture == 'DenseNet':
            from keras.applications.densenet import DenseNet121, preprocess_input
            model = DenseNet121(weights='imagenet')
        else:
            return 0
        model.compile(optimizer='adam',
                      metrics=['accuracy'],
                      loss='sparse_categorical_crossentropy')
        model.save(ROOT_MODELS + task_name + '.h5')
    else:
        input_shape = (224, 224, 3)
        batch_size = 1  # subject to change, but Azure server has little RAM
        import os
        import numpy as np
        from keras.preprocessing import image
        try:
            samples = os.listdir(dataset + '/samples')
        except OSError:
            print('There is no such directory', dataset + '/samples')
            return 0
        X = np.zeros((len(samples), input_shape[0], input_shape[1],
                      input_shape[2]))  # input size may depend on architecture
        y = np.zeros((len(samples), ))
        if architecture == 'VGG16':
            from keras.applications.vgg16 import VGG16, preprocess_input
            model = VGG16()
        elif architecture == 'VGG19':
            from keras.applications.vgg19 import VGG19, preprocess_input
            model = VGG19()
        elif architecture == 'MobileNet':
            from keras.applications.mobilenet import MobileNet, preprocess_input
            model = MobileNet()
        elif architecture == 'ResNet':
            from keras.applications.resnet import ResNet50, preprocess_input
            model = ResNet50()
        elif architecture == 'DenseNet':
            from keras.applications.densenet import DenseNet121, preprocess_input
            model = DenseNet121()
        else:
            return 0
        for i, sample in enumerate(samples):
            try:
                img = image.load_img(dataset + '/samples/' + sample,
                                     target_size=input_shape[:2])
            except IOError:
                print('Failed to open file', dataset + '/samples/' + sample)
                return 0
            # Store the image and apply the architecture-specific preprocessing
            X[i] = preprocess_input(image.img_to_array(img))
            label_path = dataset + '/labels/' + sample.split('.')[0] + '.txt'
            try:
                f_lbl = open(label_path, 'r')
            except IOError:
                print('Failed to open file', label_path)
                return 0
            try:
                y[i] = int(f_lbl.read())
            except ValueError:
                print('File', label_path, "doesn't contain an integer")
                return 0
            finally:
                f_lbl.close()
        model.compile(optimizer='adam',
                      metrics=['accuracy'],
                      loss='sparse_categorical_crossentropy')
        model.fit(X, y, batch_size=batch_size)
        model.save(ROOT_MODELS + task_name + '.h5')
    return 1
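
A usage sketch (assumed): the first call exports a pretrained ImageNet model, the second fine-tunes on a directory laid out as <dataset>/samples and <dataset>/labels:

train('IMAGENET', 'MobileNet', 'mobilenet-imagenet')       # saves mobilenet-imagenet.h5 under ROOT_MODELS
train('/path/to/mydata', 'MobileNet', 'mobilenet-custom')  # hypothetical dataset path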
Example #6
    # Load our model
    # model = densenet169_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)

    # Build a Keras MobileNet with randomly initialized weights for 10 classes
    model = MobileNet(weights=None, classes=10)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Start training (weights=None above, so this trains from scratch rather than fine-tuning)
    model.fit(
        X_train,
        Y_train,
        batch_size=batch_size,
        epochs=nb_epoch,
        shuffle=True,
        verbose=1,
        validation_data=(X_valid, Y_valid),
    )

    # Make predictions
    predictions_valid = model.predict(X_valid,
                                      batch_size=batch_size,
                                      verbose=1)

    # Cross-entropy loss score
    score = log_loss(Y_valid, predictions_valid)
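
This excerpt runs inside a larger training function; the imports and variables it relies on, assumed for completeness:

from keras.applications.mobilenet import MobileNet
from keras.optimizers import SGD
from sklearn.metrics import log_loss
# X_train, Y_train, X_valid, Y_valid: image tensors and one-hot labels (10 classes);
# img_rows, img_cols, channel, num_classes, batch_size, nb_epoch: set by the caller.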