# Example #1
def build_mobilenet(input_tensor, num_classses, k=1, weights=None, gpus=None):
    """Build and compile a MobileNet classifier.

    Args:
        input_tensor: Keras tensor used as the network input.
        num_classses: number of output classes (parameter name kept as-is
            for keyword-argument compatibility with existing callers).
        k: when 1, track plain accuracy only; otherwise also track the
            externally defined ``top_k_acc`` metric.
        weights: optional pretrained-weight spec forwarded to ``MobileNet``.
        gpus: optional GPU count; when truthy a multi-GPU wrapper is
            compiled and returned instead of the base model.

    Returns:
        A compiled Keras model.
    """
    net = MobileNet(input_shape=None,
                    alpha=1.0,
                    depth_multiplier=1,
                    dropout=1e-3,
                    include_top=True,
                    weights=weights,
                    input_tensor=input_tensor,
                    pooling=None,
                    classes=num_classses)
    sgd_opt = optimizers.SGD(lr=.01, momentum=0.9, nesterov=True)

    if gpus:
        # Replicate across GPUs and compile the wrapper, not the base net.
        wrapper = multi_gpu_model(net, gpus=gpus)
        wrapper.compile(loss='categorical_crossentropy',
                        optimizer=sgd_opt,
                        metrics=['accuracy'])
        return wrapper

    metric_list = ['accuracy'] if k == 1 else ['accuracy', top_k_acc]
    net.compile(loss='categorical_crossentropy',
                optimizer=sgd_opt,
                metrics=metric_list)
    return net
# Example #2
def cnnMobileNet():
    """Return an ImageNet-pretrained MobileNet compiled for classification."""
    net = MobileNet(include_top=True, weights='imagenet')

    # Adam optimizer with categorical cross-entropy, tracking accuracy.
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
def evaluate():
    """Evaluate a pretrained model on a directory-structured image dataset.

    Reads the model name and dataset path from CLI args, builds the matching
    preprocessing generator, and prints the evaluation metrics.
    """
    args = parse_args()
    Model, preprocess_input, size = get_model_artifacts(args.model_name)
    idg = ImageDataGenerator(preprocessing_function=preprocess_input)
    net = Model(weights="imagenet")
    net.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
    result = net.evaluate(
        idg.flow_from_directory(args.dataset_path, target_size=(size, size)))
    print(result)
    def __init__(self, model_path="weights/vasya_mobilenetv2_keypoints.h5",
                 model_type='mobilenet_v2'):
        """Build a 68-landmark facial-keypoint regressor and load weights.

        Args:
            model_path: path to an HDF5 weights file.
            model_type: 'mobilenet' selects MobileNet v1; any other value
                selects MobileNetV2.
        """
        if model_type == 'mobilenet':
            base = MobileNet(input_shape=(112, 112, 3), classes=68 * 2,
                             weights=None)
        else:
            base = MobileNetV2(input_shape=(112, 112, 3), classes=68 * 2,
                               weights=None)

        # Replace the classification head with a linear regression layer
        # predicting (x, y) for each of the 68 landmarks.
        out = base.layers[-2].output
        out = Dense(68 * 2, activation='linear')(out)
        model = Model(inputs=base.input, outputs=out)
        model.compile(optimizer=Adam(), loss='mse')

        # BUG FIX: Keras ``load_weights`` expects a file path, not an open
        # file object; the original passed ``f`` from ``open(..., 'rb')``.
        model.load_weights(model_path)

        self.model = model
def mobilenet(weight_dir):
    """Return a compiled 4-class grayscale MobileNet with loaded weights.

    Args:
        weight_dir: path to the HDF5 weights file to load.
    """
    config = dict(input_shape=(64, 64, 1),
                  alpha=1.0,
                  depth_multiplier=1,
                  dropout=1e-3,
                  include_top=True,
                  weights=None,
                  input_tensor=None,
                  pooling='avg',
                  classes=4)
    net = MobileNet(**config)
    net.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    net.load_weights(weight_dir)
    return net
class MobileNetModel:  #(ClassificationModel):
    """Binary MobileNet classifier over 256x256 RGB images."""

    def __init__(self):
        #super(MobileNetModel, self).__init___(model_name='MobileNet')
        self.num_classes = 2  # binary classification task
        self.build_model()
        return

    def build_model(self):
        """Create the architecture with random weights and compile it."""
        self.arch = MobileNet(weights=None,
                              input_shape=(256, 256, 3),
                              classes=self.num_classes)
        # RMSprop optimizer with categorical cross-entropy loss.
        self.arch.compile(optimizer='rmsprop',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        return

    def __repr__(self):
        return str(self.arch.summary())

    def fit_data(self,
                 train_images,
                 train_labels,
                 val_images,
                 val_labels,
                 initial_epoch=None):
        """Train for 5 epochs on in-memory arrays; return the History."""
        fit_kwargs = dict(epochs=5,
                          steps_per_epoch=train_images.shape[0],
                          validation_steps=val_images.shape[0],
                          validation_data=(val_images, val_labels),
                          shuffle=True)
        return self.arch.fit(train_images, train_labels, **fit_kwargs)

    def save_model(self, model_path):
        """Persist the full model (architecture + weights) to disk."""
        self.arch.save(model_path)
        return

    def load_model(self, model_path):
        """Replace the current model with one loaded from disk."""
        self.arch = load_model(model_path)
        return
def mobilenet(input_tensor, input_shape, nb_classes):
    """Build and compile a from-scratch MobileNet with average pooling."""
    architectures_log.info("Architecture: MobileNet")

    net = MobileNet(include_top=True,
                    weights=None,
                    input_tensor=input_tensor,
                    input_shape=input_shape,
                    pooling="avg",
                    classes=nb_classes)

    # Adam with a low learning rate and slow decay; accuracy is tracked
    # alongside a thresholded custom accuracy metric at 0.75.
    net.compile(loss="categorical_crossentropy",
                optimizer=Adam(lr=0.0001, decay=1e-6),
                metrics=["accuracy", custom_acc(0.75)])

    print(net.summary())
    architectures_log.info("Architecture: MobileNet")

    return net
# Example #8
class Navigation:
    """ROS node that predicts velocity commands from camera images."""

    def __init__(self):
        self.imageSub = message_filters.Subscriber(
            "robot1/camera/rgb/image_raw", Image)
        self.odometrySub = message_filters.Subscriber("robot1/odom", Odometry)
        self.cmd_vel = rospy.Publisher("/robot1/mobile_base/commands/velocity",
                                       Twist,
                                       queue_size=1)

        # Deliver image + odometry messages with matching timestamps.
        ts = message_filters.TimeSynchronizer(
            [self.imageSub, self.odometrySub], 10)
        ts.registerCallback(self.callback)

        self.cv = CvBridge()
        self.lastImage = None
        self.OdomLX = None  # last observed linear x velocity
        self.OdomAZ = None  # last observed angular z velocity

        self.model = MobileNet()
        self.model.load_weights(
            "./weights/WeightsMobileNetTrain1/results/weights.59-0.01.hdf5")
        self.model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

    def callback(self, imageMsg, odometryMsg):
        """Cache the latest camera frame and odometry readings."""
        try:
            # BUG FIX: the bridge is stored as ``self.cv`` in __init__; the
            # original referenced a non-existent ``self.bridge`` attribute,
            # so every callback raised and was swallowed by the except.
            image = self.cv.imgmsg_to_cv2(imageMsg, "mono8")
            # NOTE(review): ndarray.resize mutates in place and returns
            # None, so the original assignment stored nothing useful;
            # cv2.resize(image, (224, 224)) is almost certainly intended —
            # switch to it once cv2 is importable here. np.resize at least
            # yields a (224, 224) array.
            self.lastImage = np.resize(image, (224, 224))
            self.OdomLX = odometryMsg.twist.twist.linear.x
            self.OdomAZ = odometryMsg.twist.twist.angular.z
        except Exception as e:
            print(e)

    def run(self):
        """Predict a velocity command from the last frame and publish it."""
        batch = [self.lastImage]
        inputModel = np.array(batch)
        output = self.model.predict(inputModel, batch_size=1, verbose=0)
        move_cmd = Twist()
        move_cmd.linear.x = output[0][0]
        move_cmd.angular.z = output[0][1]
        # BUG FIX: the command was built but never sent; publish it on the
        # velocity topic whose publisher __init__ creates (and which was
        # otherwise unused).
        self.cmd_vel.publish(move_cmd)
# In[ ]:

# first we need to import MobileNet
from keras.applications.mobilenet import MobileNet

# here instantiate a MobileNet that is specific to our data (image size and number of classes) with randomized initial weights
#model_mn = ?
#<SOLUTION>
model_mn = MobileNet(weights=None,
                     input_shape=(dims, dims, 1),
                     classes=classes)
#</SOLUTION>

# now let's train it
model_mn.compile(loss="categorical_crossentropy",
                 optimizer=optimizers.Adam(lr=1e-5),
                 metrics=["categorical_accuracy"])
# BUG FIX: the original called ``model.fit_generator`` and therefore trained
# the earlier network again instead of the freshly compiled ``model_mn``.
history_mn = model_mn.fit_generator(train_generator,
                                    steps_per_epoch=steps,
                                    epochs=5,
                                    validation_data=valid_generator,
                                    validation_steps=val_steps)

# Now we can plot the losses compared to our first network:

# In[ ]:

plt.figure(figsize=(6.0, 4.0))
plt.subplot(121)
plt.plot(history_mn.epoch, history_mn.history['loss'], 'b-s')
plt.plot(history.epoch, history.history['loss'], 'g-s')
# Example #10
def main():
    """Train a from-scratch MobileNet for one ZSL superclass.

    Expects three CLI arguments: superclass, imgmove ('True'/'False' —
    whether to build the train/val split from the source dataset), and
    the learning rate.
    """
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        # Any value other than the literal string 'False' enables the copy.
        imgmove = sys.argv[2] != 'False'
        lr = float(sys.argv[3])
    else:
        print('Parameters error')
        # BUG FIX: bare ``exit()`` terminates with status 0 (success) even
        # though argument parsing failed; exit non-zero instead.
        sys.exit(1)

    # The constants: classes per superclass, and the test-name letter used
    # in the source dataset's directory naming scheme.
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation: every 8th image goes to the
    # validation split, the rest to the training split.
    if imgmove:
        os.mkdir('trainval_' + superclass)
        os.mkdir(trainpath)
        os.mkdir(valpath)
        sourcepath = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
        for eachclass in os.listdir(sourcepath):
            if eachclass[0] == superclass[0]:
                print(eachclass)
                os.mkdir(trainpath + '/' + eachclass)
                os.mkdir(valpath + '/' + eachclass)
                imgs = os.listdir(sourcepath + '/' + eachclass)
                for idx, im in enumerate(imgs):
                    dest = valpath if idx % 8 == 0 else trainpath
                    shutil.copyfile(sourcepath + '/' + eachclass + '/' + im,
                                    dest + '/' + eachclass + '/' + im)

    # Train and validation ImageDataGenerator: light geometric augmentation
    # on the training split only.
    batchsize = 32

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=15,
                                       width_shift_range=5,
                                       height_shift_range=5,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(trainpath,
                                                        target_size=(224, 224),
                                                        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(valpath,
                                                       target_size=(224, 224),
                                                       batch_size=batchsize)

    # Train MobileNet from scratch for this superclass.
    model = MobileNet(include_top=True,
                      weights=None,
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=classNum[superclass[0]])
    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)

    weightname = 'model/mobile_' + superclass + '_wgt.h5'

    # Keep only the best weights (by validation loss), checked every 2 epochs.
    checkpointer = ModelCheckpoint(weightname,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=2)
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        validation_data=valid_generator,
                        validation_steps=validation_steps,
                        callbacks=[checkpointer])
# Example #11
def train(epochs):
    """Train a 1000-class MobileNet from ./train.txt and predict ./test.txt.

    Writes one predicted class index per test image to ``project3.txt``
    (in the order the test file lists them).

    Args:
        epochs: number of training epochs.
    """
    dataFolder = "./data"
    image_size = (224, 224)
    class_count = 1000

    # Training features and one-hot labels.
    features = []
    labels = []
    # Test images and their source paths (order preserved for output).
    X_test = []
    name_test = []

    def _load_image(image_path):
        # Load one image, convert to an array, and apply MobileNet
        # preprocessing; returns the (H, W, C) array.
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        return preprocess_input(x)[0]

    trainData = np.loadtxt("./train.txt", dtype="str", delimiter='\t')
    for aLine in trainData:
        image_path = filePrefixWith(dataFolder, aLine[0])
        ground_truth = np.zeros(class_count, dtype=np.float32)
        ground_truth[int(aLine[1])] = 1
        labels.append(ground_truth)
        features.append(_load_image(image_path))

    testData = np.loadtxt("./test.txt", dtype="str", delimiter='\t')
    for aLine in testData:
        image_path = filePrefixWith(dataFolder, aLine)
        X_test.append(_load_image(image_path))
        name_test.append(image_path)

    # 9. Fit model on training data
    X_train = np.array(features)
    Y_train = np.array(labels)
    X_test = np.array(X_test)

    model = MobileNet(include_top=True, weights=None, classes=class_count)

    # 8. Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(X_train, Y_train, batch_size=16, epochs=epochs, verbose=1,
              validation_split=0.2)

    Y_pred = model.predict(X_test)

    # CLEANUP: write predictions through a context manager so the file is
    # closed even on error; the unused ``y_test`` list was removed.
    with open('project3.txt', 'w') as f:
        for prediction in Y_pred:
            f.write(str(prediction.argmax(axis=0)) + '\n')

    del model
# Example #12
# Build a 6-class softmax head on an ImageNet-pretrained ResNet50 backbone.
model = ResNet50(weights='imagenet', include_top=False,
                 input_shape=(224, 224, 3), classes=6)
x = GlobalAveragePooling2D(input_shape=model.output_shape[1:])(model.output)
x = Dense(6, activation='softmax', kernel_initializer='glorot_normal')(x)
model = models.Model(inputs=model.input, outputs=x)

# Fine-tune: unfreeze every layer except the last.
for layer in model.layers[:-1]:  #[n_frozen_layers:]:
    layer.trainable = True

# BUG FIX: a stray unterminated triple-quote line here turned the rest of
# the snippet into a string literal; it has been removed.
"""# Modelin Görselleştirilmesi"""

model.compile(optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
"""# Modelin Oluşturulması"""

# NOTE(review): this fits on (x_test, y_test) while stepping by
# len(x_train) — confirm whether the training split was intended here.
history = model.fit_generator(datagen.flow(x_test,
                                           y_test,
                                           batch_size=batch_size),
                              validation_data=(x_test, y_test),
                              steps_per_epoch=len(x_train) // batch_size,
                              epochs=epochs)
"""# Sonuçların Görselleştirilmesi"""

score = model.evaluate(x_test, y_test, verbose=0)
print('Test Loss:', score[0])
# Example #13
def train(dataset, architecture, task_name):
    """Export a pretrained CNN or fine-tune one on a directory dataset.

    For ``dataset == 'IMAGENET'`` the pretrained model is compiled and saved
    unchanged. Otherwise images in ``<dataset>/samples`` with matching
    integer labels in ``<dataset>/labels/<name>.txt`` are loaded and fitted.

    Returns:
        1 on success, 0 on any error (missing files, bad labels, unknown
        architecture).
    """
    ROOT_MODELS = '/home/dembanakh/.ml-manager/tasks-weights/'
    ROOT_DATASETS = '/home/dembanakh/.ml-manager/datasets/'
    if dataset == 'IMAGENET':
        if architecture == 'VGG16':
            from keras.applications.vgg16 import VGG16
            model = VGG16(weights='imagenet')
        elif architecture == 'VGG19':
            from keras.applications.vgg19 import VGG19
            model = VGG19(weights='imagenet')
        elif architecture == 'MobileNet':
            from keras.applications.mobilenet import MobileNet
            model = MobileNet(weights='imagenet')
        elif architecture == 'ResNet':
            from keras.applications.resnet import ResNet50, preprocess_input
            model = ResNet50(weights='imagenet')
        elif architecture == 'DenseNet':
            from keras.applications.densenet import DenseNet121, preprocess_input
            model = DenseNet121(weights='imagenet')
        else:
            return 0
        model.compile(optimizer='adam',
                      metrics=['accuracy'],
                      loss='sparse_categorical_crossentropy')
        model.save(ROOT_MODELS + task_name + '.h5')
    else:
        input_shape = (224, 224, 3)
        batch_size = 1  # subject to change, but Azure server has little RAM
        import os
        import numpy as np
        from keras.preprocessing import image
        try:
            samples = [i for i in os.listdir(dataset + '/samples')]
        except OSError:
            # BUG FIX: Python-2 print statements converted to print() calls
            # throughout this function (they are syntax errors in Python 3).
            print('There is no such directory', dataset + '/samples')
            return 0
        X = np.zeros((len(samples), input_shape[0], input_shape[1],
                      input_shape[2]))  # maybe depends on architecture
        y = np.zeros((len(samples), ))
        if architecture == 'VGG16':
            from keras.applications.vgg16 import VGG16, preprocess_input
            model = VGG16()
        elif architecture == 'VGG19':
            from keras.applications.vgg19 import VGG19, preprocess_input
            model = VGG19()
        elif architecture == 'MobileNet':
            from keras.applications.mobilenet import MobileNet, preprocess_input
            model = MobileNet()
        elif architecture == 'ResNet':
            from keras.applications.resnet import ResNet50, preprocess_input
            model = ResNet50()
        elif architecture == 'DenseNet':
            from keras.applications.densenet import DenseNet121, preprocess_input
            model = DenseNet121()
        else:
            return 0
        for i, sample in enumerate(samples):
            try:
                img = image.load_img(dataset + '/samples/' + sample,
                                     target_size=input_shape)
            except IOError:
                print('Failed to open file', dataset + '/samples/' + sample)
                return 0
            # BUG FIX: the original ran preprocess_input over the all-zero X
            # buffer before loading anything and never copied the decoded
            # images into it, so the model trained on zeros; store each
            # preprocessed image instead.
            X[i] = preprocess_input(image.img_to_array(img))
            label_path = dataset + '/labels/' + sample.split('.')[0] + '.txt'
            try:
                f_lbl = open(label_path, 'r')
            except IOError:
                print('Failed to open file', label_path)
                return 0
            # BUG FIX: close the label file (the handle was leaked before).
            with f_lbl:
                try:
                    y[i] = int(f_lbl.read())
                except ValueError:
                    print('File', label_path, "doesn't contain integer")
                    return 0
        model.compile(optimizer='adam',
                      metrics=['accuracy'],
                      loss='sparse_categorical_crossentropy')
        model.fit(X, y, batch_size=batch_size)
        model.save(ROOT_MODELS + task_name + '.h5')
    return 1
# create callbacks: checkpoint best model, log to CSV, reduce LR on
# plateaus, and stop early when validation accuracy stalls.
callbacks = []
callbacks.append(
    ModelCheckpoint('model.h5', monitor='val_accuracy', save_best_only=True))
callbacks.append(CSVLogger('training_log.csv', separator=',', append=False))
callbacks.append(
    ReduceLROnPlateau(monitor='val_accuracy',
                      factor=0.2,
                      patience=10,
                      min_lr=0.0000001))
callbacks.append(EarlyStopping(monitor='val_accuracy', patience=15))

# compile model
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(momentum=0.9),
              metrics=['accuracy'])

# create fit function
# BUG FIX: steps_per_epoch divided by a hard-coded 128 with ``/`` (a float,
# and inconsistent with the generator's batch size); use the same
# ``batch_size`` the generator uses, with integer division.
history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              steps_per_epoch=len(x_train) // batch_size,
                              epochs=n_epochs,
                              verbose=2,
                              callbacks=callbacks,
                              validation_data=(x_test, y_test))

from matplotlib import pyplot as plt
import numpy as np
# Example #15
def main(args):
    """Train a MobileNet from parsed CLI arguments and save the result.

    Returns 0 on success; raises EnvironmentError when a dataset directory
    is missing.
    """
    train_data = args.train_data
    valid_data = args.valid_data
    nb_train_samples = args.train_samples
    nb_validation_samples = args.valid_samples
    model_path = args.model_path
    model_weights_path = args.model_weights_path
    num_classes = args.num_classes
    batch_size = args.batch_size
    epochs = args.epochs
    loss = args.loss
    optimizer = args.optimizer
    img_width, img_height = args.target_size

    # Fail fast when either dataset directory is missing.
    if not os.path.isdir(train_data):
        raise EnvironmentError(f'--train-data {train_data} should exist')
    if not os.path.isdir(valid_data):
        raise EnvironmentError(f'--valid-data {valid_data} should exist')

    # Channel position depends on the backend image-data format.
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    print(
        f'Start training mobile net for {epochs} epochs.',
        f'{nb_train_samples} train samples, {nb_validation_samples} valid samples'
    )

    model = MobileNet(input_tensor=Input(shape=input_shape),
                      classes=num_classes,
                      weights=None)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    # Both splits use identical rescaling; directories supply the labels.
    flow_kwargs = dict(target_size=(img_width, img_height),
                       batch_size=batch_size,
                       class_mode='categorical')
    train_generator = ImageDataGenerator(
        rescale=1. / 255).flow_from_directory(train_data, **flow_kwargs)
    validation_generator = ImageDataGenerator(
        rescale=1. / 255).flow_from_directory(valid_data, **flow_kwargs)

    model.fit_generator(train_generator,
                        steps_per_epoch=nb_train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=nb_validation_samples // batch_size,
                        verbose=1)

    model.save(model_path)
    model.save_weights(model_weights_path)

    print('Model saved.')
    return 0
from keras.applications.mobilenet import MobileNet
import tensorflow as tf

# Headless MobileNet feature extractor.
# NOTE(review): Keras rejects weights='imagenet' for inputs that do not
# have exactly 3 channels — this (32, 32, 1) shape is expected to raise at
# construction time; confirm whether weights=None or a 3-channel input was
# intended.
backbone_config = {
    'weights': 'imagenet',
    'include_top': False,
    'input_shape': (32, 32, 1),
}
model = MobileNet(**backbone_config)
model.compile(optimizer='Adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())
# Example #17
  x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,strides=(2, 2), block_id=6)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
  shape = (1, 1, int(512 * alpha))
  x = keras.layers.GlobalAveragePooling2D()(x)
  x = keras.layers.Reshape(shape, name='reshape_1')(x)
  x = keras.layers.Dropout(dropout, name='dropout')(x)
  x = keras.layers.Conv2D(classes, (1, 1),padding='same',name='conv_preds')(x)
  x = keras.layers.Reshape((classes,), name='reshape_2')(x)
  output = keras.layers.Activation('softmax', name='act_softmax')(x)
  model = keras.models.Model(img_input, output)
  return model

model = MobileNet()
adam = keras.optimizers.Adam(lr = 0.001)
# BUG FIX: compile with the Adam instance configured above; the original
# passed the string "adam", leaving the configured optimizer unused
# (lr=0.001 happens to match the default, so behavior is unchanged, but
# the intent is now explicit).
model.compile(optimizer = adam , loss = "categorical_crossentropy" , metrics = ["acc"])
#model.summary()

"""
    Train model.
"""

# Reduce the learning rate when validation loss plateaus for 3 epochs.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=3, min_lr=0.001)

example = model.fit_generator(
        train_generator,
        epochs=100,
        validation_data=validation_generator,
        shuffle = True, callbacks = [reduce_lr])
# Example #18
    print('--> Starting evalutation...')
    from keras.preprocessing.image import ImageDataGenerator
    from keras import metrics

    def in_top_k(y_true, y_pred):
        """Keras metric: 1.0 when the true class is among the 5 highest-scoring predictions."""
        return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)

    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    validation_generator = val_datagen.flow_from_directory(
        './imagenet-data/validation',
        target_size=(224, 224),
        batch_size=10,
        class_mode='categorical',
        shuffle=False)

    model.trainable = False
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy', in_top_k])

    results = model.evaluate(validation_generator,
                             steps=5000,
                             workers=1,
                             max_queue_size=1)

    print('--> Results for ' + sys.argv[1])
    print(model.metrics_names)
    print(results)

#########################################
# features = model.predict(x)
train_labels = train_generator.classes
test_labels = test_generator.classes

#####get model#####
model = MobileNet(weights=None, classes=4)

#####Compile model#####
# CLEANUP: the original also constructed an SGD optimizer (with an invalid
# momentum of 1.9) and an RMSprop optimizer that were never used; only the
# Adam instance actually passed to compile() is kept.
adam = Adam(lr=0.001)

model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

# Fit the model
# f = open("output/log.csv","w+")
# csv_logger = CSVLogger('/output/log.csv', append=True, separator=',')
tb_callback = TensorBoard(log_dir='/output/logs',
                          histogram_freq=0,
                          batch_size=batch_size)
# model.fit(train_features, train_labels, epochs=128, batch_size=batch_size,  verbose=2, callbacks=[csv_logger])
# BUG FIX: integer division so steps_per_epoch is an int, not a float.
model.fit_generator(train_generator,
                    epochs=150,
                    steps_per_epoch=(1400 // batch_size) + 1,
                    verbose=2,
                    callbacks=[tb_callback])
model.save("/output/mobnet.h5")
# Example #20
def train_mobilenet(image_class, epochs):
    """Train a grayscale MobileNet on one MURA body-part class.

    Builds symlinked train/valid directory views of the dataset, trains with
    light augmentation, checkpoints the best weights by validation loss, and
    stops early when validation accuracy stalls.

    Args:
        image_class: MURA class subdirectory name (one body part).
        epochs: number of training epochs.

    Returns:
        Tuple of (training wall-clock duration, trained model,
        validation generator).
    """
    start_train_time = datetime.datetime.now()
    image_class = image_class
    root_path = '/home/yu/Documents/tensorflow/MURA/MURA-v1.1/'  # the root path of dataset
    train_dirs = os.path.join(
        root_path, 'train/{}'.format(image_class))  # import data for training
    valid_dirs = os.path.join(
        root_path,
        'valid/{}'.format(image_class))  # import data for validation

    if not os.path.exists('v_train/{}'.format(
            image_class)):  # iterate to create symbolic link to data
        make_vdirs_for_keras(train_dirs, 'v_train/{}'.format(image_class))

    if not os.path.exists('v_valid/{}'.format(image_class)):
        make_vdirs_for_keras(valid_dirs, 'v_valid/{}'.format(image_class))

    # Light geometric augmentation plus per-sample normalization for training.
    idg_train_settings = dict(samplewise_center=True,
                              samplewise_std_normalization=True,
                              rotation_range=5,
                              width_shift_range=0.1,
                              height_shift_range=0.1,
                              zoom_range=0.1,
                              horizontal_flip=True,
                              vertical_flip=True)
    idg_train = ImageDataGenerator(**idg_train_settings)

    # Validation uses the same normalization but no augmentation.
    idg_valid_settings = dict(samplewise_center=True,
                              samplewise_std_normalization=True,
                              rotation_range=0,
                              width_shift_range=0.,
                              height_shift_range=0.,
                              zoom_range=0.0,
                              horizontal_flip=False,
                              vertical_flip=False)
    idg_valid = ImageDataGenerator(**idg_valid_settings)

    train_gen = idg_train.flow_from_directory('v_train/{}'.format(image_class),
                                              follow_links=True,
                                              target_size=(128, 128),
                                              color_mode='grayscale')

    valid_gen = idg_valid.flow_from_directory('v_valid/{}'.format(image_class),
                                              follow_links=True,
                                              target_size=(128, 128),
                                              color_mode='grayscale')

    # Pull one batch to discover the input shape and class count.
    # NOTE: this consumes the validation generator's first batch.
    a, b = next(valid_gen)
    s_net = MobileNet(classes=b.shape[1],
                      weights=None,
                      input_shape=a.shape[1:])
    # s_net.summary()

    s_net.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    # print('Layers: {}, parameters: {}'.format(len(s_net.layers), s_net.count_params()))

    if not os.path.exists('weights'):
        os.mkdir('weights/')
    # Keep only the best (lowest validation loss) weights per class.
    file_path = "weights/weights.best.hdf5." + image_class
    checkpoint = ModelCheckpoint(file_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    early = EarlyStopping(monitor="val_acc", mode="max", patience=3)
    callbacks_list = [checkpoint, early]  #early

    s_net.fit_generator(
        train_gen,
        steps_per_epoch=30,  # default 30
        validation_data=valid_gen,
        validation_steps=10,
        epochs=epochs,
        callbacks=callbacks_list)

    end_train_time = datetime.datetime.now()
    time_train = end_train_time - start_train_time
    return time_train, s_net, valid_gen
class MobileNetModel:  #(ClassificationModel):
    """Binary MobileNet classifier trained from directory generators."""

    def __init__(self):
        #super(CardModel, self).__init___(model_name='CardModel')
        self.num_classes = 2
        self.build_model()
        return

    def build_model(self):
        """Initialize the architecture with random weights and compile it."""
        self.arch = MobileNet(weights=None,
                              include_top=True,
                              classes=self.num_classes)

        # RMSprop optimization with categorical cross-entropy loss.
        self.arch.compile(optimizer='rmsprop',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        return

    def train(self,
              epochs,
              train_images,
              train_labels,
              val_data,
              batch_size,
              initial_epoch=None):
        """Train from generator-style inputs and return the History.

        BUG FIX: the original body referenced undefined names
        ``train_generator`` / ``validation_generator`` (a guaranteed
        NameError at call time). It now uses the ``train_images`` and
        ``val_data`` arguments, assumed to be Keras directory iterators
        exposing ``.samples`` — confirm against callers.
        """
        history = self.arch.fit_generator(
            train_images,
            epochs=epochs,
            steps_per_epoch=train_images.samples // batch_size,
            validation_data=val_data,
            validation_steps=val_data.samples // batch_size)
        return history

    def fit_gen(self, train_dir, val_dir, num_train, num_val, batch_size,
                epochs):
        """Train from image directories; returns the Keras History."""
        gen = ImageDataGenerator(preprocessing_function=preprocess_input)
        train_generator = gen.flow_from_directory(train_dir,
                                                  target_size=(224, 224),
                                                  batch_size=batch_size,
                                                  class_mode='categorical')
        val_generator = gen.flow_from_directory(val_dir,
                                                target_size=(224, 224),
                                                batch_size=batch_size,
                                                class_mode='categorical')
        train_history = self.arch.fit_generator(
            train_generator,
            steps_per_epoch=(num_train // batch_size),
            epochs=epochs,
            validation_data=val_generator,
            validation_steps=(num_val // batch_size))

        return train_history

    def save(self, output_dir):
        """Persist the full model under ``output_dir``/model.h5."""
        model_path = os.path.join(output_dir, 'model.h5')
        self.arch.save(model_path)
        return

    def load(self, input_path):
        """Load the model from ``input_path``/model.h5.

        BUG FIX: the original joined against an undefined ``output_dir``
        instead of the ``input_path`` parameter.
        """
        model_path = os.path.join(input_path, 'model.h5')
        self.arch = load_model(model_path)
        return

    def __repr__(self):
        return str(self.arch.summary())
# Example #22
from keras.applications.mobilenet import MobileNet
import pycbc.psd
import gmind.generator

# Gaussian-noise + signal Q-image generator for gravitational-wave data.
p = pycbc.psd.aLIGOZeroDetHighPower(2**19, 1.0 / 16, 15)
s = gmind.generator.GaussianNoiseGenerator(16, 1024, p, 20)
p = gmind.generator.WFParamGenerator(["examples/test.ini"])
w = gmind.generator.GaussianSignalQImageGenerator(s, p, 3, (224, 224), q=20)

# NOTE(review): ``w.next()`` is the Python-2 iterator protocol; if the
# generator class defines ``__next__``, prefer ``next(w)`` — confirm.
i, t = w.next()
# BUG FIX: Python-2 print statement converted to a print() call.
print(i.shape, t)
model = MobileNet(classes=1, weights=None)
model.compile(loss='mean_squared_error',
              optimizer='adagrad',
              metrics=['accuracy'])

model.fit_generator(w, 100, epochs=1)
# Example #23
    alpha=0.1,
    input_tensor=None,
    input_shape=input_shape,
    pooling='avg',
    classes=2)

if modelContinueFlag:
    model.load_weights(modelContinueWeigthsFile, by_name=False)

model.summary()
model.compile(
    loss='categorical_crossentropy',
    #model.compile(loss='categorical_hinge',
    optimizer=Adam(lr=startingLeraningRate,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=0.0),
    #optimizer=RMSprop(lr=startingLeraningRate, rho=0.9, epsilon=None, decay=0.0),
    #optimizer=SGD(lr=startingLeraningRate, decay=1e-6, momentum=0.9, nesterov=True),
    metrics=['accuracy', 'categorical_accuracy']
)  # default lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0 or 0.00005

tensorboard = TensorBoard(
    log_dir=os.path.join(modelDir, "{}".format(runningTime)))
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.5,
                              patience=50,
                              min_lr=0.000001,
                              verbose=1)
earlyStop = EarlyStopping(monitor='val_loss',
                          min_delta=0.0001,