Example #1
def train(batch, epochs, num_classes, size, weights, tclasses, tflite):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
        tflite: Boolean, Whether to convert the final model to a TFLite model.
    """

    train_generator, validation_generator, count1, count2 = generate(
        batch, size)

    if weights:
        if tclasses:
            print("fine tunning")
            model = MobileNetv2((size, size, 3), tclasses)
            model = fine_tune(num_classes, weights, model)
        else:
            print("Loading Weights")
            model = MobileNetv2((size, size, 3), num_classes)
            model = keep_training(weights, model)

    else:
        model = MobileNetv2((size, size, 3), num_classes)

    opt = Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    hist = model.fit_generator(train_generator,
                               validation_data=validation_generator,
                               steps_per_epoch=count1 // batch,
                               validation_steps=count2 // batch,
                               epochs=epochs,
                               callbacks=create_callbacks())

    generate_report(model, validation_generator, batch, count2)

    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    print("Saving weights")
    model.save_weights('model/weights.h5')

    model_name = "mobile_model.h5"

    if tflite:
        print("Saving model")
        model.save(model_name)
        print("Converting model")
        convert_to_lite(model_name)
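Example #1 calls a convert_to_lite() helper that is not shown. A minimal sketch of what it might do, assuming the TF 1.x tf.lite converter API and the .h5 model file already saved to disk:

import tensorflow as tf

def convert_to_lite(model_name):
    # Hypothetical implementation: load the saved .h5 Keras model and write a .tflite file next to it.
    converter = tf.lite.TFLiteConverter.from_keras_model_file(model_name)
    tflite_model = converter.convert()
    with open(model_name.replace('.h5', '.tflite'), 'wb') as f:
        f.write(tflite_model)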
Example #2
def train(batch, epochs, num_classes, size, weights, tclasses, train_path, valid_path):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
        train_path: String, training data path.
        valid_path: String, validation data path.
    """

    train_generator, validation_generator, count1, count2 = generate(batch, size, train_path, valid_path)

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
    else:
        model = MobileNetv2((size, size, 3), num_classes)

    opt = Adam()
    earlystop = EarlyStopping(monitor='val_acc', patience=30, verbose=0, mode='auto')
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=epochs,
        callbacks=[earlystop])
    # rootDir='/home/eric/data/scene'
    # for i in range(10):      
    #     test_path='group'+str(i)+'_test.txt'
    #     testfilepath=os.path.join(rootDir,test_path)
    #     testDf=pd.read_csv(testfilepath,header=None)  # load the test-list txt (its separator is '\t')
    #     testDf.rename(columns={0:"filename",1:'class'},inplace=True)
    #     datagen3 = ImageDataGenerator(rescale=1. / 255)
    #     validation_generator = datagen3.flow_from_dataframe(
    #                                                         dataframe=testDf,
    #                                                             directory=rootDir,
    #                                                             x_col="filename",
    #                                                             y_col="class",
    #                                                             subset="training",
    #                                                             #   classes=labels,
    #                                                             target_size=[size, size],
    #                                                             batch_size=batch,
    #                                                             class_mode='categorical')
        
    #     result=model.evaluate_generator(validation_generator,steps=testDf.shape[0]//batch)
    #     print(result)

    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights('model/weights_'+train_path.split('.')[0]+'.h5')
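These examples all rely on a fine_tune() helper whose source is not included. A minimal sketch under the assumption that the pre-trained head is a single softmax Dense layer that gets replaced for the new class count:

from keras.layers import Dense
from keras.models import Model

def fine_tune(num_classes, weights, model):
    # Load weights trained with `tclasses` outputs, then attach a fresh softmax head.
    model.load_weights(weights)
    features = model.layers[-2].output  # assumed: the layer feeding the old classifier
    outputs = Dense(num_classes, activation='softmax', name='new_softmax')(features)
    return Model(inputs=model.input, outputs=outputs)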
Example #3
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
    """

    # For cuDNN compatibility: allow GPU memory growth
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.log_device_placement = True
    sess = tf.Session(config=config)
    set_session(sess)

    train_generator, validation_generator, count1, count2 = generate(
        batch, size)

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
    else:
        model = MobileNetv2((size, size, 3), num_classes)

    opt = Adam()
    earlystop = EarlyStopping(monitor='val_acc',
                              patience=30,
                              verbose=0,
                              mode='auto')
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    hist = model.fit_generator(train_generator,
                               validation_data=validation_generator,
                               steps_per_epoch=count1 // batch,
                               validation_steps=count2 // batch,
                               epochs=epochs,
                               callbacks=[earlystop])

    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights('model/weights.h5')
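The generate() helper used by most of these examples is likewise not shown. A minimal sketch assuming a directory-per-class layout under hypothetical data/train and data/validation folders:

from keras.preprocessing.image import ImageDataGenerator

def generate(batch, size):
    # Hypothetical data pipeline: rescaled images flowing from class subdirectories.
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    valid_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        'data/train', target_size=(size, size), batch_size=batch, class_mode='categorical')
    validation_generator = valid_datagen.flow_from_directory(
        'data/validation', target_size=(size, size), batch_size=batch, class_mode='categorical')
    # count1/count2 are the sample counts used for steps_per_epoch / validation_steps.
    return train_generator, validation_generator, train_generator.samples, validation_generator.samples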
Example #4
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
    """

    train_generator, validation_generator, count1, count2 = generate(batch, size)

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
    else:
        model = MobileNetv2((size, size, 3), num_classes)

    opt = Adam()
    earlystop = EarlyStopping(monitor='val_acc', patience=30, verbose=0, mode='auto')
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    class Save(keras.callbacks.Callback):
        def __init__(self):
            super(Save, self).__init__()
            self.max_acc = 0.0

        def on_epoch_begin(self, epoch, logs=None):
            pass

        def on_epoch_end(self, epoch, logs=None):
            self.val_acc = logs["val_acc"]
            if epoch != 0:
                # if self.val_acc > self.max_acc and self.val_acc > 0.8:
                if self.val_acc > self.max_acc:
                    model.save("./log/model/keras_model_" + str(epoch) + "_acc=" + str(self.val_acc) + ".h5")
                    self.max_acc = self.val_acc

    save_function = Save()
    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=epochs,
        callbacks=[TensorBoard(log_dir='./log'), save_function, earlystop])
 
    #df = pd.DataFrame.from_dict(hist.history)
    #df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights('log/model/weights.h5')
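The hand-rolled Save callback in Example #4 mirrors what Keras's built-in ModelCheckpoint already does; a sketch of the equivalent setup (the filepath pattern here is illustrative):

from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(
    filepath='./log/model/keras_model_{epoch:02d}_acc={val_acc:.4f}.h5',
    monitor='val_acc',        # same quantity the Save callback tracks
    save_best_only=True,      # write only when val_acc improves, like the max_acc check
    verbose=1)
# model.fit_generator(..., callbacks=[TensorBoard(log_dir='./log'), checkpoint, earlystop])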
Example #5
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
    """

    train_generator, validation_generator, count1, count2 = generate(
        batch, size)

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
    else:
        model = MobileNetv2((size, size, 3), num_classes, mode=args.mode)

    print(model.summary())
    model = multi_gpu_model(model, gpus=gpu_count)

    opt = Adam()
    earlystop = EarlyStopping(monitor='val_acc',
                              patience=30,
                              verbose=0,
                              mode='auto')
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    hist = model.fit_generator(train_generator,
                               validation_data=validation_generator,
                               steps_per_epoch=count1 // batch,
                               validation_steps=count2 // batch,
                               epochs=epochs,
                               callbacks=[earlystop])

    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights(args.save_model)
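One caveat with Example #5: keras.utils.multi_gpu_model returns a wrapper around the original model, and the usual pattern is to keep a handle on the base model and save its weights rather than the wrapper's. A sketch of that pattern, reusing the example's own names (MobileNetv2, Adam, size, num_classes, gpu_count, and args come from its surrounding script):

from keras.utils import multi_gpu_model

base_model = MobileNetv2((size, size, 3), num_classes)
parallel_model = multi_gpu_model(base_model, gpus=gpu_count)
parallel_model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
# ... train with parallel_model.fit_generator(...) ...
base_model.save_weights(args.save_model)  # weights from the single-device graph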
Example #6
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
    """

    train_data, validation_data, test_data = get_mnist_dataset()

    if weights:
        model = MobileNetv2((size, size, 1), tclasses)
        model = fine_tune(num_classes, weights, model)
    else:
        model = MobileNetv2((size, size, 1), num_classes)

    opt = RMSprop()
    earlystop = EarlyStopping(monitor='val_acc',
                              patience=10,
                              verbose=1,
                              mode='auto')
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    history = model.fit(train_data[0],
                        train_data[1],
                        validation_data=validation_data,
                        batch_size=500,
                        epochs=epochs,
                        shuffle=True,
                        callbacks=[earlystop])

    if not os.path.exists('model'):
        os.makedirs('model')
    df = pd.DataFrame.from_dict(history.history)
    df.to_csv('model/history.csv', encoding='utf-8', index=False)
    model.save_weights('model/weights.h5')

    predictions = model.predict(test_data, verbose=1)

    return history
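Example #6 depends on a get_mnist_dataset() helper that is not shown. A hypothetical sketch assuming 28x28 grayscale inputs, one-hot labels, and the official test set split into a validation half and a held-out half for the final predict() call:

import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical

def get_mnist_dataset():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train[..., np.newaxis].astype('float32') / 255.0
    x_test = x_test[..., np.newaxis].astype('float32') / 255.0
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    # first half of the test images for validation, second half for predict()
    return (x_train, y_train), (x_test[:5000], y_test[:5000]), x_test[5000:]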
Example #7
def model_feed(size, num_classes):
    '''Wrapper for the model creation.

    # Arguments
        num_classes: Integer, The number of classes for the model.
        size: Tuple, The input shape of the data.

    # Returns
        The model.
    '''

    return MobileNetv2(size, num_classes)
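A short usage note for the wrapper above; the input shape and class count are illustrative:

# Build a MobileNetV2 classifier through the wrapper and inspect it.
model = model_feed((224, 224, 3), 10)
model.summary()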
Example #8
def train_factory(MODEL_NAME):

    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config)) 
    # model = CCR(input_shape=(img_width,img_height,1),classes=charset_size)
    # model = LeNet.build(width=img_width, height=img_height, depth=1, classes=charset_size)
    # model = ResNet.build_model(SHAPE=(img_width,img_height,1), classes=charset_size)

    # vgg net 5
    # MODEL_PATH='trained_model/vggnet5.hdf5'
    # model=VGGNet5.vgg(input_shape=(img_width,img_height,1),classes=charset_size)

    model=None
    if(MODEL_NAME=='inception_resnet_v2'):
        model=InceptionResNetV2.inception_resnet_v2(input_shape=(img_width,img_height,3),classes=charset_size,weights='./trained_model/inception_resnet_v2/inception_resnet_v2.12-0.8244.hdf5')
    elif(MODEL_NAME=='xception'):
        # xception
        model=Xception.Xception((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='mobilenet_v2'):
        #mobilenet v2
        model=MobileNetv2.MobileNet_v2((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='inception_v3'):
        # inception v3
        model=Inception_v3.inception((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='vgg16'):
        model=VGGNet.vgg(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='vgg19'):
        model=VGG19.VGG19(input_shape=(img_width,img_height,3),classes=charset_size,weights='weights/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
    elif(MODEL_NAME=='resnet50'):
        model=ResNet50.resnet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='inception_v4'):
        model=inception_v4.inception_v4(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet34'):
        model=ResNet34.ResNet34(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='densenet121'):
        model=DenseNet.DenseNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='densenet161'):
        model=DenseNet.DenseNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='shufflenet_v2'):
        model=ShuffleNetV2.ShuffleNetV2(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet_attention_56'):
        model=Resnet_Attention_56.Resnet_Attention_56(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='squeezenet'):
        model=SqueezeNet.SqueezeNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='seresnet50'):
        model=SEResNet50.SEResNet50(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='se_resnext'):
        model=SEResNext.SEResNext(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='nasnet'):
        model=NASNet.NASNetLarge(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='custom'):
        model=Custom_Network.Custom_Network(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet18'):
        model=ResnetBuilder.build_resnet_18(input_shape=(img_width,img_height,3),num_outputs=charset_size)

    print(model.summary())
    train(model, MODEL_NAME)
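The long elif chain in Example #8 can be collapsed into a name-to-constructor table. A sketch using a few of the constructors from the chain above, with img_width, img_height, and charset_size taken from that example's globals (the remaining entries follow the same pattern):

MODEL_BUILDERS = {
    'xception': lambda: Xception.Xception((img_width, img_height, 3), classes=charset_size),
    'mobilenet_v2': lambda: MobileNetv2.MobileNet_v2((img_width, img_height, 3), classes=charset_size),
    'inception_v3': lambda: Inception_v3.inception((img_width, img_height, 3), classes=charset_size),
    'resnet50': lambda: ResNet50.resnet(input_shape=(img_width, img_height, 3), classes=charset_size),
    # ...add the other architectures the same way...
}

def train_factory(MODEL_NAME):
    # Look up the constructor instead of walking an elif chain.
    builder = MODEL_BUILDERS.get(MODEL_NAME)
    if builder is None:
        raise ValueError('Unknown model name: %s' % MODEL_NAME)
    model = builder()
    print(model.summary())
    train(model, MODEL_NAME)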
Example #9
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of training samples per batch.
        epochs: Integer, The number of training epochs.
        num_classes: Integer, The number of classes in the dataset.
        size: Integer, image size.
        weights: String, The pre-trained model weights.
        tclasses: Integer, The number of classes of the pre-trained model.
    """

    train_generator, validation_generator, count1, count2 = generate(batch, size)

    train_generator = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        brightness_range=(1, 1.3),
        horizontal_flip=True)
    
    directory="/home/gnss/Desktop/garbage_train"
    train_generator = MixupImageDataGenerator(generator=train_generator,
                                              directory=directory,
                                              batch_size=32,
                                              img_height=224,
                                              img_width=224,
                                              subset='training')

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
        print(num_classes)
    else:
        model = MobileNetv2((size, size, 3), num_classes)
        print(num_classes)

    opt = Adam(1e-2)
    # earlystop = EarlyStopping(monitor='val_acc', patience=30, verbose=0, mode='auto')
    tensorboard = TensorBoard('/home/gnss/Desktop/MobileNetV2/logs',write_images=True)
    # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,patience=3, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)

    warmup_epoch = 5
    
    warm_up_lr = WarmUpCosineDecayScheduler(learning_rate_base=1e-2,
                                            total_steps=count1 // batch)

    checkpointer = ModelCheckpoint(filepath='/home/gnss/Desktop/MobileNetV2/mobilenet.h5',verbose=1,save_best_only=True)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    

    # lr=XTensorBoard('/home/gnss/Desktop/MobileNetV2/logslr')

    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=epochs,
        callbacks=[warm_up_lr, tensorboard, checkpointer])
    '''
    learning_rate_base=1e-2
    total_steps=count1 // batch
    plt.plot(warm_up_lr.learning_rates)
    plt.xlabel('Step', fontsize=20)
    plt.ylabel('lr', fontsize=20)
    plt.axis([0, total_steps, 0, learning_rate_base*1.1])
    plt.xticks(np.arange(0, total_steps, 50))
    plt.grid()
    plt.title('Cosine decay with warmup', fontsize=20)
    plt.show()
    '''
    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights('model/weights.h5')
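Example #9 uses a WarmUpCosineDecayScheduler callback whose implementation is not included. A hypothetical minimal version with the same constructor arguments, which also records the per-step learning rates that the commented-out plotting block expects:

import numpy as np
from keras import backend as K
from keras.callbacks import Callback

class WarmUpCosineDecayScheduler(Callback):
    def __init__(self, learning_rate_base, total_steps, warmup_steps=0):
        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        self.warmup_steps = warmup_steps
        self.global_step = 0
        self.learning_rates = []

    def on_batch_begin(self, batch, logs=None):
        # Linear warmup for the first warmup_steps, then cosine decay toward zero.
        if self.global_step < self.warmup_steps:
            lr = self.learning_rate_base * float(self.global_step + 1) / max(1, self.warmup_steps)
        else:
            progress = float(self.global_step - self.warmup_steps) / max(1, self.total_steps - self.warmup_steps)
            lr = 0.5 * self.learning_rate_base * (1 + np.cos(np.pi * min(1.0, progress)))
        K.set_value(self.model.optimizer.lr, lr)
        self.global_step += 1
        self.learning_rates.append(lr)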
# Inference with the trained weights
from keras.preprocessing import image
import numpy as np
from keras.models import Model, load_model
from mobilenet_v2 import MobileNetv2

model = MobileNetv2((224,224, 3), 34)
model.load_weights("./model/weights.h5")

img_path = '/home/eric/data/flower_photos/roses/353897245_5453f35a8e.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# x = preprocess_input(x)
x = x / 255.0
print(x[0])

preds = model.predict(x)
print(preds)
# preds=np.argmax(preds)
# # print('Predicted:', decode_predictions(preds))
# print(preds)
# print(label_dict[str(preds)])
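To turn the raw softmax output above into a class label, the usual step is an argmax plus a saved index-to-name mapping, for instance the training generator's class_indices; label_dict below is assumed to be such a saved mapping.

# Decode the prediction: pick the most probable class index and, if available, map it to a name.
pred_idx = int(np.argmax(preds, axis=1)[0])
print('Predicted class index:', pred_idx)
# print('Predicted label:', label_dict[pred_idx])  # label_dict: a hypothetical saved index-to-name mapping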