Example #1
# assumed imports; cfg, dataloader and VGG16 come from the surrounding project
import os
import shutil
import tensorflow as tf

def main():
    # cfg.log_dir is where the trained model is saved;
    # clear out any files left over from a previous run
    if os.path.exists(cfg.log_dir):
        shutil.rmtree(cfg.log_dir)

    if not os.path.exists(cfg.log_dir):  # create the logs folder if it does not exist
        os.mkdir(cfg.log_dir)

    # Open the dataset txt and split it into train/val sets
    dataHandler = dataloader()
    train_db, val_db = dataHandler.data_split()

    # Build the VGG16 model
    model = VGG16()

    # Checkpointing: save once every 3 epochs
    checkpoint_period1 = tf.keras.callbacks.ModelCheckpoint(
        cfg.log_dir + "/" +
        'Model-Weight-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}-val_accuracy{val_accuracy:.3f}.h5',
        monitor='accuracy',  # val_accuracy is the more common choice
        save_weights_only=False,
        save_best_only=True,
        period=3)

    # Learning-rate schedule: if accuracy has not improved for 3 epochs,
    # halve the learning rate and keep training
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='accuracy',  # training accuracy
        factor=0.5,
        patience=3,
        verbose=1)

    # Early stopping: once val_loss stops improving, the model is essentially trained and we can stop
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                      min_delta=0,
                                                      patience=10,
                                                      verbose=1)

    # Cross-entropy loss
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=cfg.learning_rate),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy', 'Recall', 'AUC'])

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        len(train_db), len(val_db), cfg.batch_size))

    # Start training
    model.fit_generator(
        dataHandler.data_loader(train_db, cfg.batch_size),
        steps_per_epoch=max(1, int(len(train_db) // cfg.batch_size)),
        validation_data=dataHandler.data_loader(val_db, cfg.batch_size),
        validation_steps=max(1, int(len(val_db) // cfg.batch_size)),
        epochs=cfg.epochs,
        initial_epoch=0,
        callbacks=[checkpoint_period1, reduce_lr, early_stopping])

    model.save_weights(cfg.log_dir + '/last1.h5')
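Note: the period argument of ModelCheckpoint and model.fit_generator are deprecated in recent tf.keras releases. The sketch below is one possible modern equivalent, assuming the same cfg, dataHandler and callback objects as above; save_freq counts batches rather than epochs, so "every 3 epochs" becomes 3 * steps_per_epoch.

# modern tf.keras equivalent (sketch; assumes cfg/dataHandler/model from above)
steps_per_epoch = max(1, len(train_db) // cfg.batch_size)
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    cfg.log_dir + '/Model-Weight-ep{epoch:03d}.h5',
    monitor='val_accuracy',
    save_best_only=True,
    save_freq=3 * steps_per_epoch)  # "every 3 epochs" expressed in batches
model.fit(dataHandler.data_loader(train_db, cfg.batch_size),
          steps_per_epoch=steps_per_epoch,
          validation_data=dataHandler.data_loader(val_db, cfg.batch_size),
          validation_steps=max(1, len(val_db) // cfg.batch_size),
          epochs=cfg.epochs,
          callbacks=[checkpoint, reduce_lr, early_stopping])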
Example #2
# assumed imports for this snippet; VGG16 comes from the surrounding project
import pickle
from keras.preprocessing.image import ImageDataGenerator

def train_model():
  train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode="nearest")

  test_datagen = ImageDataGenerator(rescale=1./255)
  # load train data
  training_set = train_datagen.flow_from_directory(
    'dataset/train',
    target_size=(64, 64),
    batch_size=32,
    shuffle=True,
    seed=7,
    class_mode='categorical')
  # load validation data
  validation_set = test_datagen.flow_from_directory(
    'dataset/validation',
    target_size=(64, 64),
    batch_size=32,
    shuffle=True,
    seed=7,
    class_mode='categorical')

  print(validation_set.class_indices)
  print(validation_set.classes)
  print(validation_set.num_classes)

  imgs, labels = next(training_set)  # peek at one batch
  print(labels)

  # persist the class-index mapping so predictions can be mapped back to labels
  with open('label_match.pickle', 'wb') as fileObject:
    pickle.dump(validation_set.class_indices, fileObject)


  nClasses = validation_set.num_classes

  print('nClasses', nClasses)
  model = VGG16().create_model((64, 64, 3), nClasses)
  model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
  model.fit_generator(training_set,
    steps_per_epoch=20,
    epochs=80,  # Keras 2 renamed nb_epoch to epochs
    validation_data=validation_set,
    verbose=1)

  # save the model to disk
  print("[INFO] serializing network...")
Example #3
def main():
    # get_args, process_config, DataLoader, VGG16, Logger and Trainer are
    # assumed to come from the surrounding project; tf is TensorFlow 1.x
    args = get_args()
    m_config = process_config(args.config)

    config = tf.ConfigProto(log_device_placement=True)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        # create_dirs([m_config.summary_dir, m_config.checkpoint_dir])
        data = DataLoader(config=m_config)
        model = VGG16(data_loader=data, config=m_config)
        logger = Logger(sess=sess, config=m_config)

        trainer = Trainer(
            sess=sess,
            model=model,
            data=data,
            config=m_config,
            logger=logger)

        trainer.train()
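tf.ConfigProto and tf.Session are TensorFlow 1.x APIs. If only TensorFlow 2 is installed, one common way to run this snippet unchanged is the compat shim below (an assumption about the environment, not part of the original example):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()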
Example #4
import numpy as np
import utils
import cv2
from keras import backend as K
from model.VGG16 import VGG16

K.set_image_dim_ordering('tf')
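# NOTE: set_image_dim_ordering is the Keras 1 name; under Keras 2 the
# equivalent call (an assumption about the installed version) is:
# K.set_image_data_format('channels_last')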

if __name__ == "__main__":
    model = VGG16(2)
    model.load_weights("./logs/middle_one.h5")
    img = cv2.imread("./data/image/train/cat.1.jpg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
    img = img / 255
    img = np.expand_dims(img, axis=0)
    img = utils.resize_image(img, (224, 224))
    print(utils.print_answer(np.argmax(model.predict(img))))
Example #5
# This snippet begins mid-script. The setup below (imports, transform,
# train_dataset and the hyper-parameter values) is an assumed reconstruction;
# VGG16 is assumed to come from the surrounding project.
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim

transform = transforms.Compose([transforms.ToTensor()])
batch_size = 128        # assumed value
learning_rate = 1e-4    # assumed value
device = 'cuda' if torch.cuda.is_available() else 'cpu'

train_dataset = torchvision.datasets.CIFAR10(root="../../data",
                                             download=True,
                                             train=True,
                                             transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root="../../data",
                                            download=True,
                                            train=False,
                                            transform=transform)

# DataLoader
train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=8)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, num_workers=8)

# Dataset classes
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


########## 2. Model ##########
# Create an instance
vgg16 = VGG16()
if device == 'cuda':
    vgg16.cuda()

# Loss function
criterion = nn.CrossEntropyLoss()

# Optimizer
optimizer = optim.Adam(vgg16.parameters(), lr=learning_rate)


########## 3. Train and Test ##########
def train(net, num_epoch):
    """
    Train the model.
    :param net: defined model instance
    :param num_epoch: number of passes over the training data
    """
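    # Minimal loop body (a sketch; the original snippet is truncated at the
    # docstring). Assumes the train_loader, criterion, optimizer and device
    # defined earlier in this example.
    for epoch in range(num_epoch):
        net.train()
        running_loss = 0.0
        for images, labels in train_loader:
            if device == 'cuda':
                images, labels = images.cuda(), labels.cuda()
            optimizer.zero_grad()
            loss = criterion(net(images), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('epoch %d/%d  loss %.4f' % (epoch + 1, num_epoch, running_loss / len(train_loader)))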
Example #6

# assumed imports for this snippet
import numpy as np
from PIL import Image
from model.VGG16 import VGG16

def get_classes():
    with open("./data/model/index_word.txt", "r", encoding='utf-8') as f:
        synset = [l.split(";")[1].replace("\n", "") for l in f.readlines()]
    return synset


if __name__ == "__main__":
    #---------------------------------------------#
    #   Number of classes to distinguish
    #   cat + dog = 2
    #---------------------------------------------#
    NCLASSES = 2

    model = VGG16(NCLASSES)
    #--------------------------------------------------#
    #   Load the weights; trained weights are saved in the
    #   logs folder, and the matching file must be loaded here.
    #   Point model_path at your own trained weights;
    #   the path below is only an example.
    #--------------------------------------------------#
    model.load_weights("./logs/middle_one.h5")

    img = Image.open("./data/image/train/cat.0.jpg")
    img = img.resize((224, 224), Image.BICUBIC)
    img = np.expand_dims(np.array(img) / 255, axis=0)

    classes = get_classes()
    print(classes[np.argmax(model.predict(img)[0])])
Example #7
# method excerpt from a wrapper class around VGG16
def __init__(self, class_num=1000, use_layer=8, batch=32):
    self.class_num = class_num
    self.use_layer = use_layer
    self.batch_size = batch
    self.vgg = VGG16(self.class_num, training=False)