Example n. 1
import os
import shutil
import sys

from keras.applications.mobilenet import MobileNet
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator


def main():
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        imgmove = sys.argv[2]
        if imgmove == 'False':
            imgmove = False
        else:
            imgmove = True
        lr = float(sys.argv[3])
    else:
        print('Parameters error')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation
    if imgmove:
        os.mkdir('trainval_' + superclass)
        os.mkdir(trainpath)
        os.mkdir(valpath)
        sourcepath = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
        categories = os.listdir(sourcepath)
        for eachclass in categories:
            if eachclass[0] == superclass[0]:
                print(eachclass)
                os.mkdir(trainpath + '/' + eachclass)
                os.mkdir(valpath + '/' + eachclass)
                imgs = os.listdir(sourcepath + '/' + eachclass)
                idx = 0
                for im in imgs:
                    if idx % 8 == 0:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            valpath + '/' + eachclass + '/' + im)
                    else:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            trainpath + '/' + eachclass + '/' + im)
                    idx += 1

    # Train and validation ImageDataGenerator
    batchsize = 32

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=15,
                                       width_shift_range=5,
                                       height_shift_range=5,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(trainpath,
                                                        target_size=(224, 224),
                                                        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(valpath,
                                                       target_size=(224, 224),
                                                       batch_size=batchsize)

    # Train MobileNet
    model = MobileNet(include_top=True,
                      weights=None,
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=classNum[superclass[0]])
    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)

    weightname = 'model/mobile_' + superclass + '_wgt.h5'

    checkpointer = ModelCheckpoint(weightname,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=2)
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        validation_data=valid_generator,
                        validation_steps=validation_steps,
                        callbacks=[checkpointer])
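
The script above never calls main(); a minimal entry-point sketch (the script name and argument values are hypothetical):

# Hypothetical invocation: python train_mobilenet.py Animals True 0.01
# argv order matches main(): superclass, imgmove flag, learning rate.
if __name__ == '__main__':
    main()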
Example n. 2

# Assemble the callback list: LR reduction and early stopping, both on val_accuracy.
callbacks = []
callbacks.append(ReduceLROnPlateau(monitor='val_accuracy',
                                   factor=0.2,
                                   patience=10,
                                   min_lr=0.0000001))
callbacks.append(EarlyStopping(monitor='val_accuracy', patience=15))

# compile model
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(momentum=0.9),
              metrics=['accuracy'])

# create fit function
history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              steps_per_epoch=len(x_train) // batch_size,
                              epochs=n_epochs,
                              verbose=2,
                              callbacks=callbacks,
                              validation_data=(x_test, y_test))

from matplotlib import pyplot as plt
import numpy as np


def save_plot(training_history, num_epochs):
    # saves training progress logs in plots
    # summarize and save loss plot
    plt.subplot(211)
    plt.plot(training_history.history['loss'], color='purple', label='train')
    plt.plot(training_history.history['val_loss'], color='green', label='test')
    plt.legend(loc="upper right")
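
save_plot above only draws the loss curves and never writes a file; a hedged usage sketch that persists the figure (the output file name is an assumption):

# Assumes `history` and `n_epochs` from the fit_generator call above.
save_plot(history, n_epochs)
plt.savefig('training_curves.png')  # save_plot itself does not save anything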
Example n. 3
# Unfreeze every layer before compiling.
for layer in model.layers:
    layer.trainable = True

"""# Visualizing the Model"""

model.compile(optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
"""# Modelin Oluşturulması"""

history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              validation_data=(x_test, y_test),
                              steps_per_epoch=len(x_train) // batch_size,
                              epochs=epochs)
"""# Sonuçların Görselleştirilmesi"""

score = model.evaluate(x_test, y_test, verbose=0)
print('Test Loss:', score[0])
print('Test Accuracy:', score[1])

print(history.history.keys())
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
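
Keras 2.2 and earlier log accuracy under 'acc', newer releases under 'accuracy'; a small version-proof guard (a sketch, not part of the original):

# Pick whichever accuracy key this Keras version actually logged.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])
plt.show()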
Example n. 4
import os

from tensorflow import keras

# Train on ImageNet
checkpoint_path = "Mobilenet/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
os.makedirs(checkpoint_dir, exist_ok=True)

cp_callback = keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    verbose=1,
    save_weights_only=True,
    # Save weights, every 1-epoch
    period=1)

csv_logger = keras.callbacks.CSVLogger('MobileNet_training.csv')

print("Starting to train Modified MobileNet...")
epochs = 5

model.fit_generator(imagenet_generator(train_dataset,
                                       batch_size=Nbatch,
                                       is_training=True),
                    steps_per_epoch=Ntrain // Nbatch,
                    epochs=epochs,
                    validation_data=imagenet_generator(validation_dataset,
                                                       batch_size=Nbatch),
                    validation_steps=Nvalidation // Nbatch,
                    verbose=1,
                    callbacks=[cp_callback, csv_logger])

model.save("MobileNet.h5")
Example n. 5
import os

from keras import backend as K
from keras.applications.mobilenet import MobileNet
from keras.layers import Input
from keras.preprocessing.image import ImageDataGenerator


def main(args):
    train_data = args.train_data
    valid_data = args.valid_data
    nb_train_samples = args.train_samples
    nb_validation_samples = args.valid_samples
    model_path = args.model_path
    model_weights_path = args.model_weights_path
    num_classes = args.num_classes
    batch_size = args.batch_size
    epochs = args.epochs
    loss = args.loss
    optimizer = args.optimizer
    img_width, img_height = args.target_size

    if not os.path.isdir(train_data):
        raise EnvironmentError(f'--train-data {train_data} should exist')

    if not os.path.isdir(valid_data):
        raise EnvironmentError(f'--valid-data {valid_data} should exist')

    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    model_params = {
        'input_tensor': Input(shape=input_shape),
        'classes': num_classes,
        'weights': None,
    }

    print(
        f'Start training mobile net for {epochs} epochs.',
        f'{nb_train_samples} train samples, {nb_validation_samples} valid samples'
    )

    model = MobileNet(**model_params)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
    )

    validation_generator = test_datagen.flow_from_directory(
        valid_data,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
    )

    model.fit_generator(train_generator,
                        steps_per_epoch=nb_train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=nb_validation_samples // batch_size,
                        verbose=1)

    model.save(model_path)
    model.save_weights(model_weights_path)

    print('Model saved.')
    return 0
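
main() only reads attributes off args, so any namespace object works; a possible argparse front end (every flag name beyond --train-data and --valid-data, which appear in the error messages, is an assumption):

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-data', required=True)
    parser.add_argument('--valid-data', required=True)
    parser.add_argument('--train-samples', type=int, required=True)
    parser.add_argument('--valid-samples', type=int, required=True)
    parser.add_argument('--model-path', default='model.h5')
    parser.add_argument('--model-weights-path', default='weights.h5')
    parser.add_argument('--num-classes', type=int, required=True)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--loss', default='categorical_crossentropy')
    parser.add_argument('--optimizer', default='rmsprop')
    parser.add_argument('--target-size', type=int, nargs=2, default=(224, 224))
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())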
Example n. 6

# mse = losses.mean_squared_error
adam = Adam(lr=0.001)

model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

# Fit the model
# f = open("output/log.csv","w+")
# csv_logger = CSVLogger('/output/log.csv', append=True, separator=',')
tb_callback = TensorBoard(log_dir='/output/logs',
                          histogram_freq=0,
                          batch_size=batch_size)
# model.fit(train_features, train_labels, epochs=128, batch_size=batch_size,  verbose=2, callbacks=[csv_logger])
model.fit_generator(train_generator,
                    epochs=150,
                    steps_per_epoch=(1400 // batch_size) + 1,
                    verbose=2,
                    callbacks=[tb_callback])
model.save("/output/mobnet.h5")
score, acc = model.evaluate_generator(test_generator,
                                      steps=(550 // batch_size) + 1)
# calculate predictions
# pred = model.predict(test_features)
# print(test_labels.shape,pred.shape)
# print(test_labels[0],pred[0])
target_names = ['blade', 'gun', 'others', 'shuriken']
print("Test score: " + str(score))
print("Test accuracy: " + str(acc))

# print(classification_report(test_labels, pred,target_names=target_names))
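
The commented-out classification report can be rebuilt from the generator itself; a hedged sketch, assuming test_generator came from flow_from_directory with shuffle=False:

import numpy as np
from sklearn.metrics import classification_report

pred = model.predict_generator(test_generator, steps=(550 // batch_size) + 1)
pred = pred[:len(test_generator.classes)]  # drop padding from the final batch
print(classification_report(test_generator.classes,
                            np.argmax(pred, axis=1),
                            target_names=target_names))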
Example n. 7
        # list of idxs repeated by l: np.repeat(idxs, l)
        # idx1item1, idxs1item2, idxs2item1,...
        axis_1_idxs = np.concatenate([
            np.random.choice(list(range(5)), l, replace=False)
            for _ in range(batch_size)
        ])
        arr = img_features[axis_0_idxs,
                           axis_1_idxs, :].reshape(batch_size, l, feature_dim)
        yield [arr[:, :-1, :], arr[:, -1, :]], np.eye(batch_size)


def store_data():
    paths = os.listdir(IMG_DIR)[1:]
    postprocess = lambda x: x.squeeze().mean(axis=0).mean(axis=0)
    names, imgs = embed_outfits(paths, postprocess=postprocess)
    save_outfit_data(names, imgs, DATA_DIR)


if __name__ == '__main__':
    if not os.path.isfile(os.path.join(DATA_DIR, 'outfit_imgs.npy')):
        store_data()
    names, imgs = load_outfit_data(DATA_DIR)
    model = build_model()
    g = batch_generator(imgs[:-64])
    v = batch_generator(imgs[-64:], batch_size=16)
    model.fit_generator(g,
                        steps_per_epoch=1000,
                        epochs=10,
                        validation_data=v,
                        validation_steps=10)
Example n. 8
from keras.applications.mobilenet import MobileNet
import pycbc.psd
import gmind.generator

p = pycbc.psd.aLIGOZeroDetHighPower(2**19, 1.0 / 16, 15)
s = gmind.generator.GaussianNoiseGenerator(16, 1024, p, 20)
p = gmind.generator.WFParamGenerator(["examples/test.ini"])
w = gmind.generator.GaussianSignalQImageGenerator(s, p, 3, (224, 224), q=20)

i, t = next(w)
print(i.shape, t)
model = MobileNet(classes=1, weights=None)
model.compile(loss='mean_squared_error',
              optimizer='adagrad',
              metrics=['accuracy'])

model.fit_generator(w, 100, epochs=1)
Example n. 9
    exit(1)

model.summary()

print("TRAINING PHASE")

decay = lrate / epochs

sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir=logdir, histogram_freq=0, write_graph=True)
tensorboard.set_model(model)

history = model.fit_generator(train_generator,
                              epochs=epochs,
                              validation_data=validation_generator,
                              callbacks=[tensorboard])

model.save("leaf.%s.h5" % runmodel)

print("TESTING PHASE")

evaltest = model.evaluate_generator(test_generator, 1)
for name, val in zip(model.metrics_names, evaltest):
    print(name, val)

print("END", datetime.datetime.now().isoformat())
Example n. 10
    class_mode='categorical')

callbacksList = [tensorboard]

if earlyStopFlag:
    callbacksList.append(earlyStop)
if reduceLRFlag:
    callbacksList.append(reduce_lr)
if modelCheckpointFlag:
    callbacksList.append(modelCheckpoint)

history = model.fit_generator(
    trainGenerator,
    steps_per_epoch=trainSamplesNumber // batchSize * foldAugment,
    epochs=epochs,
    verbose=1,
    callbacks=callbacksList,
    validation_data=validationGenerator,
    class_weight=getClassWeights(trainGenerator.classes),
    shuffle=True,
    validation_steps=validateSamplesNumber // batchSize)

score = model.evaluate_generator(testGenerator, testSamplesNumber)

print('Test loss:', score[0])
print('Test accuracy:', score[1])

# serialize model to JSON
model_json = model.to_json()
with open(modelFile, "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
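
The snippet is cut off after this comment; a plausible completion (the weights file name is an assumption):

model.save_weights("model_weights.h5")  # hypothetical file name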
Example n. 11
# model architecture
model.summary()

# Check point
cp_callback = "./models/weights.{epoch:02d}.h5"
checkpoint = ModelCheckpoint(cp_callback,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
callbacks_list = [checkpoint]

# Train the model
history = model.fit_generator(train_generator,
                              steps_per_epoch=709595 // BATCH_SIZE,
                              epochs=N_EPOCHS,
                              validation_data=validation_generator,
                              validation_steps=72000 // BATCH_SIZE,
                              callbacks=callbacks_list)

# save the model
with open("./models/trongso.json", "w") as json_file:
    json_file.write(model.to_json())

# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
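
Only the architecture JSON is written above, so reloading needs both files; a hedged sketch (the checkpoint epoch in the file name is hypothetical):

from keras.models import model_from_json

with open("./models/trongso.json") as json_file:
    model = model_from_json(json_file.read())
model.load_weights("./models/weights.05.h5")  # hypothetical checkpoint file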
Example n. 12

    '''
    # training logger callback, log in csv file
    record = stamp + method
    csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
    callbacks_list = [loss_history, lrate, csv_logger]
    # train the model
    history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size, 
                                  epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=2)
    '''
    # learning schedule callback
    lrate = LearningRateScheduler(step_decay)
    # training logger callback, log in csv file
    record = stamp + method + '-lr{}'.format(init_lr)
    csv_logger = CSVLogger(record + '.csv', append=True, separator=',')
    callbacks_list = [lrate, csv_logger]
    # train the model
    history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
                                  epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=1)
elif optimizer_chosen == 'B':
    # using adam update as adaptive learning rate method
    method = 'Adam'
    print('\nUsing adam update as adaptive learning rate method')
    adam = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # original lr=0.001
    # compile the model
    # loss = mse can be tried also
    train_start = timer()
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    # training logger callback, log in csv file
    record = stamp + method + '-lr{}'.format(init_lr)
    csv_logger = CSVLogger(record + '.csv', append=True, separator=',')
    # train the model
    history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
                                  epochs=epochs, validation_data=None, callbacks=[csv_logger], verbose=1)
Example n. 13

class MobileNetModel:  #(ClassificationModel):
    def __init__(self):
        #super(CardModel, self).__init___(model_name='CardModel')
        self.num_classes = 2
        self.build_model()

        return

    def build_model(self):
        # Initialize the model with random weights
        self.arch = MobileNet(weights=None,
                              include_top=True,
                              classes=self.num_classes)

        # Compile the model with the RMSprop optimizer and cross-entropy loss
        self.arch.compile(optimizer='rmsprop',
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        return

    def train(self,
              epochs,
              train_images,
              train_labels,
              val_data,
              batch_size,
              initial_epoch=None):
        # Fit directly on the arrays passed to this method.
        history = self.arch.fit(train_images,
                                train_labels,
                                epochs=epochs,
                                batch_size=batch_size,
                                validation_data=val_data,
                                initial_epoch=initial_epoch or 0)
        return history

    def fit_gen(self, train_dir, val_dir, num_train, num_val, batch_size,
                epochs):

        gen = ImageDataGenerator(preprocessing_function=preprocess_input)
        train_generator = gen.flow_from_directory(train_dir,
                                                  target_size=(224, 224),
                                                  batch_size=batch_size,
                                                  class_mode='categorical')
        val_generator = gen.flow_from_directory(val_dir,
                                                target_size=(224, 224),
                                                batch_size=batch_size,
                                                class_mode='categorical')
        train_history = self.arch.fit_generator(
            train_generator,
            steps_per_epoch=(num_train // batch_size),
            epochs=epochs,
            validation_data=val_generator,
            validation_steps=(num_val // batch_size))

        return train_history

    def save(self, output_dir):
        model_path = os.path.join(output_dir, 'model.h5')
        self.arch.save(model_path)
        return

    def load(self, input_path):
        model_path = os.path.join(input_path, 'model.h5')
        self.arch = load_model(model_path)
        return

    def __repr__(self):
        return str(self.arch.summary())
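
A hypothetical usage of MobileNetModel above (directory names and sample counts are assumptions):

m = MobileNetModel()
history = m.fit_gen('data/train', 'data/val',
                    num_train=2000, num_val=500,
                    batch_size=32, epochs=10)
m.save('output')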
Example n. 14
import os

import cv2
import keras
from keras.applications.mobilenet import MobileNet
from keras.callbacks import ReduceLROnPlateau

model = MobileNet()
adam = keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["acc"])
#model.summary()

"""
    Train model.
"""

# Note: min_lr here equals the initial Adam lr (0.001), so this callback
# cannot actually lower the rate; a smaller floor such as 1e-6 is typical.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=3, min_lr=0.001)

example = model.fit_generator(
        train_generator,
        epochs=100,
        validation_data=validation_generator,
        shuffle=True, callbacks=[reduce_lr])


"""
    Test model on test set
"""

name = os.listdir('/content/drive/My Drive/Colab Notebooks/Large_data/Test')
test_images = []
for i in range(len(name)):
  img = cv2.imread('/content/drive/My Drive/Colab Notebooks/Large_data/Test/' + name[i], 0)
  img = cv2.resize(img, (64, 64))
  test_images.append(img)
  #print('Test' + str(i))
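
Note that the stock MobileNet() above expects 224x224 RGB input while these test images are loaded as 64x64 grayscale; a hedged bridging sketch before predicting:

import numpy as np

# Resize to the network's input size and replicate the gray channel to 3 channels.
x_test = np.stack([cv2.resize(im, (224, 224)) for im in test_images])
x_test = np.repeat(x_test[..., np.newaxis], 3, axis=-1) / 255.0
preds = model.predict(x_test)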
Example n. 15
#         model.add(Dense(NUM_CLASSES))
#         model.add(Activation('softmax'))
    model = MobileNet(weights='imagenet', include_top=False)
    x = model.output  # take the last layer
    x = GlobalAveragePooling2D()(x)  # add a GlobalAvgPooling
    x = Dense(1024, activation='relu')(x)

    out = Dense(NUM_CLASSES, activation='softmax')(x)

    model = Model(inputs=model.input, outputs=out)

    model.summary()

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=train_generator.n // _batch,
                                  epochs=_epochs,
                                  verbose=1)

    model.save(model_name)

    plt.plot(history.history['loss'])
    plt.plot(history.history['acc'])
    plt.title('Model history')
    plt.ylabel('Loss / Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Loss', 'Accuracy'], loc='upper left')
    plt.savefig('accuracy_plot.png')

    return model
mod = create_model(PATH, EPOCHS, 32, False)
def load_testing_data(path,color,_shuffle):
    x = blist([])
Example n. 16
import datetime
import os

from keras.applications.mobilenet import MobileNet
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator

# make_vdirs_for_keras is a project-local helper and is assumed to be in scope.


def train_mobilenet(image_class, epochs):

    start_train_time = datetime.datetime.now()
    root_path = '/home/yu/Documents/tensorflow/MURA/MURA-v1.1/'  # the root path of dataset
    train_dirs = os.path.join(
        root_path, 'train/{}'.format(image_class))  # import data for training
    valid_dirs = os.path.join(
        root_path,
        'valid/{}'.format(image_class))  # import data for validation

    if not os.path.exists('v_train/{}'.format(
            image_class)):  # iterate to create symbolic link to data
        make_vdirs_for_keras(train_dirs, 'v_train/{}'.format(image_class))

    if not os.path.exists('v_valid/{}'.format(image_class)):
        make_vdirs_for_keras(valid_dirs, 'v_valid/{}'.format(image_class))

    idg_train_settings = dict(samplewise_center=True,
                              samplewise_std_normalization=True,
                              rotation_range=5,
                              width_shift_range=0.1,
                              height_shift_range=0.1,
                              zoom_range=0.1,
                              horizontal_flip=True,
                              vertical_flip=True)
    idg_train = ImageDataGenerator(**idg_train_settings)

    idg_valid_settings = dict(samplewise_center=True,
                              samplewise_std_normalization=True,
                              rotation_range=0,
                              width_shift_range=0.,
                              height_shift_range=0.,
                              zoom_range=0.0,
                              horizontal_flip=False,
                              vertical_flip=False)
    idg_valid = ImageDataGenerator(**idg_valid_settings)

    train_gen = idg_train.flow_from_directory('v_train/{}'.format(image_class),
                                              follow_links=True,
                                              target_size=(128, 128),
                                              color_mode='grayscale')

    valid_gen = idg_valid.flow_from_directory('v_valid/{}'.format(image_class),
                                              follow_links=True,
                                              target_size=(128, 128),
                                              color_mode='grayscale')

    a, b = next(valid_gen)
    s_net = MobileNet(classes=b.shape[1],
                      weights=None,
                      input_shape=a.shape[1:])
    # s_net.summary()

    s_net.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    # print('Layers: {}, parameters: {}'.format(len(s_net.layers), s_net.count_params()))

    if not os.path.exists('weights'):
        os.mkdir('weights/')
    file_path = "weights/weights.best.hdf5." + image_class
    checkpoint = ModelCheckpoint(file_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    early = EarlyStopping(monitor="val_acc", mode="max", patience=3)
    callbacks_list = [checkpoint, early]  #early

    s_net.fit_generator(
        train_gen,
        steps_per_epoch=30,  # default 30
        validation_data=valid_gen,
        validation_steps=10,
        epochs=epochs,
        callbacks=callbacks_list)

    end_train_time = datetime.datetime.now()
    time_train = end_train_time - start_train_time
    return time_train, s_net, valid_gen
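
A hypothetical invocation for a single MURA body part:

time_train, s_net, valid_gen = train_mobilenet('XR_WRIST', epochs=10)
print('Training took', time_train)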