Code Example #1
import argparse

import numpy as np
import matplotlib.pylab as pl
import matplotlib.cm as cm
from keras import backend as K

from simdat.core import dp_models
from simdat.core import tools

# nice_imshow and make_mosaic are plotting helpers defined elsewhere in
# this script; a sketch of them follows this example.


def main():
    parser = argparse.ArgumentParser(
        description="Simple script to visualize VGG filters."
        )
    parser.add_argument(
        "-p", "--path", type=str, default=None, required=True,
        help="Path where the image is."
        )
    parser.add_argument(
        "-w", "--weights", type=str, default=None, required=True,
        help="Path of the VGG-16 weight file."
        )

    args = parser.parse_args()

    # simdat dependencies
    dp = dp_models.DPModel()
    tl = tools.DATA()

    # basic parameters
    img_width = 224
    img_height = 224

    model = dp.VGG_16(weights_path=args.weights)
    model.summary()
    X, Y, cls, F = dp.prepare_data_test(
        args.path, img_width, img_height, convert_Y=False, y_as_str=False)
    inputs = [K.learning_phase()] + model.inputs

    # Visualize convolution result (after activation)
    def conv_f(X):
        # Prepend 0 to set K.learning_phase() to test mode (0 = test).
        # _conv_f is rebound to each layer's function in the loop below.
        return _conv_f([0] + [X])

    for layer in model.layers:
        lname = dp.is_convolutional(layer)
        if lname is None:
            continue
        # return the output of a certain layer given a certain input
        # http://keras.io/getting-started/faq/
        _conv_f = K.function(inputs, [layer.output])
        C1 = conv_f(X)
        C1 = np.squeeze(C1)
        print("%s shape : " % lname, C1.shape)
        pl.figure(figsize=(15, 15))
        pl.suptitle(lname)
        nice_imshow(pl.gca(), make_mosaic(C1), cmap=cm.binary,
                    name=lname + '.png')
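nice_imshow and make_mosaic are not shown in the snippet; they tile the squeezed feature maps into a grid and draw it. A minimal sketch of what such helpers could look like, assuming the (n, h, w) layout produced by np.squeeze above (the border width, grid logic, and the name keyword are assumptions, not the project's implementation):

import numpy as np
import matplotlib.pylab as pl


def make_mosaic(maps, border=1):
    """Tile 2-D feature maps of shape (n, h, w) into one mosaic image."""
    n, h, w = maps.shape
    ncols = int(np.ceil(np.sqrt(n)))
    nrows = int(np.ceil(float(n) / ncols))
    mosaic = np.zeros((nrows * (h + border) - border,
                       ncols * (w + border) - border))
    for i in range(n):
        r, c = divmod(i, ncols)
        y, x = r * (h + border), c * (w + border)
        mosaic[y:y + h, x:x + w] = maps[i]
    return mosaic


def nice_imshow(ax, data, cmap=None, name='mosaic.png'):
    """Draw the mosaic on the given axes and save the figure to disk."""
    ax.imshow(data, cmap=cmap, interpolation='nearest')
    ax.axis('off')
    pl.savefig(name, bbox_inches='tight')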
Code Example #2
import os
import cv2
import time
import numpy as np
from keras.optimizers import SGD
from simdat.core import dp_models
from simdat.core import ml
from simdat.core import plot
from simdat.core import image

im = image.IMAGE()
pl = plot.PLOT()
mlr = ml.MLRun()

t0 = time.time()
mdls = dp_models.DPModel()
imnet = dp_models.ImageNet()

weight_path = '/home/tammy/SOURCES/keras/examples/vgg16_weights.h5'
t0 = pl.print_time(t0, 'initiate')

model = mdls.VGG_16(weight_path)
t0 = pl.print_time(t0, 'load weights')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
t0 = pl.print_time(t0, 'compile')

imgs = im.find_images()
X = []
Y = []
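The excerpt breaks off right after initializing X and Y. A plausible continuation, assuming the loop fills X with 224x224, channels-first image tensors for VGG-16 (this is a sketch reusing the cv2 and numpy imports above, not the project's actual code):

# Hypothetical continuation: load each image into a channels-first array.
for fimg in imgs:
    img = cv2.imread(fimg)                       # BGR uint8, (h, w, 3)
    img = cv2.resize(img, (224, 224)).astype('float32')
    X.append(img.transpose((2, 0, 1)))           # (3, 224, 224) for VGG_16
X = np.array(X)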
Code Example #3
File: demo_inception.py  Project: jinjinhong/simdat
from simdat.core import dp_models
dp = dp_models.DPModel()
model = dp.Inception_v3()
model.summary()  # summary() prints the architecture itself
# model.compile('rmsprop', 'categorical_crossentropy')
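A quick way to exercise the model beyond the summary is to re-enable the commented-out compile call and run a dummy forward pass. The 299x299, channels-first input shape below is an assumption based on standard Inception v3; check the simdat implementation:

import numpy as np

model.compile('rmsprop', 'categorical_crossentropy')
dummy = np.random.rand(1, 3, 299, 299).astype('float32')  # assumed shape
preds = model.predict(dummy)
print(preds.shape)  # e.g. (1, n_classes)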
Code Example #4
import argparse
import time

import numpy as np
from keras.optimizers import SGD

from simdat.core import dp_models
from simdat.core import tools


def main():
    parser = argparse.ArgumentParser(
        description="Use Simple model to train a classifier.")
    parser.add_argument("-p",
                        "--path",
                        type=str,
                        default='.',
                        help="Path where the images are. Default: $PWD.")
    parser.add_argument("--img-rows",
                        type=int,
                        default=224,
                        dest='rows',
                        help="Rows of the images, default: 224.")
    parser.add_argument("--img-cols",
                        type=int,
                        default=224,
                        dest='cols',
                        help="Columns of the images, default: 224.")
    parser.add_argument("--seed",
                        type=int,
                        default=1337,
                        help="Random seed, default: 1337.")
    parser.add_argument("--batch-size",
                        type=int,
                        default=64,
                        dest='batchsize',
                        help="Size of the mini batch. Default: 64.")
    parser.add_argument("--epochs",
                        type=int,
                        default=20,
                        help="Number of epochs, default 20.")

    t0 = time.time()
    mdls = dp_models.DPModel()
    tl = tools.TOOLS()

    args = parser.parse_args()
    np.random.seed(args.seed)

    X_train, X_test, Y_train, Y_test, classes = mdls.prepare_data(
        args.path, args.rows, args.cols)
    t0 = tl.print_time(t0, 'prepare data')
    model = mdls.Simple(len(classes),
                        img_row=X_train.shape[2],
                        img_col=X_train.shape[3],
                        colors=X_train.shape[1])
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    t0 = tl.print_time(t0, 'compile the Simple model')

    model.fit(X_train,
              Y_train,
              batch_size=args.batchsize,
              nb_epoch=args.epochs,
              show_accuracy=True,
              verbose=1,
              validation_data=(X_test, Y_test))
    t0 = tl.print_time(t0, 'fit')
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    t0 = tl.print_time(t0, 'evaluate')
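prepare_data is expected to return channels-first image tensors and one-hot labels, judging from how its outputs are indexed above. A minimal sketch of those shapes; the use of np_utils.to_categorical here is an illustrative assumption about how Y is encoded, not a claim about prepare_data's internals:

import numpy as np
from keras.utils import np_utils

# Shapes assumed by the calls above (channels-first):
#   X_train: (n_samples, colors, img_row, img_col), e.g. (N, 3, 224, 224)
#   Y_train: one-hot encoded, (n_samples, len(classes))
labels = np.array([0, 2, 1])                 # integer class ids
Y = np_utils.to_categorical(labels, 3)       # 3 x 3 one-hot matrix
print(Y)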
Code Example #5
import argparse
import os
import time

import numpy as np
from keras.models import model_from_json

from simdat.core import dp_models
from simdat.core import plot
from simdat.core import tools


def main():
    parser = argparse.ArgumentParser(
        description="Demo Sports 1M C3D Network on Keras")
    parser.add_argument(
        "--model-loc",
        type=str,
        default=os.getcwd(),
        dest='ofolder',
        help="Path of the folder to output or to load the model.")
    parser.add_argument("-p",
                        "--path",
                        type=str,
                        default=None,
                        required=True,
                        help="Path of the video.")
    parser.add_argument("--img-width",
                        type=int,
                        default=128,
                        dest='width',
                        help="Rows of the images, default: 128.")
    parser.add_argument("--img-height",
                        type=int,
                        default=171,
                        dest='height',
                        help="Columns of the images, default: 171.")

    t0 = time.time()
    tl = tools.TOOLS()
    pl = plot.PLOT()
    mdls = dp_models.DPModel()
    args = parser.parse_args()
    path_model = os.path.join(args.ofolder, 'model.json')
    path_weights = os.path.join(args.ofolder, 'weights.h5')
    path_label = os.path.join(args.ofolder, 'labels.txt')
    t0 = tl.print_time(t0, 'init')

    model = model_from_json(open(path_model).read())
    model.load_weights(path_weights)
    model.summary()  # summary() prints the architecture itself
    t0 = tl.print_time(t0, 'load model')
    model.compile(loss='mean_squared_error', optimizer='sgd')
    t0 = tl.print_time(t0, 'compile model')

    with open(path_label, 'r') as f:
        labels = [line.strip() for line in f.readlines()]
    print('Total labels: {}'.format(len(labels)))

    X_test, Y_test, classes, F = mdls.prepare_data_test(args.path,
                                                        args.width,
                                                        args.height,
                                                        trans=False,
                                                        scale=False)
    t0 = tl.print_time(t0, 'load data')

    # C3D input has shape (c, l, h, w): c channels, l frames, and
    # h x w frame size. The crop below takes a 112 x 112 window from
    # each run of 16 consecutive frames:
    #   cropped shape   = (16, 112, 112, 3)
    #   after transpose = (3, 16, 112, 112)
    results = []
    detected_lbs = {}
    for i in range(0, X_test.shape[0] - 16):
        X = X_test[i:i + 16, 8:120, 30:142, :].transpose((3, 0, 1, 2))
        output = model.predict_on_batch(np.array([X]))
        # Frame index parsed from the file name (currently unused below);
        # some sources use '_' as the separator instead of '-':
        # iframe = int(F[i].split('.')[0].split('_')[1])
        iframe = int(F[i].split('.')[0].split('-')[1])
        pos_max = output[0].argmax()
        results.append(pos_max)
        if pos_max not in detected_lbs:
            detected_lbs[pos_max] = labels[pos_max]

    pl.plot(results, xlabel='Frame', ylabel='Detected Sport')
    print(detected_lbs)
    t0 = tl.print_time(t0, 'predict')
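The script only plots the per-window predictions. A simple add-on (not part of the original script) that reduces them to a single label by majority vote:

from collections import Counter

# Majority vote over the per-window predictions collected above.
top_class, n_hits = Counter(results).most_common(1)[0]
print('Dominant sport: %s (%d of %d windows)'
      % (labels[top_class], n_hits, len(results)))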
Code Example #6
File: finetune_vgg.py  Project: jinjinhong/simdat
import argparse
import os
import time
from random import shuffle

import numpy as np
from keras.layers import Dense
from keras.models import model_from_json
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator

from simdat.core import dp_models
from simdat.core import image
from simdat.core import tools

# add_prediction_args, add_training_args and print_precision_recall are
# helper functions defined elsewhere in this script; a sketch of
# print_precision_recall follows this example.


def main():
    parser = argparse.ArgumentParser(
        description="Use Simple model to train a classifier.")
    subparsers = parser.add_subparsers(help='commands', dest='sbp_name')
    parser.add_argument("-p",
                        "--path",
                        type=str,
                        default='.',
                        help="Path where the images are. Default: $PWD.")
    parser.add_argument("--img-width",
                        type=int,
                        default=224,
                        dest='width',
                        help="Rows of the images, default: 224.")
    parser.add_argument("--img-height",
                        type=int,
                        default=224,
                        dest='height',
                        help="Columns of the images, default: 224.")
    parser.add_argument("--seed",
                        type=int,
                        default=1337,
                        help="Random seed, default: 1337.")

    predict_parser = subparsers.add_parser("predict",
                                           help='Predict the images.')
    add_prediction_args(predict_parser)

    batch_train_parser = subparsers.add_parser(
        "batch-train", help='Command to train with batches.')
    add_training_args(batch_train_parser)
    batch_train_parser.add_argument(
        "--size",
        type=int,
        default=5000,
        help="Size of the image batch (default: 5,000)")

    finetune_parser = subparsers.add_parser(
        "train", help='Command to finetune the images.')
    add_training_args(finetune_parser)

    crop_parser = subparsers.add_parser("augmentation",
                                        help='Generate cropped images.')

    t0 = time.time()
    tl = tools.DATA()
    simdat_im = image.IMAGE()
    mdls = dp_models.DPModel()

    args = parser.parse_args()
    np.random.seed(args.seed)

    if args.sbp_name in ['train', 'predict', 'batch-train']:
        tl.check_dir(args.ofolder)
        path_model = os.path.join(args.ofolder, 'model.json')
        path_weights = os.path.join(args.ofolder, 'weights.h5')
        path_cls = os.path.join(args.ofolder, 'classes.json')

    if args.sbp_name == 'batch-train':
        imgs = simdat_im.find_images(dir_path=args.path)
        classes = simdat_im.find_folders(dir_path=args.path)

        model = mdls.VGG_16(args.weights, lastFC=False)
        sgd = SGD(lr=args.lr,
                  decay=args.lrdecay,
                  momentum=args.momentum,
                  nesterov=True)
        print('[finetune_vgg] lr = %f, decay = %f, momentum = %f' %
              (args.lr, args.lrdecay, args.momentum))

        print('[finetune_vgg] Adding Dense(nclasses, activation=softmax).')
        model.add(Dense(len(classes), activation='softmax'))
        model.compile(optimizer=sgd, loss='categorical_crossentropy')
        t0 = tl.print_time(t0, 'compile the model to be fine tuned.')

        shuffle(imgs)
        for e in range(args.epochs):
            print("[finetune_vgg] Epoch %d/%d" % (e + 1, args.epochs))
            for i in range(len(imgs) // args.size + 1):
                start = i * args.size
                # Slicing clamps to len(imgs), so no bound check is needed.
                files = imgs[start:start + args.size]
                shuffle(files)
                X_train, X_test, Y_train, Y_test, _c = mdls.prepare_data_train(
                    files,
                    args.width,
                    args.height,
                    classes=classes,
                    rc=args.rc)
                model.fit(X_train,
                          Y_train,
                          batch_size=args.batchsize,
                          nb_epoch=1,
                          show_accuracy=True,
                          verbose=1,
                          validation_data=(X_test, Y_test))

        t0 = tl.print_time(t0, 'fit')

        tl.write_json(classes, fname=path_cls)
        json_string = model.to_json()
        open(path_model, 'w').write(json_string)
        model.save_weights(path_weights, overwrite=True)

    elif args.sbp_name == 'train':

        scale = True
        if args.augmentation:
            scale = False
        X_train, X_test, Y_train, Y_test, classes = mdls.prepare_data_train(
            args.path, args.width, args.height, rc=args.rc, scale=scale)
        tl.write_json(classes, fname=path_cls)
        nclasses = len(classes)
        t0 = tl.print_time(t0, 'prepare data')

        model = mdls.VGG_16(args.weights, lastFC=False)
        sgd = SGD(lr=args.lr,
                  decay=args.lrdecay,
                  momentum=args.momentum,
                  nesterov=True)
        print('[finetune_vgg] lr = %f, decay = %f, momentum = %f' %
              (args.lr, args.lrdecay, args.momentum))

        print('[finetune_vgg] Adding Dense(nclasses, activation=softmax).')
        model.add(Dense(nclasses, activation='softmax'))

        # Freeze the convolutional stacks before compiling; the trainable
        # flag only takes effect when the model is compiled.
        for stack in ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']:
            for l in mdls.layers[stack]:
                l.trainable = False

        model.compile(optimizer=sgd, loss='categorical_crossentropy')
        t0 = tl.print_time(t0, 'compile the model to be fine tuned.')

        if args.augmentation:
            datagen = ImageDataGenerator(featurewise_center=True,
                                         samplewise_center=False,
                                         featurewise_std_normalization=True,
                                         samplewise_std_normalization=False,
                                         zca_whitening=False,
                                         rotation_range=20,
                                         width_shift_range=0.2,
                                         height_shift_range=0.2,
                                         horizontal_flip=True,
                                         vertical_flip=False)

            datagen.fit(X_train)
            model.fit_generator(datagen.flow(X_train,
                                             Y_train,
                                             batch_size=args.batchsize),
                                samples_per_epoch=X_train.shape[0],
                                nb_epoch=args.epochs,
                                show_accuracy=True,
                                validation_data=(X_test, Y_test),
                                nb_worker=1)

        else:
            model.fit(X_train,
                      Y_train,
                      batch_size=args.batchsize,
                      nb_epoch=args.epochs,
                      show_accuracy=True,
                      verbose=1,
                      validation_data=(X_test, Y_test))
        t0 = tl.print_time(t0, 'fit')
        score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
        print('[finetune_vgg] Test score:', score[0])
        print('[finetune_vgg] Test accuracy:', score[1])
        t0 = tl.print_time(t0, 'evaluate')

        json_string = model.to_json()
        open(path_model, 'w').write(json_string)
        model.save_weights(path_weights, overwrite=True)

    elif args.sbp_name == 'predict':
        cls_map = tl.parse_json(path_cls)
        model = model_from_json(open(path_model).read())
        t0 = tl.print_time(t0, 'load model from json')

        model.load_weights(path_weights)
        t0 = tl.print_time(t0, 'load model weights')

        if args.cm:
            from simdat.core import plot
            from sklearn.metrics import confusion_matrix
            pl = plot.PLOT()

            X_test, Y_test, classes, F = mdls.prepare_data_test(
                args.path,
                args.width,
                args.height,
                convert_Y=False,
                y_as_str=False,
                classes=cls_map)
            t0 = tl.print_time(t0, 'prepare data')
            results = model.predict_classes(X_test,
                                            batch_size=args.batchsize,
                                            verbose=1)
            cm = confusion_matrix(Y_test, results)
            pl.plot_confusion_matrix(cm,
                                     xticks=cls_map,
                                     yticks=cls_map,
                                     xrotation=90)

        else:
            X_test, Y_test, classes, F = mdls.prepare_data_test(
                args.path, args.width, args.height)
            t0 = tl.print_time(t0, 'prepare data')

            results = model.predict_proba(X_test,
                                          batch_size=args.batchsize,
                                          verbose=1)
            outputs = []
            precision = dict((el, 0) for el in cls_map)
            recall = dict((el, 0) for el in cls_map)
            total = dict((el, 0) for el in classes)
            for i in range(0, len(F)):
                _cls = results[i].argmax()
                max_prob = results[i][_cls]
                outputs.append({'input': F[i], 'max_probability': max_prob})
                cls = cls_map[_cls]
                recall[cls] += 1
                total[Y_test[i]] += 1
                if max_prob >= args.threshold:
                    outputs[-1]['class'] = cls
                    if Y_test[i] == cls:
                        precision[cls] += 1
                    else:
                        print('[finetune_vgg] %s: %s (%.2f)' %
                              (F[i], cls, max_prob))
                else:
                    print('[finetune_vgg] %s: low probability (%.2f),'
                          ' cannot find a match' % (F[i], max_prob))
                    outputs[-1]['class'] = None
            tl.write_json(outputs, fname=args.output_loc)
            print_precision_recall(precision, recall, total)

    elif args.sbp_name == 'augmentation':
        fimgs = simdat_im.find_images(dir_path=args.path)
        for fimg in fimgs:
            imgs = simdat_im.read_and_random_crop(fimg, save=True)

    else:
        print('Wrong command.')
        parser.print_help()
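print_precision_recall is one of the helpers defined elsewhere in the script. A minimal sketch consistent with how the counters above are filled; the formulas and output format are assumptions, not the project's implementation:

def print_precision_recall(precision, recall, total):
    # Hypothetical implementation, matching the counters above:
    #   precision[c]: confident and correct predictions of class c
    #   recall[c]:    all predictions of class c
    #   total[c]:     true instances of class c
    for c in sorted(precision):
        predicted = recall.get(c, 0)
        actual = total.get(c, 0)
        p = float(precision[c]) / predicted if predicted else 0.0
        r = float(precision[c]) / actual if actual else 0.0
        print('%s: precision = %.2f, recall = %.2f (n = %d)'
              % (c, p, r, actual))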