Example #1
0
def train_main():
    """Train a model according to the settings in ``config.json``.

    Reads the JSON config, ensures the checkpoint folder exists, loads and
    preprocesses the training CSV, builds a new model (or resumes from saved
    weights), splits the data for cross-validation, fits the model, and plots
    the training history into the save folder.
    """
    with open('config.json') as f:
        args = json.load(f)

    # Make the checkpoint folder.  exist_ok replaces the old manual
    # errno.EEXIST check; any other OSError still propagates as before.
    print('Creating checkpoint folder...')
    os.makedirs(args['save_folder'], exist_ok=True)

    # read and preprocess data
    train_data = pd.read_csv(args['train_path'])
    preprocessed_train = preprocess(train_data, args)

    if args['train_model_weights']:  # resume training from saved weights
        model = restore_model(args['save_folder'], args['train_model_weights'])
    else:
        model = create_model(args)
        save_model(model, args['save_folder'])

    # Split data for CV.
    # NOTE(review): preprocessed_train appears to be (features-tuple, labels);
    # confirm against preprocess() before changing this call.
    train_set, val_set = val_split(*preprocessed_train[0],
                                   preprocessed_train[1])

    model, history = fit_model(model, train_set, val_set, args)
    plot_model_history(history, args['save_folder'])
Example #2
0
def detect():
    """Flask view: run the diabetes detector on submitted form values.

    On POST, collects the eight numeric form fields into a (1, 8) float32
    array, passes it to ``restore_model``, and maps the truthy/falsy result
    to a user-facing message.  On GET, ``result`` is ``None`` so the template
    can skip the result section.
    """
    form = Form(request.form)
    # NOTE(review): form.validate() was deliberately commented out in the
    # original condition — confirm whether server-side validation should be
    # re-enabled before shipping.
    if request.method == 'POST':
        test_values = np.array([
            [
                form.pregnancies.data, form.glucose.data,
                form.bloodPressure.data, form.skinThickness.data,
                form.insulin.data, form.bmi.data,
                form.diabetesPedigreeFunction.data, form.age.data
            ],
        ],
                               dtype=np.float32)
        result = restore_model(test_values)
        # Truthy prediction -> positive diagnosis message.
        if result:
            result = "Time to go Sugar Free"
        else:
            result = "Time for dessert"
    else:
        result = None
    iframe = 'details.html'

    return render_template('detect.html',
                           form=form,
                           result=result,
                           iframe=iframe)
Example #3
0
def test_main():
    """Evaluate the saved model on the test CSV described in config.json."""
    with open('config.json') as config_file:
        args = json.load(config_file)

    # Load the test set and drop the identifier column before preprocessing.
    raw_test = pd.read_csv(args['test_path'])
    raw_test.drop('id', axis=1, inplace=True)
    prepared_test = preprocess(raw_test, args, False)

    # Restore weights from the checkpoint folder.  The compile step exists
    # only so evaluation can run; no further training happens here.
    model = restore_model(args['save_folder'], args['test_model_weights'])
    model.compile(loss='mse', optimizer='adam')  # only for evaluation

    test(model, prepared_test, args, True)
Example #4
0
File: test.py Project: yang-neu/bnn
def pr_stats(run, image_dir, label_db, connected_components_threshold):
    """Score a trained run against labelled centroids.

    Walks every image in ``image_dir``, predicts a bitmap with the restored
    model, extracts connected-component centroids, and compares them to the
    true centroids from ``label_db``.  Returns a dict with precision, recall,
    f1 and up to four side-by-side debug images.
    """
    # TODO: a bunch of this can go back into one off init in a class
    _train_opts, model = m.restore_model(run)
    label_db = LabelDB(label_db_file=label_db)
    set_comparison = u.SetComparison()

    # use 4 images for debug
    debug_imgs = []

    for filename in sorted(os.listdir(image_dir)):
        # Load the next image.
        # TODO: this block used in various places, refactor
        raw = np.array(Image.open(image_dir + "/" + filename))  # uint8 0->255  (H, W)
        img = (raw.astype(np.float32) / 127.5) - 1.0  # -1.0 -> 1.0  # see data.py

        # Run through the model; expit squashes logits to probabilities.
        prediction = expit(model.predict(np.expand_dims(img, 0))[0])

        if len(debug_imgs) < 4:
            debug_imgs.append(u.side_by_side(rgb=img, bitmap=prediction))

        # Calc [(x,y), ...] centroids from the predicted bitmap.
        predicted_centroids = u.centroids_of_connected_components(
            prediction, rescale=2.0, threshold=connected_components_threshold)

        # Compare to true labels (which are stored (x, y) but compared (y, x)).
        true_centroids = [(y, x) for (x, y) in label_db.get_labels(filename)]
        # NOTE(review): per-image tp/fn/fp are discarded here — presumably
        # SetComparison accumulates totals internally; verify.
        tp, fn, fp = set_comparison.compare_sets(true_centroids,
                                                 predicted_centroids)

    precision, recall, f1 = set_comparison.precision_recall_f1()

    return {
        "debug_imgs": debug_imgs,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
Example #5
0
            # Forward pass; only the final derained output is scored here.
            mask, rain_list, out_list, output = model(input_x, input_detail)
            # Move to CPU, NCHW -> NHWC, take the first (only) batch element.
            output = output.cpu().data.numpy().transpose((0, 2, 3, 1))[0]
            # Clamp the prediction into [0, 1] before colour conversion.
            output = np.where(output > 0., output, 0.)
            output = np.where(output < 1., output, 1.)

            # Compare in greyscale; `y` is presumably the BGR ground truth
            # image in [0, 1] — TODO confirm against the enclosing loop.
            truth = cv2.cvtColor(y, cv2.COLOR_BGR2GRAY)
            pred = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
            # Both metrics are computed on uint8 images scaled to 0..255.
            psnr.append(
                compare_psnr(np.array(truth * 255., dtype='uint8'),
                             np.array(pred * 255., dtype='uint8')))
            ssim.append(
                compare_ssim(np.array(truth * 255., dtype='uint8'),
                             np.array(pred * 255., dtype='uint8')))

            # NOTE(review): `id` shadows the builtin; counter appears unused
            # in this fragment — verify against the rest of the loop.
            id += 1

        # Report the mean scores over all evaluated images.
        print("average psnr = " + str(np.average(psnr)) +
              ", average ssim_255 = " + str(np.average(ssim)))


if __name__ == '__main__':
    # Build the evaluation dataset from the (placeholder) test paths.
    dataset = GenerateData(
        input_dir='path/to/test/data/',
        label_dir='path/to/test/label/',
    )
    dataset.read_data()

    # Restore the trained model and run validation over the dataset.
    model = restore_model('model/model.pkl')
    validate(dataset, model)
Example #6
0
def main():
    """Train a CNN scene-labeling model configured from the command line.

    Parses hyperparameters and data paths, reads the category map, builds a
    ``CNNModel``, optionally restores saved weights, trains, and saves the
    resulting model to ``--model_save_path``.
    """
    # parse command line arguments
    parser = argparse.ArgumentParser(
        description='An rCNN scene labeling model.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--dataset',
        type=str,
        default=FROM_GAMES,
        choices=DATASETS.keys(),
        help=
        'Type of dataset to use. This determines the expected format of the data directory'
    )
    parser.add_argument('--data_dir',
                        type=str,
                        help='Directory for image and label data')
    parser.add_argument('--category_map',
                        type=str,
                        help='File that maps colors ')
    parser.add_argument('--hidden_size_1',
                        type=int,
                        default=25,
                        help='First Hidden size for CNN model')
    parser.add_argument('--hidden_size_2',
                        type=int,
                        default=50,
                        help='Second Hidden size for CNN model')
    parser.add_argument('--patch_size',
                        type=int,
                        default=67,
                        help='Patch size for input images')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=1e-4,
                        help='Learning rate for training CNN model')
    # TODO figure out batch size
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help='Batch size for training CNN model')
    parser.add_argument('--num_epochs',
                        type=int,
                        default=1,
                        help='Number of epochs for training CNN model')
    parser.add_argument('--use_patches',
                        action='store_true',
                        default=False,
                        help='Whether to train model on individual patches')
    parser.add_argument(
        '--patches_per_image',
        type=int,
        default=1000,
        help=
        'Number of patches to sample for each image during training of CNN model'
    )
    parser.add_argument(
        '--gaussian_sigma',
        type=int,
        choices=[15, 30],
        default=None,
        help='Size of gaussian mask to apply to patches. Not used by default.')
    parser.add_argument(
        '--fix_random_seed',
        action='store_true',
        default=False,
        help='Whether to reset random seed at start, for debugging.')
    parser.add_argument('--model_save_path',
                        type=str,
                        default=None,
                        help='Optional location to store saved model in.')
    parser.add_argument('--model_load_path',
                        type=str,
                        default=None,
                        help='Optional location to load saved model from.')
    parser.add_argument(
        '--dry_run',
        action='store_true',
        default=False,
        help=
        'If true, only trains on one image, to test the training code quickly.'
    )
    parser.add_argument(
        '--train_fraction',
        type=float,
        default=0.8,
        help=
        'Fraction of data to train on. If positive, trains on first X images, otherwise trains on '
        'last X images.')

    args = parser.parse_args()

    # Fixed seed makes runs reproducible for debugging.
    if args.fix_random_seed:
        random.seed(0)

    # load class labels
    category_colors, category_names, names_to_ids = read_object_classes(
        args.category_map)
    num_classes = len(category_names)

    # create function that when called, provides iterator to an epoch of the data
    dataset_func = DATASETS[args.dataset]
    if args.dry_run:

        def dataset_epoch_iter():
            return dataset_func(args.data_dir, num_train=1)
    else:

        def dataset_epoch_iter():
            return dataset_func(args.data_dir,
                                train_fraction=args.train_fraction)

    model = CNNModel(args.hidden_size_1,
                     args.hidden_size_2,
                     args.batch_size,
                     num_classes,
                     args.learning_rate,
                     num_layers=2)

    sess = tf.Session()
    # NOTE(review): tf.initialize_all_variables() is deprecated in TF >= 0.12
    # (tf.global_variables_initializer() is the replacement); kept as-is for
    # compatibility with whatever TF version this project pins — confirm.
    init = tf.initialize_all_variables()
    sess.run(init)
    if args.model_load_path is not None:
        restore_model(sess, args.model_load_path)
    train(sess,
          model,
          dataset_epoch_iter,
          num_epochs=args.num_epochs,
          use_patches=args.use_patches,
          patches_per_image=args.patches_per_image,
          save_path=args.model_save_path,
          gaussian_sigma=args.gaussian_sigma)

    # BUG FIX: was a Python 2 print statement (SyntaxError under Python 3);
    # the parenthesized form is valid on both 2 and 3.
    print("Saving trained model to %s ..." % args.model_save_path)
    save_model(sess, args.model_save_path)
Example #7
0
def main():
    """
    Trains or evaluates an rCNN model.
    """
    # Command-line interface: hyperparameters, data locations and run mode.
    arg_parser = argparse.ArgumentParser(
        description='An rCNN scene labeling model.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '--dataset', type=str, default=FROM_GAMES, choices=DATASETS.keys(),
        help=
        'Type of dataset to use. This determines the expected format of the data directory'
    )
    arg_parser.add_argument(
        '--category_map', type=str, help='File that maps colors ')
    arg_parser.add_argument(
        '--data_dir', type=str, help='Directory for image and label data')
    arg_parser.add_argument(
        '--data_fraction', type=float, default=None,
        help=
        'Fraction of data to train on. If positive, trains on first X images, otherwise trains on '
        'last X images.')
    arg_parser.add_argument(
        '--hidden_size_1', type=int, default=10,
        help='First Hidden size for CNN model')
    arg_parser.add_argument(
        '--hidden_size_2', type=int, default=10,
        help='Second Hidden size for CNN model')
    arg_parser.add_argument(
        '--learning_rate', type=float, default=0.001,
        help='Learning rate for training CNN model')
    arg_parser.add_argument(
        '--num_epochs', type=int, default=1,
        help='Number of epochs for training CNN model')
    arg_parser.add_argument(
        '--model_save_path', type=str, default=None,
        help='Optional location to store saved model in.')
    arg_parser.add_argument(
        '--model_load_path', type=str, default=None,
        help='Optional location to load saved model from.')
    arg_parser.add_argument(
        '--training', action='store_true', default=False,
        help='Whether or not to train model.')
    arg_parser.add_argument(
        '--output_dir', type=str, default=None,
        help='Directory in which to save test output images.')
    arg_parser.add_argument(
        '--dry_run', action='store_true', default=False,
        help=
        'If true, only trains on one image, to test the training code quickly.'
    )
    arg_parser.add_argument(
        '--fix_random_seed', action='store_true', default=False,
        help='Whether to reset random seed at start, for debugging.')

    args = arg_parser.parse_args()

    # Deterministic runs for debugging.
    if args.fix_random_seed:
        random.seed(0)

    # Resolve class names and colors from the category map file.
    category_colors, category_names = read_object_classes(args.category_map)
    num_classes = len(category_names)

    # Each call produces a fresh iterator over one epoch of the dataset.
    dataset_func = DATASETS[args.dataset]
    if args.dry_run:

        def dataset_epoch_iter():
            return dataset_func(args.data_dir, num_samples=1)
    else:

        def dataset_epoch_iter():
            return dataset_func(args.data_dir,
                                data_fraction=args.data_fraction)

    model = RCNNModel(args.hidden_size_1, args.hidden_size_2, num_classes,
                      args.learning_rate, num_layers=2)

    session = tf.Session()
    session.run(tf.initialize_all_variables())
    if args.model_load_path is not None:
        restore_model(session, args.model_load_path)

    run_model(session, model, dataset_epoch_iter,
              num_epochs=args.num_epochs,
              training=args.training,
              save_path=args.model_save_path,
              output_dir=args.output_dir,
              color_map=category_colors)
Example #8
0
def main():
    """Evaluate a saved rCNN scene-labelling model on a data split."""
    # Command-line interface for the evaluation run.
    cli = argparse.ArgumentParser(
        description='Evaluate an rCNN scene labelling model.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('--model', type=str, help='Filename of saved model')
    cli.add_argument('--category_map', type=str,
                     help='File that maps colors ')
    cli.add_argument(
        '--dataset', type=str, default=FROM_GAMES, choices=DATASETS.keys(),
        help=
        'Type of dataset to use. This determines the expected format of the data directory'
    )
    cli.add_argument('--data_dir', type=str,
                     help='Directory for image and label data')
    cli.add_argument(
        '--output_dir', type=str, default=None,
        help=
        'Directory to store model output. By default no output is generated.')
    cli.add_argument('--patch_size', type=int, default=67,
                     help='Size of input patches')
    cli.add_argument('--use_patches', action='store_true', default=False,
                     help='Whether to evaluate model on individual patches')
    cli.add_argument(
        '--patches_per_image', type=int, default=2000,
        help=
        'Number of patches to sample from each test image. Not used by default.'
    )
    cli.add_argument(
        '--gaussian_sigma', type=int, choices=[15, 30], default=None,
        help='Size of gaussian mask to apply to patches. Not used by default.')
    cli.add_argument(
        '--test_fraction', type=float, default=-0.2,
        help=
        'Fraction of data to test on. If positive, tests on first X images, otherwise tests on '
        'last X images.')
    cli.add_argument('--layer', choices=[1, 2], type=int, default=2,
                     help='Number of rCNN layers to use.')
    args = cli.parse_args()

    # Resolve class names/colors for the chosen category map.
    category_colors, category_names, names_to_ids = read_object_classes(
        args.category_map)
    num_classes = len(category_names)

    # Wrap the dataset loader so test_model can re-create the iterator.
    def dataset_func():
        return DATASETS[args.dataset](args.data_dir,
                                      train_fraction=args.test_fraction)

    # TODO only test?
    # TODO don't hardcode these (maybe store them in config file?)
    model = CNNModel(25, 50, 1, num_classes, 1e-4, num_layers=2)

    session = tf.Session()
    restore_model(session, args.model)

    test_model(session, model, dataset_func, args.layer,
               use_patches=args.use_patches,
               patches_per_image=args.patches_per_image,
               gaussian_sigma=args.gaussian_sigma,
               output_dir=args.output_dir,
               color_map=category_colors)
Example #9
0
)
# Optional sqlite label DB to write predicted centroids into.
parser.add_argument('--output-label-db',
                    type=str,
                    default=None,
                    help='if not set dont write label_db')
# Run id: selects the saved model and names the export-pngs subdirectory.
parser.add_argument('--run',
                    type=str,
                    required=True,
                    help='model, also used as subdir for export-pngs')
parser.add_argument(
    '--export-pngs',
    default='',
    help='how, if at all, to export pngs {"", "predictions", "centroids"}')
opts = parser.parse_args()

# Restore the trained model; m.restore_model returns the training options
# alongside the model itself.
train_opts, model = m.restore_model(opts.run)
print(model.summary())

# Open (or create) the output label DB only when one was requested.
if opts.output_label_db:
    db = LabelDB(label_db_file=opts.output_label_db)
    db.create_if_required()
else:
    db = None

# Make sure the PNG export directory exists when any export mode is set.
if opts.export_pngs:
    export_dir = "predict_examples/%s" % opts.run
    print("exporting prediction samples to [%s]" % export_dir)
    if not os.path.exists(export_dir):
        os.makedirs(export_dir)

# NOTE(review): opts.image_dir is defined by an add_argument call above this
# fragment's view — confirm it exists before relying on it.
imgs = os.listdir(opts.image_dir)