Code Example #1
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'queued') == 'parallel')
    trainer = SupervisedTrainer(model,
                                cnf,
                                training_iter,
                                validation_iter,
                                classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
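The main() above expects its arguments from a command-line wrapper, which the excerpt omits. A minimal sketch of such wiring with click (the option names and defaults here are assumptions, not part of the original):

import click

# Hypothetical CLI wrapper for main(); option names/defaults are illustrative only.
@click.command()
@click.option('--model', required=True, help='Path to the model module.')
@click.option('--training_cnf', required=True, help='Path to the training config module.')
@click.option('--data_dir', required=True, help='Directory with training data.')
@click.option('--start_epoch', default=1, help='Epoch to (re)start training from.')
@click.option('--resume_lr', default=0.01, help='Learning rate when resuming.')
@click.option('--weights_from', default=None, help='Checkpoint to initialize weights from.')
@click.option('--clean', is_flag=True, help='Start with clean logs/summaries.')
@click.option('--visuals', is_flag=True, help='Enable visual summaries.')
def cli(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from, clean, visuals):
    main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from, clean, visuals)

if __name__ == '__main__':
    cli()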
Code Example #2
def predict_command(model, training_cnf, features_file, images_dir, weights_from, tag, sync):
    util.check_required_program_args([model, training_cnf, features_file, images_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    image_features = np.load(features_file)
    images = data.get_image_files(images_dir)
    predictions = predict_withf(model, cnf, weights_from, image_features)
    predict_dir = os.path.dirname(features_file)
    prediction_results_dir = os.path.abspath(os.path.join(predict_dir, 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    names = data.get_names(images)
    image_prediction_probs = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_probs = np.vstack([title, image_prediction_probs])
    prediction_probs_file = os.path.join(prediction_results_dir, 'predictions.csv')
    np.savetxt(prediction_probs_file, image_prediction_probs, delimiter=",", fmt="%s")
    print('Predictions saved to: %s' % prediction_probs_file)
    if cnf['classification']:
        class_predictions = np.argmax(predictions, axis=1)
        image_class_predictions = np.column_stack([names, class_predictions])
        title = np.array(['image', 'label'])
        image_class_predictions = np.vstack([title, image_class_predictions])
        prediction_class_file = os.path.join(prediction_results_dir, 'predictions_class.csv')
        np.savetxt(prediction_class_file, image_class_predictions, delimiter=",", fmt="%s")
        print('Class predictions saved to: %s' % prediction_class_file)
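The probabilities CSV is written by np.savetxt from an all-string array with a header row stacked on top, so it reads back cleanly with pandas. A usage sketch (the path depends on features_file and tag and is illustrative here):

import pandas as pd

# Illustrative path: <dirname(features_file)>/predictions/<tag>/predictions.csv
df = pd.read_csv('predictions/v1/predictions.csv')
print(df.head())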
Code Example #3
def get_metrics(actual_labels_file, predict_labels_file):
    util.check_required_program_args([actual_labels_file, predict_labels_file])
    actual_labels_df = pd.read_csv(actual_labels_file, names=['image', 'label'], header=0)
    predict_labels_df = pd.read_csv(predict_labels_file, names=['image', 'label'], header=0)

    # assumes an equal number of items in both files
    assert actual_labels_df['image'].count() == predict_labels_df['image'].count()

    actual_labels_df = actual_labels_df.sort_values(by=['image'])
    predict_labels_df = predict_labels_df.sort_values(by=['image'])
    assert (list(actual_labels_df['image'].values) == list(predict_labels_df['image'].values))

    # After sorting by image name, y_true and y_pred are aligned.
    y_labels = actual_labels_df['image'].values
    y_true = actual_labels_df['label'].values
    y_pred = predict_labels_df['label'].values

    print "Confusion matrix:"
    print confusion_matrix(y_true, y_pred)
    print ""
    print "Classification report:"
    print classification_report(y_true, y_pred)

    accuracy = accuracy_score(y_true, y_pred)
    kappa = quadratic_weighted_kappa(y_true, y_pred)

    print('Accuracy: %.4f' % accuracy)
    print('Kappa: %.4f' % kappa)
    print ""
Code Example #4
File: model_info.py Project: shivam-kotwalia/mnist
def model_info(model):
    util.check_required_program_args([model])
    model_def = util.load_module(model)
    model = model_def.model
    end_points = model(False, None)
    util.show_layer_shapes(end_points)
    util.show_vars()
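The call model(False, None) presumably builds the graph in inference mode (is_training=False, reuse=None) so that layer shapes and variables can be printed. A hypothetical invocation (the module path is an assumption, not from the excerpt):

# 'examples/mnist_model.py' is an illustrative path only.
model_info('examples/mnist_model.py')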
Code Example #5
File: train_withf.py Project: shivam-kotwalia/mnist
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from, clean):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO, clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter = BatchIterator(cnf['batch_size_train'], True)
    validation_iter = BatchIterator(cnf['batch_size_test'], True)
    trainer = SupervisedTrainer(model, cnf, training_iter, validation_iter, classification=cnf['classification'])
    trainer.fit(data_set, weights_from, start_epoch, resume_lr, verbose=1,
                summary_every=cnf.get('summary_every', 10), clean=clean)
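Unlike Example #1, this variant feeds the trainer plain BatchIterator objects built from a batch size and a shuffle flag. A minimal stand-in showing the behavior such an iterator implies (an assumption, not tefla's actual implementation):

import numpy as np

class SimpleBatchIterator(object):
    """Sketch: yields (X, y) mini-batches, optionally shuffled each pass."""
    def __init__(self, batch_size, shuffle):
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __call__(self, X, y):
        idx = np.arange(len(X))
        if self.shuffle:
            np.random.shuffle(idx)
        for start in range(0, len(X), self.batch_size):
            sel = idx[start:start + self.batch_size]
            yield X[sel], y[sel]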
Code Example #6
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         weights_exclude_scopes, trainable_scopes, clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    sys.path.insert(0, '.')
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'parallel') == 'parallel')

    try:
        input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0],
                       model_def.num_channels)
    except AttributeError:
        input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0], 3)

    trainer = SupervisedTrainerQ(model,
                                 cnf,
                                 input_shape,
                                 trainable_scopes,
                                 training_iter,
                                 validation_iter,
                                 classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                weights_exclude_scopes,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
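The try/except around model_def.num_channels falls back to 3 channels when the model module does not define that attribute; the same fallback reads more directly with getattr:

# Equivalent channel fallback for the input_shape computed above.
num_channels = getattr(model_def, 'num_channels', 3)
input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0], num_channels)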
Code Example #7
def main(directory, convert_directory, target_size, extension):
    util.check_required_program_args([directory, convert_directory])
    try:
        os.mkdir(convert_directory)
    except OSError:
        pass

    supported_extensions = set(['jpg', 'png', 'tiff', 'jpeg', 'tif'])

    filenames = [
        os.path.join(dp, f) for dp, dn, fn in os.walk(directory) for f in fn
        if f.split('.')[-1].lower() in supported_extensions
    ]
    filenames = sorted(filenames)

    print("Resizing images in {} to {}, this takes a while."
          "".format(directory, convert_directory))

    n = len(filenames)
    # process in batches, sometimes weird things happen with Pool on my machine
    batchsize = 500
    batches = n // batchsize + 1
    pool = Pool(N_PROC)

    args = []

    for f in filenames:
        args.append((convert, (directory, convert_directory, f, target_size,
                               extension)))

    for i in range(batches):
        print("batch {:>2} / {}".format(i + 1, batches))
        pool.map(process, args[i * batchsize:(i + 1) * batchsize])

    pool.close()

    print('done')
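process and convert are not shown in the excerpt. Given how the argument tuples are built, a plausible worker pair (a sketch assuming process unpacks and dispatches each (fn, args) job, and that Pillow handles the resize) is:

import os
from PIL import Image

def process(job):
    # Pool.map passes one (fn, args) tuple per image; unpack and run it.
    fn, args = job
    return fn(*args)

def convert(directory, convert_directory, fname, target_size, extension):
    # Illustrative resize: mirror fname into convert_directory at target_size.
    img = Image.open(fname)
    img = img.resize((target_size, target_size), Image.LANCZOS)
    base = os.path.splitext(os.path.basename(fname))[0]
    img.save(os.path.join(convert_directory, base + '.' + extension))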
Code Example #8
File: predict.py Project: charu1994/tefla
def predict(model, output_layer, training_cnf, predict_dir, weights_from, tag,
            convert, image_size, sync, predict_type):
    util.check_required_program_args(
        [model, training_cnf, predict_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)

    preprocessor = convert_preprocessor(image_size) if convert else None
    prediction_iterator = create_prediction_iter(cnf, model_def.crop_size,
                                                 preprocessor, sync)

    if predict_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20, output_layer)
    elif predict_type == '1_crop':
        predictor = OneCropPredictor(model, cnf, weights_from,
                                     prediction_iterator, output_layer)
    elif predict_type == '10_crop':
        predictor = TenCropPredictor(model, cnf, weights_from,
                                     prediction_iterator,
                                     model_def.crop_size[0],
                                     model_def.image_size[0], output_layer)
    else:
        raise ValueError('Unknown predict_type: %s' % predict_type)
    predictions = predictor.predict(images)

    prediction_results_dir = os.path.abspath(
        os.path.join(predict_dir, '..', 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    if output_layer == 'predictions':
        names = data.get_names(images)
        image_prediction_probs = np.column_stack([names, predictions])
        headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
        title = np.array(['image'] + headers)
        image_prediction_probs = np.vstack([title, image_prediction_probs])
        prediction_probs_file = os.path.join(prediction_results_dir,
                                             'predictions.csv')
        np.savetxt(prediction_probs_file,
                   image_prediction_probs,
                   delimiter=",",
                   fmt="%s")
        print('Predictions saved to: %s' % prediction_probs_file)

        if cnf['classification']:
            class_predictions = np.argmax(predictions, axis=1)
            image_class_predictions = np.column_stack(
                [names, class_predictions])
            title = np.array(['image', 'label'])
            image_class_predictions = np.vstack(
                [title, image_class_predictions])
            prediction_class_file = os.path.join(prediction_results_dir,
                                                 'predictions_class.csv')
            np.savetxt(prediction_class_file,
                       image_class_predictions,
                       delimiter=",",
                       fmt="%s")
            print('Class predictions saved to: %s' % prediction_class_file)
    else:
        # feature extraction
        features_file = os.path.join(prediction_results_dir, 'features.npy')
        np.save(features_file, predictions)
        print('Features from layer: %s saved to: %s' %
              (output_layer, features_file))
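The features.npy written in the feature-extraction branch is exactly what Example #2's predict_command consumes via np.load(features_file). Loading it back (the path is illustrative):

import numpy as np

# Saved under <predict_dir>/../predictions/<tag>/features.npy
features = np.load('predictions/v1/features.npy')
print(features.shape)  # (num_images, feature_dim)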