Example No. 1
def predict_command(model, training_cnf, features_file, images_dir,
                    weights_from, tag, sync):
    util.check_required_program_args([model, training_cnf, features_file, images_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    image_features = np.load(features_file)
    images = data.get_image_files(images_dir)
    predictions = predict_withf(model, cnf, weights_from, image_features)
    predict_dir = os.path.dirname(features_file)
    prediction_results_dir = os.path.abspath(os.path.join(predict_dir, 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    names = data.get_names(images)
    image_prediction_probs = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_probs = np.vstack([title, image_prediction_probs])
    prediction_probs_file = os.path.join(prediction_results_dir, 'predictions.csv')
    np.savetxt(prediction_probs_file, image_prediction_probs, delimiter=",", fmt="%s")
    print('Predictions saved to: %s' % prediction_probs_file)
    if cnf['classification']:
        class_predictions = np.argmax(predictions, axis=1)
        image_class_predictions = np.column_stack([names, class_predictions])
        title = np.array(['image', 'label'])
        image_class_predictions = np.vstack([title, image_class_predictions])
        prediction_class_file = os.path.join(prediction_results_dir, 'predictions_class.csv')
        np.savetxt(prediction_class_file, image_class_predictions, delimiter=",", fmt="%s")
        print('Class predictions saved to: %s' % prediction_class_file)
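
The column_stack/vstack/savetxt sequence above is plain NumPy; here is a minimal standalone sketch of the same CSV-building pattern, with made-up image names and scores:

import numpy as np

# Hypothetical inputs: two images, three class scores each.
names = np.array(['img_001.jpg', 'img_002.jpg'])
predictions = np.array([[0.7, 0.2, 0.1],
                        [0.1, 0.3, 0.6]])

# Pair each name with its score row, then prepend a header row.
rows = np.column_stack([names, predictions])
header = np.array(['image'] + ['score%d' % (i + 1)
                               for i in range(predictions.shape[1])])
table = np.vstack([header, rows])

# Every cell is written as a string, matching the fmt="%s" used above.
np.savetxt('predictions.csv', table, delimiter=',', fmt='%s')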
Example No. 2
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         resume_lr, gpu_memory_fraction, is_summary, num_classes):
    model_def = util.load_module(model)
    model = model_def
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train_ss.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    standardizer = cnf.get('standardizer', NoOpStandardizer())

    training_iter, validation_iter = create_training_iters(cnf,
                                                           data_set,
                                                           standardizer,
                                                           model_def.crop_size,
                                                           start_epoch,
                                                           parallel=parallel)
    trainer = GenerativeLearner(model,
                                cnf,
                                training_iterator=training_iter,
                                validation_iterator=validation_iter,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                is_summary=is_summary,
                                verbosity=2)
    trainer.fit(data_set,
                num_classes,
                weights_from,
                start_epoch,
                summary_every=399)
Example No. 3
def main(model, training_cnf, data_dir, parallel, start_epoch, task_id,
         job_name, ps_hosts, worker_hosts, weights_from, resume_lr,
         gpu_memory_fraction, is_summary, loss_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    ps_hosts = ps_hosts.split(',')
    worker_hosts = worker_hosts.split(',')
    cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                         'worker': worker_hosts})
    server = tf.train.Server(cluster_spec,
                             job_name=job_name,
                             task_index=task_id)

    util.init_logging('train.log', file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    if job_name == 'ps':
        server.join()
    else:
        learner = DistSupervisedLearner(
            model, cnf, resume_lr=resume_lr,
            classification=cnf['classification'],
            gpu_memory_fraction=gpu_memory_fraction,
            is_summary=is_summary, loss_type=loss_type, verbosity=1)
        data_dir_train = os.path.join(data_dir, 'train')
        data_dir_val = os.path.join(data_dir, 'val')
        learner.fit(task_id, server, cluster_spec, data_dir_train,
                    data_dir_val, weights_from=weights_from,
                    start_epoch=start_epoch, training_set_size=50000,
                    val_set_size=10000, summary_every=399,
                    keep_moving_averages=True)
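
The ps/worker split above follows TF1's between-graph replication pattern: every process builds the same ClusterSpec, starts a tf.train.Server for its own role, and parameter servers simply block in join(). A minimal sketch with hypothetical host addresses:

import tensorflow as tf

# Hypothetical two-process cluster: one parameter server, one worker.
cluster = tf.train.ClusterSpec({'ps': ['localhost:2222'],
                                'worker': ['localhost:2223']})

# Run with job_name='ps' in one process and job_name='worker' in the other.
server = tf.train.Server(cluster, job_name='worker', task_index=0)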
Example No. 4
def predict(model, training_cnf, predict_dir, weights_from, predict_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)

    preprocessor = None
    prediction_iterator = create_prediction_iter(cnf, model_def.crop_size,
                                                 preprocessor, False)

    if predict_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20)
    elif predict_type == '1_crop':
        predictor = OneCropPredictor(model, cnf, weights_from,
                                     prediction_iterator)
    elif predict_type == '10_crop':
        predictor = TenCropPredictor(model, cnf, weights_from,
                                     prediction_iterator,
                                     model_def.crop_size[0],
                                     model_def.image_size[0])
    else:
        raise ValueError('Unknown predict_type: %s' % predict_type)
    predictions = predictor.predict(images)
    predictions = predictions.reshape(-1, 1000)

    names = data.get_names(images)
    for i, name in enumerate(names):
        print("---Predictions for %s:" % name)
        preds = (np.argsort(predictions[i])[::-1])[0:5]
        for p in preds:
            print(class_names[p], predictions[i][p])
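
The top-5 extraction above is a generic NumPy idiom: argsort is ascending, so reversing it puts the highest scores first. A standalone sketch with made-up scores:

import numpy as np

scores = np.array([0.05, 0.40, 0.10, 0.30, 0.15])

top5 = np.argsort(scores)[::-1][:5]
print(top5)          # [1 3 4 2 0]
print(scores[top5])  # [0.4  0.3  0.15 0.1  0.05]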
Example No. 5
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'queued') == 'parallel')
    trainer = SupervisedTrainer(model,
                                cnf,
                                training_iter,
                                validation_iter,
                                classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
Example No. 6
def main(model, training_cnf, data_dir, parallel, max_to_keep, start_epoch,
         weights_from, weights_dir, num_classes, resume_lr,
         gpu_memory_fraction, is_summary, loss_type, weighted, data_balancing,
         log_file_name):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    if weights_from:
        weights_from = str(weights_from)

    learner = SupervisedLearner(model,
                                cnf,
                                data_balancing=data_balancing,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                num_classes=num_classes,
                                is_summary=is_summary,
                                loss_type=loss_type,
                                verbosity=1,
                                weighted=weighted,
                                log_file_name=log_file_name)
    data_dir_train = os.path.join(data_dir, 'train')
    data_dir_val = os.path.join(data_dir, 'val')
    learner.fit(data_dir_train,
                data_dir_val,
                weights_from=weights_from,
                weights_dir=weights_dir,
                max_to_keep=max_to_keep,
                start_epoch=start_epoch,
                training_set_size=50000,
                val_set_size=10000,
                summary_every=399,
                keep_moving_averages=True)
Example No. 7
def predict(model, training_cnf, predict_dir, weights_from, dataset_name,
            convert, image_size, sync, test_type, gpu_memory_fraction):
    model = util.load_module(model)
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    with tf.Graph().as_default():
        end_points_G = model.generator([32, 100], True, None)
        inputs = tf.placeholder(tf.float32,
                                shape=(None, model.image_size[0],
                                       model.image_size[0], 3),
                                name="input")
        end_points_D = model.discriminator(inputs,
                                           True,
                                           None,
                                           num_classes=6,
                                           batch_size=32)
        saver = tf.train.Saver()
        print('Loading weights from: %s' % weights_from)
        if gpu_memory_fraction is not None:
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=gpu_memory_fraction)
            sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        else:
            sess = tf.Session()
        saver.restore(sess, weights_from)
        end_points_G_val = model.generator([cnf['batch_size_test'], 100],
                                           False,
                                           True,
                                           batch_size=cnf['batch_size_test'])

        util.save_images('generated_images.png',
                         sess.run(end_points_G_val['softmax']),
                         width=128,
                         height=128)

        sess.close()
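
Capping per-process GPU memory with GPUOptions, as done above, is standard TF1 usage; a minimal standalone sketch (the 0.4 fraction is arbitrary):

import tensorflow as tf

# Let this process claim at most 40% of each visible GPU's memory.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))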
Example No. 8
def load_model(model, model_cnf, weights_from, layer_name, data_path):
    model_def = util.load_module(model)
    cnf = util.load_module(model_cnf).cnf
    standardizer = cnf['standardizer']
    model = model_def.model
    sess = tf.Session()
    try:
        end_points = model(is_training=False, reuse=None)
        saver = tf.train.Saver()
        print('Loading weights from: %s' % weights_from)
        saver.restore(sess, weights_from)
    except Exception:
        # Swallowing the error here would leave end_points undefined below.
        print('Failed to load weights from: %s' % weights_from)
        raise

    inputs = end_points['inputs']
    layer = end_points[layer_name]
    data = tf.read_file(data_path)
    data = tf.to_float(tf.image.decode_jpeg(data))
    data = standardizer(data, False)
    data = tf.transpose(data, perm=[1, 0, 2])
    data_ = data.eval(session=sess)
    data_ = scipy.misc.imresize(data_, size=(448, 448), interp='cubic')
    data_ = np.expand_dims(data_, 0)
    acti = get_activation(sess, layer, inputs, data_)
    acti = np.mean(acti, 3).squeeze()
    # acti = np.asarray(acti.transpose(1, 0), dtype=np.float32)
    plt.imshow(acti)
    plt.show()
    # print(acti)
    """
Example No. 9
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         weights_dir, resume_lr, num_classes, gpu_memory_fraction, is_summary,
         loss_type, log_file_name):
    with tf.Graph().as_default():
        model_def = util.load_module(model)
        model = model_def.model
        cnf = util.load_module(training_cnf).cnf

        if weights_from:
            weights_from = str(weights_from)

        trainer = SupervisedLearner(model,
                                    cnf,
                                    log_file_name=log_file_name,
                                    resume_lr=resume_lr,
                                    classification=cnf['classification'],
                                    gpu_memory_fraction=gpu_memory_fraction,
                                    num_classes=num_classes,
                                    is_summary=is_summary,
                                    loss_type=loss_type,
                                    verbosity=1)
        trainer.fit(data_dir,
                    weights_from=weights_from,
                    weights_dir=weights_dir,
                    start_epoch=start_epoch,
                    summary_every=399,
                    keep_moving_averages=True)
Example No. 10
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         resume_lr, gpu_memory_fraction, is_summary, num_classes,
         log_file_name):
    model_def = util.load_module(model)
    model = model_def
    cnf = util.load_module(training_cnf).cnf

    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    standardizer = cnf.get('standardizer', NoOpStandardizer())

    training_iter, validation_iter = create_training_iters(cnf,
                                                           data_set,
                                                           standardizer,
                                                           model_def.crop_size,
                                                           start_epoch,
                                                           parallel=parallel)
    trainer = SemiSupervisedTrainer(model,
                                    cnf,
                                    training_iterator=training_iter,
                                    validation_iterator=validation_iter,
                                    resume_lr=resume_lr,
                                    classification=cnf['classification'],
                                    gpu_memory_fraction=gpu_memory_fraction,
                                    is_summary=is_summary,
                                    verbosity=1,
                                    log_file_name=log_file_name)
    trainer.fit(data_set,
                num_classes,
                weights_from,
                start_epoch,
                summary_every=399)
Example No. 11
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         resume_lr, gpu_memory_fraction, is_summary):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    standardizer = cnf.get('standardizer', NoOpStandardizer())

    training_iter, validation_iter = create_training_iters(cnf,
                                                           data_set,
                                                           standardizer,
                                                           model_def.crop_size,
                                                           start_epoch,
                                                           parallel=parallel)
    trainer = SupervisedTrainer(model,
                                cnf,
                                training_iter,
                                validation_iter,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                is_summary=is_summary,
                                loss_type='kappa_log')
    trainer.fit(data_set,
                weights_from,
                start_epoch,
                verbose=1,
                summary_every=399)
Example No. 12
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from, clean):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO, clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter = BatchIterator(cnf['batch_size_train'], True)
    validation_iter = BatchIterator(cnf['batch_size_test'], True)
    trainer = SupervisedTrainer(model, cnf, training_iter, validation_iter, classification=cnf['classification'])
    trainer.fit(data_set, weights_from, start_epoch, resume_lr, verbose=1,
                summary_every=cnf.get('summary_every', 10), clean=clean)
Example No. 13
def predict(frozen_model, training_cnf, predict_dir, image_size, output_path,
            num_classes, gpu_memory_fraction):
    cnf = util.load_module(training_cnf).cnf
    standardizer = cnf['standardizer']
    graph = util.load_frozen_graph(frozen_model)
    preprocessor = convert_preprocessor(image_size)
    predictor = SegmentPredictor(graph, standardizer, preprocessor)
    # images = data.get_image_files(predict_dir)
    image_names = [
        filename.strip() for filename in os.listdir(predict_dir)
        if filename.endswith('.jpg')
    ]
    hist = np.zeros((num_classes, num_classes))
    for image_filename in image_names:
        final_prediction_map = predictor.predict(
            os.path.join(predict_dir, image_filename))
        final_prediction_map = final_prediction_map.transpose(0, 2,
                                                              1).squeeze()
        gt_name = os.path.join(predict_dir,
                               image_filename[:-4] + '_final_mask' + '.png')
        gt = convert(gt_name, image_size)
        gt = np.asarray(gt)
        gt = convert_labels(gt, image_size, image_size)
        hist += compute_hist(gt, final_prediction_map, num_classes=num_classes)
    iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    meaniou = np.nanmean(iou)
    print('Mean IOU %5.5f' % meaniou)
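
compute_hist is not shown here, but the mean-IoU arithmetic is standard: build a class-confusion histogram, then divide each diagonal entry (intersection) by its row sum plus column sum minus the diagonal (union). A plain-NumPy sketch under that assumption:

import numpy as np

def compute_hist(gt, pred, num_classes):
    # Confusion matrix: rows are ground-truth classes, columns predictions.
    mask = (gt >= 0) & (gt < num_classes)
    return np.bincount(num_classes * gt[mask].astype(int) + pred[mask],
                       minlength=num_classes ** 2).reshape(num_classes,
                                                           num_classes)

gt = np.array([0, 0, 1, 1, 2, 2])
pred = np.array([0, 1, 1, 1, 2, 0])
hist = compute_hist(gt, pred, num_classes=3)

iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print(iou)              # per-class IoU
print(np.nanmean(iou))  # mean IoU, as reported above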
Example No. 14
  def __init__(self,
               model_cnf,
               data_dir,
               image_size,
               crop_size,
               channel_dim=3,
               start_epoch=1,
               parallel=True):
    self.cnf = util.load_module(model_cnf).cnf
    self.image_size = image_size
    self.crop_size = crop_size
    self.channel_dim = channel_dim
    self.data_set = DataSet(data_dir, image_size)
    self.standardizer = self.cnf.get('standardizer', NoOpStandardizer())
    self.training_iter, self.validation_iter = create_training_iters(
        self.cnf,
        self.data_set,
        self.standardizer, [crop_size, crop_size],
        start_epoch,
        parallel=parallel)

    self.training_X = self.data_set.training_X
    self.training_y = self.data_set.training_y
    self.validation_X = self.data_set.validation_X
    self.validation_y = self.data_set.validation_y
Example No. 15
def model_info(model):
    util.check_required_program_args([model])
    model_def = util.load_module(model)
    model = model_def.model
    end_points = model(False, None)
    util.show_layer_shapes(end_points)
    util.show_vars()
Example No. 16
def try_config(args, cnf):
    """For trying out configurations.

  Args:
      args: command line arguments regarding training
      cnf: training configuration sampled from hyperband search space

  Returns:
      a dictionary containing final loss value and early stop flag
  """
    model_def = util.load_module(args['model'])
    model = model_def.model

    if args['weights_from']:
        weights_from = str(args['weights_from'])
    else:
        weights_from = args['weights_from']

    data_set = DataSet(args['data_dir'],
                       model_def.image_size[0],
                       mode=cnf.get('mode'),
                       multilabel=cnf.get('multilabel', False))

    standardizer = cnf.get('standardizer', NoOpStandardizer())
    cutout = cnf.get('cutout', None)

    training_iter, validation_iter = create_training_iters(
        cnf,
        data_set,
        standardizer,
        model_def.crop_size,
        args['start_epoch'],
        parallel=args['parallel'],
        cutout=cutout,
        data_balancing=cnf.get('data_balancing', False))
    learner = SupervisedLearner(
        model,
        cnf,
        training_iterator=training_iter,
        validation_iterator=validation_iter,
        resume_lr=args['resume_lr'],
        classification=cnf['classification'],
        gpu_memory_fraction=args['gpu_memory_fraction'],
        num_classes=args['num_classes'],
        is_summary=args['is_summary'],
        loss_type=args['loss_type'],
        weighted=args['weighted'],
        log_file_name=args['log_file_name'],
        verbosity=args['verbose'],
        is_early_stop=cnf.get('is_early_stop', True))

    _early_stop, _loss = learner.fit(data_set,
                                     weights_from=weights_from,
                                     start_epoch=args['start_epoch'],
                                     weights_dir=args['weights_dir'],
                                     summary_every=399)
    return {'early_stop': _early_stop, 'loss': _loss}
Example No. 17
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         weights_dir, resume_lr, gpu_memory_fraction, num_classes, is_summary,
         loss_type, weighted, log_file_name):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir,
                       model_def.image_size[0],
                       mode=cnf.get('mode'),
                       multilabel=cnf.get('multilabel', False))
    standardizer = cnf.get('standardizer', NoOpStandardizer())
    cutout = cnf.get('cutout', None)

    training_iter, validation_iter = create_training_iters(
        cnf,
        data_set,
        standardizer,
        model_def.crop_size,
        start_epoch,
        parallel=parallel,
        cutout=cutout,
        data_balancing=cnf.get('data_balancing', False))
    learner = SupervisedLearner(model,
                                cnf,
                                training_iterator=training_iter,
                                validation_iterator=validation_iter,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                num_classes=num_classes,
                                is_summary=is_summary,
                                loss_type=loss_type,
                                weighted=weighted,
                                log_file_name=log_file_name)
    learner.fit(data_set,
                weights_from,
                start_epoch=start_epoch,
                weights_dir=weights_dir,
                summary_every=399)
Example No. 18
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         weights_exclude_scopes, trainable_scopes, clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    sys.path.insert(0, '.')
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'parallel') == 'parallel')

    num_channels = getattr(model_def, 'num_channels', 3)
    input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0],
                   num_channels)

    trainer = SupervisedTrainerQ(model,
                                 cnf,
                                 input_shape,
                                 trainable_scopes,
                                 training_iter,
                                 validation_iter,
                                 classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                weights_exclude_scopes,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
Example No. 19
def predict(model, training_cnf, predict_dir, weights_from, dataset_name,
            convert, image_size, sync, test_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)

    standardizer = cnf.get('standardizer', None)

    preprocessor = convert_preprocessor(image_size) if convert else None
    prediction_iterator = create_prediction_iter(cnf, standardizer,
                                                 model_def.crop_size,
                                                 preprocessor, sync)

    if test_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20)
    else:
        raise ValueError('Unknown test_type: %s' % test_type)
    predictions = predictor.predict(images)

    if not os.path.exists(os.path.join(predict_dir, '..', 'results')):
        os.mkdir(os.path.join(predict_dir, '..', 'results'))
    if not os.path.exists(
            os.path.join(predict_dir, '..', 'results', dataset_name)):
        os.mkdir(os.path.join(predict_dir, '..', 'results', dataset_name))

    names = data.get_names(images)
    image_prediction_prob = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_prob = np.vstack([title, image_prediction_prob])
    labels_file_prob = os.path.abspath(
        os.path.join(predict_dir, '..', 'results', dataset_name,
                     'predictions.csv'))
    np.savetxt(labels_file_prob,
               image_prediction_prob,
               delimiter=",",
               fmt="%s")
Example No. 20
def predict(frozen_model, training_cnf, image_path, image_size, output_path, gpu_memory_fraction):
  cnf = util.load_module(training_cnf).cnf
  standardizer = cnf['standardizer']
  graph = util.load_frozen_graph(frozen_model)
  preprocessor = convert_preprocessor(image_size)
  predictor = SegmentPredictor(graph, standardizer, preprocessor)
  final_prediction_map = predictor.predict(image_path)
  final_prediction_map = final_prediction_map.transpose(0, 2, 1).squeeze()
  image = data.load_image(image_path, preprocessor=preprocessor)
  img = image.transpose(2, 1, 0)
  img = Image.fromarray(img.astype('uint8'), 'RGB')
  img.save('/tmp/test.png')
  image_filename = image_path.split('/')[-1]
  plot_masks('/tmp/test.png', final_prediction_map, output_path)
  """
Example No. 21
def save_graph(model, output_dir, output_model):
    model_def = util.load_module(model)
    model = model_def.model
    try:
        with tf.Graph().as_default():
            sess = tf.Session()

            end_points_predict = model(is_training=False, reuse=None)
            inputs = end_points_predict['inputs']
            predictions = end_points_predict['predictions']
            init = tf.global_variables_initializer()
            sess.run(init)
            tf.train.write_graph(sess.graph_def, output_dir, output_model)
    except Exception as e:
        print(e)
Example No. 22
def predict(frozen_model, training_cnf, predict_dir, image_size, output_path,
            num_classes, gpu_memory_fraction):
    cnf = util.load_module(training_cnf).cnf
    standardizer = cnf['standardizer']
    graph = util.load_frozen_graph(frozen_model)
    preprocessor = convert_preprocessor(image_size)
    predictor = SegmentPredictor(graph, standardizer, preprocessor)
    # images = data.get_image_files(predict_dir)
    image_names = [
        filename.strip() for filename in os.listdir(predict_dir)
        if filename.endswith('.jpg')
    ]

    iou = IOU()
    per_class_iou = iou.per_class_iou(predictor, predict_dir, image_size)
    meaniou = iou.meaniou(predictor, predict_dir, image_size)
    print(per_class_iou)
    print('Mean IOU %5.5f' % meaniou)
Example No. 23
def main(training_cnf, interactive, gpu_memory_fraction):
    """Train a word2vec model."""
    cnf = util.load_module(training_cnf).cnf
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options)) as sess:
        with tf.device("/cpu:0"):
            model = Word2Vec(cnf, sess)
            model.read_analogies()  # Read analogy questions
        for _ in range(cnf.get('epochs_to_train')):
            model.train()  # Process one epoch
            model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(sess,
                         os.path.join(cnf.get('save_path'), "model.ckpt"),
                         global_step=model.global_step)
        if interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())
Example No. 24
def try_config(args, cnf):
    """For trying out configurations.

  Args:
      args: command line arguments regarding training
      cnf: training configuration sampled from hyperband search space

  Returns:
      a dictionary containing final loss value and early stop flag
  """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

    width = 28
    height = 28

    train_images = mnist[0].images
    train_labels = mnist[0].labels

    validation_images = mnist[1].images
    validation_labels = mnist[1].labels

    data_set = DataSet(train_images, train_labels, validation_images,
                       validation_labels)

    model_def = util.load_module(args['model'])
    model = model_def.model

    learner = SupervisedLearner(model,
                                cnf,
                                classification=cnf['classification'],
                                is_summary=False,
                                num_classes=10,
                                verbosity=args['verbose'],
                                is_early_stop=cnf.get('is_early_stop', True))
    _early_stop, _loss = learner.fit(data_set,
                                     weights_from=None,
                                     start_epoch=1)

    return {'early_stop': _early_stop, 'loss': _loss}
Example No. 25
def test_model(model, input_shape, loss_type='softmax'):
    print(input_shape)
    input_shape = [int(c.strip()) for c in input_shape.split(",")]
    model_def = util.load_module(model)
    model = model_def.model
    inputs = tf.placeholder(tf.float32, input_shape)
    end_points = model(inputs, True, None)
    logits = end_points['logits']
    labels = tf.placeholder(tf.int32, (None, logits.get_shape().as_list()[1]))
    prediction = end_points['prediction']
    if loss_type == 'sigmoid':
        loss = tf.losses.sigmoid_cross_entropy(labels, logits)
    else:
        loss = tf.losses.softmax_cross_entropy(labels, logits)
    optimizer = tf.train.AdamOptimizer()
    train_op = optimizer.minimize(loss)
    feed_dict = {
        inputs: np.random.normal(size=inputs.get_shape().as_list()),
        # randint's high bound is exclusive; high=2 yields labels in {0, 1}.
        labels: np.random.randint(0, high=2, size=logits.get_shape().as_list())
    }
    mltest.test_suite(prediction,
                      train_op,
                      feed_dict=feed_dict,
                      output_range=(0, 1))
Example No. 26
def predict(model, output_layer, training_cnf, predict_dir, weights_from, tag,
            convert, image_size, sync, predict_type):
    util.check_required_program_args(
        [model, training_cnf, predict_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)

    preprocessor = convert_preprocessor(image_size) if convert else None
    prediction_iterator = create_prediction_iter(cnf, model_def.crop_size,
                                                 preprocessor, sync)

    if predict_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20, output_layer)
    elif predict_type == '1_crop':
        predictor = OneCropPredictor(model, cnf, weights_from,
                                     prediction_iterator, output_layer)
    elif predict_type == '10_crop':
        predictor = TenCropPredictor(model, cnf, weights_from,
                                     prediction_iterator,
                                     model_def.crop_size[0],
                                     model_def.image_size[0], output_layer)
    else:
        raise ValueError('Unknown predict_type: %s' % predict_type)
    predictions = predictor.predict(images)

    prediction_results_dir = os.path.abspath(
        os.path.join(predict_dir, '..', 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    if output_layer == 'predictions':
        names = data.get_names(images)
        image_prediction_probs = np.column_stack([names, predictions])
        headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
        title = np.array(['image'] + headers)
        image_prediction_probs = np.vstack([title, image_prediction_probs])
        prediction_probs_file = os.path.join(prediction_results_dir,
                                             'predictions.csv')
        np.savetxt(prediction_probs_file,
                   image_prediction_probs,
                   delimiter=",",
                   fmt="%s")
        print('Predictions saved to: %s' % prediction_probs_file)

        if cnf['classification']:
            class_predictions = np.argmax(predictions, axis=1)
            image_class_predictions = np.column_stack(
                [names, class_predictions])
            title = np.array(['image', 'label'])
            image_class_predictions = np.vstack(
                [title, image_class_predictions])
            prediction_class_file = os.path.join(prediction_results_dir,
                                                 'predictions_class.csv')
            np.savetxt(prediction_class_file,
                       image_class_predictions,
                       delimiter=",",
                       fmt="%s")
            print('Class predictions saved to: %s' % prediction_class_file)
    else:
        # feature extraction
        features_file = os.path.join(prediction_results_dir, 'features.npy')
        np.save(features_file, predictions)
        print('Features from layer: %s saved to: %s' %
              (output_layer, features_file))
Example No. 27
def predict(model, training_cnf, predict_dir, weights_from, dataset_name,
            convert, image_size, sync, predict_type):
    images = data.get_image_files(predict_dir)

    # For now, the models, cnfs, and weights are hard-coded.
    # They should come from program inputs or an ensembling config file.

    print('Creating predictor 1')
    weights_from1 = 'weights.sa/model-epoch-97.ckpt'
    model1 = 'examples/mnist_model_sa.py'
    training_cnf1 = 'examples/mnist_cnf.py'
    model_def1 = util.load_module(model1)
    model1 = model_def1.model
    cnf1 = util.load_module(training_cnf1).cnf
    standardizer = cnf1.get('standardizer', NoOpStandardizer())
    preprocessor = convert_preprocessor(
        model_def1.image_size[0]) if convert else None
    prediction_iterator1 = create_prediction_iter(cnf1, standardizer,
                                                  model_def1.crop_size,
                                                  preprocessor, sync)
    predictor1 = QuasiCropPredictor(model1, cnf1, weights_from1,
                                    prediction_iterator1, 20)
    # predictor1 = OneCropPredictor(model1, cnf1, weights_from1, prediction_iterator1)

    print('Creating predictor 2')
    weights_from2 = 'weights.rv/model-epoch-31.ckpt'
    model2 = 'examples/mnist_model.py'
    training_cnf2 = 'examples/mnist_cnf.py'
    model_def2 = util.load_module(model2)
    model2 = model_def2.model
    cnf2 = util.load_module(training_cnf2).cnf
    standardizer = cnf2.get('standardizer', NoOpStandardizer())
    preprocessor = convert_preprocessor(
        model_def2.image_size[0]) if convert else None
    prediction_iterator2 = create_prediction_iter(cnf2, standardizer,
                                                  model_def2.crop_size,
                                                  preprocessor, sync)
    predictor2 = QuasiCropPredictor(model2, cnf2, weights_from2,
                                    prediction_iterator2, 20)
    # predictor2 = OneCropPredictor(model2, cnf2, weights_from2, prediction_iterator2)

    predictor = EnsemblePredictor([predictor1, predictor2])

    predictions = predictor.predict(images)

    if not os.path.exists(os.path.join(predict_dir, '..', 'results')):
        os.mkdir(os.path.join(predict_dir, '..', 'results'))
    if not os.path.exists(
            os.path.join(predict_dir, '..', 'results', dataset_name)):
        os.mkdir(os.path.join(predict_dir, '..', 'results', dataset_name))

    names = data.get_names(images)
    image_prediction_probs = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_probs = np.vstack([title, image_prediction_probs])
    prediction_probs_file = os.path.abspath(
        os.path.join(predict_dir, '..', 'results', dataset_name,
                     'predictions.csv'))
    np.savetxt(prediction_probs_file,
               image_prediction_probs,
               delimiter=",",
               fmt="%s")
    print('Predictions saved to: %s' % prediction_probs_file)

    if cnf1['classification']:
        class_predictions = np.argmax(predictions, axis=1)
        image_class_predictions = np.column_stack([names, class_predictions])
        title = np.array(['image', 'label'])
        image_class_predictions = np.vstack([title, image_class_predictions])
        prediction_class_file = os.path.abspath(
            os.path.join(predict_dir, '..', 'results', dataset_name,
                         'predictions_class.csv'))
        np.savetxt(prediction_class_file,
                   image_class_predictions,
                   delimiter=",",
                   fmt="%s")
        print('Class predictions saved to: %s' % prediction_class_file)
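
EnsemblePredictor is assumed here to average the member models' class probabilities, which is the usual choice; the combination step itself is one line of NumPy (made-up numbers):

import numpy as np

# Hypothetical per-model probabilities for two images, three classes.
preds1 = np.array([[0.6, 0.3, 0.1], [0.2, 0.5, 0.3]])
preds2 = np.array([[0.8, 0.1, 0.1], [0.1, 0.4, 0.5]])

ensemble = np.mean([preds1, preds2], axis=0)
labels = np.argmax(ensemble, axis=1)
print(labels)  # [0 1]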