Example #1
def main(_):
    util.check_tensorflow_version()

    processor = ProcessorSdc()
    #processor.mu_law_steering = True
    #processor.standardize_labels = False
    processor.standardize_input = 'frame'

    feed = FeedImagesWithLabels(dataset=SdcData(), processor=processor)

    model_params = {
        'outputs': {
            'steer': 1,
            #    'xyz': 2,
        },

        #'network': 'resnet_v1_152',
        #'version': 1,

        #'network': 'nvidia_sdc',   # 160x120
        #'version': 2,

        #'network': 'resnet_v1_101',  # 192x128
        #'version': 3,

        #'network': 'resnet_v1_50',
        #'version': 5,
        'network': FLAGS.root_network,
        'version': FLAGS.top_version,
    }
    model = ModelSdc(params=model_params)

    exec_eval.evaluate(feed, model)
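Each of these entry points assumes a flag-definition and tf.app.run() harness that the listing omits. A minimal sketch for this example, using the TF 1.x tf.app.flags API (the flag names match the ones the code reads; the defaults are illustrative, not taken from the repo):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'root_network', 'resnet_v1_50', 'Network to use as the model root.')
tf.app.flags.DEFINE_integer(
    'top_version', 5, 'Version of the output head placed on the root network.')

if __name__ == '__main__':
    tf.app.run()  # parses flags, then calls main(_)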
Example #2
def main(_):
    util.check_tensorflow_version()

    processor = ProcessorSdc()
    processor.standardize_input = 'frame'
    #processor.num_input_images = 2

    feed = FeedImagesWithLabels(dataset=SdcData(), processor=processor)

    model_params = {
        'outputs': {
            'steer': 1,
            #    'xyz': 2,
        },

        #'network': 'resnet_v1_50',
        #'version': 5,
        'network': FLAGS.root_network,
        'version': FLAGS.top_version,
        'bayesian': False,
        'lock_root': FLAGS.lock_root,
        'regression_loss': 'mse',
    }
    model = ModelSdc(params=model_params)

    exec_train.train(feed, model)
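Assuming a harness like the sketch after Example #1, a hypothetical training invocation might look like the line below (the script name is invented; the flags are the ones this example reads):

python sdc_train.py --root_network resnet_v1_50 --top_version 5 --lock_root=False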
Example #3
def main(_):
    util.check_tensorflow_version()

    dataset = ExampleData(subset='')

    processor = ProcessorImagenet()
    processor.output_offset = FLAGS.output_offset  # shift predicted class indices, e.g. for 1001- vs 1000-class checkpoints

    feed = FeedImagesWithLabels(dataset=dataset, processor=processor)

    model_params = {
        'num_classes': feed.num_classes_for_network(),
        'network': FLAGS.network,
    }
    if FLAGS.my:
        # My variants of Resnet, Inception, and VGG networks
        model = ModelMySlim(params=model_params)
    else:
        # Google's tf.slim models
        model = ModelGoogleSlim(params=model_params)
        model.check_norm(processor.normalize)

    output, num_entries = exec_predict.predict(feed, model)

    output_columns = ['Img']
    if FLAGS.output_prob:
        # Dump class probabilities to CSV file.
        class_labels = ['c%d' % c for c in range(dataset.num_classes())]
        output_columns += class_labels
        output = np.vstack([np.column_stack([o[1], o[0]]) for o in output])
    else:
        # Dump class index to CSV file
        output_columns += ['Class']
        output = np.vstack(
            [np.column_stack([o[1], np.argmax(o[0], axis=1)]) for o in output])

    df = pd.DataFrame(output, columns=output_columns)
    df.Img = df.Img.apply(lambda x: os.path.basename(x.decode()))
    df.to_csv('./output.csv', index=False)
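For orientation, the resulting output.csv carries the columns assembled above: Img plus either per-class probability columns c0..c(N-1) when --output_prob is set, or a single Class column otherwise. A hypothetical class-index dump (filenames and class values invented):

Img,Class
00001.jpg,3
00002.jpg,7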
Example #4
def main(_):
    util.check_tensorflow_version()

    dataset = ImagenetData(subset=FLAGS.subset)

    processor = ProcessorImagenet()
    processor.label_offset = FLAGS.label_offset  # shift ground-truth labels, e.g. when the checkpoint was trained with a background class

    feed = FeedImagesWithLabels(dataset=dataset, processor=processor)

    model_params = {
        'num_classes': feed.num_classes_for_network(),
        'network': FLAGS.network,
    }

    if FLAGS.my:
        # My variants of Resnet, Inception, and VGG networks
        model = ModelMySlim(params=model_params)
    else:
        # Google's tf.slim models
        model = ModelGoogleSlim(params=model_params)
        model.check_norm(processor.normalize)

    exec_eval.evaluate(feed=feed, model=model)
Example #5
def main(_):
    util.check_tensorflow_version()
    assert os.path.isfile(FLAGS.checkpoint_path) or os.path.isfile(FLAGS.ensemble_path), \
        'Either --checkpoint_path or --ensemble_path must point to an existing file'

    model_args_list = []
    if FLAGS.checkpoint_path:
        model_args_list.append(
            {
                'root_network': FLAGS.root_network,
                'top_version': FLAGS.top_version,
                'image_norm': FLAGS.image_norm,
                'image_size': FLAGS.image_size,
                'image_aspect': FLAGS.image_aspect,
                'checkpoint_path': FLAGS.checkpoint_path,
                'bayesian': FLAGS.bayesian,
                'weight': 1.0,
            }
        )
    else:
        ensemble_df = pd.read_csv(FLAGS.ensemble_path, index_col=None)  # one row per model; see the column sketch after this listing
        model_args_list += ensemble_df.to_dict('records')

    model_params_common = {
        'outputs': {
            'steer': 1,
        #    'xyz': 2,
        },
    }
    model_list = []
    for i, args in enumerate(model_args_list):
        print(args)
        model_name = 'model_%d' % i
        model_params = deepcopy(model_params_common)
        model_params['network'] = args['root_network']
        model_params['version'] = args['top_version']
        model_params['bayesian'] = FLAGS.bayesian
        model = ModelSdc(params=model_params)

        processor_params = {}
        processor_params['image_norm'] = args['image_norm']
        processor_params['image_size'] = args['image_size']
        processor_params['image_aspect'] = args['image_aspect']
        processor = ProcessorSdc(params=processor_params)

        model_list.append({
            'model': model,
            'processor': processor,
            'weight': args['weight'],
            'name': model_name,
            'checkpoint_path': args['checkpoint_path']
        })

    name_prefix = FLAGS.name
    with tf.Graph().as_default() as g:
        batch_size = 1 if not FLAGS.samples else FLAGS.samples
        build_export_graph(models=model_list, batch_size=batch_size)
        model_variables = tf.contrib.framework.get_model_variables()
        saver = tf.train.Saver(model_variables)

        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
            sess.run(init_op)

            g_def = g.as_graph_def(add_shapes=True)
            tf.train.write_graph(g_def, './', name='%s-graph_def.pb.txt' % name_prefix)

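            # Restore each model's weights from its own checkpoint, remapping
            # variable names into that model's prefix scope in the merged graph.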
            for m in model_list:
                checkpoint_variable_set = set()
                checkpoint_path, global_step = util.resolve_checkpoint_path(m['checkpoint_path'])
                if not checkpoint_path:
                    print('No checkpoint file found at %s' % m['checkpoint_path'])
                    return
                reader = tf.train.NewCheckpointReader(checkpoint_path)
                checkpoint_variable_set.update(reader.get_variable_to_shape_map().keys())
                variables_to_restore = m['model'].variables_to_restore(
                    restore_outputs=True,
                    checkpoint_variable_set=checkpoint_variable_set,
                    prefix_scope=m['name'])

                saver_local = tf.train.Saver(variables_to_restore)
                saver_local.restore(sess, checkpoint_path)
                print('Successfully loaded model from %s at step=%d.' % (checkpoint_path, global_step))

            saver.export_meta_graph('./%s-meta_graph.pb.txt' % name_prefix, as_text=True)
            saver.save(sess, './%s-checkpoint' % name_prefix, write_meta_graph=True)
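For reference, a sketch of the ensemble CSV that Example #5 reads when --checkpoint_path is not given. It needs one row per model with exactly the columns the loop consumes (root_network, top_version, image_norm, image_size, image_aspect, checkpoint_path, weight); every value below is illustrative, not taken from a real run:

root_network,top_version,image_norm,image_size,image_aspect,checkpoint_path,weight
resnet_v1_50,5,frame,224,fill,./run1/model.ckpt-100000,0.6
resnet_v1_101,3,frame,192,fill,./run2/model.ckpt-80000,0.4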