Example #1
from bigdl.nn.layer import Sequential, SpatialConvolution, View, Linear, LogSoftMax


def _create_cnn_model():
    model = Sequential()
    # 3 input channels -> 1 feature map with a 5x5 kernel; a 224x224 input
    # therefore yields a 220x220 map (224 - 5 + 1).
    model.add(SpatialConvolution(3, 1, 5, 5))
    # Flatten the single 220x220 feature map into a vector.
    model.add(View([1 * 220 * 220]))
    # Fully connected layer mapping the flattened features to 20 classes.
    model.add(Linear(1 * 220 * 220, 20))
    model.add(LogSoftMax())
    return model
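For context, a minimal sketch of how this model could be exercised on a random input; the 224x224 size is an assumption inferred from the 220x220 flattened view:

import numpy as np

model = _create_cnn_model()
# One 3-channel 224x224 image; the 5x5 convolution leaves a 220x220 map.
sample = np.random.rand(3, 224, 224)
log_probs = model.forward(sample)
print(log_probs.shape)  # (20,): log-probabilities over the 20 classes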
Example #2
    def test_flatten_layers_method(self):
        # Resolve the bundled test resources directory relative to this file.
        resource_path = os.path.join(
            os.path.split(__file__)[0], "../../../resources")
        model_path = os.path.join(resource_path,
                                  "models/bigdl/bigdl_lenet.model")
        model = Net.load_bigdl(model_path)

        # Wrapping the loaded LeNet in a Sequential should unroll into 12 layers.
        assert len(Sequential().add(model).flattened_layers()) == 12
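Should the test need more than the count, a hedged sketch of inspecting the unrolled layers (layer.name() is part of the standard BigDL layer API):

        # Hypothetical inspection of the flattened layer list:
        for layer in Sequential().add(model).flattened_layers():
            print(layer.name())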
Example #3
from zoo.common.nncontext import init_nncontext
from zoo.feature.common import ChainedPreprocessing
from zoo.feature.image import ImageSet, ImageResize, ImageMatToTensor, ImageSetToSample
from zoo.pipeline.api.net import TFNet
from bigdl.nn.layer import Sequential, Transpose, Contiguous, SelectTable

sc = init_nncontext()  # SparkContext used by ImageSet.read below


def predict(model_path, img_path, partition_num=4):
    inputs = "ToFloat:0"
    outputs = ["num_detections:0", "detection_boxes:0",
               "detection_scores:0", "detection_classes:0"]
    detector = TFNet(model_path, inputs, outputs)
    model = Sequential()
    # Transpose the NCHW tensors produced by Analytics Zoo into the NHWC
    # layout the TensorFlow detection graph expects.
    model.add(Transpose([(2, 4), (2, 3)]))
    model.add(Contiguous())
    model.add(detector)
    # Select detection_boxes, the second entry of the output table.
    model.add(SelectTable(2))
    image_set = ImageSet.read(img_path, sc, partition_num)
    transformer = ChainedPreprocessing([ImageResize(256, 256), ImageMatToTensor(),
                                        ImageSetToSample()])
    transformed_image_set = image_set.transform(transformer)
    output = model.predict_image(transformed_image_set.to_image_frame(),
                                 batch_per_partition=1)
    # Print the detection box with the highest score of the first prediction result.
    result = output.get_predict().first()
    print(result[1][0])
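A hypothetical invocation; both paths are placeholders, and the frozen graph is assumed to expose the tensor names listed above:

predict("/path/to/frozen_inference_graph.pb", "/path/to/images", partition_num=4)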
Example #4
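This example assumes the TF-slim Inception v1 graph has already been built, which is where the `images` placeholder and `end_points` dict below come from. A minimal sketch of that setup, assuming the tensorflow/models slim package is on the PYTHONPATH:

import tensorflow as tf
from nets import inception_v1
slim = tf.contrib.slim

# 224x224 RGB inputs, the size Inception v1 was trained on.
images = tf.placeholder(dtype=tf.float32, shape=(None, 224, 224, 3))
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    # num_classes=1001 matches the public slim ImageNet checkpoint.
    logits, end_points = inception_v1.inception_v1(images, num_classes=1001)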
import tensorflow as tf

# `images` and `end_points` are assumed to come from building the TF-slim
# Inception v1 graph beforehand (see the sketch above).
sess = tf.Session()
saver = tf.train.Saver()
# Edit this path to point at the checkpoint you downloaded; an HDFS path
# such as "hdfs:///slim/checkpoint/inception_v1.ckpt" also works.
saver.restore(sess, "file:///home/hduser/slim/checkpoint/inception_v1.ckpt")

# Export the graph up to the chosen endpoint so that TFNet can load it.
from zoo.util.tf import export_tf
avg_pool = end_points['Mixed_3c']
export_tf(sess,
          "file:///home/hduser/slim/tfnet/",
          inputs=[images],
          outputs=[avg_pool])

from zoo.pipeline.api.net import TFNet
amodel = TFNet.from_export_folder("file:///home/hduser/slim/tfnet/")

from bigdl.nn.layer import Sequential, Transpose, Contiguous, Linear, MulConstant, View

full_model = Sequential()
# Transpose the NCHW input from Analytics Zoo into TensorFlow's NHWC layout.
full_model.add(Transpose([(2, 4), (2, 3)]))
# Rescale pixel values from [0, 255] to [0, 1].
scalar = 1. / 255
full_model.add(MulConstant(scalar))
full_model.add(Contiguous())
full_model.add(amodel)
# Flatten the extracted features and add a new 5-class classifier head.
full_model.add(View([1024]))
full_model.add(Linear(1024, 5))
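A natural continuation is to fine-tune full_model on labelled images, mirroring the NNClassifier pattern in Example #5 below. A hedged sketch, where trainingDF and its "image" column are assumptions:

from bigdl.nn.criterion import CrossEntropyCriterion
from zoo.feature.common import ChainedPreprocessing
from zoo.feature.image import RowToImageFeature, ImageResize, ImageMatToTensor, ImageFeatureToTensor
from zoo.pipeline.nnframes import NNClassifier

transformer = ChainedPreprocessing(
    [RowToImageFeature(), ImageResize(224, 224), ImageMatToTensor(),
     ImageFeatureToTensor()])
classifier = NNClassifier(full_model, CrossEntropyCriterion(), transformer) \
    .setBatchSize(32).setMaxEpoch(2).setFeaturesCol("image")
tuned_model = classifier.fit(trainingDF)  # trainingDF is assumed to exist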
Example #5
from zoo.pipeline.api.net import Net
from zoo.feature.common import ChainedPreprocessing
from zoo.feature.image import RowToImageFeature, ImageResize, ImageCenterCrop, \
    ImageChannelNormalize, ImageMatToTensor, ImageFeatureToTensor
from zoo.pipeline.nnframes import NNClassifier
from bigdl.nn.layer import Sequential, Linear, View, LogSoftMax
from bigdl.nn.criterion import CrossEntropyCriterion

# Load the pre-trained Caffe model; model_def_path and model_weight_path are
# assumed to point at the prototxt definition and the weights file.
preTrained_model = Net.load_caffe(model_def_path, model_weight_path)

# Create a new model by removing the layers after pool5/drop_7x7_s1.
part_model = preTrained_model.new_graph(["pool5/drop_7x7_s1"])

# Optionally freeze the layers from the input up to pool4/3x3_s2 inclusive.
# part_model.freeze_up_to(["pool4/3x3_s2"])

# Append a new linear layer with numClass outputs; in this example, 6 scene classes.
scene_network = Sequential().add(part_model).add(View([1024])) \
    .add(Linear(1024, 6)).add(LogSoftMax())

transformer = ChainedPreprocessing(
    [RowToImageFeature(), ImageResize(256, 256), ImageCenterCrop(224, 224),
     ImageChannelNormalize(123.0, 117.0, 104.0), ImageMatToTensor(),
     ImageFeatureToTensor()])

classifier = NNClassifier(scene_network, CrossEntropyCriterion(), transformer) \
    .setLearningRate(0.001) \
    .setLearningRateDecay(1e-3) \
    .setBatchSize(20) \
    .setMaxEpoch(2) \
    .setFeaturesCol("image") \
    .setCachingSample(False)

# Train the model.
scene_classification_model = classifier.fit(trainingDF)
print("Finished training")

# Evaluate the model.
print("Start evaluation:")
predictionDF = scene_classification_model.transform(validationDF).cache()
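To close the loop, a hedged sketch of scoring the cached predictions with Spark ML's evaluator; it assumes the label column is named "label" and the predictions land in "prediction":

from pyspark.ml.evaluation import MulticlassClassificationEvaluator

evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictionDF)
print("Validation accuracy = %g" % accuracy)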