コード例 #1
0
    # NOTE(review): fragment — the OptionParser construction and the earlier
    # add_option calls (data_path, token_length, etc.) are outside this view.
    parser.add_option("-l",
                      "--learning_rate",
                      dest="learning_rate",
                      default="0.01")
    parser.add_option("--log_dir", dest="log_dir", default="/tmp/.bigdl")
    parser.add_option("--model", dest="model")

    (options, args) = parser.parse_args(sys.argv)
    # optparse yields option values as strings; convert to numeric types here.
    data_path = options.data_path
    token_length = int(options.token_length)
    sequence_len = int(options.sequence_length)
    max_words_num = int(options.max_words_num)
    training_split = float(options.training_split)
    batch_size = int(options.batch_size)

    # Create the Spark context used by the rest of the pipeline.
    sc = get_nncontext(
        create_spark_conf().setAppName("Text Classification Example"))

    print('Processing text dataset...')
    texts = get_news20(base_dir=data_path)
    text_data_rdd = sc.parallelize(texts, options.partition_num)

    # word_meta: presumably (word, metadata) pairs ordered by frequency,
    # so slicing below drops the most frequent words — TODO confirm against
    # analyze_texts.
    word_meta = analyze_texts(text_data_rdd)
    # Remove the top 10 words roughly. You might want to fine tune this.
    word_meta = dict(word_meta[10:max_words_num])
    # NOTE(review): "word_mata_broadcast" looks like a typo for
    # "word_meta_broadcast"; left unchanged in case unseen code below uses it.
    word_mata_broadcast = sc.broadcast(word_meta)

    # Load pre-trained GloVe embeddings with the requested dimensionality.
    word2vec = get_glove(base_dir=data_path, dim=token_length)
    # Ignore those unknown words.
    filtered_word2vec = dict(
        (w, v) for w, v in word2vec.items() if w in word_meta)
    filtered_word2vec_broadcast = sc.broadcast(filtered_word2vec)
コード例 #2
0
         # NOTE(review): fragment — the start of this transformer chain
         # (and the enclosing function's signature) is outside this view.
         ImageSetToSample()])
    transformed_image_set = image_set.transform(transformer)
    # Run distributed inference; batch_per_partition=1 presumably processes
    # one image per partition at a time — TODO confirm against the model API.
    output = model.predict_image(transformed_image_set.to_image_frame(),
                                 batch_per_partition=1)
    # Print the detection box with the highest score of the first prediction result.
    result = output.get_predict().first()
    print(result[1][0])


if __name__ == "__main__":
    # Command-line entry point for the TFNet object detection example.
    cli = OptionParser()
    cli.add_option("--image",
                   type=str,
                   dest="img_path",
                   help="The path where the images are stored, "
                   "can be either a folder or an image path")
    cli.add_option("--model",
                   type=str,
                   dest="model_path",
                   help="The path of the TensorFlow object detection model")
    cli.add_option("--partition_num",
                   type=int,
                   dest="partition_num",
                   default=4,
                   help="The number of partitions")
    opts, _unused = cli.parse_args(sys.argv)

    # Module-level Spark context; `sc` keeps its original global name in case
    # other code in this file reads it.
    sc = get_nncontext("TFNet Object Detection Example")

    predict(opts.model_path, opts.img_path, opts.partition_num)
コード例 #3
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import cv2

from zoo.common.nncontext import get_nncontext
from zoo.feature.image.imageset import *
from zoo.models.image.objectdetection.object_detector import *

# Module-level Spark context shared by the function(s) below.
sc = get_nncontext(create_spark_conf().setAppName("Object Detection Example"))

# Positional command-line arguments; presumably parsed and fed to predict()
# by code below this view — TODO confirm.
parser = argparse.ArgumentParser()
parser.add_argument('model_path', help="Path where the model is stored")
parser.add_argument('img_path', help="Path where the images are stored")
parser.add_argument('output_path', help="Path to store the detection results")


def predict(model_path, img_path, output_path):
    """Run object detection over the images in ``img_path`` and visualize.

    :param model_path: path to a pre-trained object detection model,
        loaded via ``ObjectDetector.load_model``.
    :param img_path: path of the input images (folder or single image).
    :param output_path: destination for the detection results.
        NOTE(review): not used in the visible lines — presumably consumed by
        the continuation of this function beyond this view; confirm.
    """
    model = ObjectDetector.load_model(model_path)
    # Read images into a distributed ImageSet using the module-level context.
    image_set = ImageSet.read(img_path, sc)
    output = model.predict_image_set(image_set)

    # Draw predicted boxes/labels onto the images and collect them locally
    # as JPEG-encoded results.
    config = model.get_config()
    visualizer = Visualizer(config.label_map(), encoding="jpg")
    visualized = visualizer(output).get_image(to_chw=False).collect()