    # Assumes these imports from the surrounding script:
    #   from pyspark.sql.functions import udf
    #   from pyspark.sql.types import IntegerType
    #   from tensorflowonspark.pipeline import TFEstimator, TFModel
    df.show()

    if args.mode == 'train':
        estimator = TFEstimator(main_fun, args) \
            .setInputMapping({'image': 'image', 'label': 'label'}) \
            .setModelDir(args.model_dir) \
            .setExportDir(args.export_dir) \
            .setClusterSize(args.cluster_size) \
            .setTensorboard(args.tensorboard) \
            .setEpochs(args.epochs) \
            .setBatchSize(args.batch_size) \
            .setGraceSecs(60)
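        # fit() launches a TensorFlowOnSpark training job over the cluster and
        # returns a fitted TFModel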
        model = estimator.fit(df)
    else:  # args.mode == 'inference':
        # using a trained/exported model
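        # the input/output mappings tie DataFrame columns to tensor names in
        # the saved_model's serving signature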
        model = TFModel(args) \
            .setInputMapping({'image': 'conv2d_input'}) \
            .setOutputMapping({'dense_1': 'prediction'}) \
            .setSignatureDefKey('serving_default') \
            .setExportDir(args.export_dir) \
            .setBatchSize(args.batch_size)

        def argmax_fn(l):
            # index of the largest value, i.e. the predicted class
            return max(range(len(l)), key=lambda i: l[i])

        argmax = udf(argmax_fn, IntegerType())

        preds = model.transform(df).withColumn('argmax', argmax('prediction'))
        preds.show()
        preds.write.json(args.output)
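
The example assumes df already holds an 'image' column of flattened pixel vectors and an integer 'label' column. A minimal, hypothetical sketch of building such a DataFrame (column names taken from the input mapping above; shapes and values are purely illustrative):

import numpy as np
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
images = np.random.rand(8, 784).tolist()   # eight fake flattened 28x28 images
labels = [i % 10 for i in range(8)]        # fake integer labels
df = spark.createDataFrame(list(zip(images, labels)), ['image', 'label'])
df.show()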
Example #2
            .setExportDir(args.export_dir) \
            .setClusterSize(args.cluster_size) \
            .setNumPS(args.num_ps) \
            .setInputMode(TFCluster.InputMode.TENSORFLOW) \
            .setTFRecordDir(args.tfrecord_dir) \
            .setProtocol(args.protocol) \
            .setReaders(args.readers) \
            .setTensorboard(args.tensorboard) \
            .setEpochs(args.epochs) \
            .setBatchSize(args.batch_size) \
            .setSteps(args.steps)
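    # InputMode.TENSORFLOW: the TensorFlow nodes read TFRecords from
    # args.tfrecord_dir directly instead of being fed Spark partitions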
    model = estimator.fit(df)
else:
    # use a previously trained/exported model
    model = TFModel(args) \
          .setExportDir(args.export_dir) \
          .setBatchSize(args.batch_size)

# NO INFERENCING
if args.inference_mode == 'none':
    sys.exit(0)

# INFER USING EXPORTED SIGNATURES OF TENSORFLOW SAVED_MODEL
elif args.inference_mode == 'signature':
    model.setModelDir(None)
    model.setExportDir(
        args.export_dir)  # load saved_model from args.export_dir
    model.setTagSet(
        tf.saved_model.tag_constants.SERVING)  # using default SERVING tagset
    model.setInputMapping(
        {'image': 'image'})
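    # Assumed continuation (the original snippet breaks off above): finish the
    # signature-based setup with an output mapping, then run inference. The
    # tensor/column names here are illustrative, not from the original.
    model.setOutputMapping({'prediction': 'prediction'})
    preds = model.transform(df)
    preds.show()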
Example #3
  df.show()

  if args.mode == 'train':
    estimator = TFEstimator(main_fun, args) \
        .setInputMapping({'image': 'image', 'label': 'label'}) \
        .setModelDir(args.model_dir) \
        .setExportDir(args.export_dir) \
        .setClusterSize(args.cluster_size) \
        .setTensorboard(args.tensorboard) \
        .setEpochs(args.epochs) \
        .setBatchSize(args.batch_size) \
        .setGraceSecs(60)
    model = estimator.fit(df)
  else:  # args.mode == 'inference':
    # using a trained/exported model
    model = TFModel(args) \
        .setInputMapping({'image': 'features'}) \
        .setOutputMapping({'logits': 'prediction'}) \
        .setExportDir(args.export_dir) \
        .setBatchSize(args.batch_size)

    def argmax_fn(l):
      # index of the largest value, i.e. the predicted class
      return max(range(len(l)), key=lambda i: l[i])

    argmax = udf(argmax_fn, IntegerType())

    preds = model.transform(df).withColumn('argmax', argmax('prediction'))
    preds.show()
    preds.write.json(args.output)
Example #4
    #mergeCols = udf(lambda Date, time: Date + time)
    #mem_data.withColumn("dt", mergeCols(col("Date"), col("time"))).show(1,False)

    # keep the first i rows and convert the (pandas) frame to a Spark DataFrame
    mem_data = mem_data.head(i)
    mem_data = spark.createDataFrame(mem_data)
    #mem_data.resample('H')

    #mem_data.show(60)

    model = TFModel(args) \
          .setInputMapping({'%used': 'gru_16_input'}) \
          .setOutputMapping({'dense_6': 'prediction'}) \
          .setSignatureDefKey('serving_default') \
          .setExportDir(args.export_dir) \
          .setBatchSize(args.batch_size)
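    # transform() scores each row of mem_data; the resulting 'prediction'
    # column is named by the output mapping on the dense_6 output tensor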

    preds = model.transform(mem_data)
    #preds.show()

    end = time.process_time()
    proccess_time_list.append(end - start)

    # write predictions to a timestamped output directory, then read them back
    output_dir = datetime.datetime.now().strftime('%Y%m%d%H%M%S-prediction')
    tf.io.gfile.makedirs(ATKH_mem_used_output_path + "/" + output_dir)
    #print("=======output_path : "+ATKH_mem_used_output_path+"/"+output_dir)
    preds.write.json(ATKH_mem_used_output_path + "/" + output_dir, mode='append')
    result_json = spark.read.json(ATKH_mem_used_output_path + "/" + output_dir + "/*.json")
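    # Assumed follow-on (not in the original): inspect the reloaded predictions;
    # 'prediction' is the column produced by the output mapping above.
    result_json.printSchema()
    result_json.select('prediction').show(5, truncate=False)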