Esempio n. 1
0
def sampleMesh(paths):
    """Sample SDF training (and optional validation) data for one mesh.

    Args:
        paths: two-element sequence ``(meshPath, recordPath)``: the mesh
            file to sample and the output ``.npz`` record path.

    Side effects:
        Writes a compressed npz at ``recordPath`` with keys ``train``
        (N x 4 array of xyz+sdf) and ``validation`` (same layout, or an
        empty list when ``args.validationRes <= 0``).

    NOTE(review): reads sampling hyper-parameters from the module-level
    ``args`` namespace -- confirm it is parsed before this is called
    (e.g. when dispatched to a worker process).
    """
    # unpack instead of indexing; also fails fast on malformed input
    meshPath, recordPath = paths
    print("[INFO] Generating Data for mesh: {}".format(meshPath))
    # open mesh
    mesh = gm.Mesh(meshPath=meshPath)
    # generate sample queries
    sdf = gm.SDF(mesh)
    cubeMarcher = gm.CubeMarcher()
    # query for train data
    print("[INFO] sampling {} training points".format(args.numPoints))
    sampler = gm.PointSampler(mesh,
                              ratio=args.randomRatio,
                              std=args.std,
                              verticeSampling=False)
    queries = sampler.sample(args.numPoints)
    print("[INFO] inferring sdf of training points...")
    S = sdf.query(queries)
    trainSDF = np.append(queries, S, axis=1)

    valSDF = None
    if args.validationRes > 0:
        # query for validation data on a uniform res^3 grid
        print("[INFO] sampling {} validation points".format(
            args.validationRes**3))
        queries = cubeMarcher.createGrid(args.validationRes)
        print("[INFO] inferring sdf of validation points...")
        S = sdf.query(queries)
        valSDF = np.append(queries, S, axis=1)

    print("[INFO] writing data to npz")

    # idiomatic identity test (PEP 8): `is not None`, not `not ... is None`
    np.savez_compressed(recordPath,
                        train=trainSDF,
                        validation=valSDF if valSDF is not None else [])
Esempio n. 2
0
            if np.all(res_norm < tol): break
    return uvw

if __name__ == "__main__":
    # this should handle folders of meshes, parallelizing the meshing to avail cores
    parser = argparse.ArgumentParser(description='given a sdf weight set, generate mesh')
    parser.add_argument('weightPath', help='path to weight sets!')
    parser.add_argument('meshPath', help='path to corresponding mesh geometries.') 
    parser.add_argument('--archPath', default=None)
    parser.add_argument('--res', default=128, type=int)
    args = parser.parse_args()

    # model names = the .h5 filenames under weightPath with the extension stripped
    trainedModels = list([f.split('.h5')[0] for f in os.listdir(args.weightPath) if '.h5' in f])
    print("TrainedModels:")
    print(trainedModels)
    cubeMarcher = gm.CubeMarcher()
    # uniform query grid over the unit domain (expected shape: res^3 x 3)
    uniformGrid = cubeMarcher.createGrid(args.res)
    # parametricGrid = (uniformGrid)
    # uniformGrid = uniformGrid
    ### DEBUGGING
    print('uniformGrid type:', type(uniformGrid))
    print('uniformGrid shape:', uniformGrid.shape[0])
    print('uniformGrid shape:', uniformGrid.shape[1])
    # np.savetxt("org.csv", uniformGrid, delimiter=",")
    # uniformGrid = (uniformGrid + 1) / 2.
    # sanity check: grid rows must be xyz points
    # NOTE(review): assert is stripped under `python -O`; raise instead if this
    # check matters in production
    assert uniformGrid.shape[1] == 3
    # identity transform for now; the trilinear mapping experiments below are
    # disabled (commented out)
    transformed = uniformGrid
    # transformed = InverseTrilinearMap(hex_pts, uniformGrid)
    # forward = TrilinearMap(uniformGrid, hex_pts)
    # new_forward = TrilinearMap(transformed, hex_pts)
    #     jobs.append((hex_pts, xyz))
Esempio n. 3
0
if __name__ == "__main__":
    # this should handle folders of meshes, parallelizing the meshing to avail cores
    parser = argparse.ArgumentParser(
        description='given a sdf weight set, generate mesh')
    parser.add_argument('weightPath', help='path to weight sets!')
    parser.add_argument('meshPath',
                        help='path to corresponding mesh geometries.')
    parser.add_argument('--archPath', default=None)
    parser.add_argument('--res', default=128, type=int)
    args = parser.parse_args()

    # model names = the .h5 filenames under weightPath with the extension stripped
    trainedModels = list(
        [f.split('.h5')[0] for f in os.listdir(args.weightPath) if '.h5' in f])

    cubeMarcher = gm.CubeMarcher()

    # shared uniform query grid, reused for every model below
    uniformGrid = cubeMarcher.createGrid(args.res)

    # NOTE(review): file is never explicitly closed in the visible code --
    # consider a `with open(...)` context manager
    csvFile = open('results.csv', 'w')

    csvWriter = csv.writer(csvFile, delimiter=',')
    csvWriter.writerow(
        ['Name', 'Grid Error', 'Surface Error', 'Importance Error'])

    # evaluate each trained model against its corresponding mesh;
    # weight and mesh files are paired by shared basename
    for m in trainedModels:
        modelPath = os.path.join(args.weightPath, m)
        meshPath = os.path.join(args.meshPath, m)
        try:
            print("[INFO] Loading model: ", m)
            sdfModel = loadModel(modelPath, archPath=args.archPath)
Esempio n. 4
0
def marchMesh(S, res):
    """Reconstruct a mesh from SDF values via marching cubes.

    Args:
        S: SDF values evaluated on a uniform res^3 grid.
        res: per-axis resolution of the grid.

    Returns:
        The marched mesh produced by the cube marcher.
    """
    marcher = gm.CubeMarcher()
    grid = marcher.createGrid(res)
    marcher.march(grid, S)
    return marcher.getMesh()
Esempio n. 5
0
def inferSDF(sdfModel, res):
    """Evaluate an SDF model on a uniform res^3 grid.

    Args:
        sdfModel: trained model exposing ``predict(points)``.
        res: per-axis grid resolution.

    Returns:
        The negated model predictions (sign convention flip).
    """
    marcher = gm.CubeMarcher()
    queryGrid = marcher.createGrid(res)
    predictions = sdfModel.predict(queryGrid)
    return -predictions
Esempio n. 6
0
def singleModelTrain(
  meshFn, 
  precomputedFn,
  config,
  showVis = True):
  """Train one SDF model from a mesh file OR from precomputed samples.

  Args:
    meshFn: path to a mesh to sample on the fly, or None.
    precomputedFn: path to precomputed SDF data (.h5 or npz-style), or None.
      Exactly one of meshFn / precomputedFn must be non-None, else raises.
    config: configuration object; fields read here include saveDir,
      samplingMethod, validationRes, batchSize, epochLength, epochs,
      queryPath, reconstructionRes, name.
    showVis: when True, reconstruct the learned SDF with marching cubes
      and display (and later save) the resulting mesh.

  Side effects: trains the model, saves it under config.saveDir, and may
  write a <name>.obj plus training plots.
  """

  outputDir = os.path.abspath(config.saveDir)

  if (not meshFn is None):
    cubeMarcher = gm.CubeMarcher()    
    mesh = gm.Mesh(meshFn, doNormalize=True)

    samplingMethod = config.samplingMethod

    sdf = gm.SDF(mesh)

    # choose the point sampler according to the configured strategy
    if samplingMethod['type'] == 'SurfaceUniform':
      pointSampler = gm.PointSampler(mesh, ratio = samplingMethod['ratio'], std = samplingMethod['std'])
    elif samplingMethod['type'] == 'Uniform':
      pointSampler = gm.PointSampler(mesh, ratio = 1.0)
    elif samplingMethod['type'] == 'Importance':
      pointSampler = gm.ImportanceSampler(mesh, int(config.epochLength/samplingMethod['ratio']), samplingMethod['weight'])
    else:
      # NOTE(review): raising a plain str is itself a TypeError in Python 3
      # ("exceptions must derive from BaseException") -- should be
      # raise ValueError("unknown sampling method")
      raise("uhhhh")

    # create data sequences
    validationGrid = cubeMarcher.createGrid(config.validationRes) if config.validationRes > 0 else None
    print("[INFO], created validation grid")
    sdfTrain, sdfEval = createSequences(sdf, validationGrid, pointSampler, config.batchSize, config.epochLength)
    print("[INFO], here hardik after creating sequences")
  elif (not precomputedFn is None) :
    # precomputed!
    if 'h5' in precomputedFn:
      if config.queryPath is None:
        # NOTE(review): same str-raise issue as above -- this is a TypeError,
        # not the intended error message
        raise("Must supply path to queries if using h5 data!")
      else:
        # query points and sdf values live in two separate h5 files
        f = h5py.File(config.queryPath, 'r')
        queries = np.array(f['queries'])
        f = h5py.File(precomputedFn, 'r')
        S = np.array(f['sdf'])
        # ensure sdf values are a column vector for concatenation below
        S = S.reshape((S.shape[0],1))

        # optionally re-weight the uniform samples by importance
        if config.samplingMethod['type'] == 'Importance':
          importanceSampler = gm.ImportanceSampler(None, S.shape[0], config.samplingMethod['weight'])
          queries,S = importanceSampler.sampleU(int(S.shape[0]/config.samplingMethod['ratio']), queries, S)

        # train rows are xyz + sdf, matching the npz layout
        precomputedData = {
          'train': np.concatenate((queries, S), axis=1)
        }
    else:
      # npz-style archive with 'train' (and optionally 'validation') keys
      precomputedData = np.load(precomputedFn)

    trainData = precomputedData['train']
    validationData = precomputedData['validation'] if 'validation' in precomputedData else None
    
    sdfTrain = SDFSequence(
      trainData,
      None,
      config.batchSize
    )

    if validationData is None:
      sdfEval = None
    else:
      sdfEval = SDFSequence(
        validationData,
        None,
        config.batchSize
      )

  else:
    raise(ValueError("uhh I need data"))


  # create model
  print("[INFO] Initiate the model...")
  sdfModel = model.SDFModel(config)
  print("[INFO] Starting to train the model...")
  # train the model
  sdfModel.train(
    trainGenerator = sdfTrain,
    validationGenerator = sdfEval,
    epochs = config.epochs
  )

  # prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
  # # Define model for pruning.
  # pruning_params = {
  #       'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,
  #                                                               final_sparsity=0.80,
  #                                                               begin_step=0,
  #                                                               end_step=end_step)
  # }


  if showVis:
    # predict against grid
    # NOTE(review): cubeMarcher is only bound in the meshFn branch above --
    # precomputedFn + showVis=True would NameError here; verify callers
    rGrid = cubeMarcher.createGrid(config.reconstructionRes)
    S = sdfModel.predict(rGrid)

    # plot results
    # sdfModel.plotTrainResults()

    cubeMarcher.march(rGrid,S)
    marchedMesh = cubeMarcher.getMesh() 
    marchedMesh.show()

  # NOTE(review): outputDir comes from os.path.abspath, so it is always a
  # str here and this guard is always True; also prefer `is not None`
  if (not (outputDir == None)):
    sdfModel.save()
    if showVis:
      marchedMesh.save(os.path.join(outputDir,config.name + '.obj'))
      sdfModel.plotTrainResults(show = False, save = True)