Example #1
def main():
    data_dir = sys.argv[1]
    model_dir = sys.argv[2]
    rep_dir = sys.argv[3]
    gpu_mem = sys.argv[4]
    main_mem = sys.argv[5]
    numsplits = int(sys.argv[6])

    data_pbtxt_file = os.path.join(data_dir, 'flickr.pbtxt')
    data_pb = util.ReadData(data_pbtxt_file)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file, 'w') as f:
        text_format.PrintMessage(data_pb, f)
    EditTrainers(data_dir, model_dir, rep_dir, numsplits)

    data_pbtxt_file_z = os.path.join(data_dir, 'flickr_z.pbtxt')
    data_pbtxt_file_nnz = os.path.join(data_dir, 'flickr_nnz.pbtxt')
    if not os.path.exists(data_pbtxt_file_z):
        CreateMissingTextData(data_pb, data_pbtxt_file_z, data_pbtxt_file_nnz)
    data_pb = util.ReadData(data_pbtxt_file_z)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file_z, 'w') as f:
        text_format.PrintMessage(data_pb, f)
    data_pb = util.ReadData(data_pbtxt_file_nnz)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file_nnz, 'w') as f:
        text_format.PrintMessage(data_pb, f)
Example #2
def main():
    #Input parameters
    data_dir = sys.argv[1]
    model_dir = sys.argv[2]
    representation_dir = sys.argv[3]
    avdata_pbtxt_file = os.path.join(data_dir, 'audiovisualdata.pbtxt')
    vdata_pbtxt_file = os.path.join(data_dir, 'visualonlydata.pbtxt')
    gpu_mem = sys.argv[4]
    main_mem = sys.argv[5]

    #Edit the data configuration file
    avdata_pb = util.ReadData(avdata_pbtxt_file)
    avdata_pb.gpu_memory = gpu_mem
    avdata_pb.main_memory = main_mem
    avdata_pb.prefix = data_dir
    with open(avdata_pbtxt_file, 'w') as f:
        text_format.PrintMessage(avdata_pb, f)

    vdata_pb = util.ReadData(vdata_pbtxt_file)
    vdata_pb.gpu_memory = gpu_mem
    vdata_pb.main_memory = main_mem
    vdata_pb.prefix = data_dir
    with open(vdata_pbtxt_file, 'w') as f:
        text_format.PrintMessage(vdata_pb, f)

    #Set up the trainer configuration file
    SetUpTrainer(data_dir, model_dir, representation_dir)
Example #3
def EditModels(args):

    # Common changes in all models
    model_files = glob.glob("models/*.pbtxt")
    for model_file in model_files:
        model = util.ReadModel(model_file)
        model.hyperparams.base_epsilon = args.base_epsilon
        model.hyperparams.epsilon_decay = deepnet_pb2.Hyperparams.INVERSE_T \
                if args.epsilon_decay else deepnet_pb2.Hyperparams.NONE
        model.hyperparams.sparsity = args.sparsity
        model.hyperparams.dropout = args.dropout
        model.hyperparams.l2_decay = args.l2_decay
        model.hyperparams.initial_momentum = args.initial_momentum
        model.hyperparams.final_momentum = args.final_momentum
        with open(model_file, 'w') as f:
            text_format.PrintMessage(model, f)

    # Specific changes to rbm1
    model_file = os.path.join('models', 'rbm1.pbtxt')
    model = util.ReadModel(model_file)
    for layer in model.layer:
        if layer.name == 'input_layer':
            layer.dimensions = args.input_width
            layer.numlabels = args.input_numlabels
        if layer.name == 'hidden1':
            layer.dimensions = args.hidden1_width
    with open(model_file, 'w') as f:
        text_format.PrintMessage(model, f)

    # Specific changes to rbm2
    model_file = os.path.join('models', 'rbm2.pbtxt')
    model = util.ReadModel(model_file)
    for layer in model.layer:
        if layer.name == 'hidden1':
            layer.dimensions = args.hidden1_width
        if layer.name == 'hidden2':
            layer.dimensions = args.hidden2_width
    with open(model_file, 'w') as f:
        text_format.PrintMessage(model, f)

    # Specific changes in joint
    model_file = os.path.join('models', 'joint.pbtxt')
    model = util.ReadModel(model_file)
    model.prefix = args.model_dir
    for layer in model.layer:
        if layer.name == 'input_layer':
            layer.dimensions = args.input_width
            layer.numlabels = args.input_numlabels
        if layer.name == 'hidden1':
            layer.dimensions = args.hidden1_width
        if layer.name == 'hidden2':
            layer.dimensions = args.hidden2_width

    with open(model_file, 'w') as f:
        text_format.PrintMessage(model, f)
Example #4
def EditModelsDBM(args):
    """ DBM specific only """

    if args.model not in ['dbm', 'lcdbm']:
        raise ValueError('Unknown model {}'.format(args.model))

    # Common changes in all models
    model_files = glob.glob("models/*.pbtxt")
    for model_file in model_files:
        model = util.ReadModel(model_file)
        model.hyperparams.base_epsilon = args.base_epsilon
        model.hyperparams.sparsity = args.sparsity
        model.hyperparams.dropout = args.dropout
        model.hyperparams.l2_decay = args.l2_decay
        model.hyperparams.initial_momentum = args.initial_momentum
        model.hyperparams.final_momentum = args.final_momentum
        with open(model_file, 'w') as f:
            text_format.PrintMessage(model, f)

    # Specific changes to rbm2
    model_file = os.path.join('models', 'rbm2.pbtxt')
    model = util.ReadModel(model_file)
    for layer in model.layer:
        if layer.name == 'hidden1' or layer.name == 'bernoulli_hidden1':
            layer.dimensions = args.hidden1_width
        if layer.name == 'hidden2':
            layer.dimensions = args.hidden2_width
    with open(model_file, 'w') as f:
        text_format.PrintMessage(model, f)

    # Specific changes to joint
    model_file = os.path.join('models', 'joint.pbtxt')
    model = util.ReadModel(model_file)
    model.prefix = args.model_dir
    for layer in model.layer:
        if layer.name == 'input_layer':
            layer.dimensions = args.input_width
            layer.numlabels = args.input_numlabels
        if layer.name == 'bernoulli_hidden1' or layer.name == 'hidden1':
            layer.dimensions = args.hidden1_width
        if layer.name == 'hidden2':
            layer.dimensions = args.hidden2_width

    # Sparsity mask
    if args.model in ['lcdbm']:
        edge = next(e for e in model.edge if e.node1 == 'input_layer' and \
                e.node2 == 'bernoulli_hidden1')
        param = next(p for p in edge.param if p.name == 'weight')
        sparsity_mask_file = param.sparsity_mask
        param.sparsity_mask = os.path.join(args.data_dir, sparsity_mask_file)

    with open(model_file, 'w') as f:
        text_format.PrintMessage(model, f)
Example #5
async def main(db, start=None):
    async with db.acquire() as conn:
        async for pkg, version in iter_missing_upstream_branch_packages(conn):
            if start and pkg < start:
                continue
            logging.info('Package: %s' % pkg)
            urls = []
            for name, guesser in [
                    ('aur', guess_from_aur),
                    ('lp', guess_from_launchpad),
                    ('pecl', guess_from_pecl)]:
                try:
                    metadata = dict(guesser(pkg))
                except Exception:
                    traceback.print_exc()
                    continue
                try:
                    repo_url = metadata['Repository']
                except KeyError:
                    continue
                else:
                    urls.append((name, repo_url))
            if not urls:
                continue
            if len(urls) > 1:
                print('# Note: Conflicting URLs for %s: %r' % (pkg, urls))
            config = package_overrides_pb2.OverrideConfig()
            override = config.package.add()
            override.name = pkg
            override.upstream_branch_url = urls[0][1]
            print("# From %s" % urls[0][0])
            text_format.PrintMessage(config, sys.stdout)
Example #6
def export_tensorflow_model(model, name, output_path, version=1):
    """Exports a TensorFlow model for serving with Triton

    Parameters
    ----------
    model:
        The tensorflow model that should be served
    name:
        The name of the triton model to export
    output_path:
        The path to write the exported model to
    """
    tf_model_path = os.path.join(output_path, str(version), "model.savedmodel")
    model.save(tf_model_path)
    config = model_config.ModelConfig(name=name,
                                      backend="tensorflow",
                                      platform="tensorflow_savedmodel")

    for col in model.inputs:
        config.input.append(
            model_config.ModelInput(name=col.name,
                                    data_type=_convert_dtype(col.dtype),
                                    dims=[-1, 1]))

    for col in model.outputs:
        config.output.append(
            model_config.ModelOutput(name=col.name.split("/")[0],
                                     data_type=_convert_dtype(col.dtype),
                                     dims=[-1, 1]))

    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
    return config
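A minimal usage sketch for export_tensorflow_model (the model, name, and output path are hypothetical; it assumes the surrounding module provides model_config and _convert_dtype):

import tensorflow as tf

inp = tf.keras.Input(shape=(4,), name="features")
out = tf.keras.layers.Dense(1, name="score")(inp)
toy_model = tf.keras.Model(inp, out)

# Writes <output_path>/1/model.savedmodel plus a Triton config.pbtxt.
export_tensorflow_model(toy_model, "demo_tf", "/tmp/triton_models/demo_tf")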
Example #7
def _generate_pytorch_config(name,
                             output_path,
                             model_info,
                             max_batch_size=None):
    """given a workflow generates the trton modelconfig proto object describing the inputs
    and outputs to that workflow"""
    config = model_config.ModelConfig(name=name,
                                      platform="onnxruntime_onnx",
                                      max_batch_size=max_batch_size)

    for col, val in model_info["input"].items():
        config.input.append(
            model_config.ModelInput(name=col,
                                    data_type=_convert_dtype(val["dtype"]),
                                    dims=[-1, len(val["columns"])]))

    for col, val in model_info["output"].items():
        if len(val["columns"]) == 1:
            dims = [-1]
        else:
            dims = [-1, len(val["columns"])]
        config.output.append(
            model_config.ModelOutput(name=col,
                                     data_type=_convert_dtype(val["dtype"]),
                                     dims=dims))

    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
    return config
Example #8
def main(argv):
    # Authenticate and construct service.
    service, _ = sample_tools.init(
        argv,
        'adexchangebuyer',
        'v1.3',
        __doc__,
        __file__,
        parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.buyer')

    try:
        # Construct a list request.
        request = service.creatives().list()
        # Execute the request and store the response.
        response = request.execute()
    except client.AccessTokenRefreshError:
        print(
            'The credentials have been revoked or expired, please re-run the '
            'application to re-authorize')
        return  # 'response' is undefined past this point

    # Generate a Snippet Status Report Protocol buffer object
    report = GenerateSnippetStatusReportPBObject(response)

    # Write the report to files in txt, pb and csv format
    with open('SnippetStatusReport.txt', 'w') as report_txt:
        text_format.PrintMessage(report, report_txt)
    with open('SnippetStatusReport.pb', 'wb') as report_pb:
        report_pb.write(report.SerializeToString())
    with open('SnippetStatusReport.csv', 'w') as report_csv:
        WriteSnippetStatusReportInCSV(report, report_csv)
Example #9
 def ParseOther(self, baseDir, withMS = False):
     self.baseDir = baseDir
     pathDir = os.path.join(baseDir, "*.npy")
     files = glob.glob(pathDir)
     instanceCount = 0
     dataPb = deepnet_pb2.Dataset()
     
     for i, feature in enumerate(self.featureGroups):
         data = deepnet_pb2.Dataset.Data()
         data.name = feature + "_"+ os.path.basename(baseDir)
         data.file_pattern = "*"+feature+"*.npy"
         if withMS:
             data.dimensions.extend([self.featureGroupsIndex[i+1]-self.featureGroupsIndex[i]])
         else:
             dimensions = 0
             for entry in self.featureGroupsDict[feature]:
                 dimensions = dimensions + entry[1] - entry[0]
             data.dimensions.extend([dimensions])
         dataPb.data.extend([data]) 
         
     data = deepnet_pb2.Dataset.Data()
     data.name = "label_" + os.path.basename(baseDir) 
     data.dimensions.extend([1]) 
     data.file_pattern = "*label.npy"
     dataPb.data.extend([data]) 
     
     if withMS:
         MS = "withMS"
         outputProtoFile = os.path.join(baseDir, MS, "data_withMS.pbtxt")
     else:
         MS = "withoutMS"
         outputProtoFile = os.path.join(baseDir, MS, "data_withoutMS.pbtxt")
         
     dataPb.name = os.path.basename(baseDir) + "_"+ MS       
     dirPath = os.path.join(baseDir, MS)
     dataPb.prefix = dirPath
     for fileEntry in files:
         tempData = np.load(fileEntry)
         if len(tempData.shape) == 1 or tempData.shape[1] != 17593:
             continue
         instanceCount = instanceCount + tempData.shape[0]
         baseName = os.path.basename(fileEntry)
         fileName = os.path.join(dirPath,os.path.splitext(baseName)[0]) + "_" + MS
         np.save(fileName + '_label.npy', tempData[:, 17592])
         if withMS:
             for i, feature in enumerate(self.featureGroups):
                 np.save(fileName + '_' + feature + "_withMS.npy", tempData[:, self.featureGroupsIndex[i]:self.featureGroupsIndex[i + 1]])               
         else:
             for feature in self.featureGroups:
                 tempTuple = self.featureGroupsDict[feature][0]
                 tempArray = tempData[:, tempTuple[0]: tempTuple[1]]
                 if len(self.featureGroupsDict[feature]) > 1:
                     for i in range(1, len(self.featureGroupsDict[feature])):
                         tempTuple = self.featureGroupsDict[feature][i]
                         tempArray = np.concatenate((tempArray, tempData[:,tempTuple[0]: tempTuple[1]]), axis = 1)
                 np.save(fileName + '_' + feature + "_withoutMS.npy", tempArray) 
     for entry in dataPb.data:
         entry.size = instanceCount
     with open(outputProtoFile, 'w') as f:
         text_format.PrintMessage(dataPb, f) 
Example #10
def _generate_ensemble_config(name,
                              output_path,
                              nvt_config,
                              nn_config,
                              name_ext=""):
    config = model_config.ModelConfig(name=name + name_ext,
                                      platform="ensemble",
                                      max_batch_size=nvt_config.max_batch_size)
    config.input.extend(nvt_config.input)
    config.output.extend(nn_config.output)

    nvt_step = model_config.ModelEnsembling.Step(model_name=nvt_config.name,
                                                 model_version=-1)
    for input_col in nvt_config.input:
        nvt_step.input_map[input_col.name] = input_col.name
    for output_col in nvt_config.output:
        nvt_step.output_map[output_col.name] = output_col.name + "_nvt"

    tf_step = model_config.ModelEnsembling.Step(model_name=nn_config.name,
                                                model_version=-1)
    for input_col in nn_config.input:
        tf_step.input_map[input_col.name] = input_col.name + "_nvt"
    for output_col in nn_config.output:
        tf_step.output_map[output_col.name] = output_col.name

    config.ensemble_scheduling.step.append(nvt_step)
    config.ensemble_scheduling.step.append(tf_step)

    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
    return config
Example #11
def main():
    if len(sys.argv) != 2:
        print(f'{sys.argv[0]} <file type>', file=sys.stderr)
        return 1

    tex_pairs = os.getenv('SOBJ_TEX_MAP', '').split(',')
    tex_map = {}
    for pair in tex_pairs:
        split_pair = pair.split('=')
        if len(split_pair) != 2:
            continue

        tex_map[split_pair[0]] = split_pair[1]

    with pyassimp.load(sys.stdin.buffer, file_type=sys.argv[1], processing=
            aiProcess_Triangulate           |
            aiProcess_JoinIdenticalVertices |
            aiProcess_GenSmoothNormals      |
            aiProcess_SortByPType           |
            aiProcess_FlipUVs               |
            aiProcess_CalcTangentSpace
        ) as scene:
        converter = Converter(scene, tex_map=tex_map, skip_textures=bool(os.getenv('SOBJ_SKIP_TEXTURES')))
        obj = converter.convert()

        if os.getenv('SOBJ_TEXT'):
            text_format.PrintMessage(obj, sys.stdout)
        else:
            sys.stdout.buffer.write(obj.SerializeToString())

    return 0
Example #12
 def log_rcvd_msg(self, msg_type, msgID, msg_time, msg):
     # log msg
     if self.log != os.devnull:
         self.log.write("Now:%s, RCVD, %s, MsgType:%d, MsgID:%d, MsgTime:%d\n" %\
               (time.time(), msg.DESCRIPTOR.name, msg_type, msgID, msg_time))
         text_format.PrintMessage(msg, out=self.log)
         self.log.write("--/n")
Example #13
File: helper.py  Project: sony/nnabla
def generate_case_from_nntxt_str(nntxt_str,
                                 param_format,
                                 dataset_sample_num,
                                 batch_size=None):
    proto = proto_from_str(nntxt_str)
    with generate_csv_png(dataset_sample_num,
                          get_input_size(proto)) as dataset_csv_file:
        # To test dataset, we create a randomly generated dataset.
        for ds in proto.dataset:
            ds.batch_size = batch_size if batch_size else ds.batch_size
            ds.uri = dataset_csv_file
            ds.cache_dir = os.path.join(os.path.dirname(dataset_csv_file),
                                        "data.cache")
        nntxt_io = io.StringIO()
        text_format.PrintMessage(proto, nntxt_io)
        nntxt_io.seek(0)

        version = io.StringIO()
        version.write('{}\n'.format(nnp_version()))
        version.seek(0)

        param = io.BytesIO()
        prepare_parameters(nntxt_str)
        nn.parameter.save_parameters(param, extension=param_format)

        with create_temp_with_dir(NNP_FILE) as temp_nnp_file_name:
            with get_file_handle_save(temp_nnp_file_name, ".nnp") as nnp:
                nnp.writestr('nnp_version.txt', version.read())
                nnp.writestr('network.nntxt', nntxt_io.read())
                nnp.writestr('parameter{}'.format(param_format), param.read())
            yield temp_nnp_file_name
Example #14
def WriteProto(fPath, proto):
    if os.path.splitext(fPath)[1] == '.pbtxt':
        with open(fPath, 'w') as f:
            text_format.PrintMessage(proto, f)
    else:
        f = open(fPath, 'wb')
        f.write(proto.SerializeToString())
        f.close()
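A small usage sketch for WriteProto, using FileDescriptorProto from the stock protobuf package purely as a stand-in message:

from google.protobuf import descriptor_pb2

msg = descriptor_pb2.FileDescriptorProto(name="example.proto")
WriteProto('example.pbtxt', msg)  # text format via text_format.PrintMessage
WriteProto('example.pb', msg)     # binary via SerializeToString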
Example #15
def get_dynamic_batch_size(new_batch_size, deploy_prototxt):
    net_param = caffe_pb2.NetParameter()
    with open(deploy_prototxt, 'r') as f:
        text_format.Merge(f.read(), net_param)
    net_param.layer[0].input_param.shape[0].dim[0] = new_batch_size
    new_deploy_file = deploy_prototxt+'.generated'
    with open(new_deploy_file, 'w') as fp:
        text_format.PrintMessage(net_param, fp)
    return new_deploy_file
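A hypothetical call (placeholder path): this rewrites the batch dimension to 32 and returns the path of the generated copy, deploy.prototxt.generated.

new_deploy = get_dynamic_batch_size(32, '/path/to/deploy.prototxt')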
Example #16
def main():
  data_pbtxt = sys.argv[1]
  output_dir = sys.argv[2]
  prefix = sys.argv[3]
  r = int(sys.argv[4])
  gpu_mem = sys.argv[5]
  main_mem = sys.argv[6]
  if not os.path.isdir(output_dir):
    os.makedirs(output_dir)

  rep_dict, stats_files = MakeDict(data_pbtxt)
  reps = rep_dict.keys()

  indices_file = os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r)
  if os.path.exists(indices_file):
    train = np.load(indices_file)
    valid = np.load(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r))
    test = np.load(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r))
  else:
    print( 'Creating new split.' )
    indices = np.arange(25000)
    np.random.shuffle(indices)
    train = indices[:10000]
    valid = indices[10000:15000]
    test = indices[15000:]
    np.save(os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r), train)
    np.save(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r), valid)
    np.save(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r), test)

    
  print( 'Splitting data' )
  dataset_pb = deepnet_pb2.Dataset()
  dataset_pb.name = 'flickr_split_%d' % r
  dataset_pb.gpu_memory = gpu_mem
  dataset_pb.main_memory = main_mem
  for rep in reps:
    data = rep_dict[rep]
    stats_file = stats_files[rep]
    DumpDataSplit(data[train], output_dir, 'train_%s' % rep, dataset_pb, stats_file)
    DumpDataSplit(data[valid], output_dir, 'valid_%s' % rep, dataset_pb, stats_file)
    DumpDataSplit(data[test], output_dir, 'test_%s' % rep, dataset_pb, stats_file)

  print( 'Splitting labels' )
  labels = np.load(os.path.join(prefix, 'labels.npy')).astype('float32')
  DumpLabelSplit(labels[train,], output_dir, 'train_labels', dataset_pb)
  DumpLabelSplit(labels[valid,], output_dir, 'valid_labels', dataset_pb)
  DumpLabelSplit(labels[test,], output_dir, 'test_labels', dataset_pb)

  #d = 'indices'
  #np.save(os.path.join(output_dir, 'train_%s.npy' % d), train)
  #np.save(os.path.join(output_dir, 'valid_%s.npy' % d), valid)
  #np.save(os.path.join(output_dir, 'test_%s.npy' % d), test)

  with open(os.path.join(output_dir, 'data.pbtxt'), 'w') as f:
    text_format.PrintMessage(dataset_pb, f)

  print( 'Output written in directory %s' % output_dir )
Example #17
File: batch.py  Project: croft/netgen
def write(spec, output):
    """
    Write a protobuf batch specification to an output file.

    Arguments:
    spec   -- protobuf batch specification
    output -- file-like object to write batch specification to
    """
    text_format.PrintMessage(spec, output)
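A usage sketch for write: any protobuf message works as the spec, so a descriptor proto from the protobuf package is used here purely for illustration, written to stdout.

import sys
from google.protobuf import descriptor_pb2

write(descriptor_pb2.FileDescriptorProto(name="demo.proto"), sys.stdout)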
Example #18
def _save_label_map(label_map, label_map_path):
    """Saves a label map proto to a text file.

    Args:
        label_map: StringIntLabelMap proto object.
        label_map_path: path to the StringIntLabelMap proto text file.
    """
    with tf.gfile.GFile(label_map_path, 'w') as fid:
        text_format.PrintMessage(label_map, fid)
Example #19
File: text.py  Project: eaugeas/hilo-tfx
def serialize(
    stream: IOBase,
    message: Union[Type[Message], Message],
    set_defaults: bool = False,
):
    if isinstance(message, Message):
        message: Message = message
    else:
        message = message()
        if set_defaults:
            _set_defaults(message)
    text_format.PrintMessage(message, stream)
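A usage sketch of serialize's two accepted forms: a message instance, or a message class that gets instantiated (and, with set_defaults=True, filled via the module's _set_defaults helper). FileDescriptorProto is only an illustrative stand-in:

import sys
from google.protobuf import descriptor_pb2

serialize(sys.stdout, descriptor_pb2.FileDescriptorProto(name="a.proto"))
serialize(sys.stdout, descriptor_pb2.FileDescriptorProto, set_defaults=True)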
Example #20
def _PrintInfo(metadata_path, output_format, out=None):
    """Prints out the info contained in the metadata protobuf."""
    out = out or sys.stdout

    proto = emulator_meta_data_pb2.EmulatorMetaDataPb()
    with open(metadata_path, 'rb') as proto_file:
        proto.ParseFromString(proto_file.read())

    if output_format == 'text':
        text_format.PrintMessage(proto, out, indent=2)
    else:
        out.write(proto.SerializeToString())
Example #21
def EditTrainers(args):
    t_op_files = glob.glob("trainers/*.pbtxt")
    for t_op_file in t_op_files:
        t_op = util.ReadOperation(t_op_file)
        if 'rbm1' in t_op_file or 'joint' in t_op_file:
            t_op.data_proto_prefix = args.data_dir
        else:
            t_op.data_proto_prefix = args.rep_dir
        t_op.checkpoint_directory = args.model_dir
        t_op.batchsize = args.batchsize
        with open(t_op_file, 'w') as f:
            text_format.PrintMessage(t_op, f)
Example #22
def callback(name, nnp, inputs, outputs):
    if name not in result_nums:
        result_nums[name] = 0

    save_filename = '{}_{:03d}.nntxt'.format(name, result_nums[name])

    for n, i in enumerate(inputs):
        i.tofile('{}_{:03d}_input_{}.bin'.format(name, result_nums[name], n))
    for n, o in enumerate(outputs):
        o.tofile('{}_{:03d}_output_{}.bin'.format(name, result_nums[name], n))
    with open(save_filename, 'w') as f:
        text_format.PrintMessage(nnp, f)
    result_nums[name] += 1
Example #23
def export_tensorflow_model(model, name, output_path, version=1):
    """Exports a TensorFlow model for serving with Triton

    Parameters
    ----------
    model:
        The tensorflow model that should be served
    name:
        The name of the triton model to export
    output_path:
        The path to write the exported model to
    """
    tf_model_path = os.path.join(output_path, str(version), "model.savedmodel")
    model.save(tf_model_path, include_optimizer=False)
    config = model_config.ModelConfig(name=name,
                                      backend="tensorflow",
                                      platform="tensorflow_savedmodel")

    inputs, outputs = model.inputs, model.outputs

    if not inputs or not outputs:
        signatures = getattr(model, "signatures", {}) or {}
        default_signature = signatures.get("serving_default")
        if not default_signature:
            # roundtrip saved model to disk to generate signature if it doesn't exist
            import tensorflow as tf

            reloaded = tf.keras.models.load_model(tf_model_path)
            default_signature = reloaded.signatures["serving_default"]

        inputs = list(default_signature.structured_input_signature[1].values())
        outputs = list(default_signature.structured_outputs.values())

    for col in inputs:
        config.input.append(
            model_config.ModelInput(name=col.name,
                                    data_type=_convert_dtype(col.dtype),
                                    dims=[-1, col.shape[1]]))

    for col in outputs:
        # this assumes the list columns are 1D tensors both for cats and conts
        config.output.append(
            model_config.ModelOutput(
                name=col.name.split("/")[0],
                data_type=_convert_dtype(col.dtype),
                dims=[-1, col.shape[1]],
            ))

    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
    return config
Example #24
def main():
    data_dir = sys.argv[1]
    model_dir = sys.argv[2]
    rep_dir = sys.argv[3]
    gpu_mem = sys.argv[4]
    main_mem = sys.argv[5]
    numsplits = int(sys.argv[6])

    data_pbtxt_file = os.path.join(data_dir, 'RNAseq.pbtxt')
    data_pb = util.ReadData(data_pbtxt_file)
    EditPaths(data_pb, data_dir, gpu_mem, main_mem)
    with open(data_pbtxt_file, 'w') as f:
        text_format.PrintMessage(data_pb, f)
    EditTrainers(data_dir, model_dir, rep_dir, numsplits)
Example #25
def EditTrainers(data_dir, model_dir, rep_dir, numsplits):
    tnames = [
        'train_CD_image_layer1.pbtxt', 'train_CD_image_layer2.pbtxt',
        'train_CD_text_layer1.pbtxt', 'train_CD_text_layer2.pbtxt',
        'train_CD_joint_layer.pbtxt'
    ]
    for tname in tnames:
        t_op_file = os.path.join('trainers', 'dbn', tname)
        t_op = util.ReadOperation(t_op_file)
        if 'layer1' in tname:
            t_op.data_proto_prefix = data_dir
        else:
            t_op.data_proto_prefix = rep_dir
        t_op.checkpoint_directory = model_dir
        with open(t_op_file, 'w') as f:
            text_format.PrintMessage(t_op, f)

    t_op_file = os.path.join('trainers', 'classifiers', 'baseclassifier.pbtxt')
    t_op = util.ReadOperation(t_op_file)
    for i in range(1, numsplits + 1):
        t_op_file = os.path.join('trainers', 'classifiers',
                                 'split_%d.pbtxt' % i)
        t_op.data_proto_prefix = rep_dir
        t_op.data_proto = os.path.join('split_%d' % i, 'data.pbtxt')
        t_op.checkpoint_prefix = model_dir
        t_op.checkpoint_directory = os.path.join('classifiers', 'split_%d' % i)
        with open(t_op_file, 'w') as f:
            text_format.PrintMessage(t_op, f)

    # Change prefix in multimodal dbn model
    mnames = ['multimodal_dbn.pbtxt']
    for mname in mnames:
        model_file = os.path.join('models', mname)
        model = util.ReadModel(model_file)
        model.prefix = model_dir
        with open(model_file, 'w') as f:
            text_format.PrintMessage(model, f)
Example #26
def _generate_nvtabular_config(workflow,
                               name,
                               output_path,
                               output_model=None,
                               max_batch_size=None,
                               cats=None,
                               conts=None):
    """given a workflow generates the trton modelconfig proto object describing the inputs
    and outputs to that workflow"""

    config = model_config.ModelConfig(name=name,
                                      backend="python",
                                      max_batch_size=max_batch_size)

    if output_model == "hugectr":
        for column in workflow.column_group.input_column_names:
            dtype = workflow.input_dtypes[column]
            config.input.append(
                model_config.ModelInput(name=column,
                                        data_type=_convert_dtype(dtype),
                                        dims=[-1]))

        config.output.append(
            model_config.ModelOutput(name="DES",
                                     data_type=model_config.TYPE_FP32,
                                     dims=[-1]))

        config.output.append(
            model_config.ModelOutput(name="CATCOLUMN",
                                     data_type=model_config.TYPE_INT64,
                                     dims=[-1]))

        config.output.append(
            model_config.ModelOutput(name="ROWINDEX",
                                     data_type=model_config.TYPE_INT32,
                                     dims=[-1]))
    else:
        for column, dtype in workflow.input_dtypes.items():
            _add_model_param(column, dtype, model_config.ModelInput,
                             config.input)

        for column, dtype in workflow.output_dtypes.items():
            _add_model_param(column, dtype, model_config.ModelOutput,
                             config.output)

    with open(os.path.join(output_path, "config.pbtxt"), "w") as o:
        text_format.PrintMessage(config, o)
    return config
Example #27
def SetUpTrainer(data_dir, model_dir, representation_dir):
    trainer_config_names = [
        'train_CD_visual_layer1.pbtxt', 'train_CD_visual_layer2.pbtxt',
        'train_CD_audio_layer1.pbtxt', 'train_CD_audio_layer2.pbtxt',
        'train_CD_joint_layer.pbtxt'
    ]
    for trainer_config_name in trainer_config_names:
        filename = os.path.join('Trainers', trainer_config_name)
        trainer_operation = util.ReadOperation(filename)
        if 'layer1' in trainer_config_name:
            trainer_operation.data_proto_prefix = data_dir
        else:
            trainer_operation.data_proto_prefix = representation_dir
        trainer_operation.checkpoint_directory = model_dir
        with open(filename, 'w') as f:
            text_format.PrintMessage(trainer_operation, f)
Example #28
 def testPrintMessageSetByFieldNumber(self):
     out = text_format.TextWriter(False)
     message = unittest_mset_pb2.TestMessageSetContainer()
     ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
     ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
     message.message_set.Extensions[ext1].i = 23
     message.message_set.Extensions[ext2].str = 'foo'
     text_format.PrintMessage(message, out, use_field_number=True)
     self.CompareToGoldenText(
         out.getvalue(), '1 {\n'
         '  1545008 {\n'
         '    15: 23\n'
         '  }\n'
         '  1547769 {\n'
         '    25: \"foo\"\n'
         '  }\n'
         '}\n')
     out.close()
Example #29
def change_model(proto, dimensions, layers=None):
  model = util.ReadModel(proto)
  if layers is None:
    layers = ['image_hidden1', 'image_hidden2', 'image_hidden3',
              'text_hidden1', 'text_hidden2', 'text_hidden3',
              'image_layer', 'text_layer', 'joint_layer',
              'image_tied_hidden', 'text_tied_hidden',
              'image_hidden2_recon', 'text_hidden2_recon',
              'cross_image_hidden2_recon', 'cross_text_hidden2_recon']
  
  for layer in layers:
    try:
      layer_proto = next(lay for lay in model.layer if lay.name == layer)
      layer_proto.dimensions = dimensions
    except StopIteration:
      pass
  
  with open(proto, 'w') as f:
    text_format.PrintMessage(model, f)
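A hypothetical call (placeholder path), relying on the dimensions argument in the signature above: resize every listed layer in the model proto to 1024 units and rewrite the file in place.

change_model('models/multimodal_dbn.pbtxt', dimensions=1024)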
Example #30
def write_label_file(label_map_list, dataset_dir, filename=LABELS_FILENAME):
    """Writes a file with the list of class names.

    Args:
        label_map_list: A list of dicts, each of the form
            {'id': <id>, 'class_name': <class_name>, 'display_name': <display_name>}.
        dataset_dir: The directory in which the labels file should be written.
        filename: The filename where the class names are written.
    """
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    items = sorted(label_map_list, key=lambda x: int(x["id"]))
    labels_filename = os.path.join(dataset_dir, filename)
    for item_dict in items:
        item = label_map.item.add()
        item.id = item_dict["id"]
        item.name = item_dict["class_name"]
        item.display_name = item_dict["display_name"]
    with tf.gfile.Open(labels_filename, 'w') as fid:
        text_format.PrintMessage(label_map, fid)
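A minimal usage sketch for write_label_file with a two-class label map (the dataset directory is a placeholder and must already exist; tf.gfile implies a TF1-style API):

write_label_file(
    [{'id': 1, 'class_name': 'cat', 'display_name': 'Cat'},
     {'id': 2, 'class_name': 'dog', 'display_name': 'Dog'}],
    '/tmp/dataset')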