Example #1
# Assumed imports for this TF 1.x snippet (not shown in the original excerpt).
import onnx
import tensorflow as tf
from tensorflow.keras import backend as K


def onnx2keras(onnx_model_path,
               input_names,
               output_dir,
               swap_channel_ordering=True):
    from onnx2keras import onnx_to_keras

    onnx_model = onnx.load(onnx_model_path)
    k_model = onnx_to_keras(onnx_model,
                            input_names,
                            change_ordering=swap_channel_ordering,
                            name_policy='renumerate')
    weights = k_model.get_weights()

    K.set_learning_phase(0)

    with K.get_session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        k_model.set_weights(weights)
        tf.saved_model.simple_save(
            sess,
            output_dir,
            inputs={
                inp.name: tensor
                for inp, tensor in zip(onnx_model.graph.input,
                                       k_model.inputs)
            },
            outputs={
                out.name: tensor
                for out, tensor in zip(onnx_model.graph.output,
                                       k_model.outputs)
            })
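A minimal usage sketch for the helper above (the file name, input name, and export directory are placeholders; a TF 1.x environment is assumed):

# Hypothetical call; the input name must match the ONNX graph input.
onnx2keras('model.onnx', input_names=['input'], output_dir='export/1')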
Example #2
def convertToKerasAndBack(fileName, outputFileName, inputs):
    ret = None
    onnxModel = onnx.load(fileName)
    kwArgs = dict()
    if "shuffle" in fileName:
        kwArgs["input_shapes"] = [1, 3, 224, 224]
        import shufflenet
        import keras
        kerasModel = shufflenet.ShuffleNet(groups=3)
        kerasModel.load_weights(
            "keras-shufflenet/weights/ShuffleNet_1X_g3_br_0.25_373.hdf5")
        kerasModel.compile(optimizer=keras.optimizers.SGD(lr=.05,
                                                          decay=5e-4,
                                                          momentum=0.9),
                           metrics=['accuracy'],
                           loss='categorical_crossentropy')
        ret = kerasModel.predict(inputs[0][0].transpose(0, 2, 3, 1))
    else:
        kerasModel = onnx_to_keras(onnxModel,
                                   getOnnxInputNames(fileName),
                                   verbose=False,
                                   **kwArgs)

    #tf.keras.utils.plot_model(kerasModel, show_shapes=True)
    backconvOnnxModel = onnxmltools.convert_keras(kerasModel)
    onnxmltools.utils.save_model(backconvOnnxModel, outputFileName)
    return ret
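The snippet calls a getOnnxInputNames helper that is not shown. A plausible implementation reads the graph inputs and drops weight initializers (a sketch under that assumption):

import onnx

def getOnnxInputNames(fileName):
    model = onnx.load(fileName)
    # Some exporters list weight initializers among graph inputs; skip them.
    initializer_names = {init.name for init in model.graph.initializer}
    return [inp.name for inp in model.graph.input
            if inp.name not in initializer_names]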
Example #3
# Assumed imports for this TF 1.x snippet (not shown in the original excerpt).
import os

import onnx
import tensorflow as tf
from tensorflow.keras import backend as K


def onnx2keras_pb(onnx_model_path,
                  input_names,
                  output_names,
                  output_path,
                  swap_channel_ordering=True):
    from onnx2keras import onnx_to_keras
    from convert.utils import freeze_session

    output_dir, filename = os.path.split(output_path)

    onnx_model = onnx.load(onnx_model_path)
    k_model = onnx_to_keras(onnx_model,
                            input_names,
                            change_ordering=swap_channel_ordering,
                            name_policy='renumerate')
    weights = k_model.get_weights()

    K.set_learning_phase(0)

    with K.get_session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        k_model.set_weights(weights)

        frozen_graph = freeze_session(sess,
                                      keep_var_names=None,
                                      output_names=output_names)
        tf.train.write_graph(frozen_graph, output_dir, filename, as_text=False)
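freeze_session is imported from convert.utils, which is not shown here. A widely used TF 1.x recipe for it looks like this (a sketch; the actual convert.utils implementation may differ):

import tensorflow as tf

def freeze_session(session, keep_var_names=None, output_names=None,
                   clear_devices=True):
    # Replace all variables in the session graph with constants.
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.global_variables())
            .difference(keep_var_names or []))
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        return tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names or [], freeze_var_names)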
Example #4
def pytorch2savedmodel(onnx_model_path, saved_model_dir):
    onnx_model = onnx.load(onnx_model_path)

    input_names = ['input_ids', "attention_mask", "token_type_ids", "task_id"]
    k_model = onnx_to_keras(onnx_model=onnx_model,
                            input_names=input_names,
                            change_ordering=True,
                            verbose=False)

    weights = k_model.get_weights()

    K.set_learning_phase(0)

    saved_model_dir = Path(saved_model_dir)
    if saved_model_dir.exists():
        shutil.rmtree(str(saved_model_dir))
    saved_model_dir.mkdir()

    with K.get_session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        k_model.set_weights(weights)

        tf.saved_model.simple_save(
            sess,
            str(saved_model_dir.joinpath('1')),
            # Note: the signature key 'image_array' is kept from the original
            # code even though the inputs above are BERT-style tensors.
            inputs={'image_array': k_model.input},
            outputs=dict((output.name, tensor) for output, tensor in zip(
                onnx_model.graph.output, k_model.outputs)))
Example #5
def pytorch2savedmodel(onnx_model_path, saved_model_dir):

    onnx_model = onnx.load(onnx_model_path)

    input_names = ["image_array"]
    k_model = onnx_to_keras(
        onnx_model=onnx_model,
        input_names=input_names,
        change_ordering=True,
        verbose=False,
    )

    # Leftover from the TF 1.x session-based variant (Example #4): `weights`
    # is never used below, since tf.saved_model.save handles variables itself.
    weights = k_model.get_weights()

    K.set_learning_phase(0)

    saved_model_dir = Path(saved_model_dir)
    if saved_model_dir.exists():
        shutil.rmtree(str(saved_model_dir))
    saved_model_dir.mkdir()

    tf.saved_model.save(
        k_model,
        str(saved_model_dir.joinpath("1")),
    )
Example #6
def get_keras(dynamic_input=False):
    onnx_model = onnx.load(
        "/home/fico/DeepL/Gans/Mobile_Cyclegan/vangogh2photo_fixed.onnx")
    # Call the converter (input - is the main model input name, can be different for your model)
    k_model = onnx_to_keras(onnx_model, ['input'],
                            test_c_order=True,
                            dynamic_input=dynamic_input)

    # if dynamic_input == True:
    #     model_config = k_model.get_config()
    #     print(model_config['layers'][0])

    k_model.summary()  # summary() prints directly and returns None

    return k_model
Example #7
def input_processing_onnx(file_path=None):
    '''
    Process input from an ONNX file.

    Parameters:
    -----------
    file_path: path to the ONNX input file. (still in development)

    Returns
    -----------
    number_of_layer: number of layers in the neural network
    number_of_neurons_each_layer: list of the number of neurons in each layer
    weight: list of weights of the neural network
    bias: list of biases at each layer

    Example file: ../sample_input/eranmnist_benchmark/onnx/tf/mnist_relu_3_50.onnx
    '''
    onnx_model = onnx.load(file_path)
    # graph.node[...].output is a repeated field; replace its first entry
    # instead of assigning a string to the field itself.
    onnx_model.graph.node[2].output[0] = 'dense/Relu_0'
    print(onnx_model.graph.node[2].output)
    # print(onnx_model.graph.input)
    kera_model = onnx_to_keras(onnx_model, input_names=['dense_input'])
    number_of_layer = 0
    number_of_neurons_each_layer = []
    weight = []
    bias = []
    for layer in kera_model.layers:
        layer_weight = layer.get_weights()
        if not layer_weight:
            continue
        number_of_layer += 1
        if number_of_layer == 1:
            number_of_neurons_each_layer.append(
                np.array(layer_weight[0]).shape[0])
        number_of_neurons_each_layer.append(np.array(layer_weight[0]).shape[1])
        weight.append(np.array(layer_weight[0]))
        bias.append(np.array(layer_weight[1]))
    number_of_layer += 1
    print(number_of_layer)
    print(number_of_neurons_each_layer)
    print(weight)
    print(bias)
    return number_of_layer, number_of_neurons_each_layer, weight, bias
Example #8
    def ToKeras(self,
                output_name,
                path=None,
                genotype=None,
                init_channels=None,
                layers=None):

        model = self.load_model(path, genotype, init_channels, layers)
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
        input_var = torch.FloatTensor(input_np)
        output = model(input_var)

        torch.onnx.export(model, (input_var),
                          output_name + ".onnx",
                          verbose=True,
                          input_names=['input'],
                          output_names=['output'])
        import onnx
        from onnx2keras import onnx_to_keras
        onnx_model = onnx.load(output_name + ".onnx")
        k_model = onnx_to_keras(onnx_model, ['input'])
        k_model.save(output_name + ".h5")
Example #9
def driver(inputdir,
           outputdir,
           datadir,
           plotdir,
           preddir,
           trainflag,
           validflag,
           testflag,
           normalize,
           fmean,
           fstdev,
           scale,
           fmin,
           fmax,
           scalelims,
           fsize,
           rmse_file,
           r2_file,
           inD,
           outD,
           ilog,
           olog,
           TFRfile,
           batch_size,
           ncores,
           buffer_size,
           gridsearch,
           architectures,
           layers,
           lay_params,
           activations,
           act_params,
           nodes,
           lengthscale,
           max_lr,
           clr_mode,
           clr_steps,
           epochs,
           patience,
           weight_file,
           resume,
           plot_cases,
           fxvals,
           xlabel,
           ylabel,
           filters=None,
           filtconv=1.):
    """
    Driver function to handle model training and evaluation.

    Inputs
    ------
    inputdir   : string. Path/to/directory of inputs.
    outputdir  : string. Path/to/directory of outputs.
    datadir    : string. Path/to/directory of data.
    plotdir    : string. Path/to/directory of plots.
    preddir    : string. Path/to/directory of predictions.
    trainflag  : bool.   Determines whether to train    the NN model.
    validflag  : bool.   Determines whether to validate the NN model.
    testflag   : bool.   Determines whether to test     the NN model.
    normalize  : bool.   Determines whether to normalize the data.
    fmean      : string. Path/to/file of mean  of training data.
    fstdev     : string. Path/to/file of stdev of training data.
    scale      : bool.   Determines whether to scale the data.
    fmin       : string. Path/to/file of minima of training data.
    fmax       : string. Path/to/file of maxima of training data.
    scalelims  : list, floats. [min, max] of range of scaled data.
    fsize      : string. Path/to/file of data set sizes (train/valid/test).
    rmse_file  : string. Prefix for savefiles for RMSE calculations.
    r2_file    : string. Prefix for savefiles for R2 calculations.
    inD        : int.    Dimensionality of the input  data.
    outD       : int.    Dimensionality of the output data.
    ilog       : bool.   Determines whether to take the log10 of input  data.
    olog       : bool.   Determines whether to take the log10 of output data.
    TFRfile    : string. Prefix for TFRecords files.
    batch_size : int.    Size of batches for training/validating/testing.
    ncores     : int.    Number of cores to use to load data cases.
    buffer_size: int.    Number of data cases to pre-load in memory.
    gridsearch : bool.   Determines whether to perform a grid search over 
                         `architectures`.
    architectures: list. Model architectures to consider.
    layers     : list, str.  Types of hidden layers.
    lay_params : list, ints. Parameters for the layer type 
                             E.g., kernel size
    activations: list, str.  Activation functions for each hidden layer.
    act_params : list, floats. Parameters for the activation functions.
    nodes      : list, ints. For layers with nodes, number of nodes per layer.
    lengthscale: float.  Minimum learning rate.
    max_lr     : float.  Maximum learning rate.
    clr_mode   : string. Sets the cyclical learning rate function.
    clr_steps  : int.    Number of steps per cycle of the learning rate.
    epochs     : int.    Max number of iterations through dataset for training.
    patience   : int.    If no model improvement after `patience` epochs, 
                         halts training.
    weight_file: string. Path/to/file where NN weights are saved.
    resume     : bool.   Determines whether to resume training.
    plot_cases : list, ints. Cases from test set to plot.
    fxvals     : string. Path/to/file of X-axis values to correspond to 
                         predicted Y values.
    xlabel     : string. X-axis label for plotting.
    ylabel     : string. Y-axis label for plotting.
    filters    : list, strings.  Paths/to/filter files.  Default: None
                         If specified, will compute RMSE/R2 stats over the 
                         integrated filter bandpasses.
    filtconv   : float.  Conversion factor for filter file x-axis values to 
                         desired unit.  Default: 1.0
    """
    # Get file names, calculate number of cases per file
    print('Loading files & calculating total number of batches...')

    try:
        datsize = np.load(inputdir + fsize)
        num_train = datsize[0]
        num_valid = datsize[1]
        num_test = datsize[2]
    except:
        ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
        fvalid = glob.glob(datadir + 'valid' + os.sep + '*.npy')
        ftest = glob.glob(datadir + 'test' + os.sep + '*.npy')
        num_train = U.data_set_size(ftrain, ncores)
        num_valid = U.data_set_size(fvalid, ncores)
        num_test = U.data_set_size(ftest, ncores)
        np.save(inputdir + fsize,
                np.array([num_train, num_valid, num_test], dtype=int))
        del ftrain, fvalid, ftest

    print("Data set sizes")
    print("Training   data:", num_train)
    print("Validation data:", num_valid)
    print("Testing    data:", num_test)
    print("Total      data:", num_train + num_valid + num_test)

    train_batches = num_train // batch_size
    valid_batches = num_valid // batch_size
    test_batches = num_test // batch_size

    # Update `clr_steps`
    if clr_steps == "range test":
        clr_steps = train_batches * epochs
        rng_test = True
    else:
        clr_steps = train_batches * int(clr_steps)
        rng_test = False

    # Get mean/stdev for normalizing
    if normalize:
        print('\nNormalizing the data...')
        try:
            mean = np.load(inputdir + fmean)
            stdev = np.load(inputdir + fstdev)
        except:
            print("Calculating the mean and standard deviation of the data " +\
                  "using Welford's method.")
            # Compute stats
            ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
            mean, stdev, datmin, datmax = S.mean_stdev(ftrain, inD, ilog, olog)
            np.save(inputdir + fmean, mean)
            np.save(inputdir + fstdev, stdev)
            np.save(inputdir + fmin, datmin)
            np.save(inputdir + fmax, datmax)
            del datmin, datmax, ftrain
        print("mean :", mean)
        print("stdev:", stdev)
        # Slice desired indices
        x_mean, y_mean = mean[:inD], mean[inD:]
        x_std, y_std = stdev[:inD], stdev[inD:]
        # Memory cleanup -- no longer need full mean/stdev arrays
        del mean, stdev
    else:
        x_mean = 0.
        x_std = 1.
        y_mean = 0.
        y_std = 1.

    if olog:
        # To properly calculate RMSE & R2 for log-scaled output
        try:
            y_mean_delog = np.load(inputdir +
                                   fmean.replace(".npy", "_delog.npy"))
        except:
            ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
            mean_delog = S.mean_stdev(ftrain, inD, ilog, False)[0]
            del ftrain
            y_mean_delog = mean_delog[inD:]
            np.save(inputdir + fmean.replace(".npy", "_delog.npy"),
                    y_mean_delog)
    else:
        y_mean_delog = y_mean

    # Get min/max values for scaling
    if scale:
        print('\nScaling the data...')
        try:
            datmin = np.load(inputdir + fmin)
            datmax = np.load(inputdir + fmax)
        except:
            ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
            mean, stdev, datmin, datmax = S.mean_stdev(ftrain, inD, ilog, olog)
            np.save(inputdir + fmean, mean)
            np.save(inputdir + fstdev, stdev)
            np.save(inputdir + fmin, datmin)
            np.save(inputdir + fmax, datmax)
            del mean, stdev, ftrain
        print("min  :", datmin)
        print("max  :", datmax)
        # Slice desired indices
        x_min, y_min = datmin[:inD], datmin[inD:]
        x_max, y_max = datmax[:inD], datmax[inD:]
        # Memory cleanup -- no longer need min/max arrays
        del datmin, datmax

        # Normalize min/max values
        if normalize:
            x_min = U.normalize(x_min, x_mean, x_std)
            x_max = U.normalize(x_max, x_mean, x_std)
            y_min = U.normalize(y_min, y_mean, y_std)
            y_max = U.normalize(y_max, y_mean, y_std)
    else:
        x_min = 0.
        x_max = 1.
        y_min = 0.
        y_max = 1.
        scalelims = [0., 1.]

    # Get TFRecord file names
    print('\nLoading TFRecords file names...')
    TFRpath = inputdir + 'TFRecords' + os.sep + TFRfile
    ftrain_TFR = glob.glob(TFRpath + 'train*.tfrecords')
    fvalid_TFR = glob.glob(TFRpath + 'valid*.tfrecords')
    ftest_TFR = glob.glob(TFRpath + 'test*.tfrecords')

    if len(ftrain_TFR) == 0 or len(fvalid_TFR) == 0 or len(ftest_TFR) == 0:
        # Doesn't exist -- make them
        print("\nSome TFRecords files do not exist yet.")
        ftrain = glob.glob(datadir + 'train' + os.sep + '*.npy')
        fvalid = glob.glob(datadir + 'valid' + os.sep + '*.npy')
        ftest = glob.glob(datadir + 'test' + os.sep + '*.npy')
        if len(ftrain_TFR) == 0:
            print("Making TFRecords for training data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'train.tfrecords',
                ftrain, inD, ilog, olog, batch_size, train_batches)
        if len(fvalid_TFR) == 0:
            print("\nMaking TFRecords for validation data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'valid.tfrecords',
                fvalid, inD, ilog, olog, batch_size, valid_batches)
        if len(ftest_TFR) == 0:
            print("\nMaking TFRecords for test data...")
            U.make_TFRecord(
                inputdir + 'TFRecords' + os.sep + TFRfile + 'test.tfrecords',
                ftest, inD, ilog, olog, batch_size, test_batches)
        print("\nTFRecords creation complete.")
        # Free memory
        del ftrain, fvalid, ftest
        # Get TFR file names for real this time
        ftrain_TFR = glob.glob(TFRpath + 'train*.tfrecords')
        fvalid_TFR = glob.glob(TFRpath + 'valid*.tfrecords')
        ftest_TFR = glob.glob(TFRpath + 'test*.tfrecords')

    # Load the xvals
    if fxvals is not None:
        xvals = np.load(fxvals)
    else:
        xvals = None

    # Perform grid search
    if gridsearch:
        # Train a model for each architecture, w/ unique directories
        print("\nPerforming a grid search.\n")
        maxlen = 0
        for i, arch in enumerate(architectures):
            if len(arch) > maxlen:
                maxlen = len(arch)
            archdir = os.path.join(outputdir, arch, '')
            wsplit = weight_file.rsplit(os.sep, 1)[1].rsplit('.', 1)
            wfile = ''.join([archdir, wsplit[0], '_', arch, '.', wsplit[1]])
            U.make_dir(archdir)
            nn = NNModel(ftrain_TFR,
                         fvalid_TFR,
                         ftest_TFR,
                         inD,
                         outD,
                         olog,
                         x_mean,
                         x_std,
                         y_mean,
                         y_std,
                         x_min,
                         x_max,
                         y_min,
                         y_max,
                         scalelims,
                         ncores,
                         buffer_size,
                         batch_size,
                         [train_batches, valid_batches, test_batches],
                         layers[i],
                         lay_params[i],
                         activations[i],
                         act_params[i],
                         nodes[i],
                         lengthscale,
                         max_lr,
                         clr_mode,
                         clr_steps,
                         wfile,
                         stop_file='./STOP',
                         resume=resume,
                         train_flag=True,
                         shuffle=True)
            nn.train(train_batches, valid_batches, epochs, patience)
            P.loss(nn, archdir)
        # Print/save out the minimum validation loss for each architecture
        minvl = np.ones(len(architectures)) * np.inf
        print('Grid search summary')
        print('-------------------')
        with open(outputdir + 'gridsearch.txt', 'w') as foo:
            foo.write('Grid search summary\n')
            foo.write('-------------------\n')
        for i, arch in enumerate(architectures):
            archdir = os.path.join(outputdir, arch, '')
            history = np.load(archdir + 'history.npz')
            minvl[i] = np.amin(history['val_loss'])
            print(arch.ljust(maxlen, ' ') + ': ' + str(minvl[i]))
            with open(outputdir + 'gridsearch.txt', 'a') as foo:
                foo.write(arch.ljust(maxlen, ' ') + ': ' \
                          + str(minvl[i]) + '\n')
        return

    # Train a model
    if trainflag:
        print('\nBeginning model training.\n')
        nn = NNModel(ftrain_TFR,
                     fvalid_TFR,
                     ftest_TFR,
                     inD,
                     outD,
                     olog,
                     x_mean,
                     x_std,
                     y_mean,
                     y_std,
                     x_min,
                     x_max,
                     y_min,
                     y_max,
                     scalelims,
                     ncores,
                     buffer_size,
                     batch_size, [train_batches, valid_batches, test_batches],
                     layers,
                     lay_params,
                     activations,
                     act_params,
                     nodes,
                     lengthscale,
                     max_lr,
                     clr_mode,
                     clr_steps,
                     weight_file,
                     stop_file='./STOP',
                     train_flag=True,
                     shuffle=True,
                     resume=resume)
        nn.train(train_batches, valid_batches, epochs, patience)
        # Plot the loss
        P.loss(nn, plotdir)

    # Call new model with shuffle=False
    nn = NNModel(ftrain_TFR,
                 fvalid_TFR,
                 ftest_TFR,
                 inD,
                 outD,
                 olog,
                 x_mean,
                 x_std,
                 y_mean,
                 y_std,
                 x_min,
                 x_max,
                 y_min,
                 y_max,
                 scalelims,
                 ncores,
                 buffer_size,
                 batch_size, [train_batches, valid_batches, test_batches],
                 layers,
                 lay_params,
                 activations,
                 act_params,
                 nodes,
                 lengthscale,
                 max_lr,
                 clr_mode,
                 clr_steps,
                 weight_file,
                 stop_file='./STOP',
                 train_flag=False,
                 shuffle=False,
                 resume=False)
    if '.h5' in weight_file or '.hdf5' in weight_file:
        nn.model.load_weights(weight_file)  # Load the model
        # Save in ONNX format
        try:
            onnx_model = keras2onnx.convert_keras(nn.model)
            onnx.save_model(onnx_model,
                            nn.weight_file.rsplit('.', 1)[0] + '.onnx')
        except Exception as e:
            print("Unable to convert the Keras model to ONNX:")
            print(e)
    else:
        nn.model = onnx_to_keras(onnx.load_model(weight_file), ['input_1'])

    # Validate model
    if (validflag or trainflag) and not rng_test:
        print('\nValidating the model...\n')
        # Y values
        print('  Predicting...')
        fvalpred = nn.Yeval('pred',
                            'valid',
                            preddir,
                            denorm=(normalize == False and scale == False))
        fvalpred = glob.glob(fvalpred + '*')

        print('  Loading the true Y values...')
        fvaltrue = nn.Yeval('true',
                            'valid',
                            preddir,
                            denorm=(normalize == False and scale == False))
        fvaltrue = glob.glob(fvaltrue + '*')
        ### RMSE & R2
        print('\n Calculating RMSE & R2...')
        if not normalize and not scale:
            val_stats = S.rmse_r2(fvalpred,
                                  fvaltrue,
                                  y_mean,
                                  olog=olog,
                                  y_mean_delog=y_mean_delog,
                                  x_vals=xvals,
                                  filters=filters,
                                  filtconv=filtconv)
        else:
            val_stats = S.rmse_r2(fvalpred, fvaltrue, y_mean, y_std, y_min,
                                  y_max, scalelims, olog, y_mean_delog, xvals,
                                  filters, filtconv)
        # RMSE
        if np.any(val_stats[0] != -1) and np.any(val_stats[1] != -1):
            print('  Normalized RMSE       : ', val_stats[0])
            print('  Mean normalized RMSE  : ', np.mean(val_stats[0]))
            print('  Denormalized RMSE     : ', val_stats[1])
            print('  Mean denormalized RMSE: ', np.mean(val_stats[1]))
            np.savez(outputdir + rmse_file + '_val_norm.npz',
                     rmse=val_stats[0],
                     rmse_mean=np.mean(val_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = True
        elif np.any(val_stats[0] != -1):
            print('  RMSE     : ', val_stats[0])
            print('  Mean RMSE: ', np.mean(val_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = False
        elif np.any(val_stats[1] != -1):
            print('  RMSE     : ', val_stats[1])
            print('  Mean RMSE: ', np.mean(val_stats[1]))
            saveRMSEnorm = False
            saveRMSEdenorm = True
        else:
            print("  No files passed in to compute RMSE.")
            saveRMSEnorm = False
            saveRMSEdenorm = False
        if saveRMSEnorm:
            P.plot(''.join([plotdir, rmse_file, '_val_norm.png']), xvals,
                   val_stats[0], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_val_norm.npz',
                     rmse=val_stats[0],
                     rmse_mean=np.mean(val_stats[0]))
        if saveRMSEdenorm:
            P.plot(''.join([plotdir, rmse_file, '_val_denorm.png']), xvals,
                   val_stats[1], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_val_denorm.npz',
                     rmse=val_stats[1],
                     rmse_mean=np.mean(val_stats[1]))
        # R2
        if np.any(val_stats[2] != -1) and np.any(val_stats[3] != -1):
            print('  Normalized R2       : ', val_stats[2])
            print('  Mean normalized R2  : ', np.mean(val_stats[2]))
            print('  Denormalized R2     : ', val_stats[3])
            print('  Mean denormalized R2: ', np.mean(val_stats[3]))
            saveR2norm = True
            saveR2denorm = True
        elif np.any(val_stats[2] != -1):
            print('  R2     : ', val_stats[2])
            print('  Mean R2: ', np.mean(val_stats[2]))
            saveR2norm = True
            saveR2denorm = False
        elif np.any(val_stats[3] != -1):
            print('  R2     : ', val_stats[3])
            print('  Mean R2: ', np.mean(val_stats[3]))
            saveR2norm = False
            saveR2denorm = True
        else:
            print("  No files passed in to compute R2.")
            saveR2norm = False
            saveR2denorm = False
        if saveR2norm:
            P.plot(''.join([plotdir, r2_file, '_val_norm.png']), xvals,
                   val_stats[2], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_val_norm.npz',
                     r2=val_stats[2],
                     r2_mean=np.mean(val_stats[2]))
        if saveR2denorm:
            P.plot(''.join([plotdir, r2_file, '_val_denorm.png']), xvals,
                   val_stats[3], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_val_denorm.npz',
                     r2=val_stats[3],
                     r2_mean=np.mean(val_stats[3]))

    # Evaluate model on test set
    if testflag and not rng_test:
        print('\nTesting the model...\n')
        # Y values
        print('  Predicting...')
        ftestpred = nn.Yeval('pred',
                             'test',
                             preddir,
                             denorm=(normalize == False and scale == False))
        ftestpred = glob.glob(ftestpred + '*')

        print('  Loading the true Y values...')
        ftesttrue = nn.Yeval('true',
                             'test',
                             preddir,
                             denorm=(normalize == False and scale == False))
        ftesttrue = glob.glob(ftesttrue + '*')
        ### RMSE & R2
        print('\n Calculating RMSE & R2...')
        if not normalize and not scale:
            test_stats = S.rmse_r2(ftestpred,
                                   ftesttrue,
                                   y_mean,
                                   olog=olog,
                                   y_mean_delog=y_mean_delog,
                                   x_vals=xvals,
                                   filters=filters,
                                   filtconv=filtconv)
        else:
            test_stats = S.rmse_r2(ftestpred, ftesttrue, y_mean, y_std, y_min,
                                   y_max, scalelims, olog, y_mean_delog, xvals,
                                   filters, filtconv)
        # RMSE
        if np.any(test_stats[0] != -1) and np.any(test_stats[1] != -1):
            print('  Normalized RMSE       : ', test_stats[0])
            print('  Mean normalized RMSE  : ', np.mean(test_stats[0]))
            print('  Denormalized RMSE     : ', test_stats[1])
            print('  Mean denormalized RMSE: ', np.mean(test_stats[1]))
            np.savez(outputdir + rmse_file + '_test_norm.npz',
                     rmse=test_stats[0],
                     rmse_mean=np.mean(test_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = True
        elif np.any(test_stats[0] != -1):
            print('  RMSE     : ', test_stats[0])
            print('  Mean RMSE: ', np.mean(test_stats[0]))
            saveRMSEnorm = True
            saveRMSEdenorm = False
        elif np.any(test_stats[1] != -1):
            print('  RMSE     : ', test_stats[1])
            print('  Mean RMSE: ', np.mean(test_stats[1]))
            saveRMSEnorm = False
            saveRMSEdenorm = True
        else:
            print("  No files passed in to compute RMSE.")
            saveRMSEnorm = False
            saveRMSEdenorm = False
        if saveRMSEnorm:
            P.plot(''.join([plotdir, rmse_file, '_test_norm.png']), xvals,
                   test_stats[0], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_test_norm.npz',
                     rmse=test_stats[0],
                     rmse_mean=np.mean(test_stats[0]))
        if saveRMSEdenorm:
            P.plot(''.join([plotdir, rmse_file, '_test_denorm.png']), xvals,
                   test_stats[1], xlabel, 'RMSE')
            np.savez(outputdir + rmse_file + '_test_denorm.npz',
                     rmse=test_stats[1],
                     rmse_mean=np.mean(test_stats[1]))
        # R2
        if np.any(test_stats[2] != -1) and np.any(test_stats[3] != -1):
            print('  Normalized R2       : ', test_stats[2])
            print('  Mean normalized R2  : ', np.mean(test_stats[2]))
            print('  Denormalized R2     : ', test_stats[3])
            print('  Mean denormalized R2: ', np.mean(test_stats[3]))
            saveR2norm = True
            saveR2denorm = True
        elif np.any(test_stats[2] != -1):
            print('  R2     : ', test_stats[2])
            print('  Mean R2: ', np.mean(test_stats[2]))
            saveR2norm = True
            saveR2denorm = False
        elif np.any(test_stats[3] != -1):
            print('  R2     : ', test_stats[3])
            print('  Mean R2: ', np.mean(test_stats[3]))
            saveR2norm = False
            saveR2denorm = True
        else:
            print("  No files passed in to compute R2.")
            saveR2norm = False
            saveR2denorm = False
        if saveR2norm:
            P.plot(''.join([plotdir, r2_file, '_test_norm.png']), xvals,
                   test_stats[2], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_test_norm.npz',
                     r2=test_stats[2],
                     r2_mean=np.mean(test_stats[2]))
        if saveR2denorm:
            P.plot(''.join([plotdir, r2_file, '_test_denorm.png']), xvals,
                   test_stats[3], xlabel, '$R^2$')
            np.savez(outputdir + r2_file + '_test_denorm.npz',
                     r2=test_stats[3],
                     r2_mean=np.mean(test_stats[3]))

    # Plot requested cases
    if not rng_test:
        predfoo = sorted(glob.glob(preddir + 'test' + os.sep + 'pred*'))
        truefoo = sorted(glob.glob(preddir + 'test' + os.sep + 'true*'))
        if len(predfoo) > 0 and len(truefoo) > 0:
            print("\nPlotting the requested cases...")
            nplot = 0
            for v in plot_cases:
                fname = plotdir + 'spec' + str(v) + '_pred-vs-true.png'
                predspec = np.load(predfoo[v // batch_size])[v % batch_size]
                predspec = U.denormalize(
                    U.descale(predspec, y_min, y_max, scalelims), y_mean,
                    y_std)
                truespec = np.load(truefoo[v // batch_size])[v % batch_size]
                truespec = U.denormalize(
                    U.descale(truespec, y_min, y_max, scalelims), y_mean,
                    y_std)
                if olog:
                    predspec[olog] = 10**predspec[olog]
                    truespec[olog] = 10**truespec[olog]
                P.plot_spec(fname, predspec, truespec, xvals, xlabel, ylabel)
                nplot += 1
                print("  Plot " + str(nplot) + "/" + str(len(plot_cases)),
                      end='\r')
            print("")
        else:
            raise Exception("No predictions found in " + preddir + "test.")

    return
Example #10
                    model = LayerTest(kernel_size=kernel_size,
                                      padding=padding,
                                      stride=stride)
                    model.eval()

                    input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
                    input_var = Variable(torch.FloatTensor(input_np))

                    torch.onnx.export(model,
                                      input_var,
                                      "_tmpnet.onnx",
                                      verbose=True,
                                      input_names=['test_in'],
                                      output_names=['test_out'])

                    onnx_model = onnx.load('_tmpnet.onnx')
                    k_model = onnx_to_keras(onnx_model, ['test_in'],
                                            change_ordering=change_ordering)

                    error = check_torch_keras_error(
                        model,
                        k_model,
                        input_np,
                        change_ordering=change_ordering)
                    print('Error:', error)

                    if max_error < error:
                        max_error = error

    print('Max error: {0}'.format(max_error))
Example #11
### sudo pip3 install onnx2keras tf-nightly
### tf-nightly-2.2.0-dev20200502

import onnx
from onnx2keras import onnx_to_keras
import tensorflow as tf
import shutil

onnx_model = onnx.load('human-pose-estimation-3d-0001.onnx')
k_model = onnx_to_keras(onnx_model=onnx_model,
                        input_names=['data'],
                        change_ordering=True)

shutil.rmtree('saved_model', ignore_errors=True)
tf.saved_model.save(k_model, 'saved_model')
"""
$ saved_model_cli show --dir saved_model --all

MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['__saved_model_init_op']:
  The given SavedModel SignatureDef contains the following input(s):
  The given SavedModel SignatureDef contains the following output(s):
    outputs['__saved_model_init_op'] tensor_info:
        dtype: DT_INVALID
        shape: unknown_rank
        name: NoOp
  Method name is: 

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
Example #12
            FHardtanhTest,
            LayerReLUTest,
            FReLUTest,
            LayerELUTest,
            FPELUTest,
    ]:
        for i in range(10):
            model = act_type()
            model.eval()

            input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
            input_var = torch.FloatTensor(input_np)

            torch.onnx.export(model,
                              input_var,
                              "_tmpnet.onnx",
                              verbose=True,
                              input_names=['test_in'],
                              output_names=['test_out'])

            onnx_model = onnx.load('_tmpnet.onnx')
            k_model = onnx_to_keras(onnx_model, ['test_in'])
            os.unlink('_tmpnet.onnx')

            error = check_torch_keras_error(model, k_model, input_np)
            print('Error:', error)
            if max_error < error:
                max_error = error

    print('Max error: {0}'.format(max_error))
Example #13
import numpy as np
import cv2

import onnx
import onnxmltools
import onnx2keras
from onnx2keras import onnx_to_keras

# convert to keras
onnx_path = './base-model-new-320.onnx'
keras_path = onnx_path.split('/')[-1].split('.')[0]

onnx_model = onnxmltools.utils.load_model(onnx_path)
keras_model = onnx_to_keras(onnx_model, ['input'])
keras_model.save(keras_path)  # no file extension: depending on the Keras/TF version this writes HDF5 or a SavedModel directory
keras_model.summary()
Example #14
def load(path, filename):
    """Load network from file.

    Parameters
    ----------

    path: str
        Path to directory where to load model from.

    filename: str
        Name of file to load model from.

    Returns
    -------

    : dict[str, Union[keras.models.Sequential, function]]
        A dictionary of objects that constitute the input model. It must
        contain the following two keys:

        - 'model': keras.models.Sequential
            Keras model instance of the network.
        - 'val_fn': function
            Function that allows evaluating the original model.
    """

    filepath = str(os.path.join(path, filename))

    # Load the Pytorch model.
    mod = import_script(path, filename)
    kwargs = mod.kwargs if hasattr(mod, 'kwargs') else {}
    model_pytorch = mod.Model(**kwargs)
    map_location = 'cpu' if not torch.cuda.is_available() else None
    model_path = None
    for ext in ['.pth', '.pkl']:
        if os.path.exists(filepath + ext):
            model_path = filepath + ext
            break
    assert model_path, "Pytorch state_dict not found at {}".format(filepath)
    model_pytorch.load_state_dict(
        torch.load(model_path, map_location=map_location))

    # Switch from train to eval mode to ensure Dropout / BatchNorm is handled
    # correctly.
    model_pytorch.eval()

    # Run on dummy input with correct shape to trace the Pytorch model.
    input_shape = [1] + list(model_pytorch.input_shape)
    input_numpy = np.random.random_sample(input_shape).astype(np.float32)
    input_torch = torch.from_numpy(input_numpy).float()
    output_torch = model_pytorch(input_torch)
    output_numpy = to_numpy(output_torch)

    # Export as onnx model, and then reload.
    input_names = ['input_0']
    output_names = ['output_{}'.format(i) for i in range(len(output_torch))]
    dynamic_axes = {'input_0': {0: 'batch_size'}}
    dynamic_axes.update({name: {0: 'batch_size'} for name in output_names})
    torch.onnx.export(model_pytorch,
                      input_torch,
                      filepath + '.onnx',
                      input_names=input_names,
                      output_names=output_names,
                      dynamic_axes=dynamic_axes)
    model_onnx = onnx.load(filepath + '.onnx')
    # onnx.checker.check_model(model_onnx)  # Crashes with segmentation fault.

    # Compute ONNX Runtime output prediction.
    ort_session = onnxruntime.InferenceSession(filepath + '.onnx')
    input_onnx = {ort_session.get_inputs()[0].name: input_numpy}
    output_onnx = ort_session.run(None, input_onnx)

    # Compare ONNX Runtime and PyTorch results.
    err_msg = "Pytorch model could not be ported to ONNX. Output difference: "
    np.testing.assert_allclose(output_numpy,
                               output_onnx[0],
                               rtol=1e-03,
                               atol=1e-05,
                               err_msg=err_msg)
    print("Pytorch model was successfully ported to ONNX.")

    change_ordering = keras.backend.image_data_format() == 'channels_last'
    if change_ordering:
        input_numpy = np.moveaxis(input_numpy, 1, -1)
        output_numpy = np.moveaxis(output_numpy, 1, -1)

    # Import this here; import changes image_data_format to channels_first.
    from onnx2keras import onnx_to_keras
    # Port ONNX model to Keras.
    model_keras = onnx_to_keras(model_onnx,
                                input_names, [input_shape[1:]],
                                change_ordering=change_ordering,
                                verbose=False)
    if change_ordering:
        keras.backend.set_image_data_format('channels_last')

    # Save the keras model.
    keras.models.save_model(model_keras, filepath + '.h5')

    # Loading the model here is a workaround for version conflicts with
    # TF > 2.0.1 and keras > 2.2.5. Should be able to remove this later.
    model_keras = keras.models.load_model(filepath + '.h5')
    model_keras.compile('sgd', 'categorical_crossentropy',
                        ['accuracy', keras.metrics.top_k_categorical_accuracy])

    # Compute Keras output and compare against ONNX.
    output_keras = model_keras.predict(input_numpy)
    err_msg = "ONNX model could not be ported to Keras. Output difference: "
    np.testing.assert_allclose(output_numpy,
                               output_keras,
                               rtol=1e-03,
                               atol=1e-05,
                               err_msg=err_msg)
    print("ONNX model was successfully ported to Keras.")

    return {'model': model_keras, 'val_fn': model_keras.evaluate}
Example #15
import warnings
from onnx_tf.backend import prepare
import onnx
import tensorflow as tf
import onnx2keras
import numpy as np

warnings.filterwarnings('ignore')
#sess = ort.InferenceSession('model.onnx')

model = onnx.load_model("model.onnx")
k_model = onnx2keras.onnx_to_keras(onnx_model=model,
                                   input_names=["input_data"],
                                   change_ordering=True,
                                   verbose=False)
print(k_model.layers)
Example #16
import numpy as np
import torch
from torch.autograd import Variable
from onnx2keras import onnx_to_keras, check_torch_keras_error
import onnx
from torchvision.models.densenet import densenet121

if __name__ == '__main__':
    model = densenet121()
    model.eval()

    input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
    input_var = Variable(torch.FloatTensor(input_np), requires_grad=False)
    output = model(input_var)

    torch.onnx.export(model, (input_var),
                      "_tmpnet.onnx",
                      verbose=True,
                      input_names=['test_in1'],
                      output_names=['test_out'])

    onnx_model = onnx.load('_tmpnet.onnx')
    # The model was exported with a single input, 'test_in1'.
    k_model = onnx_to_keras(onnx_model, ['test_in1'],
                            change_ordering=True)

    error = check_torch_keras_error(model,
                                    k_model,
                                    input_np,
                                    change_ordering=True)

    print('Max error: {0}'.format(error))
Example #17
import onnx
from onnx2keras import onnx_to_keras
import tensorflow as tf
# Converts ONNX networks to keras networks
# Currently only for the actor network

# Load ONNX model
onnx_actor = onnx.load('actor.onnx')

# Replace incompatible placeholder node from Matlab
onnx_actor.graph.node[5].op_type = 'Tanh'
print('Loaded and replaced')
# Convert model to keras
actor_onnx = onnx_to_keras(onnx_actor, ['observation'])

# Rebuild model in tensorflow to get the right layers
actor_tf = tf.keras.Sequential()
actor_tf.add(tf.keras.Input(shape=(26, )))
actor_tf.add(tf.keras.layers.Dense(400))
actor_tf.add(tf.keras.layers.Dense(300, activation='relu'))
actor_tf.add(tf.keras.layers.Dense(8))
actor_tf.add(tf.keras.layers.Activation('tanh'))
# actor_tf.add(tf.keras.layers.Flatten())

# Apply weights and biases
weight_0 = actor_onnx.layers[1].get_weights()
actor_tf.layers[0].set_weights(
    [tf.reshape(weight_0[0], (26, 400)), weight_0[1]])
weight_1 = actor_onnx.layers[3].get_weights()
actor_tf.layers[1].set_weights(
    [tf.reshape(weight_1[0], (400, 300)), weight_1[1]])
Example #18
import onnx2keras
from onnx2keras import onnx_to_keras
import keras
import onnx

path = "C:/Users/msa/Documents/datasets/pretrained architectures/PointPillars/pfe.onnx"
onnx_model = onnx.load(path)
# onnx_to_keras requires the input names; take them from the graph itself.
input_names = [inp.name for inp in onnx_model.graph.input]
k_model = onnx_to_keras(onnx_model, input_names)
print(k_model)
Example #19
# Versions of tensorflow used -------------------------
print("tf.__version__ is", tf.__version__)
print("tf.keras.__version__ is:", tf.keras.__version__)
print("Num GPUs Available: ",
      len(tf.config.experimental.list_physical_devices('GPU')))
# Versions of tensorflow used -------------------------

# transform the given images --------------------------
PIL_transform = transforms.ToPILImage()
tensor_transform = transforms.ToTensor()
# transform the given images --------------------------

# Loading the onnx model ------------------------------
onnx_model = onnx.load("Material_Classifier.onnx")
k_model = onnx_to_keras(onnx_model, ['imageinput'])
keras.models.save_model(k_model,
                        "Material_Classifier.h5",
                        overwrite=True,
                        include_optimizer=True)
# Loading the onnx model ------------------------------

# Folder and image size initialization ----------------
IMG_SIZE = 200
CARBON = "CARBON/"
FIBERGLASS = "FIBERGLASS/"
LABELS = {CARBON: 0, FIBERGLASS: 1}
# Folder and image size initialization ----------------

# Counts and training data ----------------------------
training_data = []
Example #20
    return error


if __name__ == '__main__':
    max_error = 0

    for i in range(10):
        model = FTest()
        model.eval()

        input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var1 = Variable(torch.FloatTensor(input_np1))
        input_var2 = Variable(torch.FloatTensor(input_np2))
        output = model(input_var1, input_var2)

        torch.onnx.export(model, (input_var1, input_var2),
                          "_tmpnet.onnx",
                          verbose=True,
                          input_names=['test_in1', 'test_in2'],
                          output_names=['test_out'])

        onnx_model = onnx.load('_tmpnet.onnx')
        k_model = onnx_to_keras(onnx_model, ['test_in1', 'test_in2'])

        error = check_error(output, k_model, [input_np1, input_np2])
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
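The check_error helper is truncated above (only its closing return survives). A plausible implementation compares the traced PyTorch output against the Keras prediction (a sketch under that assumption):

import numpy as np

def check_error(output, k_model, input_np):
    # output: PyTorch forward-pass result; input_np: list of NCHW arrays.
    pytorch_output = output.data.numpy()
    keras_output = k_model.predict(input_np)
    return np.max(np.abs(pytorch_output - keras_output))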
Example #21
                      input_names=["input"],
                      output_names=["output"],
                      dynamic_axes={
                          "input": {
                              0: "batch_size"
                          },
                          "output": {
                              0: "batch_size"
                          }
                      })
    onnx_model = onnx.load("./temp.onnx")
    onnx.checker.check_model(onnx_model)
    inpt = ['input']

    keras_model = onnx_to_keras(onnx_model=onnx_model,
                                input_names=inpt,
                                change_ordering=True,
                                verbose=False)
    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    with open('modelzoo/model_' + str(i) + '.tflite', "wb") as f:
        f.write(tflite_model)

    data = ''
    for itr in netEmbedding:
        for itr2 in itr:
            data = data + str(itr2) + ','
    data = data[:-1]
    data = data + '\n'
    file.write(data)
file.close()
Example #22
# print('==> Resuming from checkpoint..')
# assert os.path.isfile(resume), 'Error: no checkpoint directory found!'
# checkpoint = os.path.dirname(resume)
# checkpoint = torch.load(resume)
# model.load_state_dict(checkpoint['state_dict'])

# dummy_input = Variable(torch.randn(1, 3, 32, 32)) # nchw
# # dummy_output = model.module(dummy_input)
# # print(dummy_output)

# onnx_filename = "model.onnx"
# torch.onnx.export(model.module, dummy_input, onnx_filename, output_names=['test_output'])

model_onnx = onnx.load('./model.onnx')

k_model = onnx_to_keras(model_onnx, input_names=['0'])

# Export model as .pb file
# tf_rep.export_graph('model_tf.h5')

# def load_pb(path_to_pb):
#     with tf.gfile.GFile(path_to_pb, 'rb') as f:
#         graph_def = tf.GraphDef()
#         graph_def.ParseFromString(f.read())
#     with tf.Graph().as_default() as graph:
#         tf.import_graph_def(graph_def, name='')
#         return graph

# tf_graph = load_pb('model_tf.pb')
# sess = tf.Session(graph=tf_graph)
Example #23
'''
!pip install onnx
!git clone https://github.com/nerox8664/onnx2keras.git
'''
#%cd onnx2keras


import tensorflow as tf
import onnx
from onnx2keras import onnx_to_keras
from tensorflow.keras.models import load_model

# Load ONNX model
onnx_model = onnx.load('/content/pnet_video.onnx')

# Call the converter and save keras model
k_model = onnx_to_keras(onnx_model, ['input.1'], change_ordering=True)
k_model.save('/content/pnet_video.h5')

from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Activation, Lambda, Reshape

# Load keras model
k_model = load_model('/content/pnet_video.h5')
k_model.summary()

# Remove edge branch from output
edge_model = Model(inputs=k_model.input, outputs=k_model.layers[-2].output)
edge_model.summary()

# Add softmax on output
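The excerpt ends at the softmax step; a sketch of what that step presumably looks like, reusing the Model and Activation imports above (names are assumptions):

softmax_out = Activation('softmax')(edge_model.output)
softmax_model = Model(inputs=edge_model.input, outputs=softmax_out)
softmax_model.summary()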
Example #24
args = parser.parse_args()

print('Loading data...')
(x_train, y_train), (x_test,
                     y_test) = imdb.load_data(num_words=args.vocab_size,
                                              maxlen=args.max_len)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=args.max_len)
x_test = sequence.pad_sequences(x_test, maxlen=args.max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Load ONNX model...')
onnx_model = onnx.load(args.model_path)

#onnx.checker.check_model(onnx_model)

print('Convert ONNX to Keras...')
k_model = onnx_to_keras(onnx_model, ['embedding_input'])

# Evaluate the converted model (k_model) on the test set.
k_model.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])

loss, acc = k_model.evaluate(x_test, y_test, batch_size=args.batch_size)

print("Evaluation result: Loss:", loss, " Accuracy:", acc)
Example #25
# Assumed imports for this snippet (the pytorch2keras converter source).
import io
import logging

import onnx
import torch
from onnx import optimizer
from onnx2keras import onnx_to_keras


def pytorch_to_keras(model,
                     args,
                     input_shapes=None,
                     change_ordering=False,
                     verbose=False,
                     name_policy=None,
                     use_optimizer=False,
                     do_constant_folding=False):
    """
    By given PyTorch model convert layers with ONNX.

    Args:
        model: pytorch model
        args: pytorch model arguments
        input_shapes: keras input shapes (using for each InputLayer)
        change_ordering: change CHW to HWC
        verbose: verbose output
        name_policy: use short names, use random-suffix or keep original names for keras layers

    Returns:
        model: created keras model.
    """
    logger = logging.getLogger('pytorch2keras')

    if verbose:
        logging.basicConfig(level=logging.DEBUG)

    logger.info('Converter is called.')

    if name_policy:
        logger.warning('Name policy isn\'t supported now.')

    if input_shapes:
        logger.warning('Custom shapes isn\'t supported now.')

    if input_shapes and not isinstance(input_shapes, list):
        input_shapes = [input_shapes]

    if not isinstance(args, list):
        args = [args]

    args = tuple(args)

    dummy_output = model(*args)

    if isinstance(dummy_output, torch.autograd.Variable):
        dummy_output = [dummy_output]

    input_names = ['input_{0}'.format(i) for i in range(len(args))]
    output_names = ['output_{0}'.format(i) for i in range(len(dummy_output))]

    logger.debug('Input_names:')
    logger.debug(input_names)

    logger.debug('Output_names:')
    logger.debug(output_names)

    stream = io.BytesIO()
    torch.onnx.export(model,
                      args,
                      stream,
                      do_constant_folding=do_constant_folding,
                      verbose=verbose,
                      input_names=input_names,
                      output_names=output_names)

    stream.seek(0)
    onnx_model = onnx.load(stream)
    if use_optimizer:
        if use_optimizer is True:
            optimizer2run = optimizer.get_available_passes()
        else:
            use_optimizer = set(use_optimizer)
            optimizer2run = [
                x for x in optimizer.get_available_passes()
                if x in use_optimizer
            ]
        logger.info("Running optimizer:\n%s", "\n".join(optimizer2run))
        onnx_model = optimizer.optimize(onnx_model, optimizer2run)

    k_model = onnx_to_keras(onnx_model=onnx_model,
                            input_names=input_names,
                            input_shapes=input_shapes,
                            name_policy=name_policy,
                            verbose=verbose,
                            change_ordering=change_ordering)

    return k_model
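A usage sketch for pytorch_to_keras (the torchvision model and input size are placeholders):

import torch
from torchvision.models import resnet18

model = resnet18().eval()
dummy_input = torch.randn(1, 3, 224, 224)
k_model = pytorch_to_keras(model, dummy_input,
                           change_ordering=True, verbose=False)
k_model.summary()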
Example #26
### tensorflow-gpu==2.2.0
### onnx==1.7.0
### onnx2keras==0.0.21
### https://github.com/amir-abdi/keras_to_tensorflow.git

import onnx
from onnx2keras import onnx_to_keras
import tensorflow as tf
import shutil
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

onnx_model = onnx.load('dbface.onnx')
k_model = onnx_to_keras(onnx_model=onnx_model, input_names=['x'])

shutil.rmtree('saved_model', ignore_errors=True)
tf.saved_model.save(k_model, 'saved_model')

# Convert Keras model to ConcreteFunction
full_model = tf.function(lambda x: k_model(x))
full_model = full_model.get_concrete_function(
    tf.TensorSpec(k_model.inputs[0].shape, k_model.inputs[0].dtype))

# Get frozen ConcreteFunction
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()

tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir=".",
                  name="dbface.pb",
                  as_text=False)
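The same Keras model can also be exported to TFLite, mirroring Example #21 (a sketch; quantization options omitted):

converter = tf.lite.TFLiteConverter.from_keras_model(k_model)
tflite_model = converter.convert()
with open('dbface.tflite', 'wb') as f:
    f.write(tflite_model)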