Example #1
# Imports assumed from the EdgeML Bonsai example layout (TF1 API); the exact
# module paths depend on the EdgeML version installed.
import sys
import numpy as np
import tensorflow as tf
import helpermethods
from edgeml.graph.bonsai import Bonsai
from edgeml.trainer.bonsaiTrainer import BonsaiTrainer


def main():
    # Fixing seeds for reproducibility
    tf.set_random_seed(42)
    np.random.seed(42)

    # Hyper Param pre-processing
    args = helpermethods.getArgs()

    sigma = args.sigma
    depth = args.depth
    projectionDimension = args.proj_dim

    regZ = args.rZ
    regT = args.rT
    regW = args.rW
    regV = args.rV

    totalEpochs = args.epochs

    learningRate = args.learning_rate

    dataDir = args.data_dir

    outFile = args.output_file

    (dataDimension, numClasses,
        Xtrain, Ytrain, Xtest, Ytest) = helpermethods.preProcessData(dataDir)

    sparZ = args.sZ
    sparW = args.sW
    sparV = args.sV
    sparT = args.sT

    # Default batch size: the larger of 100 and sqrt(#training samples)
    if args.batch_size is None:
        batchSize = np.maximum(100, int(np.ceil(np.sqrt(Ytrain.shape[0]))))
    else:
        batchSize = args.batch_size

    # Multi-class hinge loss for the Bonsai objective (False selects cross-entropy)
    useMCHLoss = True

    if numClasses == 2:
        numClasses = 1

    X = tf.placeholder("float32", [None, dataDimension])
    Y = tf.placeholder("float32", [None, numClasses])

    currDir = helpermethods.createTimeStampDir(dataDir)

    helpermethods.dumpCommand(sys.argv, currDir)

    # numClasses = 1 for binary case
    bonsaiObj = Bonsai(numClasses, dataDimension,
                       projectionDimension, depth, sigma)

    bonsaiTrainer = BonsaiTrainer(bonsaiObj,
                                  regW, regT, regV, regZ,
                                  sparW, sparT, sparV, sparZ,
                                  learningRate, X, Y, useMCHLoss, outFile)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    bonsaiTrainer.train(batchSize, totalEpochs, sess,
                        Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir)
Example #2
# Imports assumed from the EdgeML FastCells PyTorch example; the exact module
# paths depend on the EdgeML version installed.
import sys
import numpy as np
import torch
import helpermethods
from edgeml_pytorch.graph.rnn import (FastGRNNCell, FastRNNCell, UGRNNLRCell,
                                      GRULRCell, LSTMLRCell)
from edgeml_pytorch.trainer.fastTrainer import FastTrainer


def main():
    # change cuda:0 to cuda:gpuid for specific allocation
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
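    # e.g. device = torch.device("cuda:1") to pin the second GPU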
    # Fixing seeds for reproducibility
    torch.manual_seed(42)
    np.random.seed(42)

    # Hyper Param pre-processing
    args = helpermethods.getArgs()

    dataDir = args.data_dir
    cell = args.cell
    inputDims = args.input_dim
    batch_first = args.batch_first
    hiddenDims = args.hidden_dim

    totalEpochs = args.epochs
    learningRate = args.learning_rate
    outFile = args.output_file
    batchSize = args.batch_size
    decayStep = args.decay_step
    decayRate = args.decay_rate

    wRank = args.wRank
    uRank = args.uRank

    sW = args.sW
    sU = args.sU

    update_non_linearity = args.update_nl
    gate_non_linearity = args.gate_nl

    (dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest,
     mean, std) = helpermethods.preProcessData(dataDir)

    # The flat feature vector is consumed inputDims features per timestep, so
    # dataDimension must split into an integer number of timesteps.
    assert dataDimension % inputDims == 0, \
        "Infeasible per-step input: dataDimension must be " + \
        "an integer multiple of inputDims"

    currDir = helpermethods.createTimeStampDir(dataDir, cell)

    helpermethods.dumpCommand(sys.argv, currDir)
    helpermethods.saveMeanStd(mean, std, currDir)

    if cell == "FastGRNN":
        FastCell = FastGRNNCell(inputDims, hiddenDims,
                                gate_nonlinearity=gate_non_linearity,
                                update_nonlinearity=update_non_linearity,
                                wRank=wRank, uRank=uRank)
    elif cell == "FastRNN":
        FastCell = FastRNNCell(inputDims, hiddenDims,
                               update_nonlinearity=update_non_linearity,
                               wRank=wRank, uRank=uRank)
    elif cell == "UGRNN":
        FastCell = UGRNNLRCell(inputDims, hiddenDims,
                               update_nonlinearity=update_non_linearity,
                               wRank=wRank, uRank=uRank)
    elif cell == "GRU":
        FastCell = GRULRCell(inputDims, hiddenDims,
                             update_nonlinearity=update_non_linearity,
                             wRank=wRank, uRank=uRank)
    elif cell == "LSTM":
        FastCell = LSTMLRCell(inputDims, hiddenDims,
                              update_nonlinearity=update_non_linearity,
                              wRank=wRank, uRank=uRank)
    else:
        sys.exit('Exiting: no such cell type: ' + cell)

    FastCellTrainer = FastTrainer(FastCell, numClasses, sW=sW, sU=sU,
                                  learningRate=learningRate, outFile=outFile,
                                  device=device, batch_first=batch_first)

    FastCellTrainer.train(batchSize, totalEpochs,
                          torch.from_numpy(Xtrain.astype(np.float32)),
                          torch.from_numpy(Xtest.astype(np.float32)),
                          torch.from_numpy(Ytrain.astype(np.float32)),
                          torch.from_numpy(Ytest.astype(np.float32)),
                          decayStep, decayRate, dataDir, currDir)
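
Purely illustrative (not code from the example): the assert above holds because each flat feature vector of length dataDimension is consumed as dataDimension / inputDims timesteps of inputDims features each.

# e.g. dataDimension = 96, inputDims = 8  ->  12 timesteps of 8 features
timesteps = dataDimension // inputDims
Xsteps = Xtrain.reshape(-1, timesteps, inputDims)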
Example #3
# Imports as in Example #1 (EdgeML TF Bonsai; paths are version-dependent).
import sys
import numpy as np
import tensorflow as tf
import helpermethods
from edgeml.graph.bonsai import Bonsai
from edgeml.trainer.bonsaiTrainer import BonsaiTrainer


def main():
    # Fixing seeds for reproducibility
    tf.set_random_seed(42)
    np.random.seed(42)

    # Hyper Param pre-processing
    args = helpermethods.getArgs()

    # Set 'isRegression' to be True, for regression. Default is 'False'.
    isRegression = args.regression

    sigma = args.sigma
    depth = args.depth

    projectionDimension = args.proj_dim
    regZ = args.rZ
    regT = args.rT
    regW = args.rW
    regV = args.rV

    totalEpochs = args.epochs

    learningRate = args.learning_rate

    dataDir = args.data_dir

    outFile = args.output_file

    (dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest,
     mean, std) = helpermethods.preProcessData(dataDir, isRegression)

    sparZ = args.sZ

    # Default sparsity: 20% non-zero weights for multi-class problems, fully
    # dense (1.0) otherwise; overridden by args.sW / args.sV / args.sT below.
    if numClasses > 2:
        sparW = 0.2
        sparV = 0.2
        sparT = 0.2
    else:
        sparW = 1
        sparV = 1
        sparT = 1

    if args.sW is not None:
        sparW = args.sW
    if args.sV is not None:
        sparV = args.sV
    if args.sT is not None:
        sparT = args.sT

    if args.batch_size is None:
        batchSize = np.maximum(100, int(np.ceil(np.sqrt(Ytrain.shape[0]))))
    else:
        batchSize = args.batch_size

    useMCHLoss = True

    if numClasses == 2:
        numClasses = 1

    X = tf.placeholder("float32", [None, dataDimension])
    Y = tf.placeholder("float32", [None, numClasses])

    currDir = helpermethods.createTimeStampDir(dataDir)

    helpermethods.dumpCommand(sys.argv, currDir)
    # Persist the normalization statistics for use at inference time
    helpermethods.saveMeanStd(mean, std, currDir)

    # numClasses = 1 for binary case
    bonsaiObj = Bonsai(numClasses, dataDimension,
                       projectionDimension, depth, sigma, isRegression)

    bonsaiTrainer = BonsaiTrainer(bonsaiObj,
                                  regW, regT, regV, regZ,
                                  sparW, sparT, sparV, sparZ,
                                  learningRate, X, Y, useMCHLoss, outFile)

    sess = tf.InteractiveSession()

    sess.run(tf.global_variables_initializer())

    bonsaiTrainer.train(batchSize, totalEpochs, sess,
                        Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir)

    sess.close()
    sys.stdout.close()
Example #4
# Imports assumed from the EdgeML FastCells TF example (TF1 API); module paths
# depend on the EdgeML version installed.
import sys
import numpy as np
import tensorflow as tf
import helpermethods
from edgeml.graph.rnn import FastGRNNCell, FastRNNCell
from edgeml.trainer.fastTrainer import FastTrainer


def main():
    # Fixing seeds for reproducibility
    tf.set_random_seed(42)
    np.random.seed(42)

    # Hyper Param pre-processing
    args = helpermethods.getArgs()

    dataDir = args.data_dir
    cell = args.cell
    inputDims = args.input_dim
    hiddenDims = args.hidden_dim

    totalEpochs = args.epochs
    learningRate = args.learning_rate
    outFile = args.output_file
    batchSize = args.batch_size
    decayStep = args.decay_step
    decayRate = args.decay_rate

    wRank = args.wRank
    uRank = args.uRank

    sW = args.sW
    sU = args.sU

    update_non_linearity = args.update_nl
    gate_non_linearity = args.gate_nl

    (dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest, mean,
     std) = helpermethods.preProcessData(dataDir)

    assert dataDimension % inputDims == 0, \
        "Infeasible per-step input: dataDimension must be " + \
        "an integer multiple of inputDims"

    X = tf.placeholder("float",
                       [None, int(dataDimension / inputDims), inputDims])
    Y = tf.placeholder("float", [None, numClasses])

    currDir = helpermethods.createTimeStampDir(dataDir, cell)

    helpermethods.dumpCommand(sys.argv, currDir)
    helpermethods.saveMeanStd(mean, std, currDir)

    if cell == "FastGRNN":
        FastCell = FastGRNNCell(hiddenDims,
                                gate_non_linearity=gate_non_linearity,
                                update_non_linearity=update_non_linearity,
                                wRank=wRank,
                                uRank=uRank)
    elif cell == "FastRNN":
        FastCell = FastRNNCell(hiddenDims,
                               update_non_linearity=update_non_linearity,
                               wRank=wRank,
                               uRank=uRank)
    else:
        sys.exit('Exiting: no such cell type: ' + cell)

    FastCellTrainer = FastTrainer(FastCell,
                                  X,
                                  Y,
                                  sW=sW,
                                  sU=sU,
                                  learningRate=learningRate,
                                  outFile=outFile)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    FastCellTrainer.train(batchSize, totalEpochs, sess, Xtrain, Xtest, Ytrain,
                          Ytest, decayStep, decayRate, dataDir, currDir)
Example #5
# Imports assumed for the EdgeML Bonsai TF2 / TFLite example; BonsaiLayer's
# module path and the str2bool helper are assumptions that depend on the
# EdgeML version installed.
import argparse
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import helpermethods
from helpermethods import str2bool
from edgeml_tf.tflite.bonsaiLayer import BonsaiLayer


def main():
    assert tf.__version__.startswith('2'), \
        'Only the TensorFlow 2.x API is supported.'

    # Hyper Param pre-processing
    parser = argparse.ArgumentParser(
        description='HyperParams for Bonsai Algorithm')
    parser.add_argument('-dir',
                        '--data-dir',
                        required=True,
                        help='Data directory containing ' +
                        'train.npy and test.npy')

    parser.add_argument('-model',
                        '--model-dir',
                        required=True,
                        help='Model directory containing ' +
                        'model parameter matrices and hyper-parameters')

    parser.add_argument(
        '-regression',
        type=str2bool,
        default=False,
        help='Boolean flag controlling whether to perform ' +
        'regression or classification. ' +
        'Default: False (classification). Values: [True, False]')

    args = parser.parse_args()

    # Set 'isRegression' to be True, for regression. Default is 'False'.
    isRegression = args.regression
    assert not isRegression, \
        'tflite export is currently not supported for regression tasks.'

    dataDir = args.data_dir
    model_dir = args.model_dir

    (dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest, mean,
     std) = helpermethods.preProcessData(dataDir, isRegression)

    if numClasses == 2:
        numClasses = 1

    print('Model dir = ', model_dir)

    # Load the trained Bonsai parameters: projection matrix Z, predictor
    # matrices W and V, and branching matrix T, plus the saved hyperparameters.
    Z = np.load(os.path.join(model_dir, 'Z.npy'), allow_pickle=True)
    W = np.load(os.path.join(model_dir, 'W.npy'), allow_pickle=True)
    V = np.load(os.path.join(model_dir, 'V.npy'), allow_pickle=True)
    T = np.load(os.path.join(model_dir, 'T.npy'), allow_pickle=True)
    hyperparams = np.load(os.path.join(model_dir, 'hyperParam.npy'),
                          allow_pickle=True).item()

    n_dim = dataDimension = hyperparams['dataDim']
    projectionDimension = hyperparams['projDim']
    numClasses = hyperparams['numClasses']
    depth = hyperparams['depth']
    sigma = hyperparams['sigma']

    print('dataDim = ', dataDimension)
    print('projectionDim = ', projectionDimension)
    print('numClasses = ', numClasses)
    print('depth = ', depth)
    print('sigma = ', sigma)

    dense = BonsaiLayer(numClasses, dataDimension, projectionDimension, depth,
                        sigma)

    model = keras.Sequential([keras.Input(shape=(n_dim,)), dense])

    # One dummy forward pass so the Bonsai layer builds its weights
    dummy_tensor = tf.convert_to_tensor(np.zeros((1, n_dim), np.float32))
    model(dummy_tensor)

    model.summary()

    # Copy the trained matrices into the Keras layer; the order must match
    # BonsaiLayer's weight order (Z, W, V, T)
    dense.set_weights([Z, W, V, T])

    # Convert the Keras model to the TFLite format
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    #converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()

    # Save the TFLite model to a file
    out_tflite_model_file = os.path.join(model_dir, 'bonsai_model.tflite')
    with open(out_tflite_model_file, "wb") as f:
        f.write(tflite_model)

    # Delete any reference to existing models in order to avoid conflicts
    del model
    del tflite_model

    # Prediction on an example input using tflite model we just saved
    x, y = Xtrain[0], Ytrain[0]
    x = x.astype(np.float32)
    x = np.expand_dims(x, 0)

    # Run inference with TensorFlow Lite
    interpreter = tf.lite.Interpreter(model_path=out_tflite_model_file)
    interpreter.allocate_tensors()
    interpreter.set_tensor(interpreter.get_input_details()[0]["index"], x)
    interpreter.invoke()

    output = interpreter.tensor(
        interpreter.get_output_details()[0]["index"])()[0]
    print('true y = ', np.argmax(y))
    print('predicted y = ', output)
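
A follow-up usage sketch (not part of the original example): scoring the full test split with the interpreter built above. It reuses interpreter, Xtest and Ytest from this example and assumes one-hot labels, as in the single-sample check.

def evaluate_tflite(interpreter, Xtest, Ytest):
    # Input/output tensor indices stay fixed after allocate_tensors()
    input_idx = interpreter.get_input_details()[0]["index"]
    output_idx = interpreter.get_output_details()[0]["index"]
    correct = 0
    for x, y in zip(Xtest, Ytest):
        # The model was exported with batch size 1, so feed one sample at a time
        interpreter.set_tensor(input_idx,
                               np.expand_dims(x.astype(np.float32), 0))
        interpreter.invoke()
        scores = interpreter.tensor(output_idx)()[0]
        correct += int(np.argmax(scores) == np.argmax(y))
    print('tflite test accuracy =', correct / len(Xtest))

evaluate_tflite(interpreter, Xtest, Ytest)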
Example #6
# Imports as in Example #2 (EdgeML PyTorch; paths are version-dependent).
import sys
import numpy as np
import torch
import helpermethods
from edgeml_pytorch.graph.bonsai import Bonsai
from edgeml_pytorch.trainer.bonsaiTrainer import BonsaiTrainer


def main():
    # change cuda:0 to cuda:gpuid for specific allocation
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Fixing seeds for reproducibility
    torch.manual_seed(42)
    np.random.seed(42)

    # Hyper Param pre-processing
    args = helpermethods.getArgs()

    sigma = args.sigma
    depth = args.depth

    projectionDimension = args.proj_dim
    regZ = args.rZ
    regT = args.rT
    regW = args.rW
    regV = args.rV

    totalEpochs = args.epochs

    learningRate = args.learning_rate

    dataDir = args.data_dir

    outFile = args.output_file

    (dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest, mean,
     std) = helpermethods.preProcessData(dataDir)

    sparZ = args.sZ

    if numClasses > 2:
        sparW = 0.2
        sparV = 0.2
        sparT = 0.2
    else:
        sparW = 1
        sparV = 1
        sparT = 1

    if args.sW is not None:
        sparW = args.sW
    if args.sV is not None:
        sparV = args.sV
    if args.sT is not None:
        sparT = args.sT

    if args.batch_size is None:
        batchSize = np.maximum(100, int(np.ceil(np.sqrt(Ytrain.shape[0]))))
    else:
        batchSize = args.batch_size

    useMCHLoss = True

    if numClasses == 2:
        numClasses = 1

    currDir = helpermethods.createTimeStampDir(dataDir)

    helpermethods.dumpCommand(sys.argv, currDir)
    helpermethods.saveMeanStd(mean, std, currDir)

    # numClasses = 1 for binary case
    bonsaiObj = Bonsai(numClasses, dataDimension, projectionDimension, depth,
                       sigma).to(device)

    bonsaiTrainer = BonsaiTrainer(bonsaiObj, regW, regT, regV, regZ, sparW,
                                  sparT, sparV, sparZ, learningRate,
                                  useMCHLoss, outFile, device)

    bonsaiTrainer.train(batchSize, totalEpochs,
                        torch.from_numpy(Xtrain.astype(np.float32)),
                        torch.from_numpy(Xtest.astype(np.float32)),
                        torch.from_numpy(Ytrain.astype(np.float32)),
                        torch.from_numpy(Ytest.astype(np.float32)), dataDir,
                        currDir)
    sys.stdout.close()