Example No. 1
    def forward(ctx, x, codebook):
        """
        Parameters
        ----------
        x : tensor
            Input of the forward method.
        codebook : tuple
            The fixed codebook of the module, of shape `(M x 1)`,
            from which the quantized input is constructed.

        Returns
        -------
        quantized_input : tensor
            The quantized input formed using the codebook. Each entry of
            the quantized input is the closest codeword available in the
            codebook.
        """
        ctx.save_for_backward(x)
        input_data = x.data
        input_numpy = input_data.numpy()
        # noinspection PyUnresolvedReferences
        quantized_input = torch.zeros(x.size())
        # Replace every element with its nearest codeword.
        for ii in range(input_data.size(0)):
            for jj in range(input_data.size(1)):
                quantized_input[ii, jj], __ = UniformQuantizer.get_optimal_word(
                    input_numpy[ii, jj], codebook)
        return quantized_input
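For context, a forward pass like this is typically wrapped in a torch.autograd.Function whose backward implements a straight-through estimator, so gradients can flow past the non-differentiable nearest-codeword rounding. A minimal self-contained sketch, with the nearest-codeword search inlined as a stand-in for UniformQuantizer.get_optimal_word (class and variable names are illustrative, not from the original project):

import torch

class QuantizeSTE(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x, codebook):
        # codebook: 1-D tensor of M codewords
        dist = (x.unsqueeze(-1) - codebook).abs()  # |x - c| for every codeword
        idx = dist.argmin(dim=-1)                  # index of nearest codeword
        return codebook[idx]

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient through to x;
        # the fixed codebook gets no gradient.
        return grad_output, None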
Example No. 2
def justQuantize(input, codebook):
    input_data = input.data
    input_numpy = input_data.numpy()
    quantized_input = torch.zeros(input.size())
    # Replace every element with its nearest codeword.
    for ii in range(input_data.size(0)):
        for jj in range(input_data.size(1)):
            quantized_input[ii, jj], __ = UniformQuantizer.get_optimal_word(
                input_numpy[ii, jj], codebook)
    return quantized_input
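Assuming UniformQuantizer.get_optimal_word(value, codebook) returns a (nearest_codeword, index) pair, the helper above can be exercised with toy values like these (illustrative only):

import torch

x = torch.randn(4, 8)                  # toy 4x8 input
codebook = (-1.0, -0.25, 0.25, 1.0)    # example 4-level codebook
q = justQuantize(x, codebook)          # every entry of q is now a codeword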
Example No. 3
    def forward(ctx, input, codebook, testCodebook):
        ctx.save_for_backward(input)
        input_data = input.data
        input_numpy = input_data.numpy()
        quantized_input = torch.zeros(input.size())
        retcodebook = list(testCodebook.data.numpy())
        for ii in range(input_data.size(0)):
            for jj in range(input_data.size(1)):
                # quantize against the fixed codebook
                quantized_input[ii, jj], __ = UniformQuantizer.get_optimal_word(
                    input_numpy[ii, jj], codebook)
                # quantize against the learned codebook as well
                itrVal, quantized_idx = UniformQuantizer.get_optimal_word(
                    input_numpy[ii, jj], tuple(retcodebook))
                # update winner codeword
                retcodebook[quantized_idx] = retcodebook[quantized_idx] + \
                    CODEBOOK_LR * (input_numpy[ii, jj] - itrVal)
        retcodebook = torch.from_numpy(np.asarray(retcodebook))
        retcodebook = Variable(retcodebook.float())
        return quantized_input, retcodebook
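The winner update above is the standard competitive-learning (SOM-style) step c_w <- c_w + lr * (x - c_w), since itrVal is the winning codeword itself. A self-contained sketch of just that rule (function name hypothetical):

import numpy as np

def som_step(codebook, sample, lr):
    # Move the codeword nearest to `sample` a fraction `lr` towards it.
    codebook = np.array(codebook, dtype=float)
    winner = np.abs(codebook - sample).argmin()
    codebook[winner] += lr * (sample - codebook[winner])
    return codebook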
Example No. 4
    def forward(ctx, input, codebook):
        ctx.save_for_backward(input)
        input_data = input.data
        input_numpy = input_data.numpy()
        quantized_input = torch.zeros(input.size())
        # Replace every element with its nearest codeword.
        for ii in range(input_data.size(0)):
            for jj in range(input_data.size(1)):
                quantized_input[ii, jj], __ = UniformQuantizer.get_optimal_word(
                    input_numpy[ii, jj], codebook)
        return quantized_input
Example No. 5
def trainSOM(modelname, epoch, initCodebook, lr):
    print('Init Codebook:\n{0}'.format(initCodebook))
    newCodebook = list(initCodebook)
    for batch_idx, (data, target) in enumerate(trainLoader):
        data, target = Variable(data.float()), Variable(target.float())
        target_numpy = target.data.numpy()
        for ii in range(target.size(0)):
            for jj in range(target.size(1)):
                itrVal, quantized_idx = UniformQuantizer.get_optimal_word(
                    target_numpy[ii, jj], tuple(newCodebook))
                # update winner codeword
                newCodebook[quantized_idx] = newCodebook[quantized_idx] + \
                    CODEBOOK_LR * (target_numpy[ii, jj] - itrVal)
    testCodebook = tuple(newCodebook)
    print('SOM Codebook:\n{0}'.format(testCodebook))
    SOMtestModel = linearModels.UniformQuantizerNet(testCodebook)
    SOMtestOptim = optim.SGD(SOMtestModel.parameters(), lr=lr, momentum=0.5)
    train(modelname, epoch, SOMtestModel, SOMtestOptim)
    return SOMtestModel
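A plausible call site, assuming EPOCHS and an initial codebook such as the S_codebook built in the setup below; the model name and learning rate are illustrative:

som_model = trainSOM('SOM test', EPOCHS, S_codebook, lr=0.01)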
Example No. 6
    testS = loadedDataFile['dataS']

    # Get the class containing the train data from DataLoader.py
    trainData = ShlezDatasetTrain(trainX, trainS)
    # define training dataloader
    trainLoader = DataLoader(dataset=trainData,
                             batch_size=BATCH_SIZE,
                             shuffle=True)
    # Do the same for the test data
    testData = ShlezDatasetTest(testX, testS)
    testLoader = DataLoader(dataset=testData,
                            batch_size=BATCH_SIZE,
                            shuffle=True)

    # Generate a uniform codebook for the quantizer from the training data
    S_codebook = UniformQuantizer.codebook_uniform(trainData.S_var,
                                                   codebookSize)
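    # codebook_uniform is defined in UniformQuantizer (not shown here). A
    # plausible sketch, assuming it spaces codebookSize levels uniformly
    # over a range set by the data variance (illustrative only):
    #
    #     amp = 3.0 * np.sqrt(trainData.S_var)
    #     S_codebook = tuple(np.linspace(-amp, amp, codebookSize))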

    # device = torch.device('cpu')
    criterion = nn.MSELoss()

    ########################################################################
    #                  Training and testing all networks                   #
    ########################################################################

    # ------------------------------------------------
    # ---            'Passing Gradient'            ---
    # ------------------------------------------------

    if 'Passing Gradient' in modelsToActivate:
        model_name = 'Passing Gradient'