Esempio n. 1
0
def convert_fl32_to_fl16(model):
    """Convert a model's float32 weights to float16 and save them per layer.

    Each weight array is cast to ``np.float16`` and written to
    ``./<model_name>/fl16/<layer_name>.npy``.

    Args:
        model: model object exposing ``get_weights()``; also passed to the
            project helper ``get_layer_name()`` to obtain names.
            (assumes get_layer_name returns one name per weight array --
            TODO confirm against its definition)
    """
    # get model's weights
    weights = model.get_weights()
    # get model's name and per-layer file names
    model_name, layer_name = get_layer_name(model)

    # makedirs(exist_ok=True) creates parent + child in one call and avoids
    # the check-then-create race of os.path.exists + os.mkdir
    os.makedirs('./{}/fl16'.format(model_name), exist_ok=True)

    # cast each layer to float16 and save it under its layer name
    for i, weight in enumerate(weights):
        weight_fl16 = np.array(weight, np.float16)
        np.save('./{}/fl16/{}'.format(model_name, layer_name[i]), weight_fl16)
Esempio n. 2
0
def swap_layer(model, layer_num, quanti_type):
    """Replace one layer's kernel and bias with weights loaded from disk.

    Loads ``./<model_name>/<quanti_type>/<name>.npy`` for the selected
    layer's kernel and bias, converts fix8 data back to float32 when
    needed, and applies the result to the model via ``set_weights``.

    Args:
        model: model exposing ``get_weights()`` / ``set_weights()``.
            NOTE: the model is modified in place; no copy is made.
        layer_num: zero-based layer index. Weights are stored as
            alternating kernel/bias arrays, so the kernel lives at index
            ``2 * layer_num`` and the bias at ``2 * layer_num + 1``.
        quanti_type: weights subdirectory, e.g. ``'fl16'`` or ``'fix8'``.

    Returns:
        The same model object with the swapped weights applied.
    """
    tmp_model = model  # alias, not a copy: the caller's model is mutated
    # get all weights as a plain list of per-layer arrays.
    # BUG FIX: the original wrapped this in np.array(weights), which fails
    # with ValueError on NumPy >= 1.24 for ragged (differently shaped)
    # layer arrays and was never needed -- list indexing suffices here.
    weights = tmp_model.get_weights()

    # get the model name and the name of each layer
    model_name, layer_name = get_layer_name(model)

    # kernel/bias indices within the flat weights list
    kernel_num = layer_num * 2
    bias_num = layer_num * 2 + 1

    # locate the saved weight files
    kernel_path = './{}/{}/{}.npy'.format(model_name, quanti_type,
                                          layer_name[kernel_num])
    bias_path = './{}/{}/{}.npy'.format(model_name, quanti_type,
                                        layer_name[bias_num])

    # load the layer's weights
    load_kernel = np.load(kernel_path)
    load_bias = np.load(bias_path)

    # fix8 files store int8 data; convert back to float32 before use
    if quanti_type == 'fix8':
        load_kernel = convert_fix8_to_fl32(load_kernel)
        load_bias = convert_fix8_to_fl32(load_bias)

    # saved arrays are flattened; restore the original layer shapes
    load_kernel = load_kernel.reshape(weights[kernel_num].shape)
    load_bias = load_bias.reshape(weights[bias_num].shape)

    # apply values
    weights[kernel_num] = load_kernel
    weights[bias_num] = load_bias

    # push the modified weight list back into the model
    tmp_model.set_weights(weights)
    return tmp_model
Esempio n. 3
0
def pruning(model, threshold):
    """Zero out small weights and save each pruned layer to disk.

    Every weight whose absolute value is below ``threshold`` is set to 0.
    Each layer is saved flattened to
    ``./<model_name>/prun_<threshold>/<layer_name>.npy``; the model itself
    is NOT modified.

    Args:
        model: model object exposing ``get_weights()``.
        threshold: magnitude cutoff; weights with ``abs(w) < threshold``
            become 0.
    """
    # get model's weights
    weights = model.get_weights()
    # get model's name and per-layer file names
    model_name, layer_name = get_layer_name(model)

    # makedirs(exist_ok=True) avoids the exists/mkdir race and creates
    # the parent directory in the same call
    os.makedirs('./{}/prun_{}'.format(model_name, threshold), exist_ok=True)

    # prune each layer: a vectorized boolean mask replaces the original
    # per-element Python loop (identical result, runs at C speed)
    for i, weight in enumerate(weights):
        weight_f = weight.flatten()
        weight_f[np.abs(weight_f) < threshold] = 0
        np.save('./{}/prun_{}/{}'.format(model_name, threshold, layer_name[i]),
                weight_f)
Esempio n. 4
0
def convert_fl32_to_fix8(model):
    """Quantize a model's float32 weights to fixed-point 8-bit and save.

    Each weight value is converted with the project helper
    ``float32_to_fixed8``, the layer is stored as ``np.int8``, and written
    to ``./<model_name>/fix8/<layer_name>.npy``. The model is NOT modified.

    Args:
        model: model object exposing ``get_weights()``.
    """
    # get the model's weights
    weights = model.get_weights()
    # get the model's name and per-layer file names
    model_name, layer_name = get_layer_name(model)

    # makedirs(exist_ok=True) replaces the race-prone exists+mkdir pairs
    os.makedirs('./{}/fix8'.format(model_name), exist_ok=True)

    # convert each layer to fixed 8-bit and save it.
    # (the original kept a second counter `k` that always equaled the loop
    # index -- enumerate makes the single index explicit)
    for j, weight in enumerate(weights):
        weight_f = weight.flatten()
        # per-element loop kept: float32_to_fixed8 is a scalar helper
        for i in range(len(weight_f)):
            weight_f[i] = float32_to_fixed8(weight_f[i])
        # store as a true 8-bit array
        weight_f = np.array(weight_f, np.int8)
        np.save('./{}/fix8/{}'.format(model_name, layer_name[j]), weight_f)
Esempio n. 5
0
def quantify(model, *mask_nums):
    """Quantize a model's weights with one or more bitwise-AND masks.

    For each mask number, every weight value is masked with the project
    helper ``bit_wise_And``, each masked layer is saved to
    ``./<model_name>/bitwiseAnd_<mask_num>/<layer_name>.npy``, and the
    masked model is serialized (JSON architecture + HDF5 weights) into
    the same directory. The first time the function runs for a model it
    also saves the original (unmasked) flattened weights under
    ``./<model_name>/original/``.

    NOTE: the model is mutated in place -- after the call its weights
    correspond to the LAST processed mask. Masks whose output directory
    already exists are skipped entirely.

    Args:
        model: model exposing ``get_weights()`` / ``set_weights()`` /
            ``to_json()`` / ``save_weights()``.
        *mask_nums: one mask number per quantization pass; each is given
            to the project helper ``masking()`` (from bit_wise_And.py).
    """
    # get all weights as a plain list of per-layer arrays.
    # BUG FIX: the original converted this with np.array(weights), which
    # raises ValueError on NumPy >= 1.24 for ragged layer shapes; the
    # list form supports every operation performed below.
    weights = model.get_weights()

    # build one mask per requested mask number
    mask_array = []
    for mask_num in mask_nums:
        mask = masking(mask_num)  # masking method of bit_wise_And.py
        mask_array.append(mask)

    # get the model name and the name of each layer (used for file names)
    model_name, layer_name = get_layer_name(model)

    # make the output directories
    os.makedirs('./' + model_name, exist_ok=True)
    dir_origin = './{}/original/'.format(model_name)
    if not os.path.exists(dir_origin):
        os.mkdir(dir_origin)

        # first run only: save the original flattened weights
        # (intentionally nested inside the exists-check so the originals
        # are written exactly once)
        layer_index = 0
        for weight in weights:
            weight = weight.flatten()
            filepath = dir_origin + layer_name[layer_index]
            np.save(filepath, weight)
            layer_index += 1

    # quantify start: one pass per mask
    for i in range(len(mask_array)):
        layer = []

        # make a directory for this mask's modified weights;
        # if it already exists, this mask was done before -- skip it
        dir_modified = './{}/bitwiseAnd_{}/'.format(model_name, mask_nums[i])
        if not os.path.exists(dir_modified):
            os.mkdir(dir_modified)
        else:
            continue

        layer_index = 0
        # 'weight' is one layer's weight array
        for weight in weights:
            # flatten() copies, so the source `weights` stays unmodified
            weight_flatten = weight.flatten()
            # apply the bitwise-AND mask to every value
            for num in range(len(weight_flatten)):
                weight_flatten[num] = bit_wise_And(weight_flatten[num], mask_array[i])

            # save the modified (still flattened) weights
            filepath = dir_modified + layer_name[layer_index]
            np.save(filepath, weight_flatten)
            print("save complete : {}".format(filepath))
            layer_index += 1

            # restore the layer shape for set_weights
            weight_flatten = weight_flatten.reshape(weight.shape)
            layer.append(weight_flatten)

        # apply the masked weights to the model.
        # BUG FIX: the original wrapped `layer` in np.array() here, which
        # fails on ragged layer shapes (NumPy >= 1.24); set_weights
        # accepts a list of arrays directly.
        model.set_weights(layer)

        # serialize model architecture to JSON
        model_json = model.to_json()
        with open(dir_modified + "model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights(dir_modified + "model.h5")
        print("Saved model to disk")