Code Example #1
import numpy as np

# face_read() and fine_tuning() are helpers from the surrounding project and are
# assumed to be importable here.


def fine_tune_faces_unites():
    # constants used in fine-tuning
    training_steps = 200
    num_batch = 100
    lr = 0.01

    # read the pretrained weights
    weight_vh1 = np.loadtxt('./pretrain/yale_faces/Weight_matrix_1.txt')
    weight_vh2 = np.loadtxt('./pretrain/yale_faces/Weight_matrix_2.txt')
    weight_vh3 = np.loadtxt('./pretrain/yale_faces/Weight_matrix_3.txt')
    weight_vh4 = np.loadtxt('./pretrain/yale_faces/Weight_matrix_4.txt')
    # construct the fine-tuning input weights
    w_list_4_layer = [weight_vh1, weight_vh2, weight_vh3, weight_vh4]

    # read the pretrained bias
    # hidden bias
    bias1 = np.array([np.loadtxt('./pretrain/yale_faces/Hidden_bias_1.txt')
                      ]).transpose()
    bias2 = np.array([np.loadtxt('./pretrain/yale_faces/Hidden_bias_2.txt')
                      ]).transpose()
    bias3 = np.array([np.loadtxt('./pretrain/yale_faces/Hidden_bias_3.txt')
                      ]).transpose()
    bias4 = np.array([np.loadtxt('./pretrain/yale_faces/Hidden_bias_4.txt')
                      ]).transpose()
    # visual bias
    bias5 = np.array([np.loadtxt('./pretrain/yale_faces/Visible_bias_4.txt')
                      ]).transpose()
    bias6 = np.array([np.loadtxt('./pretrain/yale_faces/Visible_bias_3.txt')
                      ]).transpose()
    bias7 = np.array([np.loadtxt('./pretrain/yale_faces/Visible_bias_2.txt')
                      ]).transpose()
    bias8 = np.array([np.loadtxt('./pretrain/yale_faces/Visible_bias_1.txt')
                      ]).transpose()
    # construct the fine-tuning input bias
    b_list_4_layer = [bias1, bias2, bias3, bias4, bias5, bias6, bias7, bias8]

    #N = np.array([2016,1400,896,504,224])
    data_train, data_test = face_read(2000)
    data_train_finetune = np.transpose(data_train)

    # Do the fine-tuning
    total_cost_record_4_lay, w_list_output_4_lay, b_list_output_4_lay = fine_tuning(
        w_list_4_layer, b_list_4_layer, data_train_finetune, num_batch,
        training_steps, lr)

    # write the weight matrix
    for i in np.arange(4):
        filename = 'W_matrix_' + str(i + 1) + '_Final.txt'
        np.savetxt(filename, w_list_output_4_lay[i])

    # write the bias
    for j in np.arange(8):
        filename = 'B_bias_' + str(j + 1) + '_Final.txt'
        np.savetxt(filename, b_list_output_4_lay[j])

    # write the total cost
    np.savetxt('Total_Cost_Final.txt', total_cost_record_4_lay)
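As a usage note, the example above persists its results with np.savetxt; a minimal sketch of loading those fine-tuned parameters back afterwards (file names taken from the code above, list shapes mirroring w_list_4_layer and b_list_4_layer; the reshape restores the column-vector form of the biases) could look like this:

import numpy as np

# Reload the outputs written by fine_tune_faces_unites().
w_list = [np.loadtxt('W_matrix_%d_Final.txt' % (i + 1)) for i in range(4)]
b_list = [np.loadtxt('B_bias_%d_Final.txt' % (j + 1)).reshape(-1, 1) for j in range(8)]
total_cost = np.loadtxt('Total_Cost_Final.txt')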
Code Example #2
# (excerpt starts partway through the script: numpy as np, the random generator rng,
#  time, matplotlib.pyplot as plt, fine_tuning(), the weight initializations
#  ini_value_w1..w4, and the hyper-parameters simulate_data_num, num_batch,
#  training_steps, lr, start_time are all set up earlier and not shown here)
w_list_3_layer = [ini_value_w1, ini_value_w2, ini_value_w3,
                  np.transpose(ini_value_w3), np.transpose(ini_value_w2),
                  np.transpose(ini_value_w1)]
w_list_4_layer = [ini_value_w1, ini_value_w2, ini_value_w3, ini_value_w4,
                  np.transpose(ini_value_w4), np.transpose(ini_value_w3),
                  np.transpose(ini_value_w2), np.transpose(ini_value_w1)]

ini_value_b1 = rng.randn(2000,1)
ini_value_b2 = rng.randn(1000,1)
ini_value_b3 = rng.randn(500,1)
ini_value_b4 = rng.randn(30,1)
ini_value_b5 = rng.randn(500,1)
ini_value_b6 = rng.randn(1000,1)
ini_value_b7 = rng.randn(2000,1)
ini_value_b8 = rng.randn(784,1)
b_list_3_layer = [ini_value_b1, ini_value_b2, ini_value_b3, ini_value_b6, ini_value_b7, ini_value_b8]
b_list_4_layer = [ini_value_b1, ini_value_b2, ini_value_b3, ini_value_b4, ini_value_b5, ini_value_b6, ini_value_b7, ini_value_b8]

simulate_data = rng.randn(784,simulate_data_num)  # 50 data points

#total_cost_record_3_lay, w_list_output_3_lay, b_list_output_3_lay = fine_tuning(w_list_3_layer, b_list_3_layer, simulate_data, num_batch, training_steps, lr)
total_cost_record_4_lay, w_list_output_4_lay, b_list_output_4_lay = fine_tuning(w_list_4_layer, b_list_4_layer, simulate_data, num_batch, training_steps, lr)

end_time = time.time()

print(end_time-start_time)

fig, ax = plt.subplots()

#ax.scatter(range(len(total_cost_record_3_lay)),total_cost_record_3_lay)

ax.scatter(range(len(total_cost_record_4_lay)),total_cost_record_4_lay)
plt.show()
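Both weight lists above follow the usual unrolled-autoencoder layout: the decoder half reuses the encoder matrices transposed, in reverse order. A small sketch that makes the pattern explicit (the helper name unroll_weights is hypothetical):

import numpy as np

def unroll_weights(encoder_weights):
    # [W1, ..., Wk] -> [W1, ..., Wk, Wk.T, ..., W1.T]
    return list(encoder_weights) + [np.transpose(w) for w in reversed(encoder_weights)]

# e.g. w_list_4_layer = unroll_weights([ini_value_w1, ini_value_w2, ini_value_w3, ini_value_w4])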
Code Example #3
def PNN_test_HSMS_ratio_3(I_MS_LR, I_PAN, inputImg, param, net, path, mode,
                          epochs):
    # iniz equals 0; THIS WAS THE PROBLEM: it was doing an override

    test_dir_out = path['test_dir_out']
    FTnetwork_dir_out = path['ftnetwork_dir_out']

    # san paolo hs/ms
    param['L'] = 15  # changed on 23/04
    #    param['ratio']=3
    param['ratio'] = 3  # changed on 04/04
    param['lr'] = 0.0001  # previously 10e-4; changed on 25/04
    param['patchSize'] = 33  # previously 33

    #    param['padSize'] = 8

    if 'inputType' not in param.keys():
        param['inputType'] = 'MS_PAN'

    #fine tuning
    if epochs != 0:

        fine_tuning(I_MS_LR, I_PAN, param, epochs, FTnetwork_dir_out)
        ft_model_path = FTnetwork_dir_out + '/PNN_model.mat'

        FT_model = sio.loadmat(ft_model_path, squeeze_me=True)  # trying this, 25/04

        from PNN_testing_model import Network, ConvLayer

        layer = []
        for j in range(0, len(FT_model['layers']), 2):
            layer.append(
                ConvLayer(FT_model['layers'][j], FT_model['layers'][j + 1]))
        net = Network(layer)

#        layer=[]
#        for j in range(0,len(param['layers']),2):
#            layer.append(ConvLayer(param['layers'][j], param['layers'][j+1]))
#        net=Network(layer)

    if mode != 'full':
        I_MS_LR, I_PAN = downgrade_images(I_MS_LR, I_PAN, param['ratio'],
                                          param['sensor'])

    I_PAN = np.expand_dims(I_PAN, axis=0)

    NDxI_LR = []
    mav_value = 2**(np.float32(param['L']))

    # compute radiometric indexes
    if param['inputType'] == 'MS_PAN_NDxI':
        if I_MS_LR.shape[0] == 8:
            NDxI_LR = np.stack(((I_MS_LR[4, :, :] - I_MS_LR[7, :, :]) /
                                (I_MS_LR[4, :, :] + I_MS_LR[7, :, :]),
                                (I_MS_LR[0, :, :] - I_MS_LR[7, :, :]) /
                                (I_MS_LR[0, :, :] + I_MS_LR[7, :, :]),
                                (I_MS_LR[2, :, :] - I_MS_LR[3, :, :]) /
                                (I_MS_LR[2, :, :] + I_MS_LR[3, :, :]),
                                (I_MS_LR[5, :, :] - I_MS_LR[0, :, :]) /
                                (I_MS_LR[5, :, :] + I_MS_LR[0, :, :])),
                               axis=0)
        else:
            NDxI_LR = np.stack(((I_MS_LR[3, :, :] - I_MS_LR[2, :, :]) /
                                (I_MS_LR[3, :, :] + I_MS_LR[2, :, :]),
                                (I_MS_LR[1, :, :] - I_MS_LR[3, :, :]) /
                                (I_MS_LR[1, :, :] + I_MS_LR[3, :, :])),
                               axis=0)

    # input preparation
    if param['typeInterp'] == 'interp23tap':
        I_MS = interp23(I_MS_LR, param['ratio'])
        if len(NDxI_LR) != 0:
            NDxI = interp23(NDxI_LR, param['ratio'])
    else:
        sys.exit('interpolation not supported')

    if param['inputType'] == 'MS':
        I_in = I_MS.astype('single') / mav_value
    elif param['inputType'] == 'MS_PAN':
        I_in = np.vstack((I_MS, I_PAN)).astype('single') / mav_value
    elif param['inputType'] == 'MS_PAN_NDxI':
        I_in = np.vstack((I_MS, I_PAN)).astype('single') / mav_value
        I_in = np.vstack((I_in, NDxI)).astype('single')
    else:
        sys.exit('Configuration not supported')
    print(I_in.shape)

    I_in_residual = np.expand_dims(I_in, axis=0)
    I_in_residual = I_in_residual[:, :I_MS.shape[0], :, :]

    I_in = np.pad(I_in, ((0, 0), (param['padSize'] // 2, param['padSize'] // 2),
                         (param['padSize'] // 2, param['padSize'] // 2)),
                  mode='edge')
    I_in = np.expand_dims(I_in, axis=0)

    # Pansharpening (changed on 17/04)
    #    param['residual'] = 'false'

    #    I_MS_residual = np.expand_dims(I_MS,axis=0)

    if param['residual'] == 'true' or param['residual'] == 1:
        I_out = net.build(I_in) + I_in_residual[:, :I_MS.shape[0], :, :]
#        I_out = net.build(I_in) + I_MS_residual
    else:
        I_out = net.build(I_in)

    I_out = I_out * mav_value

    return np.squeeze(I_out)
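Each pair of stacked terms in the MS_PAN_NDxI branch computes the same normalized-difference index, (band_a - band_b) / (band_a + band_b), for a different band pair. A compact sketch of that computation (the helper name ndxi is hypothetical; the band indices are copied from the 8-band case above):

import numpy as np

def ndxi(ms, a, b):
    # normalized difference index between bands a and b of a (bands, H, W) cube
    return (ms[a, :, :] - ms[b, :, :]) / (ms[a, :, :] + ms[b, :, :])

# 8-band case from the example above:
# NDxI_LR = np.stack((ndxi(I_MS_LR, 4, 7), ndxi(I_MS_LR, 0, 7),
#                     ndxi(I_MS_LR, 2, 3), ndxi(I_MS_LR, 5, 0)), axis=0)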
Code Example #4
# (excerpt starts partway through the script: numpy as np, bias1..bias6, the weight
#  list w_list_4_layer, mnist_read(), fine_tuning(), and the hyper-parameters
#  num_batch, training_steps, lr are all set up earlier, following the same pattern
#  as the Yale-faces example above)
bias7 = np.array([
    np.loadtxt('/home/zhshang/DeepEncoder/mnist/Visible_bias_2.txt')
]).transpose()
bias8 = np.array([
    np.loadtxt('/home/zhshang/DeepEncoder/mnist/Visible_bias_1.txt')
]).transpose()
# construct the fine-tuning input bias
b_list_4_layer = [bias1, bias2, bias3, bias4, bias5, bias6, bias7, bias8]

#N = np.array([784,1000,500,250,2])
data_train, label_train, data_test, label_test = mnist_read(2000)

data_train_finetune = np.transpose(data_train)

# Do the fine-tuning
total_cost_record_4_lay, w_list_output_4_lay, b_list_output_4_lay = fine_tuning(
    w_list_4_layer, b_list_4_layer, data_train_finetune, num_batch,
    training_steps, lr)

# write the weight matrix
for i in np.arange(4):
    filename = 'W_matrix_' + str(i + 1) + '_Final.txt'
    np.savetxt(filename, w_list_output_4_lay[i])

# write the bias
for j in np.arange(8):
    filename = 'B_bias_' + str(j + 1) + '_Final.txt'
    np.savetxt(filename, b_list_output_4_lay[j])

# write the total cost
np.savetxt('Total_Cost_Final.txt', total_cost_record_4_lay)
Code Example #5
def PNN_test(I_MS_LR, I_PAN, inputImg, param, net, path, mode, epochs=0):

    test_dir_out = path['test_dir_out']
    FTnetwork_dir_out = path['ftnetwork_dir_out']
    param['L'] = inputImg['L']
    param['ratio'] = inputImg['ratio']
    if 'inputType' not in param.keys():
        param['inputType'] = 'MS_PAN'

    #fine tuning
    if epochs != 0:
        fine_tuning(I_MS_LR, I_PAN, param, epochs, FTnetwork_dir_out)
        ft_model_path = FTnetwork_dir_out + '/PNN_model.mat'

        FT_model = sio.loadmat(ft_model_path, squeeze_me=True)

        from PNN_testing_model import Network, ConvLayer

        layer = []
        for j in range(0, len(FT_model['layers']), 2):
            layer.append(
                ConvLayer(FT_model['layers'][j], FT_model['layers'][j + 1]))
        net = Network(layer)

    if mode != 'full':
        I_MS_LR, I_PAN = downgrade_images(I_MS_LR, I_PAN, param['ratio'],
                                          param['sensor'])

    I_PAN = np.expand_dims(I_PAN, axis=0)
    NDxI_LR = []
    mav_value = 2**(np.float32(param['L']))

    # compute radiometric indexes
    if param['inputType'] == 'MS_PAN_NDxI':
        if I_MS_LR.shape[0] == 8:
            NDxI_LR = np.stack(((I_MS_LR[4, :, :] - I_MS_LR[7, :, :]) /
                                (I_MS_LR[4, :, :] + I_MS_LR[7, :, :]),
                                (I_MS_LR[0, :, :] - I_MS_LR[7, :, :]) /
                                (I_MS_LR[0, :, :] + I_MS_LR[7, :, :]),
                                (I_MS_LR[2, :, :] - I_MS_LR[3, :, :]) /
                                (I_MS_LR[2, :, :] + I_MS_LR[3, :, :]),
                                (I_MS_LR[5, :, :] - I_MS_LR[0, :, :]) /
                                (I_MS_LR[5, :, :] + I_MS_LR[0, :, :])),
                               axis=0)
        else:
            NDxI_LR = np.stack(((I_MS_LR[3, :, :] - I_MS_LR[2, :, :]) /
                                (I_MS_LR[3, :, :] + I_MS_LR[2, :, :]),
                                (I_MS_LR[1, :, :] - I_MS_LR[3, :, :]) /
                                (I_MS_LR[1, :, :] + I_MS_LR[3, :, :])),
                               axis=0)

    #input preparation
    if param['typeInterp'] == 'interp23tap':
        I_MS = interp23(I_MS_LR, param['ratio'])
        if len(NDxI_LR) != 0:
            NDxI = interp23(NDxI_LR, param['ratio'])
    else:
        sys.exit('interpolation not supported')

    if param['inputType'] == 'MS':
        I_in = I_MS.astype('single') / mav_value
    elif param['inputType'] == 'MS_PAN':
        I_in = np.vstack((I_MS, I_PAN)).astype('single') / mav_value
    elif param['inputType'] == 'MS_PAN_NDxI':
        I_in = np.vstack((I_MS, I_PAN)).astype('single') / mav_value
        I_in = np.vstack((I_in, NDxI)).astype('single')
    else:
        sys.exit('Configuration not supported')
    print(I_in.shape)

    I_in_residual = np.expand_dims(I_in, axis=0)
    I_in_residual = I_in_residual[:, :I_MS.shape[0], :, :]
    I_in = np.pad(I_in, ((0, 0), (param['padSize'] // 2, param['padSize'] // 2),
                         (param['padSize'] // 2, param['padSize'] // 2)),
                  mode='edge')
    I_in = np.expand_dims(I_in, axis=0)

    #Pansharpening
    if param['residual']:
        I_out = net.build(I_in) + I_in_residual[:, :I_MS.shape[0], :, :]
    else:
        I_out = net.build(I_in)

    I_out = I_out * mav_value

    return np.squeeze(I_out)
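For orientation, a rough calling sketch for PNN_test follows. The dictionary keys mirror the ones read inside the function, but the sensor name, bit depth, sizes, and random arrays are placeholders, and net must be a pretrained PNN_testing_model.Network built by the surrounding project, so none of this reflects the project's actual configuration:

import numpy as np

# Illustrative wiring only; values are placeholders, not the project's defaults.
param = {'sensor': 'WV2', 'typeInterp': 'interp23tap', 'padSize': 16,
         'residual': True, 'inputType': 'MS_PAN'}
path = {'test_dir_out': './out/test', 'ftnetwork_dir_out': './out/ft'}
inputImg = {'L': 11, 'ratio': 4}

I_MS_LR = np.random.rand(8, 64, 64) * 2**11   # (bands, H, W) low-resolution multispectral cube
I_PAN = np.random.rand(256, 256) * 2**11      # (H*ratio, W*ratio) panchromatic band

# net: pretrained PNN_testing_model.Network; epochs > 0 also runs fine_tuning() first
I_fused = PNN_test(I_MS_LR, I_PAN, inputImg, param, net, path, mode='full', epochs=0)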
Code Example #6
File: _main.py  Project: Wenhui-Yu/LCFN
        # (excerpt starts inside a model-specific branch that extends para_name)
        para_name += [
            'FREQUENCY_USER', 'FREQUENCY_ITEM', 'FREQUENCY', 'KEEP_PORB',
            'SAMPLE_RATE', 'GRAPH_CONV', 'PREDICTION', 'LOSS_FUNCTION',
            'GENERALIZATION', 'OPTIMIZATION', 'IF_TRASFORMATION', 'ACTIVATION',
            'POOLING'
        ]
    if all_para[2] == 'SGNN': para_name += ['PROP_DIM', 'PROP_EMB', 'IF_NORM']
    # if testing the model, we need to read in the test set
    if tuning_method == 'test': all_para[11] = para[11] = 'Test'

    ## read data
    data = read_all_data(all_para)
    para[10] = data[-1]

    ## tuning the model
    os.environ["CUDA_VISIBLE_DEVICES"] = all_para[0]
    if tuning_method == 'tuning':
        tuning(path_excel_dir, para_name, para, data, lr_coarse, lamda_coarse,
               min_num_coarse, max_num_coarse, min_num_fine, max_num_fine)
    if tuning_method == 'fine_tuning':
        fine_tuning(path_excel_dir, para_name, para, data, lr_fine, lamda_fine,
                    min_num_fine, max_num_fine)
    if tuning_method == 'cross_tuning':
        cross_tuning(path_excel_dir, para_name, para, data, lr_fine,
                     lamda_fine, min_num_fine, max_num_fine)
    if tuning_method == 'coarse_tuning':
        coarse_tuning(path_excel_dir, para_name, para, data, lr_coarse,
                      lamda_coarse, min_num_coarse, max_num_coarse)
    if tuning_method == 'test':
        test(path_excel_dir, para_name, para, data, iter_num_test)
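The run of independent if-statements at the end amounts to dispatching on tuning_method; since the mode strings are mutually exclusive, an equivalent table-driven form (a sketch reusing the names already in scope in the example) would be:

# Dict-based dispatch over the same routines; 'test' takes iter_num_test instead
# of the learning-rate/lambda search ranges.
dispatch = {
    'tuning': lambda: tuning(path_excel_dir, para_name, para, data, lr_coarse, lamda_coarse,
                             min_num_coarse, max_num_coarse, min_num_fine, max_num_fine),
    'fine_tuning': lambda: fine_tuning(path_excel_dir, para_name, para, data, lr_fine, lamda_fine,
                                       min_num_fine, max_num_fine),
    'cross_tuning': lambda: cross_tuning(path_excel_dir, para_name, para, data, lr_fine, lamda_fine,
                                         min_num_fine, max_num_fine),
    'coarse_tuning': lambda: coarse_tuning(path_excel_dir, para_name, para, data, lr_coarse,
                                           lamda_coarse, min_num_coarse, max_num_coarse),
    'test': lambda: test(path_excel_dir, para_name, para, data, iter_num_test),
}
dispatch[tuning_method]()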