Example #1
import csv
import time

from keras.layers import Dense, Input  # or tensorflow.keras.layers

# method_name, k_fold, latent_dim, dataset, train_test_indices and the dblp
# data helper are assumed to be defined earlier in the script.
load_weights_from_file_q = input('Load weights from file? (y/n)')
more_train_q = input('Train more? (y/n)')

time_str = time.strftime("%Y_%m_%d-%H_%M_%S")
result_output_name = "../output/predictions/{}_output.csv".format(method_name)
with open(result_output_name, 'w') as file:
    writer = csv.writer(file)
    writer.writerow([
        'Method Name', '# Total Folds', '# Fold Number', '# Predictions',
        '# Truth', 'Computation Time (ms)', 'Prediction Indices',
        'True Indices'
    ])

for fold_counter in range(1, k_fold + 1):
    x_train, y_train, x_test, y_test = dblp.get_fold_data(
        fold_counter, dataset, train_test_indices)

    input_dim = x_train[0][0].shape[1]
    output_dim = y_train[0][0].shape[1]
    print("Input/output Dimensions:  ", input_dim, output_dim)

    # network parameters
    intermediate_dim_encoder = input_dim
    intermediate_dim_decoder = output_dim

    # VAE model = encoder + decoder
    # build encoder model; the Input layer is the model's input placeholder
    inputs = Input(shape=(input_dim,), name='encoder_input')
    x = Dense(intermediate_dim_encoder, activation='relu')(inputs)
    z_mean = Dense(latent_dim, name='z_mean')(x)
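The snippet stops right after z_mean. A Keras VAE encoder is typically completed with a z_log_var layer and a reparameterization-trick sampling layer, as in the standard Keras VAE example. The lines below are a minimal sketch of that continuation, not part of the original; they assume `from keras.layers import Lambda`, `from keras.models import Model` and `from keras import backend as K` are available.

    # Sketch only: typical completion of the VAE encoder (reparameterization trick).
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    def sampling(args):
        # z = mean + exp(0.5 * log_var) * epsilon, with epsilon ~ N(0, I)
        z_mean_, z_log_var_ = args
        batch = K.shape(z_mean_)[0]
        dim = K.int_shape(z_mean_)[1]
        epsilon = K.random_normal(shape=(batch, dim))
        return z_mean_ + K.exp(0.5 * z_log_var_) * epsilon

    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')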
Example #2
import csv
import time

lambda_val = 0.001  # weight decay; see https://stackoverflow.com/questions/44495698/keras-difference-between-kernel-and-activity-regularizers

load_weights_from_file_q = input('Load weights from file? (y/n)')
more_train_q = input('Train more? (y/n)')

time_str = time.strftime("%Y_%m_%d-%H_%M_%S")
result_output_name = "../output/predictions/{}_output.csv".format(method_name)
with open(result_output_name, 'w') as file:
    writer = csv.writer(file)
    writer.writerow(
        ['Method Name', '# Total Folds', '# Fold Number', '# Predictions', '# Truth', 'Computation Time (ms)',
         'Prediction Indices', 'True Indices'])

for fold_counter in range(1, k_fold + 1):
    x_train_onehot, y_train_onehot, x_test_onehot, y_test_onehot = dblp.get_fold_data(
        fold_counter, dataset_onehot, train_test_indices)
    x_train_t2v, y_train_t2v, x_test_t2v, y_test_t2v = dblp.get_fold_data(
        fold_counter, dataset_t2v, train_test_indices)

    input_dim_onehot = x_train_onehot[0][0].shape[1]
    input_dim_t2v = x_train_t2v.shape[1]
    output_dim = y_train_onehot[0][0].shape[1]
    print("Input/output Dimensions:  ", input_dim_onehot+input_dim_t2v, output_dim)

    # network parameters
    intermediate_dim_encoder_onehot = input_dim_onehot
    intermediate_dim_encoder_t2v = input_dim_t2v
    intermediate_dim_decoder = output_dim

    # VAE model = encoder + decoder
    # build encoder model
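The second example breaks off before the encoder is built. One plausible continuation, given the two input dimensions defined above, is a two-branch encoder: the one-hot and t2v inputs each pass through their own Dense layer and are then concatenated before the latent layers. This is only a sketch under that assumption; Concatenate, latent_dim and the layer names below are not taken from the original, and the sampling step would follow as in the sketch after Example #1.

    # Sketch only: a possible two-branch encoder over the one-hot and t2v inputs.
    # Assumes `from keras.layers import Input, Dense, Concatenate`.
    inputs_onehot = Input(shape=(input_dim_onehot,), name='encoder_input_onehot')
    inputs_t2v = Input(shape=(input_dim_t2v,), name='encoder_input_t2v')
    h_onehot = Dense(intermediate_dim_encoder_onehot, activation='relu')(inputs_onehot)
    h_t2v = Dense(intermediate_dim_encoder_t2v, activation='relu')(inputs_t2v)
    h = Concatenate(name='encoder_concat')([h_onehot, h_t2v])
    z_mean = Dense(latent_dim, name='z_mean')(h)
    z_log_var = Dense(latent_dim, name='z_log_var')(h)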