MAX_QRY_LENGTH = 200
MAX_DOC_LENGTH = 200
NUM_OF_FEATS = 7
PSG_SIZE = [(50, 1), (150, 1), (MAX_QRY_LENGTH, MAX_DOC_LENGTH)]
NUM_OF_FILTERS = 1
tau = 1
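
# A minimal sketch (an assumption, not from the original source): PSG_SIZE
# reads like a set of (query_window, doc_window) passage sizes, one
# convolution branch each over a (MAX_QRY_LENGTH, MAX_DOC_LENGTH, 1)
# query-document similarity map.
from keras.layers import Conv2D, Input

sim_map = Input(shape=(MAX_QRY_LENGTH, MAX_DOC_LENGTH, 1))
psg_branches = [Conv2D(NUM_OF_FILTERS, size, activation="tanh")(sim_map)
                for size in PSG_SIZE]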

optimizer = "rmsprop"
loss = "logcosh"
batch_size = 512
epochs = 100
exp_path = "exp/Sub_cnn_noraml_rmsprop_" + loss + "_weights-{epoch:02d}-{val_loss:.2f}.hdf5"

input_data_process = InputDataProcess(NUM_OF_FEATS, MAX_QRY_LENGTH,
                                      MAX_DOC_LENGTH)
# Parameters
params = {
    'input_data_process': input_data_process,
    'dim_x': MAX_QRY_LENGTH,
    'dim_y': MAX_DOC_LENGTH,
    'dim_x1': NUM_OF_FEATS,
    'batch_size': batch_size,
    'shuffle': True
}
'''
# Datasets
partition = {'train': ['id-1', 'id-2', 'id-3'], 'validation': ['id-4']}  # IDs
labels = {'id-1': 0, 'id-2': 1, 'id-3': 2, 'id-4': 1}  # Labels
'''
# Example #2
model_name = "SubSampling_categorical_cnn_Adam_categorical_crossentropy_weights-08-0.56.hdf5"


from keras import backend as K


def precision(y_true, y_pred):
    """Precision metric.
    Only computes a batch-wise average of precision.
    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
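
# Quick sanity check of the metric outside a model (a sketch; assumes a
# backend where K.eval can run on constants, as the TensorFlow backend can):
# 2 true positives out of 3 predicted positives -> precision ~ 0.667.
print(K.eval(precision(K.constant([1., 0., 1., 1.]),
                       K.constant([1., 1., 1., 0.]))))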


input_data_process = InputDataProcess(NUM_OF_FEATS, MAX_QRY_LENGTH,
                                      MAX_DOC_LENGTH)  #, test_path)
evaluate_model = EvaluateModel(
    "../Corpus/TDT2/Train/QDRelevanceTDT2_forHMMOutSideTrain", True)
# Parameters
params = {
    'input_data_process': input_data_process,
    'dim_x': MAX_QRY_LENGTH,
    'dim_y': MAX_DOC_LENGTH,
    'dim_x1': NUM_OF_FEATS,
    'batch_size': batch_size,
    'shuffle': False
}

# `percent` (the train/validation split ratio) is assumed to be defined
# earlier in the original script.
[partition, labels,
 partition_answer] = input_data_process.genTrainValidSet(percent)
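
# Assumed training wiring (the DataGenerator name is hypothetical, following
# the common Keras generator pattern; not confirmed by this snippet):
# training_generator = DataGenerator(**params).generate(labels, partition['train'])
# model.fit_generator(generator=training_generator,
#                     steps_per_epoch=len(partition['train']) // batch_size,
#                     epochs=epochs)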