Example #1
import atom3d.util.results as res
import atom3d.util.metrics as met

# Unit conversion factors: 27.2114 converts Hartree to eV,
# 27211.4 converts Hartree to meV.
conversion = {
    'A': 1.0,
    'B': 1.0,
    'C': 1.0,
    'mu': 1.0,
    'alpha': 1.0,
    'homo': 27.2114,
    'lumo': 27.2114,
    'gap': 27.2114,
    'r2': 1.0,
    'zpve': 27211.4,
    'u0': 27.2114,
    'u298': 27.2114,
    'h298': 27.2114,
    'g298': 27.2114,
    'cv': 1.0,
    'u0_atom': 27.2114,
    'u298_atom': 27.2114,
    'h298_atom': 27.2114,
    'g298_atom': 27.2114,
    'cv_atom': 1.0
}

# Assumption: the original (truncated) snippet defines `labels` earlier;
# here we evaluate every property in the conversion table.
labels = list(conversion.keys())

for label in labels:
    cf = conversion[label]
    rloader = res.ResultsENN('smp-' + label, reps=[1, 2, 3, 4, 5])
    results = rloader.get_all_predictions()
    summary = met.evaluate_average(results,
                                   metric=met.mae,
                                   verbose=True,
                                   select=3)
    # Scale each (mean, std) pair; entry 2 is the test split.
    summary = [(cf * s[0], cf * s[1]) for s in summary]
    print(r'%9s: %6.3f \pm %6.3f' % (label, *summary[2]))
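
The factors 27.2114 and 27211.4 in the conversion table are the standard Hartree-to-eV and Hartree-to-meV conversions (1 Hartree = 27.2114 eV). A minimal, dependency-free sketch of the scaling applied above; the gap value is made up:

# Toy illustration of the unit scaling used in the conversion table.
HARTREE_TO_EV = 27.2114    # 1 Hartree in eV
HARTREE_TO_MEV = 27211.4   # 1 Hartree in meV

gap_hartree = 0.25  # hypothetical HOMO-LUMO gap in Hartree
print('%6.3f eV' % (gap_hartree * HARTREE_TO_EV))    # ~6.803 eV
print('%6.1f meV' % (gap_hartree * HARTREE_TO_MEV))  # ~6802.9 meV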
Example #2
import atom3d.util.results as res
import atom3d.util.metrics as met

# Unit conversion factors: 27.2114 converts Hartree to eV,
# 27211.4 converts Hartree to meV.
conversion = {
    'A': 1.0,
    'B': 1.0,
    'C': 1.0,
    'mu': 1.0,
    'alpha': 1.0,
    'homo': 27.2114,
    'lumo': 27.2114,
    'gap': 27.2114,
    'r2': 1.0,
    'zpve': 27211.4,
    'u0': 27.2114,
    'u298': 27.2114,
    'h298': 27.2114,
    'g298': 27.2114,
    'cv': 1.0,
    'u0_atom': 27.2114,
    'u298_atom': 27.2114,
    'h298_atom': 27.2114,
    'g298_atom': 27.2114,
    'cv_atom': 1.0
}

# Assumption: the original (truncated) snippet defines `labels` earlier.
labels = list(conversion.keys())

for label in labels:
    name = f'logs/smp_test_{label}/smp'
    cf = conversion[label]
    rloader = res.ResultsGNN(name, reps=[0, 1, 2])
    results = rloader.get_all_predictions()
    summary = met.evaluate_average(results, metric=met.mae, verbose=False)
    # Scale each (mean, std) pair; entry 2 is the test split.
    summary = [(cf * s[0], cf * s[1]) for s in summary]
    print(r'%9s: %6.3f \pm %6.3f' % (label, *summary[2]))
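
For intuition, the (mean, std) aggregation over replicate runs can be reproduced by hand with NumPy. The list below is a hypothetical stand-in for the loader's output; the exact structure `met.evaluate_average` consumes may differ:

import numpy as np

# Hypothetical (targets, predictions) pairs, one per replicate run.
per_rep = [
    (np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])),  # rep 0
    (np.array([1.0, 2.0, 3.0]), np.array([0.9, 2.2, 2.8])),  # rep 1
    (np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.1, 3.1])),  # rep 2
]

# One MAE per replicate, then mean and std across replicates.
maes = [np.mean(np.abs(t - p)) for t, p in per_rep]
print('MAE: %6.3f ± %6.3f' % (np.mean(maes), np.std(maes)))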
Example #3
import numpy as np
import torch
import atom3d.util.results as res
import atom3d.util.metrics as met

# Define the training run
name = 'logs/msp_test/msp'
print(name)

# Load training results
rloader = res.ResultsGNN(name, reps=[0, 1, 2])
results = rloader.get_all_predictions()

# Calculate and print results
summary = met.evaluate_average(results, metric=met.auroc, verbose=False)
print(r'Test AUROC: %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_average(results, metric=met.auprc, verbose=False)
print(r'Test AUPRC: %6.3f \pm %6.3f' % summary[2])
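
The two classification metrics can be checked on toy data with scikit-learn; `met.auroc` and `met.auprc` are assumed to compute the same quantities:

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

# Made-up binary labels and predicted scores.
y_true = np.array([0, 0, 1, 1, 0, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])

print('AUROC: %6.3f' % roc_auc_score(y_true, y_score))
print('AUPRC: %6.3f' % average_precision_score(y_true, y_score))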
Example #4
import sys
import numpy as np
import torch
import atom3d.util.results as res
import atom3d.util.metrics as met

seqid = sys.argv[1]

# Define the training run
name = f'logs/lba_test_{seqid}/lba'
print(name)

# Load training results
rloader = res.ResultsGNN(name, reps=[0, 1, 2])
results = rloader.get_all_predictions()

# Calculate and print results
summary = met.evaluate_average(results, metric=met.rmse, verbose=False)
print(r'Test RMSE: %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_average(results, metric=met.spearman, verbose=False)
print(r'Test Spearman: %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_average(results, metric=met.pearson, verbose=False)
print(r'Test Pearson: %6.3f \pm %6.3f' % summary[2])
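
Likewise, the three regression metrics can be reproduced on toy data with NumPy and SciPy (both `spearmanr` and `pearsonr` return the statistic first):

import numpy as np
from scipy.stats import spearmanr, pearsonr

# Made-up binding-affinity targets and predictions.
y_true = np.array([6.2, 4.5, 7.1, 5.0, 8.3])
y_pred = np.array([5.9, 4.8, 6.7, 5.4, 7.9])

rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
print('RMSE:     %6.3f' % rmse)
print('Spearman: %6.3f' % spearmanr(y_true, y_pred)[0])
print('Pearson:  %6.3f' % pearsonr(y_true, y_pred)[0])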
Example #5
import atom3d.util.results as res
import atom3d.util.metrics as met

# `name` is defined earlier in the original (truncated) script; the path
# below is a hypothetical placeholder for a training run.
name = 'logs/psr_test/psr'  # hypothetical placeholder

# Load training results
rloader = res.ResultsGNN(name, reps=[0, 1, 2])
results = rloader.get_target_specific_predictions()

# Calculate and print results
summary = met.evaluate_per_target_average(results['per_target'],
                                          metric=met.spearman,
                                          verbose=False)
print(r'Test Spearman (per-target): %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_per_target_average(results['per_target'],
                                          metric=met.pearson,
                                          verbose=False)
print(r'Test Pearson (per-target): %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_per_target_average(results['per_target'],
                                          metric=met.kendall,
                                          verbose=False)
print(r'Test Kendall (per-target): %6.3f \pm %6.3f' % summary[2])

summary = met.evaluate_average(results['global'],
                               metric=met.spearman,
                               verbose=False)
print(r'Test Spearman (global): %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_average(results['global'],
                               metric=met.pearson,
                               verbose=False)
print(r'Test Pearson (global): %6.3f \pm %6.3f' % summary[2])
summary = met.evaluate_average(results['global'],
                               metric=met.kendall,
                               verbose=False)
print(r'Test Kendall (global): %6.3f \pm %6.3f' % summary[2])
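
The per-target versus global distinction boils down to scoring each target's predictions separately and averaging, versus pooling everything into one score. A hand-rolled sketch; the dictionary layout below is made up and need not match what `get_target_specific_predictions` returns:

import numpy as np
from scipy.stats import spearmanr

# Hypothetical (targets, predictions) pairs keyed by target ID.
predictions = {
    'target_a': (np.array([1.0, 2.0, 3.0]), np.array([1.2, 2.8, 1.9])),
    'target_b': (np.array([0.5, 1.5, 2.5]), np.array([0.7, 1.2, 2.6])),
}

# Per-target: one correlation per target, then the mean across targets.
per_target = [spearmanr(t, p)[0] for t, p in predictions.values()]
print('Spearman (per-target): %6.3f' % np.mean(per_target))

# Global: pool all predictions and compute a single correlation.
t_all = np.concatenate([t for t, _ in predictions.values()])
p_all = np.concatenate([p for _, p in predictions.values()])
print('Spearman (global): %6.3f' % spearmanr(t_all, p_all)[0])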