import numpy as np
import torch
from torch.autograd import Variable as v

# NOTE: build_model, StandardScaler, and the checkpoint layout come from the
# surrounding project (a chemprop-style setup is assumed); only the standard
# imports are added above.
def predict_autograd(test_data):
    checkpoint_path = 'saved_models/qm9_ens_seed60/fold_0/model_0/model.pt'
    state = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    args, loaded_state_dict = state['args'], state['state_dict']
    scaler = StandardScaler(state['data_scaler']['means'],
                            state['data_scaler']['stds']) if state['data_scaler'] is not None else None

    for k in ['encoder.encoder.cached_zero_vector', 'encoder.encoder.W_i.weight', 'encoder.encoder.W_h.weight',
              'encoder.encoder.W_o.weight', 'encoder.encoder.W_o.bias']:
        loaded_state_dict.pop(k, None)

    # Build model
    model = build_model(args)
    model_state_dict = model.state_dict()

    # Skip missing parameters and parameters of mismatched size
    pretrained_state_dict = {}
    for param_name in loaded_state_dict.keys():

        if param_name not in model_state_dict:
            print(f'Pretrained parameter "{param_name}" cannot be found in model parameters.')
        elif model_state_dict[param_name].shape != loaded_state_dict[param_name].shape:
            print(f'Pretrained parameter "{param_name}" '
                  f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '
                  f'model parameter of shape {model_state_dict[param_name].shape}.')
        else:
            # print(f'Loading pretrained parameter "{param_name}".')
            pretrained_state_dict[param_name] = loaded_state_dict[param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    model.eval()
    test_data = v(torch.from_numpy(test_data).float(), requires_grad=True)

    # torch.no_grad() is deliberately not used here: gradients w.r.t. the input are needed.
    model_preds, ale_pred = model(test_data)
    ale_pred = torch.exp(ale_pred)

    model_preds.backward()

    if scaler is not None:
        model_preds = scaler.inverse_transform(model_preds.detach())
        ale_pred = scaler.inverse_transform_variance(ale_pred.detach())

    model_preds = np.array(model_preds.tolist(), dtype=float)  # np.float is removed in modern NumPy
    ale_pred = np.array(ale_pred.tolist(), dtype=float)
    # RMS (normalised by 1000 elements) and maximum absolute value of the input gradients
    grad_rms = torch.sqrt(torch.sum(torch.square(test_data.grad.data)) / 1000).numpy()
    grad_max = torch.max(torch.abs(test_data.grad.data)).numpy()

    return model_preds, ale_pred, grad_rms, grad_max
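
# A minimal, hypothetical usage sketch (the feature-matrix shape and dtype are
# assumptions, not taken from the original code): predict_autograd takes a NumPy
# array of featurised test inputs and returns predictions, aleatoric variances,
# and two gradient-based sensitivity summaries.
X_test = np.random.rand(1000, 133).astype(np.float32)  # hypothetical feature matrix
preds, ale_var, grad_rms, grad_max = predict_autograd(X_test)
print(preds.shape, float(grad_rms), float(grad_max))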
Example #2
def val(model,dataloder,device):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(7)
    for ii, (input,label) in enumerate(dataloder):
        val_input = v(input)
        val_input=val_input.to(device)
        label=label.to(device)
        score = model(val_input)
        confusion_matrix.add(score.data,label.data)
    model.train()
    cm_value = confusion_matrix.value()
    print(cm_value)
    correct = 0
    for i in range(7):
        correct += cm_value[i][i]
    accuracy = 100. * correct / cm_value.sum()
    return confusion_matrix, accuracy
Example #3
def val(args, model, dataloder, device, critertion):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(cfg.num_classes)
    for ii, (input, label) in enumerate(dataloder):
        with t.no_grad():
            val_input = v(input)
            val_input = val_input.to(device)
            #label = label.reshape(args.batchsize,6)
            label = label.to(device)
            score = model(val_input)
            val_loss = critertion(score, label.long())  # call the criterion directly rather than .forward()
            confusion_matrix.add(score.data, label.data)
    cm_value = confusion_matrix.value()
    print(cm_value)
    correct = 0
    for i in range(cfg.num_classes):  # was hard-coded to 6; match the ConfusionMeter size
        correct += cm_value[i][i]
    accuracy = 100. * (correct) / (cm_value.sum())
    return confusion_matrix, accuracy, val_loss.item()
Example #4
import torch as t
from torch.autograd import Variable as v

# simple gradient
a = v(t.FloatTensor([2, 3]), requires_grad=True)
b = a + 3
c = b * b * 3
out = c.mean()
out.backward()
print('*'*10)
print('=====simple gradient======')
print('input')
print(a.data)
print('compute result is')
print(out.item())  # out is a 0-dim tensor; use .item() instead of indexing .data
print('input gradients are')
print(a.grad.data)

# backward on non-scalar output
m = v(t.FloatTensor([[2, 3]]), requires_grad=True)
n = v(t.zeros(1, 2))
n[0, 0] = m[0, 0] ** 2
n[0, 1] = m[0, 1] ** 3
n.backward(t.FloatTensor([[1, 1]]))
print('*'*10)
print('=====non scalar output======')
print('input')
print(m.data)
print('input gradients are')
print(m.grad.data)
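
# Worked check for the non-scalar case above (this note is not in the original
# listing): backward(grad) computes the vector-Jacobian product grad^T * dn/dm,
# and with grad = [1, 1] this reduces to dn0/dm0 = 2*m0 and dn1/dm1 = 3*m1**2,
# i.e. m.grad = [2*2, 3*3**2] = [4, 27].
assert m.grad.data.tolist() == [[4.0, 27.0]]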
Example #5
def predict_autograd_a(smile, checkpoint_path):
    state = torch.load(checkpoint_path,
                       map_location=lambda storage, loc: storage)
    args, loaded_state_dict = state['args'], state['state_dict']
    scaler = StandardScaler(
        state['data_scaler']['means'], state['data_scaler']
        ['stds']) if state['data_scaler'] is not None else None

    mol_smile = mol2graph_autograd_a(smile, args)
    b_num = mol_smile.f_bonds.shape[0] - 1  # number of bond vectors = total - cached_zero_vector (1)
    if b_num == 0:
        return np.zeros((1, 1)), 0, 0, 0
    # print('f_bond', mol_smile.f_bonds)

    # Build model
    model = build_model_autograd_a(args)
    model_state_dict = model.state_dict()

    # Skip missing parameters and parameters of mismatched size
    pretrained_state_dict = {}
    for param_name in loaded_state_dict.keys():

        if param_name not in model_state_dict:
            print(
                f'Pretrained parameter "{param_name}" cannot be found in model parameters.'
            )
        elif model_state_dict[param_name].shape != loaded_state_dict[
                param_name].shape:
            print(
                f'Pretrained parameter "{param_name}" '
                f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '
                f'model parameter of shape {model_state_dict[param_name].shape}.'
            )
        else:
            # print(f'Loading pretrained parameter "{param_name}".')
            pretrained_state_dict[param_name] = loaded_state_dict[param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    model.eval()

    # load bond_vec
    mol_graph = mol2graph_autograd_a(smile, args)
    f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, smiles_batch = mol_graph.get_components()

    f_bonds = v(f_bonds, requires_grad=True)
    model_preds, ale_pred = model(smile, f_bonds)
    ale_pred = torch.exp(ale_pred)
    model_preds.backward()

    # per-bond RMS of the input-feature gradients; entry 0 is the cached zero vector
    atom_grad = []
    for i in torch.square(f_bonds.grad.data):
        atom_grad.append((torch.sum(i).numpy() / len(i)) ** (1 / 2))
    atom_max = max(atom_grad)
    atom_rms = sum(atom_grad[1:]) / len(atom_grad[1:])

    if scaler is not None:
        model_preds = scaler.inverse_transform(model_preds.detach())
        ale_pred = scaler.inverse_transform_variance(ale_pred.detach())

    model_preds = np.array(model_preds.tolist(), dtype=float)  # np.float is removed in modern NumPy
    ale_pred = np.array(ale_pred.tolist(), dtype=float)
    return model_preds, ale_pred, atom_rms, atom_max
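
# A minimal, hypothetical usage sketch (the SMILES string is an illustrative
# assumption and the checkpoint path is reused from the example above, not
# taken from this function's original call site):
preds, ale_var, atom_rms, atom_max = predict_autograd_a(
    'CCO', 'saved_models/qm9_ens_seed60/fold_0/model_0/model.pt')
print(preds, atom_rms, atom_max)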
Example #6
import torch as t
from torch.autograd import Variable as v

# simple gradient
a = v(t.FloatTensor([2, 3]), requires_grad=True)
b = a + 3
c = b * b * 3
out = c.mean()
out.backward()
print('*'*20)
print('------- simple gradient -------')
print('input')
print(a.data)
print('compute result is')
print(out.item())
print('input gradients are')
# out = mean(3*(a+3)^2) = (3*(a+3)^2).sum() / 2
# so d(out)/d(a_i) = 3*(a_i + 3)
# => a.grad.data = [3*(2+3), 3*(3+3)] = [15, 18]
print(a.grad.data)

# backward on non-scalar output
m = v(t.FloatTensor([[2, 3]]), requires_grad=True)
n = v(t.zeros(1, 2))
n[0, 0] = m[0, 0] ** 2
n[0, 1] = m[0, 1] ** 3
n.backward(t.FloatTensor([[1, 1]]))

print('*' * 20)
print('------- non scalar output -------')
print('input')
print(m.data)
print('input gradients are')
# d(n)/d(m) with gradient vector [1, 1]: [2*m0, 3*m1^2] = [4, 27]
print(m.grad.data)
Example #7
model = make_model(args.model, num_classes=6, pretrained=args.pretrained,
                   input_size=(cfg.IMAGE_SIZE, cfg.IMAGE_SIZE))
device = t.device("cuda" if t.cuda.is_available() else "cpu")
model = nn.DataParallel(model)
model = model.to(device)
model.load_state_dict(t.load(resume_model))
model.eval()
confusion_matrix = meter.ConfusionMeter(6)
fpr = dict()
tpr = dict()
roc_auc = dict()
thresholds = dict()
labels = np.zeros((0, 6))  # start empty so a dummy zero batch does not skew the ROC curves
scores = np.zeros((0, 6))
for ii, (input,label) in enumerate(test_dataloader):
    with t.no_grad():
        val_input = v(input)
        val_input=val_input.to(device)
        label=label.to(device)
        score = model(val_input)
        confusion_matrix.add(score.data,label.data)
        label = label_binarize(label.cpu().detach().numpy(),classes=[0,1,2,3,4,5])
        score = score.cpu().detach().numpy()
        labels = np.concatenate((labels,label),axis=0)
        scores = np.concatenate((scores,score),axis=0)
for i in range(6):
    fpr[i], tpr[i], _ = roc_curve(labels[:,i],scores[:,i])
    roc_auc[i] = auc(fpr[i],tpr[i]) 
# Compute macro-average ROC curve and ROC area (method 1)
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(6)]))
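
# The excerpt above ends after aggregating the false positive rates. A minimal
# sketch of the usual next steps (the standard scikit-learn macro-averaging
# recipe; this continuation is not part of the original excerpt):
mean_tpr = np.zeros_like(all_fpr)
for i in range(6):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])  # interpolate each class's ROC onto the common grid
mean_tpr /= 6
fpr["macro"], tpr["macro"] = all_fpr, mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])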
Example #8
import torch as t
from torch.autograd import Variable as v
from matplotlib import pyplot as plt

t.manual_seed(1000)


def get_fake_data(batch_size=8):
    x = t.rand(batch_size, 1) * 20
    y = x * 2 + (1 + t.rand(batch_size, 1)) * 3
    return x, y


w = v(t.rand(1, 1), requires_grad=True)
b = v(t.zeros(1, 1), requires_grad=True)
lr = 0.001

for ii in range(1000):
    x, y = get_fake_data()
    x, y = v(x), v(y)

    y_pred = x.mm(w) + b.expand_as(y)
    loss = 0.5 * (y_pred - y)**2
    loss = loss.sum()

    loss.backward()

    w.data.sub_(lr * w.grad.data)
    b.data.sub_(lr * b.grad.data)
    w.grad.data.zero_()
    b.grad.data.zero_()
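
# For comparison, a sketch of the same training loop using torch.optim.SGD in
# place of the manual .data updates above (an alternative shown for
# illustration, not part of the original listing):
w = v(t.rand(1, 1), requires_grad=True)
b = v(t.zeros(1, 1), requires_grad=True)
opt = t.optim.SGD([w, b], lr=lr)

for ii in range(1000):
    x, y = get_fake_data()
    x, y = v(x), v(y)
    loss = (0.5 * (x.mm(w) + b.expand_as(y) - y) ** 2).sum()
    opt.zero_grad()   # clear any accumulated gradients
    loss.backward()   # populate w.grad and b.grad
    opt.step()        # apply the SGD update w <- w - lr * w.grad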