Example #1
    def observe(self, data_loader, loss_criterion, task_i, args, prev_model):

        self.net.train()
        train_meter = Meter()
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels, masks = labels.cuda(), masks.cuda()
            logits = predict(args, self.net, bg)

            # Mask non-existing labels
            loss = loss_criterion(logits, labels) * (masks != 0).float()
            loss = loss[:, task_i].mean()

            if task_i > 0:
                # LwF-style distillation: keep the new model's outputs on the
                # previous tasks close to those of the stored previous model
                target = prev_model.forward(args, bg)
                for oldt in range(task_i):
                    logits_dist = torch.unsqueeze(logits[:, oldt], 0)
                    dist_target = torch.unsqueeze(target[:, oldt], 0)
                    dist_loss = MultiClassCrossEntropy(logits_dist,
                                                       dist_target, 2)
                    loss = loss + dist_loss

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_meter.update(logits, labels, masks)

        train_score = np.mean(train_meter.compute_metric(args['metric_name']))
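
The MultiClassCrossEntropy helper used for distillation above is not defined in
this snippet. A minimal sketch of a Learning-without-Forgetting style
soft-target cross-entropy with temperature T (an assumption about its
definition, not taken from this code) could look like:

import torch

def MultiClassCrossEntropy(logits, targets, T):
    # Soften both distributions with temperature T, then take the
    # cross-entropy between them (assumed LwF-style distillation loss).
    log_p = torch.log_softmax(logits / T, dim=1)
    q = torch.softmax(targets / T, dim=1)
    return -(q * log_p).sum(dim=1).mean()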
Example #2
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        if len(smiles) == 1:
            # Avoid potential issues with batch normalization
            continue

        labels, masks = labels.to(args['device']), masks.to(args['device'])
        prediction = predict(args, model, bg)
        loss = (loss_criterion(prediction, labels) *
                (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels, masks)
        if batch_id % args['print_every'] == 0:
            print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
                epoch + 1, args['num_epochs'], batch_id + 1, len(data_loader),
                loss.item()))
    train_score = np.mean(train_meter.compute_metric(args['metric']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(epoch + 1,
                                                       args['num_epochs'],
                                                       args['metric'],
                                                       train_score))
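
Most of these examples call a predict(args, model, bg) helper (and a similar
regress helper) that is not shown. In DGL-LifeSci style pipelines such a helper
typically pops the node (and optionally edge) features from the batched graph
and forwards them through the model. The sketch below is an assumption about
that helper; the feature keys 'h' and 'e' and the 'edge_featurizer' entry in
args are hypothetical names, not taken from the snippets:

def predict(args, model, bg):
    # Move the batched graph to the target device, pop its features and
    # run the model on them (a sketch, not the snippets' own helper).
    bg = bg.to(args['device'])
    node_feats = bg.ndata.pop('h').to(args['device'])
    if args.get('edge_featurizer') is not None:
        edge_feats = bg.edata.pop('e').to(args['device'])
        return model(bg, node_feats, edge_feats)
    return model(bg, node_feats)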
Example #3
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    train_y = []
    epoch_tot_pos_ps = []
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            bg, labels = batch_data
            labels = labels.to(args['device'])
            prediction = regress(args, model, bg)
            # print(prediction,labels)
            eval_meter.update(prediction, labels)

            true_label = np.argmax(labels.detach().to('cpu').numpy(),
                                   axis=1).tolist()
            pred_cls = prediction.detach().to('cpu').numpy()
            # row-wise softmax over the two class columns
            pred_cls = softmax(pred_cls, axis=1)
            train_y.extend(true_label)
            # print(true_label)
            # print(pred_y)
            tot_pos_ps = [pred_cls[i][1] for i in range(len(true_label))]
            # tot_pos_ps = [pred_cls[i][true_label[i]] for i in range(len(true_label))]
            epoch_tot_pos_ps.extend(tot_pos_ps)

        roc_auc = roc_auc_score(train_y, epoch_tot_pos_ps)
        p, r, thr = precision_recall_curve(train_y, epoch_tot_pos_ps)
        prc_auc = auc(r, p)
        test_roc_accuracies.append(roc_auc)
        test_prc_accuracies.append(prc_auc)

        total_score = np.mean(eval_meter.compute_metric(args['metric_name']))
    return total_score, prc_auc
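
This snippet relies on a row-wise softmax and on two module-level lists defined
elsewhere in the original script. A minimal sketch of those assumed
definitions (the scipy import is an assumption, not taken from the snippet):

from scipy.special import softmax  # applied with axis=1 above

test_roc_accuracies = []  # ROC-AUC collected per evaluation epoch
test_prc_accuracies = []  # PRC-AUC collected per evaluation epoch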
Example #4
    def observe(self, data_loader, loss_criterion, task_i, args):

        self.net.train()

        if task_i != self.current_task:
            self.optpar = []
            self.fisher = []
            self.optimizer.zero_grad()
            
            for batch_id, batch_data in enumerate(self.data_loader):
                smiles, bg, labels, masks = batch_data
                labels, masks = labels.cuda(), masks.cuda()
                output = predict(args, self.net, bg)[:,self.current_task]

                output.pow_(2)
                loss = output.mean()
                self.net.zero_grad()
                loss.backward()
    
                # Accumulate a diagonal Fisher estimate (squared gradients) and
                # a snapshot of the parameters; skip params without a gradient.
                for p in self.net.parameters():
                    if p.grad is None:
                        continue
                    self.fisher.append(p.grad.data.clone().pow(2))
                    self.optpar.append(p.data.clone())

            self.current_task = task_i


        train_meter = Meter()
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels, masks = labels.cuda(), masks.cuda()
            logits = predict(args, self.net, bg)

            # Mask non-existing labels
            loss = loss_criterion(logits, labels) * (masks != 0).float()
            loss = loss[:,task_i].mean()

            if task_i > 0:
                # EWC penalty: pull parameters towards the snapshot stored for
                # the previous task, weighted by the Fisher estimate. The grad
                # check mirrors the filter used when the Fisher terms were stored.
                i = 0
                for p in self.net.parameters():
                    if p.grad is None or i >= len(self.fisher):
                        continue
                    penalty = self.reg * self.fisher[i] * (p - self.optpar[i]).pow(2)
                    loss = loss + penalty.sum()
                    i += 1

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_meter.update(logits, labels, masks)
        
        train_score = np.mean(train_meter.compute_metric(args['metric_name']))
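
A minimal driver for this EWC-style observe method, assuming one data loader
per task; train_sequentially, learner and task_loaders are hypothetical names,
not part of the snippet:

def train_sequentially(learner, task_loaders, loss_criterion, args):
    # Present the tasks one after another so that observe() re-estimates the
    # Fisher terms whenever the task index changes.
    for task_i, loader in enumerate(task_loaders):
        for epoch in range(args['num_epochs']):
            learner.observe(loader, loss_criterion, task_i, args)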
Example #5
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels = labels.to(args['device'])
            logits = predict(args, model, bg)
            eval_meter.update(logits, labels, masks)
    return np.mean(eval_meter.compute_metric(args['metric']))
Example #6
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels = labels.to(args['device'])
            prediction = regress(args, model, bg)
            eval_meter.update(prediction, labels, masks)
        total_score = np.mean(eval_meter.compute_metric(args['metric_name']))
    return total_score
Example #7
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            atom_feats = bg.ndata.pop(args['atom_data_field'])
            atom_feats, labels = atom_feats.to(args['device']), labels.to(args['device'])
            logits = model(bg, atom_feats)
            eval_meter.update(logits, labels, masks)
    return np.mean(eval_meter.compute_metric(args['metric_name']))
Example #8
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer):
    model.train()
    train_meter = Meter()
    train_y = []
    epoch_tot_pos_ps = []
    label = []
    for batch_id, batch_data in enumerate(data_loader):
        tot_pos_ps = []
        true_label = []
        bg, labels = batch_data
        labels = labels.to(args['device'])
        prediction = regress(args, model, bg)
        # print(prediction.dtype,labels.dtype)
        loss = (loss_criterion(prediction, labels)).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels)

        true_label = np.argmax(labels.detach().to('cpu').numpy(),
                               axis=1).tolist()
        # true_label = np.sum(labels.detach().to('cpu').numpy())
        # pred_cls = prediction.detach().to('cpu').numpy()
        # print(pred_cls)
        pred_cls = torch.sigmoid(prediction)
        pred_cls = pred_cls.detach().to('cpu').numpy()
        train_y.extend(true_label)
        # print(true_label)
        # print(pred_cls)
        tot_pos_ps = [pred_cls[i][1] for i in range(len(true_label))]
        # print(tot_pos_ps)
        # tot_pos_ps = [pred_cls[i][true_label[i]] for i in range(len(true_label))]
        epoch_tot_pos_ps.extend(tot_pos_ps)

    roc_auc = roc_auc_score(train_y, epoch_tot_pos_ps)
    p, r, thr = precision_recall_curve(train_y, epoch_tot_pos_ps)
    prc_auc = auc(r, p)
    epoch_roc_accuracies.append(roc_auc)
    epoch_prc_accuracies.append(prc_auc)
    # print(roc_auc, prc_auc)
    # print(classification_report(np.array(train_y,dtype=float),epoch_tot_pos_ps))

    total_score = np.mean(train_meter.compute_metric(args['metric_name']))
    training_score.append(total_score)
    print('epoch {:d}/{:d}, training {} {:.4f} prc_auc {:.4f}'.format(
        epoch + 1, args['num_epochs'], args['metric_name'], total_score,
        prc_auc))
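
Like the earlier evaluation snippet, this function appends to module-level
accumulators defined elsewhere in the original script; a sketch of those
assumed definitions:

epoch_roc_accuracies = []  # training ROC-AUC per epoch
epoch_prc_accuracies = []  # training PRC-AUC per epoch
training_score = []        # Meter metric recorded per epoch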
Example #9
def run_an_eval_epoch(args, model, data_loader, task_i):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels = labels.cuda()
            logits = predict(args, model, bg)
            if isinstance(logits, tuple):
                logits = logits[0]
            eval_meter.update(logits, labels, masks)

    return eval_meter.compute_metric(args['metric_name'])[task_i]
Example #10
def run_a_train_epoch(args, epoch, model, data_loader,
                      loss_criterion, optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        labels, masks = labels.to(args['device']), masks.to(args['device'])
        prediction = regress(args, model, bg)
        loss = (loss_criterion(prediction, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels, masks)
    total_score = np.mean(train_meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(
        epoch + 1, args['num_epochs'], args['metric_name'], total_score))
Example #11
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        labels, masks = labels.to(args['device']), masks.to(args['device'])
        logits = predict(args, model, bg)
        # Mask non-existing labels
        loss = (loss_criterion(logits, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(logits, labels, masks)
        if batch_id % args['print_every'] == 0:
            print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
                epoch + 1, args['num_epochs'], batch_id + 1, len(data_loader), loss.item()))
    train_score = np.mean(train_meter.compute_metric(args['metric']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(
        epoch + 1, args['num_epochs'], args['metric'], train_score))
Example #12
    def observe(self, data_loader, loss_criterion, task_i, args):

        self.net.train()
        train_meter = Meter()
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels, masks = labels.cuda(), masks.cuda()
            logits = predict(args, self.net, bg)

            # Mask non-existing labels
            loss = loss_criterion(logits, labels) * (masks != 0).float()
            loss = loss[:, task_i].mean()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_meter.update(logits, labels, masks)

        train_score = np.mean(train_meter.compute_metric(args['metric_name']))
Example #13
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer, task_i):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        labels, masks = labels.cuda(), masks.cuda()
        logits = predict(args, model, bg)
        if isinstance(logits, tuple):
            logits = logits[0]

        # Mask non-existing labels
        loss = loss_criterion(logits, labels) * (masks != 0).float()
        loss = loss[:, task_i].mean()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(logits, labels, masks)

    train_score = np.mean(train_meter.compute_metric(args['metric_name']))
Example #14
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        atom_feats = bg.ndata.pop(args['atom_data_field'])
        atom_feats, labels, masks = atom_feats.to(args['device']), \
                                    labels.to(args['device']), \
                                    masks.to(args['device'])
        logits = model(bg, atom_feats)
        # Mask non-existing labels
        loss = (loss_criterion(logits, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
            epoch + 1, args['num_epochs'], batch_id + 1, len(data_loader), loss.item()))
        train_meter.update(logits, labels, masks)
    train_score = np.mean(train_meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(
        epoch + 1, args['num_epochs'], args['metric_name'], train_score))
Example #15
def run_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels = labels.cuda()
            logits = predict(args, model, bg)
            if isinstance(logits, tuple):
                logits = logits[0]
            eval_meter.update(logits, labels, masks)

        test_score = eval_meter.compute_metric(args['metric_name'])
        score_mean = round(np.mean(test_score), 4)

        for t in range(12):  # one score per task (12 tasks in this benchmark)
            score = test_score[t]
            print(f"T{t:02d} {score:.4f}|", end="")

        print(f"score_mean: {score_mean}", end="")
        print()

    return test_score
Example #16
    def observe(self, data_loader, loss_criterion, task_i, args):

        if task_i != self.old_task:
            self.observed_tasks.append(task_i)
            self.old_task = task_i

        # Update ring buffer storing examples from current task
        if task_i >= len(self.memory_data):
            tmask = np.random.choice(self.mask, self.n_memories, replace=False)
            self.memory_data.append(tmask)

        # compute gradient on previous tasks
        for old_task_i in self.observed_tasks[:-1]:
            self.net.zero_grad()
            # fwd/bwd on the examples in the memory

            for batch_id, batch_data in enumerate(self.data_loader):
                smiles, bg, labels, masks = batch_data
                labels, masks = labels.cuda(), masks.cuda()
                logits = predict(args, self.net, bg)

                loss = loss_criterion(logits, labels) * (masks != 0).float()
                old_task_loss = loss[:, old_task_i].mean()

                old_task_loss.backward()
                store_grad(self.net.parameters, self.grads, self.grad_dims,
                           old_task_i)

        self.net.train()
        train_meter = Meter()
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels, masks = labels.cuda(), masks.cuda()
            logits = predict(args, self.net, bg)

            # Mask non-existing labels
            loss = loss_criterion(logits, labels) * (masks != 0).float()
            loss = loss[:, task_i].mean()

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            train_meter.update(logits, labels, masks)

        train_score = np.mean(train_meter.compute_metric(args['metric_name']))

        # check if gradient violates constraints
        if len(self.observed_tasks) > 1:
            t = task_i
            # copy gradient
            store_grad(self.net.parameters, self.grads, self.grad_dims, t)
            indx = torch.cuda.LongTensor(self.observed_tasks[:-1])
            dotp = torch.mm(self.grads[:, t].unsqueeze(0),
                            self.grads.index_select(1, indx))
            if (dotp < 0).sum() != 0:
                project2cone2(self.grads[:, t].unsqueeze(1),
                              self.grads.index_select(1, indx), self.margin)
                # copy gradients back
                overwrite_grad(self.net.parameters, self.grads[:, t],
                               self.grad_dims)

        self.optimizer.step()
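
The helpers store_grad, overwrite_grad and project2cone2 are not shown in this
snippet; they follow the reference GEM implementation. As an illustration,
store_grad flattens the current gradients into one column of the grads buffer.
The sketch below is an assumption based on that reference code, not part of
the snippet itself:

def store_grad(pp, grads, grad_dims, tid):
    # pp: callable returning model parameters; grads: (total_params, n_tasks)
    # buffer; grad_dims: number of elements per parameter; tid: task column.
    grads[:, tid].fill_(0.0)
    cnt = 0
    for param in pp():
        if param.grad is not None:
            beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
            end = sum(grad_dims[:cnt + 1])
            grads[beg:end, tid].copy_(param.grad.data.view(-1))
        cnt += 1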
Example #17
    def observe(self, data_loader, loss_criterion, task_i, args):

        self.net.train()

        if task_i != self.current_task:
            self.optimizer.zero_grad()

            self.fisher_loss[self.current_task] = []
            self.fisher_att[self.current_task] = []
            self.optpar[self.current_task] = []

            for batch_id, batch_data in enumerate(self.data_loader):
                smiles, bg, labels, masks = batch_data
                labels, masks = labels.cuda(), masks.cuda()
                logits, elist = predict(args, self.net, bg)

                loss = loss_criterion(logits, labels) * (masks != 0).float()
                loss = loss[:, self.current_task].mean()
                loss.backward(retain_graph=True)

                # Squared gradients of the task loss approximate the Fisher
                # information; also snapshot the parameters for the penalty.
                for p in self.net.parameters():
                    if p.grad is None:
                        continue
                    self.fisher_loss[self.current_task].append(p.grad.data.clone().pow(2))
                    self.optpar[self.current_task].append(p.data.clone())

                if isinstance(elist, list):
                    eloss = torch.norm(elist[0])
                else:
                    eloss = elist
                eloss.backward()
                # Squared gradients of the embedding/attention term give the
                # second importance estimate.
                for p in self.net.parameters():
                    if p.grad is None:
                        continue
                    self.fisher_att[self.current_task].append(p.grad.data.clone().pow(2))

            self.current_task = task_i

        train_meter = Meter()
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels, masks = labels.cuda(), masks.cuda()
            logits, elist = predict(args, self.net, bg)

            # Mask non-existing labels
            loss = loss_criterion(logits, labels) * (masks != 0).float()
            loss = loss[:, task_i].mean()

            self.optimizer.zero_grad()
            loss.backward(retain_graph=True)

            # L1 norm of the gradients, used below as an extra regularizer
            grad_norm = 0
            for p in self.net.parameters():
                if p.grad is None:
                    continue
                grad_norm += torch.norm(p.grad.data.clone(), p=1)

            # Quadratic penalty towards the parameters stored for each previous
            # task, weighted by the two importance estimates.
            for tt in range(task_i):
                i = 0
                for p in self.net.parameters():
                    if p.grad is None or i >= len(self.fisher_loss[tt]):
                        continue
                    weight = (self.lambda_l * self.fisher_loss[tt][i] +
                              self.lambda_t * self.fisher_att[tt][i])
                    penalty = weight * (p - self.optpar[tt][i]).pow(2)
                    loss = loss + penalty.sum()
                    i += 1

            loss = loss + self.beta * grad_norm
            loss.backward()
            self.optimizer.step()
            train_meter.update(logits, labels, masks)

        train_score = np.mean(train_meter.compute_metric(args['metric_name']))