Example #1
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        atom_feats = bg.ndata.pop(args['atom_data_field'])
        atom_feats, labels, masks = atom_feats.to(args['device']), \
                                    labels.to(args['device']), \
                                    masks.to(args['device'])
        logits = model(bg, atom_feats)
        # Mask non-existing labels
        loss = (loss_criterion(logits, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
            epoch + 1, args['num_epochs'], batch_id + 1, len(data_loader),
            loss.item()))
        train_meter.update(logits, labels, masks)
    train_score = np.mean(train_meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(epoch + 1,
                                                       args['num_epochs'],
                                                       args['metric_name'],
                                                       train_score))
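Most of these snippets lean on a `Meter` utility that buffers predictions batch by batch and reports a metric averaged over tasks. The sketch below is a minimal reconstruction of the interface the snippets assume, not the library implementation: constructor signatures vary across the examples (some take normalization statistics, some a phase/epoch tag), and only `roc_auc` and `rmse` are handled here.

import numpy as np
import torch
from sklearn.metrics import roc_auc_score


class Meter:
    """Per-epoch metric accumulator (minimal sketch)."""

    def __init__(self):
        self.y_pred, self.y_true, self.masks = [], [], []

    def update(self, y_pred, y_true, mask=None):
        # Detach and move to CPU so the autograd graph is not retained.
        self.y_pred.append(y_pred.detach().cpu())
        self.y_true.append(y_true.detach().cpu())
        if mask is not None:
            self.masks.append(mask.detach().cpu())

    def compute_metric(self, metric_name):
        y_pred = torch.cat(self.y_pred, dim=0)
        y_true = torch.cat(self.y_true, dim=0)
        mask = (torch.cat(self.masks, dim=0) if self.masks
                else torch.ones_like(y_true))
        scores = []
        for task in range(y_true.shape[1]):
            keep = mask[:, task] != 0  # skip non-existing labels
            if metric_name == 'roc_auc':
                scores.append(roc_auc_score(
                    y_true[keep, task], torch.sigmoid(y_pred[keep, task])))
            elif metric_name == 'rmse':
                diff = y_pred[keep, task] - y_true[keep, task]
                scores.append(torch.sqrt((diff ** 2).mean()).item())
        return scores

    def roc_auc_averaged_over_tasks(self):
        return np.mean(self.compute_metric('roc_auc'))

Example #2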
    def _iterate(self, epoch, phase):
        meter = Meter(phase, epoch)
        start = time.strftime('%H:%M:%S')
        print("Starting epoch: {} | phase: {} | Time: {}".format(
            epoch + 1, phase, start))
        dl = self.dataloaders[phase]
        running_loss = 0.0
        total_steps = len(dl)
        self.optimizer.zero_grad()
        for itr, sample in enumerate(tqdm(dl)):
            images = sample['image']
            targets = sample['mask']
            loss, outputs = self._forward(images, targets)
            loss /= self.accumulation_steps
            if phase == 'train':
                loss.backward()
                if (itr + 1) % self.accumulation_steps == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
            running_loss += loss.item()
            outputs = outputs.detach().cpu()
            meter.update(targets, outputs)
        epoch_loss = (running_loss * self.accumulation_steps) / total_steps
        dice, iou = epoch_log(phase, epoch, epoch_loss, meter, start)
        visualize(sample, outputs, epoch, phase)

        self.losses[phase].append(epoch_loss)
        self.dice_scores[phase].append(dice)
        self.iou_scores[phase].append(iou)

        return epoch_loss
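Example #2 uses gradient accumulation: the loss is divided by `accumulation_steps` and `optimizer.step()` only fires every `accumulation_steps` mini-batches, which emulates a batch that many times larger without the extra memory. The pattern in isolation (`model`, `loader`, `criterion`, `optimizer` stand in for your own objects):

accumulation_steps = 4
optimizer.zero_grad()
for step, (inputs, targets) in enumerate(loader):
    loss = criterion(model(inputs), targets) / accumulation_steps
    loss.backward()                              # gradients accumulate in .grad
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()                         # apply the accumulated gradient
        optimizer.zero_grad()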
Example #3
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer):
    model.train()
    train_meter = Meter(args['train_mean'], args['train_std'])
    epoch_loss = 0
    for batch_id, batch_data in enumerate(data_loader):
        indices, ligand_mols, protein_mols, bg, labels = batch_data
        labels, bg = labels.to(args['device']), bg.to(args['device'])
        prediction = model(bg)
        loss = loss_criterion(prediction, (labels - args['train_mean']) /
                              args['train_std'])
        epoch_loss += loss.item() * len(indices)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels)
    avg_loss = epoch_loss / len(data_loader.dataset)
    total_scores = {
        metric: train_meter.compute_metric(metric)
        for metric in args['metrics']
    }
    msg = 'epoch {:d}/{:d}, training | loss {:.4f}'.format(
        epoch + 1, args['num_epochs'], avg_loss)
    msg = update_msg_from_scores(msg, total_scores)
    print(msg)
Example #4
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            labels = labels.to(args['device'])
            prediction = regress(args, model, bg)
            eval_meter.update(prediction, labels, masks)
        total_score = np.mean(eval_meter.compute_metric(args['metric_name']))
    return total_score
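This example (and Example #8 below) calls a `regress` helper defined elsewhere in the source file. Judging from the explicit feature handling in the neighboring snippets, a plausible minimal version would look like this; it is a hypothetical reconstruction, not the original:

def regress(args, model, bg):
    # Hypothetical helper: pop the node features off the batched graph,
    # move them to the target device and run the model.
    atom_feats = bg.ndata.pop(args['atom_data_field'])
    atom_feats = atom_feats.to(args['device'])
    return model(bg, atom_feats)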
Example #5
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, mask = batch_data
            atom_feats = bg.ndata.pop(args['atom_data_field'])
            atom_feats, labels = atom_feats.to(args['device']), labels.to(args['device'])
            logits = model(bg, atom_feats)
            eval_meter.update(logits, labels, mask)
    return eval_meter.roc_auc_averaged_over_tasks()
Example #6
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            smiles, bg, labels, masks = batch_data
            atom_feats = bg.ndata.pop(args['atom_data_field'])
            atom_feats, labels = atom_feats.to(args['device']), labels.to(
                args['device'])
            logits = model(bg, atom_feats)
            eval_meter.update(logits, labels, masks)
    return np.mean(eval_meter.compute_metric(args['metric_name']))
Example #7
def run_an_eval_epoch(args, model, data_loader):
    model.eval()
    eval_meter = Meter(args['train_mean'], args['train_std'])
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            indices, ligand_mols, protein_mols, bg, labels = batch_data
            labels, bg = labels.to(args['device']), bg.to(args['device'])
            prediction = model(bg)
            eval_meter.update(prediction, labels)
    total_scores = {
        metric: eval_meter.compute_metric(metric)
        for metric in args['metrics']
    }
    return total_scores
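Examples #3 and #7 train and evaluate against standardized labels, `(labels - train_mean) / train_std`, passing the statistics into `Meter` so scores come out on the original scale. If raw-scale predictions are needed downstream, they must be mapped back by hand; a one-line sketch, assuming the same `args` keys:

raw_prediction = model(bg) * args['train_std'] + args['train_mean']  # undo standardization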
Example #8
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion,
                      optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, masks = batch_data
        labels, masks = labels.to(args['device']), masks.to(args['device'])
        prediction = regress(args, model, bg)
        loss = (loss_criterion(prediction, labels) *
                (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels, masks)
    total_score = np.mean(train_meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(epoch + 1,
                                                       args['num_epochs'],
                                                       args['metric_name'],
                                                       total_score))
Example #9
def evaluate(data_loader):
    meter = Meter('eval', 0)
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for idx, (img, segm) in enumerate(data_loader):
            img = img.cuda() 
            segm = segm.cuda() 
            outputs = model(img) 
            loss = criterion(outputs, segm)
            outputs = outputs.detach().cpu()
            segm = segm.detach().cpu() 
            meter.update(segm, outputs) 
            total_loss += loss.item()
        dices, iou = meter.get_metrics() 
        dice, dice_neg, dice_pos = dices
        torch.cuda.empty_cache()

        return total_loss/len(data_loader), iou, dice, dice_neg, dice_pos
Example #10
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    model.train()
    train_meter = Meter()
    for batch_id, batch_data in enumerate(data_loader):
        smiles, bg, labels, mask = batch_data
        atom_feats = bg.ndata.pop(args['atom_data_field'])
        atom_feats, labels, mask = atom_feats.to(args['device']), \
                                   labels.to(args['device']), \
                                   mask.to(args['device'])
        logits = model(bg, atom_feats)
        # Mask non-existing labels
        loss = (loss_criterion(logits, labels) * (mask != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
            epoch + 1, args['num_epochs'], batch_id + 1, len(data_loader), loss.item()))
        train_meter.update(logits, labels, mask)
    train_roc_auc = train_meter.roc_auc_averaged_over_tasks()
    print('epoch {:d}/{:d}, training roc-auc score {:.4f}'.format(
        epoch + 1, args['num_epochs'], train_roc_auc))
Example #11
print('Training models...')
for it in range(50000):
    # data
    start_time = time.time()

    voice, voice_label = next(voice_iterator)
    face, face_label = next(face_iterator)
    noise = 0.05 * torch.randn(DATASET_PARAMETERS['batch_size'], 64, 1, 1)

    # use GPU or not
    if NETWORKS_PARAMETERS['GPU']:
        voice, voice_label = voice.cuda(), voice_label.cuda()
        face, face_label = face.cuda(), face_label.cuda()
        real_label, fake_label = real_label.cuda(), fake_label.cuda()
        noise = noise.cuda()
    data_time.update(time.time() - start_time)

    # get embeddings and generated faces
    embeddings = e_net(voice)
    embeddings = F.normalize(embeddings)
    # introduce some perturbations
    embeddings = embeddings + noise
    embeddings = F.normalize(embeddings)
    fake = g_net(embeddings)

    # Discriminator
    f_optimizer.zero_grad()
    d_optimizer.zero_grad()
    c_optimizer.zero_grad()
    real_score_out = d_net(f_net(face))
    fake_score_out = d_net(f_net(fake.detach()))
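The snippet is cut off before the discriminator losses are formed. Example #13 hints at a hinge formulation in its comments; with the standard hinge GAN objective, the continuation would look roughly like this sketch (not the original code, and the auxiliary classifier terms are omitted):

    D_real_loss = F.relu(1.0 - real_score_out).mean()  # push real scores above +1
    D_fake_loss = F.relu(1.0 + fake_score_out).mean()  # push fake scores below -1
    (D_real_loss + D_fake_loss).backward()
    f_optimizer.step()
    d_optimizer.step()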
Example #12
def main(args):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    batch_size = 128
    learning_rate = 0.001
    num_epochs = 100
    set_random_seed()

    # Interchangeable with other Dataset
    dataset = Tox21()
    atom_data_field = 'h'

    trainset, valset, testset = split_dataset(dataset, [0.8, 0.1, 0.1])
    train_loader = DataLoader(
        trainset, batch_size=batch_size, collate_fn=collate_molgraphs)
    val_loader = DataLoader(
        valset, batch_size=batch_size, collate_fn=collate_molgraphs)
    test_loader = DataLoader(
        testset, batch_size=batch_size, collate_fn=collate_molgraphs)

    if args.pre_trained:
        num_epochs = 0
        model = model_zoo.chem.load_pretrained('GCN_Tox21')
    else:
        # Interchangeable with other models
        model = model_zoo.chem.GCNClassifier(in_feats=74,
                                             gcn_hidden_feats=[64, 64],
                                             n_tasks=dataset.n_tasks)
        loss_criterion = BCEWithLogitsLoss(pos_weight=torch.tensor(
            dataset.task_pos_weights).to(device), reduction='none')
        optimizer = Adam(model.parameters(), lr=learning_rate)
        stopper = EarlyStopping(patience=10)
    model.to(device)

    for epoch in range(num_epochs):
        model.train()
        print('Start training')
        train_meter = Meter()
        for batch_id, batch_data in enumerate(train_loader):
            smiles, bg, labels, mask = batch_data
            atom_feats = bg.ndata.pop(atom_data_field)
            atom_feats, labels, mask = atom_feats.to(device), labels.to(device), mask.to(device)
            logits = model(bg, atom_feats)
            # Mask non-existing labels
            loss = (loss_criterion(logits, labels)
                    * (mask != 0).float()).mean()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
                epoch + 1, num_epochs, batch_id + 1, len(train_loader), loss.item()))
            train_meter.update(logits, labels, mask)
        train_roc_auc = train_meter.roc_auc_averaged_over_tasks()
        print('epoch {:d}/{:d}, training roc-auc score {:.4f}'.format(
            epoch + 1, num_epochs, train_roc_auc))
        
        val_meter = Meter()
        model.eval()
        with torch.no_grad():
            for batch_id, batch_data in enumerate(val_loader):
                smiles, bg, labels, mask = batch_data
                atom_feats = bg.ndata.pop(atom_data_field)
                atom_feats, labels = atom_feats.to(device), labels.to(device)
                logits = model(bg, atom_feats)
                val_meter.update(logits, labels, mask)
        
        val_roc_auc = val_meter.roc_auc_averaged_over_tasks()
        if stopper.step(val_roc_auc, model):
            break

        print('epoch {:d}/{:d}, validation roc-auc score {:.4f}, best validation roc-auc score {:.4f}'.format(
            epoch + 1, num_epochs, val_roc_auc, stopper.best_score))

    test_meter = Meter()
    model.eval()
    with torch.no_grad():
        for batch_id, batch_data in enumerate(test_loader):
            smiles, bg, labels, mask = batch_data
            atom_feats = bg.ndata.pop(atom_data_field)
            atom_feats, labels = atom_feats.to(device), labels.to(device)
            logits = model(bg, atom_feats)
            test_meter.update(logits, labels, mask)
    print('test roc-auc score {:.4f}'.format(test_meter.roc_auc_averaged_over_tasks()))
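The `EarlyStopping(patience=10)` helper above exposes a `step(score, model)` method that returns True once the validation score has failed to improve for `patience` consecutive epochs, and records the best score seen. A minimal sketch of that interface (the real helper also checkpoints and can restore the best weights; `filename` here is an illustrative default):

import torch


class EarlyStopping:
    """Stop training once a higher-is-better score plateaus (minimal sketch)."""

    def __init__(self, patience=10, filename='early_stop.pth'):
        self.patience = patience
        self.filename = filename
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def step(self, score, model):
        # Higher is better, e.g. validation roc-auc.
        if self.best_score is None or score > self.best_score:
            self.best_score = score
            torch.save(model.state_dict(), self.filename)  # keep best weights
            self.counter = 0
        else:
            self.counter += 1
            self.early_stop = self.counter >= self.patience
        return self.early_stop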
Example #13
    # D_fake_loss = F.relu(fake_score_out).mean()                  # hinge loss
    # fake_id_label_out = f1_net(fake_face)
    # fake_emotion_label_out = f2_net(fake_face)
    # C1_fake_loss = F.nll_loss(F.log_softmax(fake_id_label_out, dim=1), voice_identity_label)
    # C2_fake_loss = F.nll_loss(F.log_softmax(fake_emotion_label_out, dim=1), voice_emotion_label)

    (D_fake_loss + D_real_loss + C1_real_loss + C2_real_loss).backward()
    d1_optimizer.step()
    d2_optimizer.step()
    f1_optimizer.step()
    f2_optimizer.step()
    c1_optimizer.step()
    c2_optimizer.step()

    #   ---------------------------------------------
    D_real.update(D_real_loss.item())
    D_fake.update(D_fake_loss.item())
    C1_real.update(C1_real_loss.item())
    C2_real.update(C2_real_loss.item())
    #   ---------------------------------------------

    #### Generator #####
    g_optimizer.zero_grad()
    # fake_score_out = d_net(fake_face, voice_EM_label_)   # conditional input to D: voice_EM_label_, conditioned on emotion
    fake_score_out_1 = d1_net(f1_net(fake_face))  # unconditional input to D
    fake_score_out_2 = d2_net(f2_net(fake_face))
    fake_id_label_out = c1_net(f1_net(fake_face))
    fake_emotion_label_out = c2_net(f2_net(fake_face))
    GC1_fake_loss = F.nll_loss(F.log_softmax(fake_id_label_out, dim=1),
                               voice_identity_label)  # use the real labels instead of random ones?
    GC2_fake_loss = F.nll_loss(F.log_softmax(fake_emotion_label_out, dim=1),
                               voice_emotion_label)
Example #14
        if EVAL["apply_tta"]:
            outputs = TTAModel(images)
        else:
            outputs = model(images)
        if EVAL["activate"]:
            outputs = torch.sigmoid(outputs)
        if DATASET["resize"]:
            outputs = torch.nn.functional.interpolate(
                outputs,
                size=DATASET["orig_size"],
                mode='bilinear',
                align_corners=True)

        outputs = outputs.detach().cpu()
        if not EVAL["test_mode"]:
            meter.update("val", targets, outputs)

        # dump predictions as images
        outputs = (outputs > EVAL["base_threshold"]).int()  # thresholding
        outputs = torch2np(outputs)
        pic = image_dataset.label_encoder.class2color(
            outputs,
            clean_up_clusters=EVAL["drop_clusters"],
            mode="catId" if DATASET["train_on_cats"] else "trainId")
        if EVAL["images_morphing"]:
            # Add here image+mask morphing
            orig_image = open_img(image_id[0])
            alpha = 0.5
            if (TARGET == "kitti") and (orig_image.shape[:2] != pic.shape[:2]):
                orig_image = cv2.resize(
                    orig_image, (pic.shape[1], pic.shape[0]))