def Eval_I2P():
    eval_loss = 0
    eval_acc = 0
    eval_data = I2P_data(os.path.join(dataroot, 'valid'), dataset_valid_I2P)
    data_eval = torch.utils.data.DataLoader(eval_data,
                                            batch_size=opt.batchSize,
                                            pin_memory=True,
                                            num_workers=4,
                                            shuffle=True)
    with torch.no_grad():
        model.eval()
        for i, (Inputf, Inputr, Inputt, Inputa, Label) in enumerate(data_eval):
            Inputf, Inputr, Inputt, Inputa, Label = Inputf.to(
                device), Inputr.to(device), Inputt.to(device), Inputa.to(
                    device), Label.to(device)
            # L2-normalize the raw outputs along the class dimension, then keep
            # only logit columns 0-1 and 4-5 for the 4-way loss/accuracy.
            y = F.normalize(model(Inputf.float(), Inputr.float(),
                                  Inputt.float(), Inputa.float()),
                            dim=1)
            y = torch.cat((y[:, :2], y[:, 4:6]), dim=1)
            Label = Label.reshape(Inputf.shape[0])
            loss = criterion_I2P(y, Label)
            eval_loss += loss.item() * Inputf.shape[0]
            eval_acc += (y.argmax(1) == Label).sum().item()
        epoch_eval_loss = eval_loss / len(eval_data)
        epoch_eval_acc = eval_acc / len(eval_data)
    return epoch_eval_loss, epoch_eval_acc
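The torch.cat line above keeps only logit columns 0-1 and 4-5 before the loss and accuracy are computed. A minimal self-contained demo of that column selection on toy tensors (toy shapes only, not the project's data):

import torch

logits = torch.randn(3, 6)   # toy batch of 3 samples, 6 raw logits each
kept = torch.cat((logits[:, :2], logits[:, 4:6]), dim=1)
print(kept.shape)            # torch.Size([3, 4])

# The same selection can be written as a single fancy index:
assert torch.equal(kept, logits[:, [0, 1, 4, 5]])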
Example #2
def train_model():
    batch_loss = 0
    batch_acc = 0
    batch_loss_list = []
    path = opt.Training_dataroot
    train_data = I2P_data(path)
    data_train = torch.utils.data.DataLoader(train_data,
                                             batch_size=opt.batchSize,
                                             pin_memory=True,
                                             num_workers=4,
                                             shuffle=True)
    task_4_model.train()
    for i, (Dic, Inputf, Inputr, Inputt, Inputa,
            Label) in enumerate(data_train):
        optimizer.zero_grad()
        Inputf, Inputr, Inputt, Inputa, Label = Inputf.to(device), Inputr.to(
            device), Inputt.to(device), Inputa.to(device), Label.to(device)
        y = task_4_model(Inputf.float(), Inputr.float(), Inputt.float(),
                         Inputa.float())
        Label = Label.reshape(Inputf.shape[0])
        loss = criterion(y, Label)
        batch_loss += loss.item() * Inputf.shape[0]
        batch_loss_list.append(loss.item() * Inputf.shape[0] / len(train_data))
        loss.backward()
        #optimizer.module.step()
        optimizer.step()
        batch_acc += (y.argmax(1) == Label).sum().item()
    epoch_loss = batch_loss / len(train_data)
    epoch_acc = batch_acc / len(train_data)
    return epoch_loss, epoch_acc, np.array(batch_loss_list)
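train_model above relies on module-level objects it never defines (task_4_model, criterion, optimizer, device, opt, I2P_data). A minimal sketch of the kind of setup it expects; the placeholder model, loss, and optimizer choices below are assumptions, not the project's own definitions:

import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Placeholder network: the real task_4_model takes four input tensors
# (Inputf, Inputr, Inputt, Inputa) and returns class logits.
class DummyTask4Model(nn.Module):
    def __init__(self, in_dim=16, num_classes=6):
        super().__init__()
        self.fc = nn.Linear(in_dim * 4, num_classes)

    def forward(self, f, r, t, a):
        return self.fc(torch.cat((f, r, t, a), dim=1))

task_4_model = DummyTask4Model().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(task_4_model.parameters(), lr=1e-3)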
Example #3
def Eval():
    eval_loss = 0
    eval_acc = 0
    path = opt.Validating_dataroot
    eval_data = I2P_data(path)
    data_eval = torch.utils.data.DataLoader(eval_data,
                                            batch_size=opt.batchSize,
                                            pin_memory=True,
                                            num_workers=4,
                                            shuffle=True)
    failed_shape = []
    with torch.no_grad():
        task_4_model.eval()
        for i, (Dic, Inputf, Inputr, Inputt, Inputa,
                Label) in enumerate(data_eval):
            Inputf, Inputr, Inputt, Inputa, Label = Inputf.to(
                device), Inputr.to(device), Inputt.to(device), Inputa.to(
                    device), Label.to(device)
            y = task_4_model(Inputf.float(), Inputr.float(), Inputt.float(),
                             Inputa.float())
            Label = Label.reshape(Inputf.shape[0])
            loss = criterion(y, Label)
            # Record the identifiers (Dic) of the samples this batch misclassified.
            Dic = np.array(Dic)
            Dic = Dic[(y.argmax(1) != Label).cpu()]
            for dic in Dic:
                failed_shape.append(dic)
            eval_loss += loss.item() * Inputf.shape[0]
            eval_acc += (y.argmax(1) == Label).sum().item()
        epoch_eval_loss = eval_loss / len(eval_data)
        epoch_eval_acc = eval_acc / len(eval_data)
    return failed_shape, epoch_eval_loss, epoch_eval_acc
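One possible way to consume Eval's failed_shape return value, assuming the Dic entries are string identifiers of the evaluated samples; dumping them to JSON is illustrative and not part of the source:

import json

failed_shape, val_loss, val_acc = Eval()
print('val loss %.4f, val acc %.4f, %d misclassified samples'
      % (val_loss, val_acc, len(failed_shape)))

# Persist the misclassified identifiers for later inspection.
with open('failed_shapes.json', 'w') as fp:
    json.dump([str(d) for d in failed_shape], fp, indent=2)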
Example #4
def Train():
    train_loss = 0
    train_acc = 0
    train_data = I2P_data(os.path.join(dataroot, 'train'), dataset_train_I2P)
    data_train = torch.utils.data.DataLoader(train_data,
                                             batch_size=opt.batchSize,
                                             pin_memory=True,
                                             num_workers=8,
                                             shuffle=True)
    model.train()
    for i, (Inputf, Inputr, Inputt, Inputa, Label) in enumerate(data_train):
        optimizer.zero_grad()  # reset gradients accumulated from the previous batch
        Inputf, Inputr, Inputt, Inputa, Label = Inputf.to(device), Inputr.to(
            device), Inputt.to(device), Inputa.to(device), Label.to(device)
        # L2-normalize the raw outputs along the class dimension before the loss.
        y = F.normalize(model(Inputf.float(), Inputr.float(), Inputt.float(),
                              Inputa.float()),
                        dim=1)
        Label = Label.reshape(Inputf.shape[0])
        loss = criterion_I2P(y, Label)
        train_loss += loss.item() * Inputf.shape[0]
        train_acc += (y.argmax(1) == Label).sum().item()
        loss.backward()
        optimizer.step()
    epoch_train_loss = train_loss / len(train_data)
    epoch_train_acc = train_acc / len(train_data)
    return epoch_train_loss, epoch_train_acc
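A hypothetical driver loop tying Train and Eval_I2P together; the epoch count, checkpoint filename, and best-accuracy bookkeeping are illustrative assumptions (it also assumes the same module-level model used by both functions), not something shown in the source:

import torch

num_epochs = 50          # hypothetical; the real value would come from opt
best_acc = 0.0
for epoch in range(num_epochs):
    train_loss, train_acc = Train()
    val_loss, val_acc = Eval_I2P()
    print('epoch %d: train %.4f/%.4f  valid %.4f/%.4f'
          % (epoch, train_loss, train_acc, val_loss, val_acc))
    if val_acc > best_acc:   # keep the checkpoint with the best validation accuracy
        best_acc = val_acc
        torch.save(model.state_dict(), 'best_I2P.pth')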