Example #1
Node-classification training loop on a single graph: CrossEntropyLoss is computed on the nodes selected by train_mask, and accuracy on the train/val/test masks is printed every epoch.
def train(model, dataset):
    graph = dataset[0]

    optimizer = nn.AdamW(model.parameters(), lr=0.01)
    loss_function = nn.CrossEntropyLoss()

    train_mask = tensor2jit(graph.train_mask)
    test_mask = tensor2jit(graph.test_mask)
    val_mask = tensor2jit(graph.val_mask)
    labels = tensor2jit(graph.y)

    for epoch in range(100):
        model.train()
        output = model(graph)
        loss = loss_function(output[train_mask], labels[train_mask])
        optimizer.step(loss)  # Jittor: step(loss) runs the backward pass and the parameter update in one call

        model.eval()
        with jt.no_grad():
            output = model(graph)
            pred = output.argmax(1)[0]  # Jittor's argmax returns (indices, values); take the indices
            train_acc = (pred[train_mask] == labels[train_mask]).float().mean()
            val_acc = (pred[val_mask] == labels[val_mask]).float().mean()
            test_acc = (pred[test_mask] == labels[test_mask]).float().mean()

        print(
            f"Epoch:{epoch}, loss:{loss:.3f}, val_acc:{val_acc:.3f}, test_acc:{test_acc:.3f}"
        )
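
The function above relies on project-specific pieces (tensor2jit, the graph model and dataset). A self-contained sketch of the same CrossEntropyLoss + optimizer.step(loss) pattern on made-up data, assuming only stock Jittor (all names and shapes below are for illustration only):

import numpy as np
import jittor as jt
from jittor import nn

model = nn.Linear(16, 3)                           # toy 3-class classifier standing in for the real model
optimizer = nn.AdamW(model.parameters(), lr=0.01)
loss_function = nn.CrossEntropyLoss()

x = jt.random((32, 16))                            # dummy features
labels = jt.array(np.random.randint(3, size=32))   # dummy integer labels

for epoch in range(5):
    output = model(x)
    loss = loss_function(output, labels)
    optimizer.step(loss)                           # backward pass and parameter update happen inside step(loss)
    print(f"epoch {epoch}: loss {loss.item():.3f}")
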
Example #2
Unit test checking that Jittor's CrossEntropyLoss (jnn) matches PyTorch's (tnn) on random logits and integer targets.
def test_cross_entropy_loss(self):
    jt_loss = jnn.CrossEntropyLoss()
    tc_loss = tnn.CrossEntropyLoss()
    output = np.random.randn(10, 10).astype(np.float32)
    target = np.random.randint(10, size=(10,))
    jt_y = jt_loss(jt.array(output), jt.array(target))
    tc_y = tc_loss(torch.from_numpy(output), torch.from_numpy(target))
    assert np.allclose(jt_y.numpy(), tc_y.numpy())
Example #3
Unit test for CrossEntropyLoss with a per-class weight and ignore_index, compared against PyTorch on 4-class, 512x512 segmentation-shaped inputs.
def test_cross_entropy_weight_ignore(self):
    weight = np.random.rand(4).astype('float32')
    jt_loss = jnn.CrossEntropyLoss(weight=jt.array(weight), ignore_index=1)
    tc_loss = tnn.CrossEntropyLoss(weight=torch.from_numpy(weight),
                                   ignore_index=1)
    output = np.random.rand(32, 4, 512, 512).astype(np.float32)
    target = np.random.randint(4, size=(32, 512, 512))
    jt_y = jt_loss(jt.array(output), jt.array(target))
    tc_y = tc_loss(torch.from_numpy(output), torch.from_numpy(target))
    assert np.allclose(jt_y.numpy(), tc_y.numpy())
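
For reference, a rough NumPy sketch of the quantity both libraries are expected to agree on in the two tests above: per-sample negative log-softmax, scaled by the per-class weight, with samples equal to ignore_index dropped, and the default "mean" reduction normalised by the sum of sample weights. Shown for 2-D (N, C) inputs; the 4-D case in Example #3 is the same computation applied per pixel.

import numpy as np

def manual_cross_entropy(logits, target, weight=None, ignore_index=None):
    # logits: (N, C) raw scores, target: (N,) integer class labels
    logits = logits - logits.max(axis=1, keepdims=True)                     # for numerical stability
    log_softmax = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    nll = -log_softmax[np.arange(len(target)), target]                      # per-sample loss
    w = np.ones(logits.shape[1]) if weight is None else np.asarray(weight)
    sample_w = w[target]
    if ignore_index is not None:
        sample_w = np.where(target == ignore_index, 0.0, sample_w)          # ignored samples contribute nothing
    return (nll * sample_w).sum() / sample_w.sum()                          # weighted "mean" reduction
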
Example #4
ImageNet validation loop for a pretrained ViT (vit_base_patch16_224): CrossEntropyLoss plus top-1/top-5 accuracy, tracked with AverageMeter.
def validate():
    bs = 256
    # create model
    model = create_model('vit_base_patch16_224',
                         pretrained=True,
                         num_classes=1000)
    criterion = nn.CrossEntropyLoss()

    dataset = create_val_dataset(root='/data/imagenet',
                                 batch_size=bs,
                                 num_workers=4,
                                 img_size=224)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    with jt.no_grad():
        # warm-up forward pass on dummy input before the timed loop
        input = jt.random((bs, 3, 224, 224))
        model(input)

        end = time.time()
        for batch_idx, (input, target) in enumerate(dataset):
            # dataset.display_worker_status()
            batch_size = input.shape[0]
            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss, batch_size)
            top1.update(acc1, batch_size)
            top5.update(acc5, batch_size)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if batch_idx % 10 == 0:
                # jt.sync_all(True)
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f})  '
                    'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f})  '
                    'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
                        batch_idx,
                        len(dataset),
                        batch_time=batch_time,
                        rate_avg=batch_size / batch_time.avg,
                        loss=losses,
                        top1=top1,
                        top5=top5))

            # if batch_idx>50:break

    top1a, top5a = top1.avg, top5.avg
    top1 = round(top1a, 4)
    top1_err = round(100 - top1a, 4)
    top5 = round(top5a, 4)
    top5_err = round(100 - top5a, 4)

    print(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
        top1, top1_err, top5, top5_err))
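
AverageMeter and accuracy are not defined in this snippet; they are the usual ImageNet-validation helpers. A typical implementation (an assumption, the original project's versions may differ):

import numpy as np

class AverageMeter:
    """Tracks the latest value and running average of a scalar."""
    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0.0
    def update(self, val, n=1):
        val = val.item() if hasattr(val, "item") else float(val)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent; output is (N, C) scores, target is (N,) labels."""
    output = output.numpy() if hasattr(output, "numpy") else np.asarray(output)
    target = target.numpy() if hasattr(target, "numpy") else np.asarray(target)
    maxk = max(topk)
    pred = np.argsort(-output, axis=1)[:, :maxk]       # indices of the top-maxk classes per sample
    correct = pred == target[:, None]
    return [100.0 * correct[:, :k].any(axis=1).mean() for k in topk]
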
Example #5
GAN training setup: BCE, cross-entropy and MSE losses, generator/encoder/discriminator construction, and an MNIST dataloader.
b1 = 0.5
b2 = 0.9
decay = (2.5 * 1e-05)
n_skip_iter = args.n_critic
img_size = args.img_size
channels = 1
latent_dim = args.latent_dim
n_c = 10
betan = 10
betac = 10
wass_metric = args.wass_flag
print(wass_metric)
x_shape = (channels, img_size, img_size)

bce_loss = nn.BCELoss()
xe_loss = nn.CrossEntropyLoss()
mse_loss = nn.MSELoss()

# Initialize generator and discriminator
generator = Generator_CNN(latent_dim, n_c, x_shape)
encoder = Encoder_CNN(latent_dim, n_c)
discriminator = Discriminator_CNN(wass_metric=wass_metric)

# Configure data loader
transform = transform.Compose([
    transform.Resize(size=img_size),
    transform.Gray(),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=batch_size,
                                                  shuffle=True)
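
Note that batch_size and args come from the surrounding script. A minimal, self-contained version of the same MNIST loader, with assumed values for the missing arguments:

import jittor.transform as transform
from jittor.dataset.mnist import MNIST

img_size = 28      # assumed; the original script reads this from args.img_size
batch_size = 64    # assumed; defined elsewhere in the original script

tfm = transform.Compose([
    transform.Resize(size=img_size),
    transform.Gray(),
])
dataloader = MNIST(train=True, transform=tfm).set_attrs(batch_size=batch_size, shuffle=True)

for imgs, labels in dataloader:
    # imgs: a batch of grayscale digit images, labels: their integer class ids
    break
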
Example #6
ViT classifier training loop (two classes) with CrossEntropyLoss and Adam.
                            num_workers=num_workers)

print(len(train_data))
print(len(valid_data))

model = ViT(dim=128,
            image_size=224,
            patch_size=32,
            num_classes=2,
            depth=12,
            heads=8,
            mlp_dim=128)

### Training
# loss function
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)

for epoch in range(epochs):
    epoch_loss = 0
    epoch_accuracy = 0
    for data, label in tqdm(train_data):
        output = model(data)
        loss = criterion(output, label)
        optimizer.step(loss)

        acc = (output.argmax(dim=1)[0] == label).float().mean()  # [0] picks indices from Jittor's (indices, values) argmax
        epoch_accuracy += acc / len(train_data)
        epoch_loss += loss / len(train_data)
Example #7
Tail of a discriminator definition, followed by InfoGAN-style losses: MSE adversarial loss, categorical CrossEntropyLoss and continuous MSE loss.
        for m in self.modules():
            weights_init_normal(m)

    def execute(self, img):
        out = self.conv_blocks(img)
        out = out.view((out.shape[0], (-1)))
        validity = self.adv_layer(out)
        label = self.aux_layer(out)
        latent_code = self.latent_layer(out)
        return (validity, label, latent_code)


# Loss functions
adversarial_loss = nn.MSELoss()
categorical_loss = nn.CrossEntropyLoss()
continuous_loss = nn.MSELoss()

# Loss weights
lambda_cat = 1
lambda_con = 0.1

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
from jittor.dataset.mnist import MNIST
import jittor.transform as transform

transform = transform.Compose([
Example #8
Image-grid saving helper, plus an ACGAN-style pairing of a BCE adversarial loss with an auxiliary CrossEntropyLoss, and the MNIST dataloader.
        img_all.append(np.zeros((C, padding, img_all[0].shape[2])))
    img = np.concatenate(img_all, 1)
    img = np.concatenate([np.zeros((C, padding, img.shape[2])), img], 1)
    img = np.concatenate([np.zeros((C, img.shape[1], padding)), img], 2)
    min_ = img.min()
    max_ = img.max()
    img = (img - min_) / (max_ - min_) * 255
    img = img.transpose((1, 2, 0))
    if C == 3:
        img = img[:, :, ::-1]
    cv2.imwrite(path,img)


# Loss functions
adversarial_loss = nn.BCELoss()
auxiliary_loss = nn.CrossEntropyLoss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
from jittor.dataset.mnist import MNIST
import jittor.transform as transform

transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader = MNIST(train=True, transform=transform).set_attrs(batch_size=opt.batch_size, shuffle=True)
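
How these two losses are typically combined in an ACGAN-style discriminator step, sketched here on dummy tensors (the real script scores its own Generator/Discriminator outputs, and the averaging by 2 follows the common ACGAN recipe, so treat this as an assumption):

import numpy as np
import jittor as jt
from jittor import nn

adversarial_loss = nn.BCELoss()
auxiliary_loss = nn.CrossEntropyLoss()

batch = 16
validity = jt.random((batch, 1))                      # stand-in for D's real/fake probability
class_logits = jt.random((batch, 10))                 # stand-in for D's class prediction
valid = jt.ones((batch, 1))                           # adversarial target: "these images are real"
labels = jt.array(np.random.randint(10, size=batch))  # ground-truth digit labels

d_real_loss = (adversarial_loss(validity, valid) + auxiliary_loss(class_logits, labels)) / 2
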
Example #9
The same image-saving helper, here with an MSE adversarial loss and a CrossEntropyLoss task loss for a generator/discriminator/classifier setup.
        img_all.append(np.zeros((C, padding, img_all[0].shape[2])))
    img = np.concatenate(img_all, 1)
    img = np.concatenate([np.zeros((C, padding, img.shape[2])), img], 1)
    img = np.concatenate([np.zeros((C, img.shape[1], padding)), img], 2)
    min_ = img.min()
    max_ = img.max()
    img = (img - min_) / (max_ - min_) * 255
    img = img.transpose((1, 2, 0))
    if C == 3:
        img = img[:, :, ::-1]
    cv2.imwrite(path, img)


# Loss function
adversarial_loss = nn.MSELoss()
task_loss = nn.CrossEntropyLoss()

# Loss weights
lambda_adv = 1
lambda_task = 0.1

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
classifier = Classifier()

# Configure data loader
transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])