def test_mse_loss(self):
    """Check that Jittor's MSELoss matches torch's on identical random inputs."""
    pred = np.random.randn(10, 100).astype(np.float32)
    truth = np.random.randn(10, 100).astype(np.float32)
    loss_jt = jnn.MSELoss()(jt.array(pred), jt.array(truth))
    loss_tc = tnn.MSELoss()(torch.from_numpy(pred), torch.from_numpy(truth))
    assert np.allclose(loss_jt.numpy(), loss_tc.numpy())
# Example no. 2 (scraped fragment separator; score: 0)
            nn.Linear((opt.n_classes + int(np.prod(img_shape))), 512),
            nn.LeakyReLU(0.2), nn.Linear(512, 512), nn.Dropout(0.4),
            nn.LeakyReLU(0.2), nn.Linear(512, 512), nn.Dropout(0.4),
            nn.LeakyReLU(0.2), nn.Linear(512, 1))

    def execute(self, img, labels):
        """Score an image/label pair: flatten the image, append the label
        embedding, and run the joint vector through the MLP discriminator."""
        flat_img = img.view((img.shape[0], -1))
        label_emb = self.label_embedding(labels)
        d_in = jt.contrib.concat((flat_img, label_emb), dim=1)
        return self.model(d_in)


# Loss function: squared error.
# Usage: adversarial_loss(network_output_A, class_label_B)
# Computes: (A - B)^2
adversarial_loss = nn.MSELoss()

# Instantiate the generator and discriminator networks defined above.
generator = Generator()
discriminator = Discriminator()

# Load the MNIST dataset
from jittor.dataset.mnist import MNIST
import jittor.transform as transform
# Preprocessing pipeline: resize to opt.img_size, convert to grayscale,
# then normalize with mean 0.5 / std 0.5 (maps pixel values to [-1, 1]).
# NOTE(review): rebinding `transform` shadows the imported module name;
# later references to the module `jittor.transform` would break — confirm
# nothing below still needs the module object.
transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=opt.batch_size,
                                                  shuffle=True)
# Example no. 3 (scraped fragment separator; score: 0)
# Training hyperparameters.
b2 = 0.9  # presumably Adam beta2 — TODO confirm against the optimizer setup
decay = (2.5 * 1e-05)  # presumably a weight-decay coefficient — TODO confirm
n_skip_iter = args.n_critic  # critic steps per generator update
img_size = args.img_size
channels = 1  # single-channel (grayscale MNIST)
latent_dim = args.latent_dim
n_c = 10  # number of classes/clusters (MNIST digits)
betan = 10  # NOTE(review): loss-weight constants; exact roles not visible here
betac = 10
wass_metric = args.wass_flag  # True selects the Wasserstein objective — TODO confirm
print(wass_metric)  # debug print of the chosen metric flag
x_shape = (channels, img_size, img_size)  # (C, H, W) of generated images

# Criteria used by the training loop (selection depends on wass_metric).
bce_loss = nn.BCELoss()
xe_loss = nn.CrossEntropyLoss()
mse_loss = nn.MSELoss()

# Initialize generator, encoder and discriminator
generator = Generator_CNN(latent_dim, n_c, x_shape)
encoder = Encoder_CNN(latent_dim, n_c)
discriminator = Discriminator_CNN(wass_metric=wass_metric)

# Configure data loader: resized, grayscale MNIST, batched and shuffled.
# NOTE(review): rebinding `transform` shadows the imported module name.
transform = transform.Compose([
    transform.Resize(size=img_size),
    transform.Gray(),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=batch_size,
                                                  shuffle=True)
testdata = MNIST(train=False,
# Example no. 4 (scraped fragment separator; score: 0)
        img = np.concatenate(
            [img, img2[:, W * ncol * i:W * ncol * (i + 1), :]], axis=2)
    min_ = img.min()
    max_ = img.max()
    img = (img - min_) / (max_ - min_) * 255
    img = img.transpose((1, 2, 0))
    if C == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imwrite(path, img)


# Ensure the per-dataset output directories for samples and checkpoints exist.
os.makedirs("images/%s" % opt.dataset_name, exist_ok=True)
os.makedirs("saved_models/%s" % opt.dataset_name, exist_ok=True)

# Loss functions: MSE for the adversarial term, L1 for the pixel term.
criterion_GAN = nn.MSELoss()
criterion_pixelwise = nn.L1Loss()

# Loss weight of L1 pixel-wise loss between translated image and real image
lambda_pixel = 100

# Calculate output of image discriminator (PatchGAN): one score per patch,
# spatial size = input size / 2**4 (presumably four stride-2 layers — TODO confirm)
patch = (1, opt.img_height // 2**4, opt.img_width // 2**4)

# Initialize generator and discriminator
generator = GeneratorUNet()
discriminator = Discriminator()

if opt.epoch != 0:
    # Load pretrained models
    generator.load("saved_models/%s/generator_last.pkl" % (opt.dataset_name))
# Example no. 5 (scraped fragment separator; score: 0)
            nn.Linear((128 * (ds_size**2)), opt.code_dim))

        for m in self.modules():
            weights_init_normal(m)

    def execute(self, img):
        """Run the conv backbone on img and evaluate the three output heads.

        Returns a tuple (validity, label, latent_code) produced by the
        adversarial, auxiliary-class, and latent-code layers respectively.
        """
        feats = self.conv_blocks(img)
        feats = feats.view((feats.shape[0], -1))
        return (
            self.adv_layer(feats),
            self.aux_layer(feats),
            self.latent_layer(feats),
        )


# Loss functions: adversarial MSE, cross-entropy for the categorical code,
# and MSE for the continuous code (presumably InfoGAN — TODO confirm).
adversarial_loss = nn.MSELoss()
categorical_loss = nn.CrossEntropyLoss()
continuous_loss = nn.MSELoss()

# Loss weights for the categorical and continuous information terms.
lambda_cat = 1
lambda_con = 0.1

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
from jittor.dataset.mnist import MNIST
import jittor.transform as transform
# Example no. 6 (scraped fragment separator; score: 0)
            nn.ReLU(),
            nn.Linear(32, down_dim),
            nn.BatchNorm1d(down_dim),
            nn.ReLU()
        )
        self.up = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv(64, opt.channels, 3, stride=1, padding=1))

    def execute(self, img):
        """Autoencode img through down -> embedding -> fc -> up.

        Returns a tuple (reconstruction, embedding) so callers can use the
        bottleneck embedding as well as the reconstructed image.
        """
        downsampled = self.down(img)
        flat = downsampled.reshape((downsampled.shape[0], -1))
        embedding = self.embedding(flat)
        decoded = self.fc(embedding)
        decoded = decoded.reshape(
            (decoded.shape[0], 64, self.down_size, self.down_size))
        reconstruction = self.up(decoded)
        return (reconstruction, embedding)

# Reconstruction loss of AE (squared error between output and input image).
pixelwise_loss = nn.MSELoss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Optimizers: one Adam instance per network, sharing lr/beta settings from opt.
optimizer_G = jt.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = jt.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

def pullaway_loss(embeddings):
    norm = jt.sqrt((embeddings ** 2).sum(1,keepdims=True))
    normalized_emb = embeddings / norm
    similarity = jt.matmul(normalized_emb, normalized_emb.transpose(1, 0))
    batch_size = embeddings.size(0)
    loss_pt = (jt.sum(similarity) - batch_size) / (batch_size * (batch_size - 1))