def test_bce_loss(self):
    """Check jittor's BCELoss against torch's, unweighted and weighted.

    Random sigmoid-activated predictions and binary targets are pushed
    through both frameworks and the losses compared with np.allclose.
    """
    jt_sig = jnn.Sigmoid()
    tc_sig = tnn.Sigmoid()
    # Shared random logits and {0,1} targets for both frameworks.
    output = np.random.randn(100).astype(np.float32)
    target = np.random.randint(2, size=(100)).astype(np.float32)

    def evaluate(jt_criterion, tc_criterion):
        # Run identical inputs through the jittor and torch pipelines.
        jt_val = jt_criterion(jt_sig(jt.array(output)), jt.array(target))
        tc_val = tc_criterion(tc_sig(torch.from_numpy(output)),
                              torch.from_numpy(target))
        return jt_val, tc_val

    # Default (mean-reduced, unweighted) BCE.
    jt_y, tc_y = evaluate(jnn.BCELoss(), tnn.BCELoss())
    assert np.allclose(jt_y.numpy(), tc_y.numpy())

    # Weighted, sum-reduced (size_average=False) BCE.
    weight = np.random.randn(100).astype(np.float32)
    jt_y, tc_y = evaluate(
        jnn.BCELoss(weight=jt.array(weight), size_average=False),
        tnn.BCELoss(weight=torch.Tensor(weight), size_average=False),
    )
    assert np.allclose(jt_y.numpy(), tc_y.numpy())
Esempio n. 2
0
        self.model = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32), *discriminator_block(32, 64),
            *discriminator_block(64, 128))
        ds_size = (opt.img_size // (2**4))
        self.adv_layer = nn.Sequential(nn.Linear((128 * (ds_size**2)), 1),
                                       nn.Sigmoid())

    def execute(self, img):
        """Score a batch of images: conv features -> flatten -> sigmoid head."""
        features = self.model(img)
        # Collapse all feature dimensions per sample before the linear head.
        flat = features.view((features.shape[0], (-1)))
        return self.adv_layer(flat)


adversarial_loss = nn.BCELoss()  # real/fake binary classification loss
lambda_gp = 10  # presumably the gradient-penalty weight (WGAN-GP style) — confirm against training loop

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
# NOTE(review): this rebinds the `transform` *module* name to the composed
# pipeline; any later use of the module through this name would break — confirm intentional.
transform = transform.Compose([
    transform.Resize(size=opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),  # scale pixels to [-1, 1]
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=opt.batch_size,
                                                  shuffle=True)
    N, C, W, H = img.shape
    img = img[0]
    min_ = -1
    max_ = 1
    img = (img - min_) / (max_ - min_) * 255
    img = img.transpose((1, 2, 0))
    if C == 3:
        img = img[:, :, ::-1]
    cv2.imwrite(path, img)


# Output directories for generated samples and model snapshots.
os.makedirs("images/%s" % opt.dataset_name, exist_ok=True)
os.makedirs("checkpoints/%s" % opt.dataset_name, exist_ok=True)

# Loss functions
criterion_GAN = nn.BCELoss()  # no lsgan
criterion_pixelwise = nn.L1Loss()  # per-pixel reconstruction term

# Calculate output of image discriminator (PatchGAN)
# 2**4 suggests four stride-2 downsamplings in the discriminator — confirm
# against its architecture before relying on this patch geometry.
patch = (1, opt.img_height // 2**4, opt.img_width // 2**4)

# Initialize generator and discriminator
G_global = GeneratorResNet(in_channels=opt.in_channels,
                           out_channels=opt.out_channels)
# Smaller (3-res-block) generators, presumably for local facial regions
# (left eye, right eye, nose) given their names — confirm with the paper/repo.
G_l_eyel = GeneratorResNet(in_channels=opt.in_channels,
                           out_channels=opt.out_channels,
                           num_res_blocks=3)
G_l_eyer = GeneratorResNet(in_channels=opt.in_channels,
                           out_channels=opt.out_channels,
                           num_res_blocks=3)
G_l_nose = GeneratorResNet(in_channels=opt.in_channels,
Esempio n. 4
0
# Training hyperparameters (taken from CLI args where available).
lr = args.learning_rate
b1 = 0.5   # presumably Adam beta1 — confirm against optimizer construction
b2 = 0.9   # presumably Adam beta2 — confirm against optimizer construction
decay = (2.5 * 1e-05)  # weight-decay coefficient
n_skip_iter = args.n_critic  # generator updated every n_critic discriminator steps
img_size = args.img_size
channels = 1  # grayscale input
latent_dim = args.latent_dim
n_c = 10  # presumably number of classes/clusters (MNIST digits) — confirm
betan = 10  # presumably loss weights for latent-noise / cluster terms — confirm
betac = 10
wass_metric = args.wass_flag  # toggle Wasserstein vs. vanilla GAN objective
print(wass_metric)
x_shape = (channels, img_size, img_size)

bce_loss = nn.BCELoss()
xe_loss = nn.CrossEntropyLoss()
mse_loss = nn.MSELoss()

# Initialize generator and discriminator
generator = Generator_CNN(latent_dim, n_c, x_shape)
encoder = Encoder_CNN(latent_dim, n_c)
discriminator = Discriminator_CNN(wass_metric=wass_metric)

# Configure data loader
# NOTE(review): rebinds the `transform` module name to the composed pipeline.
transform = transform.Compose([
    transform.Resize(size=img_size),
    transform.Gray(),
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=batch_size,
Esempio n. 5
0
                                   nn.Leaky_relu(0.2), nn.Linear(512, 256),
                                   nn.Leaky_relu(0.2), nn.Linear(256, 1),
                                   nn.Sigmoid())

    def execute(self, img):
        """Flatten each image to a vector and score it with the MLP."""
        batch_size = img.shape[0]
        flattened = jt.reshape(img, [batch_size, (-1)])
        return self.model(flattened)


def boundary_seeking_loss(y_pred, y_true):
    """Boundary seeking loss.

    Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/
    """
    # Penalize the squared log-odds of the discriminator output; this is
    # minimized when y_pred == 0.5 (the decision boundary). y_true is unused.
    log_odds = jt.log(y_pred) - jt.log((1 - y_pred))
    return 0.5 * jt.mean(log_odds ** 2)


discriminator_loss = nn.BCELoss()  # standard real/fake classification loss

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

# Configure data loader
# NOTE(review): rebinds the `transform` *module* name to the composed
# pipeline; later uses of the module through this name would break.
transform = transform.Compose([
    transform.Resize(size=opt.img_size),
    transform.Gray(),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),  # scale pixels to [-1, 1]
])
dataloader = MNIST(train=True,
                   transform=transform).set_attrs(batch_size=opt.batch_size,
                                                  shuffle=True)