def generate_results(M, D, x0, delta, mu, m, n, t_step=2, num_days=200):
    """Simulate the viral-population model and plot the results.

    Builds the system matrix ``A = delta*I + mu*M``, simulates the
    population starting from ``x0`` over ``num_days`` days sampled every
    ``t_step`` days, then produces three plots: log10 population vs. time,
    treatment concentration vs. time, and the Perron-Frobenius eigenvalue
    distribution.

    Parameters
    ----------
    M : (n, n) array_like
        Interaction/mutation matrix used to form ``A``.
    D : array_like
        Matrix passed through to ``simulate_viral_population``.
    x0 : array_like
        Initial population state.
    delta, mu : float
        Scalar coefficients for the identity and ``M`` terms of ``A``.
    m : int
        Number of treatment series (``u``).
    n : int
        Number of population series (``x``).
    t_step : int, optional
        Sampling interval in days (default 2).
    num_days : int, optional
        Total simulated horizon in days (default 200).
    """
    identity = np.eye(n)  # renamed from "I": single-letter I is ambiguous (PEP 8)
    A = delta * identity + mu * M

    # Sample times 0, t_step, 2*t_step, ...; ceil keeps a final partial step
    # when num_days is not a multiple of t_step.
    time = [t * t_step for t in range(int(np.ceil(num_days / t_step)))]

    # Simulate; the Perron-Frobenius eigenvalue is not used in this function.
    x, u, _lambda_pf = simulate_viral_population(x0=x0, time=time, A=A, D=D)

    # Log-scale each population series. Dict comprehension replaces the
    # original manual key loop. (Assumes x maps series key -> array —
    # TODO confirm against simulate_viral_population.)
    z = {x_i: np.log10(x[x_i]) for x_i in x}

    handles = ["x" + str(i) for i in range(1, n + 1)]
    xlabel, ylabel = "time [days]", "population"
    # NOTE(review): ylim is passed as (top, bottom), i.e. inverted relative
    # to matplotlib's (bottom, top); utils.create_figure apparently expects
    # this order — confirm before changing.
    top, bottom = 3.1, -5
    ylim = (top, bottom)
    utils.create_figure(time,
                        z,
                        handles=handles,
                        xlabel=xlabel,
                        ylabel=ylabel,
                        title="Viral Population vs Time",
                        ylim=ylim,
                        num_items=n)

    # Treatment legend entries are listed in reverse order, matching the
    # original code — presumably a legend-ordering convention; verify.
    handles = ["u" + str(i) for i in reversed(range(1, m + 1))]
    top, bottom = 1, 0
    ylim = (top, bottom)

    ylabel = "relative concentration"
    utils.create_figure(time,
                        u,
                        handles=handles,
                        xlabel=xlabel,
                        ylabel=ylabel,
                        title="Treatment Concentration vs. Time",
                        ylim=ylim,
                        num_items=m)

    title = "Perron-Frobenius Eigenvalue Distribution for Random Graphs"
    generate_eig_plots(M, D, x0, delta, mu, n, title=title)
Exemple #2
0
                      config.cgan_parameters["max"], -1, 1)
        fake_image = scale(fake_image, config.cgan_parameters["min"],
                           config.cgan_parameters["max"], -1, 1)
        masked_image = scale(masked_image, config.cgan_parameters["min"],
                             config.cgan_parameters["max"], -1, 1)
        image = mask_lungs(image, mask)
        fake_image = mask_lungs(fake_image, mask)

        l1_diff = mae(image,
                      fake_image,
                      mask=mask,
                      mask_val=config.mask_values["non_lung_tissue"])
        writer.add_scalar("L1 diff/Train", l1_diff, epoch)

        f = create_figure([
            masked_image[0, 0, :, :], fake_image[0, 0, :, :], image[0, 0, :, :]
        ],
                          figsize=(12, 4))

        writer.add_figure("Image outputs/Real image, fake image, mask", f,
                          epoch)

        log_images([masked_image, fake_image, image],
                   path=config.image_logs,
                   run_id=start_time,
                   step=epoch,
                   context="train",
                   figsize=(12, 4))

        data = next(iter(valid_dataloader))
        valid_image, valid_masked_image, valid_mask = data
        valid_image, valid_masked_image = valid_image.float().to(
Exemple #3
0
    print("Start training")
    for epoch in range(n_epoch):
        s_t = time.time()
        ## Training
        model.train()
        for i, (img, label) in enumerate(labeled_dataloader):
            # if i > 30:
            #     break
            s_t = time.time()

            model.zero_grad()
            # Supervised branch #
            #####################
            img, label = img.float().to(device), label.float().to(device)
            if i % 100 == 0:
                fig = create_figure(img, label)
                writer.add_figure("Labeled", fig,
                                  epoch * len(labeled_dataloader) + i)
            sup_output = model(img)
            ce_loss, d_loss, a_loss = cross_entropy(
                sup_output,
                label), dice_loss(sup_output,
                                  label), auc_loss(sup_output, label)
            supervised_loss = proxy_weight * (
                ce_weight * ce_loss + d_weight * d_loss) + a_weight * a_loss
            supervised_loss *= supervised_weight

            train_loss = supervised_loss
            if i % 20 == 0:
                writer.add_scalars(
                    "Supervised_Loss", {
Exemple #4
0
import numpy as np
from matplotlib import pyplot as plt
from scipy.constants import mega

from extradata import ExtraData, CollisionMeta
from utils import filename_from_argv, create_figure, plot_settings, mode_from_fn, scenario_colors

# Apply the project-wide matplotlib style configuration.
plot_settings()

# Marker size for scatter plots (presumably used further down the script —
# TODO confirm).
dotsize = 1.5

# Shared axis labels for the collision/impact figures below.
angle_label = "Impact angle [deg]"
v_label = "v/v_esc"
time_label = "Time [Myr]"

# Figure 1: impact angle vs. impact velocity (in units of escape velocity).
fig1, ax1 = create_figure()

ax1.set_xlabel(angle_label)
ax1.set_ylabel(v_label)

# Figure 2: impact angle over time, log-scaled time axis.
fig2, ax2 = create_figure()

ax2.set_xlabel(time_label)
ax2.set_ylabel(angle_label)
ax2.set_xscale("log")

# Figure 3: impact velocity over time, log-scaled time axis.
fig3, ax3 = create_figure()

ax3.set_xlabel(time_label)
ax3.set_ylabel(v_label)
ax3.set_xscale("log")
Exemple #5
0
 def create_figure(self):
     """Create and store the backing figure, then register ``self.close``
     to run when the figure's window is closed (via project utils)."""
     self.figure = utils.create_figure()
     utils.on_figure_window_close(self.figure, self.close)
Exemple #6
0
 def create_figure(self):
     """Create the figure via ``utils.create_figure`` and store it on self."""
     self.figure = utils.create_figure()
                          total_batch_counter)
        total_batch_counter += 1

    scheduler_G.step()
    scheduler_D_A.step()
    scheduler_D_B.step()
    with torch.no_grad():
        real_A = real_A.cpu().numpy()
        real_B = real_B.cpu().numpy()
        fake_image_A = fake_image_A.cpu().numpy()
        fake_image_B = fake_image_B.cpu().numpy()
        recovered_image_A = recovered_image_A.cpu().numpy()
        recovered_image_B = recovered_image_B.cpu().numpy()
        f = create_figure([
            real_A[0, 0, :, :], fake_image_B[0, 0, :, :],
            recovered_image_A[0, 0, :, :]
        ],
                          figsize=(12, 4))
        writer.add_figure("Image outputs/A to B to A", f, epoch)

        f = create_figure([
            real_B[0, 0, :, :], fake_image_A[0, 0, :, :],
            recovered_image_B[0, 0, :, :]
        ],
                          figsize=(12, 4))
        writer.add_figure("Image outputs/B to A to B", f, epoch)

        log_images([real_A, fake_image_B, recovered_image_A],
                   path=config.image_logs,
                   run_id=start_time,
                   step=epoch,
Exemple #8
0
 def create_figure(self):
     """Create the backing figure, register window-close cleanup, and hook
     the canvas draw event."""
     self.figure = utils.create_figure()
     utils.on_figure_window_close(self.figure, self.close)
     # Call self._on_draw after every canvas draw — presumably to refresh
     # cached artists; verify against _on_draw's implementation.
     self.figure.canvas.mpl_connect('draw_event', self._on_draw)
Exemple #9
0
    def save_sample_imgs(self, img, mask, prediction, epoch, iter):
        """Render (img, mask, prediction) into a figure and persist it via
        the checkpoint manager under the name "<epoch>-<iter>".

        NOTE(review): plt.savefig("result_deb.jpg") rewrites the same file
        on every call and looks like debug leftover — confirm whether it
        should be removed.
        """
        fig = create_figure(img, mask, prediction.float())

        self.checkpoint_mng.save_image(f"{epoch}-{iter}", fig)
        plt.savefig("result_deb.jpg")
        # Close the figure to release matplotlib memory.
        plt.close(fig)
Exemple #10
0
 def create_figure(self):
     """Create and store the figure, registering ``self.close`` as the
     window-close callback (note: ``on_window_close``, not the
     ``on_figure_window_close`` variant used elsewhere)."""
     self.figure = utils.create_figure()
     utils.on_window_close(self.figure, self.close)
Exemple #11
0
            netD.zero_grad()
            real_cpu = out_img.to(device)
            batch_size = real_cpu.size(0)
            label = torch.full((batch_size, ), real_label, device=device)

            output = netD(real_cpu)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.mean().item(
            )  # should stay close to 1 and converge to 0.5

            # train with fake
            # fake = netG(in_img)

            if i % 10 == 0:
                fig = create_figure(in_img, out_img, format_Gx(fake))
                writer.add_figure("Images", fig, i, close=True)

            label.fill_(fake_label)
            output = netD(fake.detach())  # no gradients flow back Generator
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.mean().item(
            )  # before update, should stay close to 0 and converge to 0.5
            errD = errD_real + errD_fake

            torch.nn.utils.clip_grad_value_(netD.parameters(), clip_value=0.01)
            optimizerD.step()
            if i != 0:
                wait_D = 0
Exemple #12
0
 def create_figure(self):
     """Create the figure via ``utils.create_figure`` and store it on self."""
     self.figure = utils.create_figure()
Exemple #13
0
    def save_sample_imgs(self, img, mask, prediction, epoch, iter):
        """Render (img, mask, prediction) into a figure, persist it via the
        checkpoint manager under "<epoch>-<iter>", and free the figure."""
        sample_fig = create_figure(img, mask, prediction.float())
        self.checkpoint_mng.save_image(f'{epoch}-{iter}', sample_fig)
        # Release matplotlib resources once the image has been saved.
        plt.close(sample_fig)