Example 1
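This example appears to come from a training script for a SIR network solved over a bundle of initial conditions and epidemiological parameters. It applies the same load-or-train pattern twice: try to load a cached checkpoint, fall back to training with `train_bundle` and saving one, then restore the saved weights; the second pass targets a specific `(s_0, beta, gamma)` bundle and recreates the TensorBoard run directory from scratch.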
    # Assumes module-level imports: os, torch, shutil.rmtree,
    # torch.utils.tensorboard.SummaryWriter, plus the project's ROOT_DIR
    # constant and train_bundle helper.
    try:
        # Reuse a previously trained checkpoint, if any
        checkpoint = torch.load(
            ROOT_DIR + '/models/SIR_bundle_total/{}'.format(model_name))
    except FileNotFoundError:
        # Train
        optimizer = torch.optim.Adam(sir.parameters(), lr=lr)
        writer = SummaryWriter(
            'runs/' + '{}'.format(model_name))
        sir, train_losses, run_time, optimizer = train_bundle(sir, initial_conditions_set, t_final=t_final,
                                                              epochs=epochs,
                                                              num_batches=10, hack_trivial=hack_trivial,
                                                              train_size=train_size, optimizer=optimizer,
                                                              decay=decay,
                                                              writer=writer, betas=betas,
                                                              gammas=gammas)
        # Save the model
        torch.save({'model_state_dict': sir.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()},
                   ROOT_DIR + '/models/SIR_bundle_total/{}'.format(model_name))

        # Load the checkpoint
        checkpoint = torch.load(
            ROOT_DIR + '/models/SIR_bundle_total/{}'.format(model_name))

    # Load the model
    sir.load_state_dict(checkpoint['model_state_dict'])
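    # Only the network weights are restored here; the checkpoint also carries
    # the optimizer state, which would be needed to resume training exactly.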

    writer_dir = 'runs/' + 'real_{}'.format(model_name)

    # Check if the writer directory exists, if yes delete it and overwrite
    if os.path.isdir(writer_dir):
        rmtree(writer_dir)
    try:
        # Reuse a checkpoint trained on this bundle configuration, if any
        checkpoint = torch.load(
            ROOT_DIR + '/models/SIR_bundle_total/b_s_0={}'
            '_betas={}_gammas={}_noise_{}.pt'.format(s_0_bundle, beta_bundle,
                                                     gamma_bundle, sigma))
    except FileNotFoundError:
        # Train
        optimizer = torch.optim.Adam(sir.parameters(), lr=lr)
        writer = SummaryWriter(writer_dir)
        sir, train_losses, run_time, optimizer = train_bundle(
            sir,
            initial_conditions_set,
            t_final=t_final,
            epochs=epochs,
            num_batches=10,
            hack_trivial=hack_trivial,
            train_size=train_size,
            optimizer=optimizer,
            decay=decay,
            writer=writer,
            betas=beta_bundle,
            gammas=gamma_bundle)
        # Save the model
        torch.save(
            {
                'model_state_dict': sir.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, ROOT_DIR + '/models/SIR_bundle_total/b_s_0={}'
            '_betas={}_gammas={}_noise_{}.pt'.format(s_0_bundle, beta_bundle,
                                                     gamma_bundle, sigma))

        # Load the checkpoint
        checkpoint = torch.load(
            ROOT_DIR + '/models/SIR_bundle_total/b_s_0={}'
            '_betas={}_gammas={}_noise_{}.pt'.format(s_0_bundle, beta_bundle,
                                                     gamma_bundle, sigma))

    # Load the model
    sir.load_state_dict(checkpoint['model_state_dict'])
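    # Restore the weights trained on this bundle configuration.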

    # exact_points = get_known_points(model=sir, t_final=t_final, s_0=exact_s_0,
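Both halves of the example rely on the same load-or-train idiom: attempt `torch.load` on a checkpoint path, fall back to training and saving on `FileNotFoundError`, and restore the state dict in either case. Below is a minimal self-contained sketch of that idiom, assuming a stand-in `nn.Linear` model, a toy training loop, and a hypothetical `models/demo/checkpoint.pt` path in place of the repository's `train_bundle` and `ROOT_DIR`:

import os

import torch
import torch.nn as nn

CHECKPOINT = 'models/demo/checkpoint.pt'  # hypothetical path


def train(model, optimizer, steps=200):
    # Stand-in for train_bundle: fit y = 2x on random points.
    loss_fn = nn.MSELoss()
    for _ in range(steps):
        x = torch.rand(32, 1)
        loss = loss_fn(model(x), 2 * x)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return model, optimizer


model = nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

try:
    # Reuse a cached checkpoint if one exists...
    checkpoint = torch.load(CHECKPOINT)
except FileNotFoundError:
    # ...otherwise train once, save, and reload so that both branches
    # end up reading the same on-disk file.
    model, optimizer = train(model, optimizer)
    os.makedirs(os.path.dirname(CHECKPOINT), exist_ok=True)
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()},
               CHECKPOINT)
    checkpoint = torch.load(CHECKPOINT)

model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

Saving and immediately reloading, as the original does, keeps the two branches symmetric: the state dict applied at the end always comes from disk. The TensorBoard directory overwrite in the original is the same idea applied to logs: `rmtree` the run directory if it exists, then let `SummaryWriter` recreate it.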