def run(self):  # train the network with the Jacobian (jac) callables

        print('GLOnet starts here')
        # Hard-coded output directory; adjust for your machine.
        output_dir = 'E:\\LAB\\Project GLOnet-LumeircalAPI\\GLOnet-LumericalAPI\\results'
        restore_from = None

        json_path = os.path.join(output_dir, 'Params.json')
        assert os.path.isfile(json_path), "No json file found at {}".format(
            json_path)
        params = utils.Params(json_path)

        params.output_dir = output_dir
        params.cuda = torch.cuda.is_available()
        params.restore_from = restore_from
        params.numIter = int(params.numIter)
        params.noise_dims = int(params.noise_dims)
        params.gkernlen = int(params.gkernlen)
        params.step_size = int(params.step_size)

        # make directory
        os.makedirs(output_dir + '/outputs', exist_ok=True)
        os.makedirs(output_dir + '/model', exist_ok=True)
        os.makedirs(output_dir + '/figures/histogram', exist_ok=True)
        os.makedirs(output_dir + '/figures/deviceSamples', exist_ok=True)

        generator = Generator(params)
        # Optionally save the full model:
        # torch.save(generator, output_dir + '/net.pth')

        if params.cuda:
            generator.cuda()

        # Define the optimizer
        optimizer = torch.optim.Adam(generator.parameters(),
                                     lr=params.lr,
                                     betas=(params.beta1, params.beta2))

        # Define the scheduler
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=params.step_size,
                                                    gamma=params.gamma)

        # Load model data
        if restore_from is not None:
            params.checkpoint = utils.load_checkpoint(restore_from, generator,
                                                      optimizer, scheduler)
            logging.info('Model data loaded')

        # Train the model and save
        if params.numIter != 0:
            logging.info('Start training')
            train(generator,
                  optimizer,
                  scheduler,
                  params,
                  func=self.callable_fom,
                  jac=self.callable_jac,
                  callback=self.callback)

        # Generate images and save
        logging.info('Start generating devices')
        evaluate(generator, func=self.callable_fom, numImgs=10, params=params)

        return
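The train() call above takes a figure-of-merit callable and its Jacobian from the enclosing object. Their exact contract is defined elsewhere in this project; the sketch below is only a minimal mock, assuming batched device images and one scalar FOM per device (the names, shapes, and the averaging FOM itself are assumptions, not the project's physics).

import numpy as np

def callable_fom(imgs):
    # Mock figure of merit: one scalar per device image, here just the
    # mean pixel value over each (H, W) pattern. Stands in for a real
    # efficiency computed by the electromagnetic simulator.
    return np.mean(imgs, axis=(-2, -1))

def callable_jac(imgs):
    # Jacobian of the mock FOM with respect to each pixel. For a mean
    # over H*W pixels the derivative is the constant 1 / (H * W).
    return np.ones_like(imgs) / (imgs.shape[-2] * imgs.shape[-1])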
Example 2
def main(args):

    parser = argparse.ArgumentParser(
        description="Trains a simple neural net on simple data"
    )

    parser.add_argument(
        "--dataset", type=Dataset, choices=Dataset, default=Dataset.IRIS
    )
    parser.add_argument("--plot-image-grid", action="store_true")
    parser.add_argument("--lr", default=1e-3, type=float)
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument(
        "--quiet",
        action="store_true",
        help="Do not show any of the generated plots",
    )

    parser.add_argument(
        "--savefig", action="store_true", help="Save the figures as pdfs"
    )

    parsed_args, _ = parser.parse_known_args(args)  # parse the argv-style list passed to main
    epochs = parsed_args.epochs

    if parsed_args.dataset == Dataset.IRIS:
        train_data, test_data, train_targets, test_targets, classes = load_iris_data()
    elif parsed_args.dataset == Dataset.KMNIST:
        train_data, test_data, train_targets, test_targets, classes = load_kmnist_data()
        if parsed_args.plot_image_grid:
            make_image_grid(train_data, train_targets, savefig=parsed_args.savefig)
            plt.show()
            return

    features = train_data.size(1)

    no_classes = int(train_targets.max()) + 1  # infer the class count from the labels

    if parsed_args.dataset == Dataset.IRIS:
        layers = [
            FullyConnected(features, 32),
            FullyConnected(32, no_classes, nonlinearity="linear"),
        ]
    elif parsed_args.dataset == Dataset.KMNIST:
        layers = [
            Conv2d(1, 8, 3),
            Conv2d(8, 8, 3),
            MaxPool2d(2),
            Conv2d(8, 16, 3),
            Conv2d(16, 16, 3),
            MaxPool2d(2),
            Flatten(start_dim=1, end_dim=-1),
            FullyConnected(256, no_classes, nonlinearity="linear"),
        ]
    model = Model(layers)
    optimiser = RMSProp(model.parameters(), parsed_args.lr)

    it = range(epochs)
    if parsed_args.dataset != Dataset.KMNIST:
        it = tqdm(it)

    epochs_loss = []
    for epoch in it:
        epochs_loss.append(
            train(model, optimiser, train_data, train_targets, size=128, use_tqdm=True)
        )

    plot_loss(epochs_loss, dataset=parsed_args.dataset, savefig=parsed_args.savefig)

    loss, accuracy, accuracy_per_class = test(model, test_data, test_targets)
    plot_bar(
        accuracy_per_class,
        classes,
        dataset=parsed_args.dataset,
        savefig=parsed_args.savefig,
    )
    if not parsed_args.quiet:
        plt.show()
    print(f"Final Loss: {loss}, Final Accuracy: {accuracy}")
Example 3
    # Define the optimizer
    optimizer = torch.optim.Adam(generator.parameters(), lr=params.lr, betas=(params.beta1, params.beta2))
    
    # Define the scheduler
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=params.step_size, gamma=params.gamma)


    # Load model data
    if args.restore_from is not None:
        params.checkpoint = utils.load_checkpoint(args.restore_from, generator, optimizer, scheduler)
        logging.info('Model data loaded')

    # Set the timer
    timer = utils.timer()

    # Train the model and save 
    if params.numIter != 0:
        logging.info('Start training')
        train(generator, optimizer, scheduler, eng, params)

    # Generate images and save 
    logging.info('Start generating devices')
    evaluate(generator, eng, numImgs=500, params=params)
    
    timer.out()
    writer.close()
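This fragment constructs the timer with no arguments and reports with timer.out(). utils.timer is not shown in these snippets; a minimal stand-in consistent with that usage (a sketch, not the project's implementation) is:

import time

class timer:
    # Start timing on construction, print elapsed seconds on out().
    def __init__(self):
        self.start = time.time()

    def out(self):
        print('Elapsed time: {:.1f} s'.format(time.time() - self.start))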



Example 4
    generator = Generator(params)
    discriminator = Discriminator(params)
    if params.cuda:
        generator.cuda()
        discriminator.cuda()

    # Define the optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=params.lr_gen,
                                   betas=(params.beta1_gen, params.beta2_gen))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=params.lr_dis,
                                   betas=(params.beta1_dis, params.beta2_dis))

    # Train the model and save
    logging.info('Start training')
    loss_history = train((generator, discriminator),
                         (optimizer_G, optimizer_D), dataloader, params)

    # plot loss history and save
    utils.plot_loss_history(loss_history, output_dir)

    # Generate images and save
    wavelengths = list(range(500, 1301, 50))
    angles = list(range(35, 86, 5))

    logging.info(
        'Start generating devices for wavelength range {} to {} and angle range from {} to {} \n'
        .format(min(wavelengths), max(wavelengths), min(angles), max(angles)))
    evaluate(generator, wavelengths, angles, num_imgs=500, params=params)
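train() here receives a (generator, discriminator) pair and one optimizer per network, the usual alternating-update GAN pattern. The project's actual losses are not shown; the sketch below uses the standard non-saturating GAN objective and assumes the discriminator ends in a sigmoid.

import torch

def gan_step(generator, discriminator, optimizer_G, optimizer_D, real, noise):
    eps = 1e-8  # numerical floor inside the logs

    # Discriminator update: raise D(real), lower D(G(z)).
    optimizer_D.zero_grad()
    fake = generator(noise).detach()  # detach so G gets no gradient here
    loss_D = -(torch.log(discriminator(real) + eps).mean()
               + torch.log(1 - discriminator(fake) + eps).mean())
    loss_D.backward()
    optimizer_D.step()

    # Generator update: raise D(G(z)).
    optimizer_G.zero_grad()
    loss_G = -torch.log(discriminator(generator(noise)) + eps).mean()
    loss_G.backward()
    optimizer_G.step()

    return loss_D.item(), loss_G.item()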
Example 5
def autosim(args, eng):

    os.makedirs(args.output_dir, exist_ok=True)
    # Set the logger
    utils.set_logger(os.path.join(args.output_dir, 'train.log'))

    copyfile(args.output_dir)  # project-specific single-argument helper (not shutil.copyfile)

    # Load parameters from json file
    json_path = os.path.join(args.output_dir, 'Params.json')
    assert os.path.isfile(json_path), "No json file found at {}".format(
        json_path)
    params = utils.Params(json_path)

    # Add attributes to params
    params.output_dir = args.output_dir
    params.cuda = torch.cuda.is_available()
    params.restore_from = args.restore_from
    params.numIter = int(params.numIter)
    params.noise_dims = int(params.noise_dims)
    params.gkernlen = int(params.gkernlen)
    params.step_size = int(params.step_size)
    params.gen_ver = int(args.gen_ver)
    params.dime = 1
    if args.wavelength is not None:
        params.wavelength = int(args.wavelength)
    if args.angle is not None:
        params.angle = int(args.angle)
    # Build tools: a TensorBoard writer and a recorder for the best device
    writer = SummaryWriter(log_dir=r'./scan/runs')
    max_recorder = utils.max_recorder()
    params.recorder = max_recorder
    params.writer = writer
    params.writer = writer

    # make directory
    os.makedirs(args.output_dir + '/outputs', exist_ok=True)
    os.makedirs(args.output_dir + '/model', exist_ok=True)
    os.makedirs(args.output_dir + '/figures/histogram', exist_ok=True)
    os.makedirs(args.output_dir + '/figures/deviceSamples', exist_ok=True)
    os.makedirs(args.output_dir + '/figures/deviceSamples_max', exist_ok=True)
    os.makedirs(args.output_dir + '/deg{}_wl{}_gen_ver{}'.format(
        params.angle, params.wavelength, params.gen_ver),
                exist_ok=True)
    # Define the model (generator version selected by gen_ver)
    if params.gen_ver == 0:
        generator = Generator0(params)
    else:
        generator = Generator(params)

    # Move to gpu if possible
    if params.cuda:
        generator.cuda()

    # Define the optimizer
    optimizer = torch.optim.Adam(generator.parameters(),
                                 lr=params.lr,
                                 betas=(params.beta1, params.beta2))

    # Define the scheduler
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=params.step_size,
                                                gamma=params.gamma)

    # Load model data
    if args.restore_from is not None:
        params.checkpoint = utils.load_checkpoint(args.restore_from, generator,
                                                  optimizer, scheduler)
        logging.info('Model data loaded')

    # Set the timer
    timer = utils.timer()

    # Train the model and save
    if params.numIter != 0:
        logging.info('Start training')
        train(generator, optimizer, scheduler, eng, params)

    # Generate images and save
    logging.info('Start generating devices')
    evaluate(generator, eng, numImgs=500, params=params)

    timer.out()
    writer.close()
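params.recorder is built from utils.max_recorder() and presumably tracks the best device found during training. The class itself is not shown; a minimal sketch consistent with the name and usage (the update() interface is an assumption) is:

class max_recorder:
    # Keep the largest value seen so far and the item that produced it.
    def __init__(self):
        self.best_value = float('-inf')
        self.best_item = None

    def update(self, value, item=None):
        if value > self.best_value:
            self.best_value = value
            self.best_item = item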
Example 6
    def test_start_train(self):
        te.train(self.conf)
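This fragment only runs inside a unittest.TestCase that defines te and self.conf. A self-contained version might look like the sketch below, where the te alias and the empty config are assumptions:

import unittest

import train_engine as te  # hypothetical module exposing train()

class TrainSmokeTest(unittest.TestCase):
    def setUp(self):
        # Hypothetical minimal config; the real self.conf would come
        # from the project's configuration loader.
        self.conf = {}

    def test_start_train(self):
        te.train(self.conf)

if __name__ == '__main__':
    unittest.main()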