Example #1
import time
import pickle
from collections import defaultdict

import numpy as np
from keras.layers import Input
from keras.models import Model
from keras.optimizers import RMSprop
from keras.utils.generic_utils import Progbar

# The `gan` helper module (DivideFiles, GetEcalFit, BitFlip, sortEnergy,
# OptAnalysisShort) is assumed to be the project's GANutils; GetprocData is
# defined elsewhere in the same script (not shown here).
import GANutils as gan


def Gan3DTrain(discriminator,
               generator,
               datapath,
               EventsperFile,
               nEvents,
               WeightsDir,
               pklfile,
               resultfile,
               mod=0,
               nb_epochs=30,
               batch_size=128,
               latent_size=200,
               gen_weight=6,
               aux_weight=0.2,
               ecal_weight=0.1,
               lr=0.001,
               rho=0.9,
               decay=0.0,
               g_weights='params_generator_epoch_',
               d_weights='params_discriminator_epoch_',
               xscale=1,
               analysis=False,
               energies=[]):
    start_init = time.time()
    verbose = False
    particle = 'Ele'
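    # fractions of events used for training and testing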
    f = [0.9, 0.1]
    print('[INFO] Building discriminator')
    #discriminator.summary()
    discriminator.compile(optimizer=RMSprop(lr=lr, rho=rho, decay=decay),
                          loss=[
                              'binary_crossentropy',
                              'mean_absolute_percentage_error',
                              'mean_absolute_percentage_error'
                          ],
                          loss_weights=[gen_weight, aux_weight, ecal_weight])

    # build the generator
    print('[INFO] Building generator')
    #generator.summary()
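    # The generator is never trained directly; it learns through the combined
    # model below. This compile only makes it usable standalone.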
    generator.compile(optimizer=RMSprop(), loss='binary_crossentropy')

    # build combined Model
    latent = Input(shape=(latent_size, ), name='combined_z')
    fake_image = generator(latent)
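    # Freeze the discriminator inside the combined model so that generator
    # updates do not also move the discriminator weights.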
    discriminator.trainable = False
    fake, aux, ecal = discriminator(fake_image)
    combined = Model(inputs=[latent],
                     outputs=[fake, aux, ecal],
                     name='combined_model')
    combined.compile(
        optimizer=RMSprop(lr=lr, rho=rho, decay=decay),
        loss=[
            'binary_crossentropy', 'mean_absolute_percentage_error',
            'mean_absolute_percentage_error'
        ],
        loss_weights=[gen_weight, aux_weight, ecal_weight])

    # Getting Data
    Trainfiles, Testfiles = gan.DivideFiles(datapath,
                                            nEvents=nEvents,
                                            EventsperFile=EventsperFile,
                                            datasetnames=["ECAL"],
                                            Particles=[particle])
    print('The data was divided into {} train files and {} test files'.format(
        len(Trainfiles), len(Testfiles)))
    nb_test = int(nEvents * f[1])

    # Read the test data into a single array
    for index, dtest in enumerate(Testfiles):
        if index == 0:
            X_test, Y_test, ecal_test = GetprocData(dtest, xscale=xscale)
        else:
            if X_test.shape[0] < nb_test:
                X_temp, Y_temp, ecal_temp = GetprocData(dtest, xscale=xscale)
                X_test = np.concatenate((X_test, X_temp))
                Y_test = np.concatenate((Y_test, Y_temp))
                ecal_test = np.concatenate((ecal_test, ecal_temp))
    X_test = X_test[:nb_test]
    Y_test = Y_test[:nb_test]
    ecal_test = ecal_test[:nb_test]

    nb_train = int(nEvents * f[0])  # number of events used for training
    total_batches = int(nb_train / batch_size)
    print('In this experiment {} events will be used for training as {} batches'.format(
        nb_train, total_batches))
    print('{} events will be used for testing'.format(nb_test))

    train_history = defaultdict(list)
    test_history = defaultdict(list)
    analysis_history = defaultdict(list)

    init_time = time.time() - start_init
    print('Initialization time is {} seconds'.format(init_time))
    for epoch in range(nb_epochs):
        epoch_start = time.time()
        print('Epoch {} of {}'.format(epoch + 1, nb_epochs))
        X_train, Y_train, ecal_train = GetprocData(Trainfiles[0],
                                                   xscale=xscale)
        nb_file = 1
        nb_batches = int(X_train.shape[0] / batch_size)
        if verbose:
            progress_bar = Progbar(target=total_batches)

        epoch_gen_loss = []
        epoch_disc_loss = []
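        # offset (in batches) into the currently loaded file chunk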
        file_index = 0

        for index in np.arange(total_batches):
            if verbose:
                progress_bar.update(index)
            else:
                if index % 100 == 0:
                    print('processed {}/{} batches'.format(
                        index + 1, total_batches))
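            # If the loaded chunk has less than one full batch left, append
            # the next training file before slicing the batch.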
            loaded_data = X_train.shape[0]
            used_data = file_index * batch_size
            if (loaded_data - used_data) < batch_size + 1 and (
                    nb_file < len(Trainfiles)):
                X_temp, Y_temp, ecal_temp = GetprocData(Trainfiles[nb_file],
                                                        xscale=xscale)
                print("\nData file loaded..........", Trainfiles[nb_file])
                nb_file += 1
                X_left = X_train[(file_index * batch_size):]
                Y_left = Y_train[(file_index * batch_size):]
                ecal_left = ecal_train[(file_index * batch_size):]
                X_train = np.concatenate((X_left, X_temp))
                Y_train = np.concatenate((Y_left, Y_temp))
                ecal_train = np.concatenate((ecal_left, ecal_temp))
                nb_batches = int(X_train.shape[0] / batch_size)
                print("{} batches loaded..........".format(nb_batches))
                file_index = 0

            image_batch = X_train[file_index * batch_size:(file_index + 1) * batch_size]
            energy_batch = Y_train[file_index * batch_size:(file_index + 1) * batch_size]
            ecal_batch = ecal_train[file_index * batch_size:(file_index + 1) * batch_size]
            file_index += 1
            noise = np.random.normal(0, 1, (batch_size, latent_size))
            sampled_energies = np.random.uniform(0.1, 5, (batch_size, 1))
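            # Condition the generator by scaling the latent noise with the
            # sampled primary energy.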
            generator_ip = np.multiply(sampled_energies, noise)

            # expected ECAL energy sum for the sampled energies, from a parametric fit
            ecal_ip = gan.GetEcalFit(sampled_energies, particle, mod, xscale)
            generated_images = generator.predict(generator_ip, verbose=0)
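            # gan.BitFlip randomly flips a small fraction of the target labels
            # (label noise), a common trick to keep the discriminator from
            # becoming overconfident.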
            real_batch_loss = discriminator.train_on_batch(
                image_batch,
                [gan.BitFlip(np.ones(batch_size)), energy_batch, ecal_batch])
            fake_batch_loss = discriminator.train_on_batch(
                generated_images,
                [gan.BitFlip(np.zeros(batch_size)), sampled_energies, ecal_ip])
            epoch_disc_loss.append([
                (a + b) / 2 for a, b in zip(real_batch_loss, fake_batch_loss)
            ])

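            # Train the generator (through the frozen discriminator) to have
            # its images labelled as real; two generator updates are run per
            # discriminator update.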
            trick = np.ones(batch_size)
            gen_losses = []
            for _ in np.arange(2):
                noise = np.random.normal(0, 1, (batch_size, latent_size))
                sampled_energies = np.random.uniform(0.1, 5, (batch_size, 1))
                generator_ip = np.multiply(sampled_energies, noise)
                ecal_ip = gan.GetEcalFit(sampled_energies, particle, mod,
                                         xscale)
                gen_losses.append(
                    combined.train_on_batch(
                        [generator_ip],
                        [trick, sampled_energies.reshape((-1, 1)), ecal_ip]))
            epoch_gen_loss.append([(a + b) / 2 for a, b in zip(*gen_losses)])
        print('Epoch training took {} seconds.'.format(time.time() - epoch_start))
        print('\nTesting for epoch {}:'.format(epoch + 1))
        test_start = time.time()
        noise = np.random.normal(0.1, 1, (nb_test, latent_size))
        sampled_energies = np.random.uniform(0.1, 5, (nb_test, 1))
        generator_ip = np.multiply(sampled_energies, noise)
        generated_images = generator.predict(generator_ip,
                                             verbose=False,
                                             batch_size=batch_size)
        ecal_ip = gan.GetEcalFit(sampled_energies, particle, mod, xscale)
        sampled_energies = np.squeeze(sampled_energies, axis=(1, ))
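        # Build a balanced evaluation set of real and generated showers.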
        X = np.concatenate((X_test, generated_images))
        y = np.array([1] * nb_test + [0] * nb_test)
        ecal = np.concatenate((ecal_test, ecal_ip))
        aux_y = np.concatenate((Y_test, sampled_energies), axis=0)
        discriminator_test_loss = discriminator.evaluate(X, [y, aux_y, ecal],
                                                         verbose=False,
                                                         batch_size=batch_size)
        discriminator_train_loss = np.mean(np.array(epoch_disc_loss), axis=0)

        noise = np.random.normal(0.1, 1, (2 * nb_test, latent_size))
        sampled_energies = np.random.uniform(0.1, 5, (2 * nb_test, 1))
        generator_ip = np.multiply(sampled_energies, noise)
        ecal_ip = gan.GetEcalFit(sampled_energies, particle, mod, xscale)
        trick = np.ones(2 * nb_test)
        generator_test_loss = combined.evaluate(
            generator_ip,
            [trick, sampled_energies.reshape((-1, 1)), ecal_ip],
            verbose=False,
            batch_size=batch_size)
        generator_train_loss = np.mean(np.array(epoch_gen_loss), axis=0)
        train_history['generator'].append(generator_train_loss)
        train_history['discriminator'].append(discriminator_train_loss)
        test_history['generator'].append(generator_test_loss)
        test_history['discriminator'].append(discriminator_test_loss)

        print('{0:<22s} | {1:4s} | {2:15s} | {3:5s}| {4:5s}'.format(
            'component', *discriminator.metrics_names))
        print('-' * 65)

        ROW_FMT = '{0:<22s} | {1:<4.2f} | {2:<15.2f} | {3:<5.2f}| {4:<5.2f}'
        print(
            ROW_FMT.format('generator (train)',
                           *train_history['generator'][-1]))
        print(
            ROW_FMT.format('generator (test)', *test_history['generator'][-1]))
        print(
            ROW_FMT.format('discriminator (train)',
                           *train_history['discriminator'][-1]))
        print(
            ROW_FMT.format('discriminator (test)',
                           *test_history['discriminator'][-1]))

        # save weights every epoch
        generator.save_weights(WeightsDir +
                               '/{0}{1:03d}.hdf5'.format(g_weights, epoch),
                               overwrite=True)
        discriminator.save_weights(WeightsDir +
                                   '/{0}{1:03d}.hdf5'.format(d_weights, epoch),
                                   overwrite=True)
        print('Testing for epoch {} took {} seconds. Weights are saved in {}'.format(
            epoch + 1, time.time() - test_start, WeightsDir))
        pickle.dump({
            'train': train_history,
            'test': test_history
        }, open(pklfile, 'wb'))
        if analysis:
            var = gan.sortEnergy([X_test, Y_test], ecal_test, energies, ang=0)
            result = gan.OptAnalysisShort(var,
                                          generated_images,
                                          energies,
                                          ang=0)
            print('Analysing............')
            # All of the results correspond to mean relative errors on different quantities
            analysis_history['total'].append(result[0])
            analysis_history['energy'].append(result[1])
            analysis_history['moment'].append(result[2])
            print('Result = ', result)
            pickle.dump({'results': analysis_history}, open(resultfile, 'wb'))
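
A minimal driver for this function is sketched below. The architecture module (EcalEnergyGan) and every path and hyperparameter in it are illustrative assumptions, not part of this example.

# Hypothetical usage sketch for Gan3DTrain; the module and paths are assumptions.
from EcalEnergyGan import generator as build_generator
from EcalEnergyGan import discriminator as build_discriminator

latent_size = 200
d = build_discriminator()
g = build_generator(latent_size)
Gan3DTrain(d, g,
           datapath='/data/EleEscan/*.h5',  # hypothetical dataset location
           EventsperFile=5000,
           nEvents=200000,
           WeightsDir='weights',
           pklfile='dcgan-history.pkl',
           resultfile='dcgan-analysis.pkl',
           latent_size=latent_size)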
Example #2
File: HoroTest.py  Project: whhopkins/3Dgan
            image_batch = X_train[index * batch_size:(index + 1) * batch_size]
            energy_batch = y_train[index * batch_size:(index + 1) * batch_size]
            ecal_batch = ecal_train[index * batch_size:(index + 1) * batch_size]

            print(image_batch.shape)
            print(ecal_batch.shape)
            sampled_energies = np.random.uniform(0, 5, (batch_size, 1))
            noise = np.random.normal(0, 1, (batch_size, latent_size))  # assumed here, as in Example #1; not shown in this fragment
            generator_ip = np.multiply(sampled_energies, noise)
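            # Crude linear proxy for the expected ECAL energy sum
            # (2 x the sampled primary energy).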
            ecal_ip = np.multiply(2, sampled_energies)
            generated_images = generator.predict(generator_ip, verbose=0)

            #   loss_weights=[np.ones(batch_size), 0.05 * np.ones(batch_size)]

            real_batch_loss = discriminator.train_on_batch(
                image_batch,
                [bit_flip(np.ones(batch_size)), energy_batch, ecal_batch])
            fake_batch_loss = discriminator.train_on_batch(
                generated_images,
                [bit_flip(np.zeros(batch_size)), sampled_energies, ecal_ip])
            #    print(real_batch_loss)
            #   print(fake_batch_loss)

            #            fake_batch_loss = discriminator.train_on_batch(disc_in_fake, disc_op_fake, loss_weights)

            epoch_disc_loss.append([
                (a + b) / 2 for a, b in zip(real_batch_loss, fake_batch_loss)
            ])

            trick = np.ones(batch_size)