Exemplo n.º 1
0
    def train(self,
              iterations,
              batch_size=128,
              sample_interval=100,
              save_model_interval=100,
              train_D_iters=1,
              train_G_iters=1,
              img_dir='./',
              model_dir='./'):
        """Adversarially train G against the realness discriminator.

        Per epoch, ``self.D_realness`` is trained to score real images as
        1 and generated images as 0; G is then trained through
        ``self.combined`` to fool it while reproducing the requested
        target digit. Per-epoch losses are printed as tables.

        Args:
            iterations: number of training epochs.
            batch_size: samples per gradient update.
            sample_interval: save a sample-image grid every N epochs
                (<= 0 disables sampling).
            save_model_interval: save model checkpoints every N epochs
                (<= 0 disables checkpointing).
            train_D_iters: discriminator updates per epoch (must be >= 1;
                the printouts read the last iteration's losses).
            train_G_iters: generator updates per epoch (must be >= 1).
            img_dir: directory for sampled images.
            model_dir: directory for model checkpoints.
        """
        imgs, digits = self.imgs, self.digits

        # Target labels shared by every batch: 1 = real, 0 = fake.
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(model_dir, exist_ok=True)

        for itr in range(1, iterations + 1):

            # ---------------------
            #  Train D_realness
            # ---------------------
            for _ in range(train_D_iters):
                # Independent random batches for the real and fake passes.
                # (Dead `unmatch_digits`/`real_digits` computations removed:
                # D_realness only scores images, not (image, digit) pairs.)
                idx_real = np.random.randint(0, imgs.shape[0], batch_size)
                idx_fake = np.random.randint(0, imgs.shape[0], batch_size)
                fake_target_digits = onehot(
                    np.random.randint(0, 10, batch_size), 10)
                real_imgs = imgs[idx_real]
                fake_imgs = self.G.predict(
                    [imgs[idx_fake], fake_target_digits])

                # real image
                d_loss_real = self.D_realness.train_on_batch(real_imgs, valid)
                # fake image
                d_loss_fake = self.D_realness.train_on_batch(fake_imgs, fake)

            # ---------------------
            #  Train Generator
            # ---------------------
            for _ in range(train_G_iters):
                # Condition on random target digits; the combined model is
                # trained to be accepted as real AND to match the digit.
                idx = np.random.randint(0, imgs.shape[0], batch_size)
                fake_target_digits = onehot(
                    np.random.randint(0, 10, batch_size), 10)

                g_loss = self.combined.train_on_batch(
                    [imgs[idx], fake_target_digits],
                    [valid, fake_target_digits])

            print(f'--------\nEPOCH {itr}\n--------')
            print(
                pd.DataFrame({
                    'D_realness': self.D_realness.metrics_names,
                    'real': d_loss_real,
                    'fake': d_loss_fake
                }).to_string(index=False))
            print(
                pd.DataFrame({
                    'combined': self.combined.metrics_names,
                    'value': g_loss,
                }).to_string(index=False))
            print()

            # If at save interval => save generated image samples
            if sample_interval > 0 and itr % sample_interval == 0:
                plot_table(self.G,
                           self.D,
                           os.path.join(img_dir, f'{itr}.png'),
                           save=True)

            if save_model_interval > 0 and itr % save_model_interval == 0:
                self.D.save(os.path.join(model_dir, f'D{itr}.hdf5'))
                self.G.save(os.path.join(model_dir, f'G{itr}.hdf5'))
                self.G_mask.save(os.path.join(model_dir, f'G_mask{itr}.hdf5'))

        self.tb.on_train_end(None)
Exemplo n.º 2
0
    def train(self,
              iterations,
              batch_size=128,
              sample_interval=100,
              save_model_interval=100,
              train_D_iters=1,
              train_G_iters=1,
              img_dir='./',
              model_dir='./'):
        """Adversarially train G against a conditional discriminator D.

        Each epoch, D is trained on three kinds of (image, digit) pairs:
        real image with its correct digit (valid), generated image with a
        random target digit (fake), and real image with a deliberately
        wrong digit (fake). G is then trained through ``self.combined``
        so D accepts its conditioned output. Losses go to TensorBoard via
        ``self.tb`` and to stdout.

        Args:
            iterations: number of training epochs.
            batch_size: samples per gradient update.
            sample_interval: save a sample-image grid every N epochs
                (<= 0 disables sampling).
            save_model_interval: save model checkpoints every N epochs
                (<= 0 disables checkpointing).
            train_D_iters: D updates per epoch (must be >= 1; the
                epoch-end logging reads the last iteration's losses).
            train_G_iters: G updates per epoch (must be >= 1).
            img_dir: directory for sampled images.
            model_dir: directory for model checkpoints.
        """
        imgs, digits = self.imgs, self.digits

        # Target labels: 1 = accepted as real, 0 = rejected as fake.
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(model_dir, exist_ok=True)

        for itr in range(1, iterations + 1):

            # ---------------------
            #  Train Discriminator
            # ---------------------
            for _ in range(train_D_iters):
                # Independent random batches for the real and fake passes.
                idx_real = np.random.randint(0, imgs.shape[0], batch_size)
                idx_fake = np.random.randint(0, imgs.shape[0], batch_size)
                random_target_digits = onehot(np.random.randint(0, 10, batch_size), 10)
                # Real digits replaced by deliberately wrong ones for the
                # mismatch negative example.
                unmatch_digits = onehot(exclude(digits[idx_real]), 10)
                real_imgs = imgs[idx_real]
                real_digits = onehot(digits[idx_real], 10)
                fake_imgs = self.G.predict([imgs[idx_fake], random_target_digits])

                # real image and correct digit
                d_loss_real = self.D.train_on_batch([real_imgs, real_digits], valid)
                # fake image and random digit
                d_loss_fake = self.D.train_on_batch([fake_imgs, random_target_digits], fake)
                # real image but wrong digit
                d_loss_fake2 = self.D.train_on_batch([real_imgs, unmatch_digits], fake)

            # TensorBoard: log the last D iteration's losses for this epoch.
            logs = {
                'D_loss_real': d_loss_real[0],
                'D_loss_fake': d_loss_fake[0],
                'D_loss_fake2': d_loss_fake2[0]
            }
            self.tb.on_epoch_end(itr, logs)

            # ---------------------
            #  Train Generator
            # ---------------------
            for _ in range(train_G_iters):
                # Condition on random target digits; G is rewarded when
                # the combined model's D head accepts the generated image.
                idx = np.random.randint(0, imgs.shape[0], batch_size)
                random_target_digits = onehot(np.random.randint(0, 10, batch_size), 10)

                g_loss = self.combined.train_on_batch([imgs[idx], random_target_digits], valid)

                # TensorBoard: logged once per G iteration.
                logs = {
                    'G_loss': g_loss[0],
                }
                self.tb.on_epoch_end(itr, logs)

            # If at save interval => save generated image samples
            if sample_interval > 0 and itr % sample_interval == 0:
                plot_table(self.G, self.D, os.path.join(img_dir, f'{itr}.png'), save=True)

            # model_dir already exists (created above with exist_ok=True),
            # so no extra isdir/makedirs check is needed here.
            if save_model_interval > 0 and itr % save_model_interval == 0:
                self.D.save(os.path.join(model_dir, f'D{itr}.hdf5'))
                self.G.save(os.path.join(model_dir, f'G{itr}.hdf5'))
                self.G_mask.save(os.path.join(model_dir, f'G_mask{itr}.hdf5'))

            # Plot the progress
            print(f'{itr} [G loss: {g_loss[0]} | acc: {g_loss[1]}]')
            print(f'{itr} [D real: {d_loss_real[0]} | acc: {d_loss_real[1]}]')
            print(f'{itr} [D fake: {d_loss_fake[0]} | acc: {d_loss_fake[1]}]')
            print(f'{itr} [D fake2: {d_loss_fake2[0]} | acc: {d_loss_fake2[1]}]')
            print()

        self.tb.on_train_end(None)
Exemplo n.º 3
0
    def train(self,
              iterations,
              batch_size=128,
              sample_interval=100,
              save_model_interval=100,
              train_D_iters=1,
              train_G_iters=1,
              img_dir='./',
              model_dir='./'):
        """Train G against an 11-class discriminator.

        D classifies images into 11 classes: digits 0-9 plus an extra
        "fake" class (index 10). Real images are labelled with their true
        digit; generated images with the fake class. G is trained through
        ``self.combined`` so that its output is classified as the
        requested target digit itself.

        NOTE(review): assumes train_D_iters >= 1 and train_G_iters >= 1;
        with 0, the loss printouts below would reference undefined names.

        Args:
            iterations: number of training epochs.
            batch_size: samples per gradient update.
            sample_interval: save a sample-image grid every N epochs
                (<= 0 disables sampling).
            save_model_interval: save model checkpoints every N epochs
                (<= 0 disables checkpointing).
            train_D_iters: discriminator updates per epoch.
            train_G_iters: generator updates per epoch.
            img_dir: directory for sampled images.
            model_dir: directory for model checkpoints.
        """
        imgs, digits = self.imgs, self.digits

        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(model_dir, exist_ok=True)

        for itr in range(1, iterations + 1):

            # ---------------------
            #  Train Discriminator (11-class D, not D_realness)
            # ---------------------
            for _ in range(train_D_iters):
                # Independent random batches for the real and fake passes.
                idx_real = np.random.randint(0, imgs.shape[0], batch_size)
                idx_fake = np.random.randint(0, imgs.shape[0], batch_size)

                real_imgs = imgs[idx_real]
                real_ans = onehot(digits[idx_real],
                                  11)  # 11 classes: digits 0-9 + "fake"

                fake_target_digits = onehot(
                    np.random.randint(0, 10, batch_size), 10)  # for G's input
                fake_imgs = self.G.predict(
                    [imgs[idx_fake], fake_target_digits])
                fake_ans = np.zeros((batch_size, 11))
                fake_ans[:, 10] = 1  # only the 11th ("fake") class is 1

                # real image
                d_loss_real = self.D.train_on_batch(real_imgs, real_ans)
                # fake image
                d_loss_fake = self.D.train_on_batch(fake_imgs, fake_ans)

            # ---------------------
            #  Train Generator
            # ---------------------
            for _ in range(train_G_iters):
                # Condition on labels
                idx = np.random.randint(0, imgs.shape[0], batch_size)
                target_nums = np.random.randint(0, 10, batch_size)
                fake_target_digits = onehot(target_nums, 10)
                # G succeeds when D classifies its output as the target
                # digit (never the fake class).
                fake_ans = onehot(target_nums, 11)

                g_loss = self.combined.train_on_batch(
                    [imgs[idx], fake_target_digits], fake_ans)

            print(f'--------\nEPOCH {itr}\n--------')

            print(
                pd.DataFrame({
                    'D': self.D.metrics_names,
                    'real': d_loss_real,
                    'fake': d_loss_fake
                }).to_string(index=False))
            print()

            print(
                pd.DataFrame({
                    'G': self.combined.metrics_names,
                    'value': g_loss,
                }).to_string(index=False))
            print()

            # If at save interval => save generated image samples
            if sample_interval > 0 and itr % sample_interval == 0:
                # self.sample_imgs(itr, img_dir)
                plot_table(self.G,
                           self.D,
                           os.path.join(img_dir, f'{itr}.png'),
                           save=True)

            if save_model_interval > 0 and itr % save_model_interval == 0:
                self.D.save(os.path.join(model_dir, f'D{itr}.hdf5'))
                self.G.save(os.path.join(model_dir, f'G{itr}.hdf5'))
                self.G_mask.save(os.path.join(model_dir, f'G_mask{itr}.hdf5'))
Exemplo n.º 4
0
from hotbit.parametrization import SlaterKosterTable
from hotbit.parametrization import KSAllElectron
from util import plot_table
from hotbit import Element
from pickle import load
import pylab as pl
from box.data import data

# Solve the all-electron problems for C and H under identical quadratic
# confinement, tabulate the C-H Slater-Koster integrals, and plot them.
atoms = []
for symbol in ('C', 'H'):
    atom = KSAllElectron(symbol, confinement={'mode': 'quadratic', 'r0': 5.04})
    atom.run()
    atoms.append(atom)

table = SlaterKosterTable(atoms[0], atoms[1])
table.run(1, 15, 20, ntheta=50, nr=25)
table.write()
plot_table('C_H.par', screen=True)

    
        
        
Exemplo n.º 5
0
#sk.write()
#compare_tables('Au_Au.par','Au_Au_NR.par',s1='Au',s2='Au',screen=False)
              
# Element pairs to parametrize: (s1, s2, confinement r0 for s1, r0 for s2).
# NOTE(review): the 1.41/0.529177 entry looks like an A -> Bohr conversion
# of the Mg radius — confirm units against the other entries.
pairs = [
    ('C', 'C', 1.85 * 1.46, 1.85 * 1.46),
    ('C', 'H', 1.85 * 1.46, 1.85 * 0.705),
    ('Na', 'C', 1.85 * 2.9, 1.85 * 1.46),
    ('O', 'H', 1.85 * 1.38, 1.85 * 0.705),
    ('Mg', 'O', 1.85 * 1.41 / 0.529177, 1.85 * 1.38),
    ('Na', 'O', 1.85 * 2.9, 1.85 * 1.38),
    ('H', 'H', 1.85 * 0.705, 1.85 * 0.705),
]

for sym1, sym2, r01, r02 in pairs:
    atom1 = KSAllElectron(sym1, nodegpts=500, confinement={'mode': 'quadratic', 'r0': r01})
    atom1.run()
    if sym1 == sym2:
        # Homonuclear pair: reuse the same all-electron solution.
        atom2 = atom1
    else:
        atom2 = KSAllElectron(sym2, confinement={'mode': 'quadratic', 'r0': r02})
        atom2.run()

    table = SlaterKosterTable(atom1, atom2)
    table.run(1E-3, 12, 10)  # ,ntheta=20,nr=20)
    table.write()
    fname = '%s_%s.par' % (sym1, sym2)
    # Plot the tabulated integrals (der=0) and their derivatives (der=1).
    for der in (0, 1):
        plot_table(fname, s1=sym1, s2=sym2, screen=True, der=der)
        
Exemplo n.º 6
0
	# NOTE(review): fragment of a results-reporting routine; `results`,
	# `best_solution`, `worst_solution`, `file`, `util` and `colored`
	# are defined above this excerpt.
	results_ord = []
	for i, population in enumerate(results):
		# Lower fitness is better here: a strictly smaller fitness takes
		# over as best; ties are broken by fewer generations (nger).
		if population.best_individual.fitness < best_solution.best_individual.fitness:
			best_solution = population
		if population.best_individual.fitness == best_solution.best_individual.fitness:
			if population.nger < best_solution.nger:
				best_solution = population
		if population.best_individual.fitness > worst_solution.best_individual.fitness:
			worst_solution = population
		# Remember (run index, best fitness) for the summary table.
		results_ord.append((i, population.best_individual.fitness))

	# Keep only the 50 lowest-fitness runs for the plotted table.
	results_ord.sort(key=lambda x: x[1])
	results_ord = results_ord[:50]

	util.plot_table(results, results_ord, file)
	# file[len(file)-1] is the last element of `file` — presumably a run
	# label used to name the output plots; confirm against the caller.
	util.plot_graphics(best_solution, file[len(file)-1]+"_best_solution")
	util.plot_graphics(worst_solution, file[len(file)-1]+"_worst_solution")

	# Console summary: best solution in green, stats in blue.
	print(colored('\033[1m'+"\n#####################################\n-> Best solution found: ", "green"))
	print(best_solution.get_parameters())
	print(colored('\033[1m'+"\n-> Best Individual: %.10f" % best_solution.best_individual.fitness, "green")) 
	print(colored('\033[1m'+"\n-> Avg Fitness : %.10f" % best_solution.avg_fitness, "blue")) 
	print(colored('\033[1m'+"\n-> Median Fitness : %.10f" % best_solution.median_fitness, "blue")) 
	print(colored('\033[1m'+"\n-> STD Fitness : %.10f" % best_solution.std_fitness, "blue")) 

	# Same summary for the worst solution, headline in red.
	print(colored('\033[1m'+"\n#####################################\n-> Worst solution found: ", "red")) 
	print(worst_solution.get_parameters())
	print(colored('\033[1m'+"\n-> Best Individual: %.10f" % worst_solution.best_individual.fitness, "green")) 
	print(colored('\033[1m'+"\n-> Avg Fitness : %.10f" % worst_solution.avg_fitness, "blue")) 
	print(colored('\033[1m'+"\n-> Median Fitness : %.10f" % worst_solution.median_fitness, "blue"))