async def unclaim(ctx):
    """Release the knight currently claimed by the invoking user.

    Announces the unclaim, removes the claim record, and persists the data.
    Tells the user off if they have no active claim.
    """
    data = utility.load()
    claimant = ctx.author.name
    if claimant not in data['claims']:
        await ctx.send("Thou has not claimed a knight")
        return
    # Announce first, then drop the claim and save (same order as before).
    await ctx.send(claimant + " hath unclaimed Sir " + data['claims'][claimant])
    del data['claims'][claimant]
    utility.save(data)
async def claim(ctx, name):
    """Claim the knight `name` for the invoking user.

    Rejects unknown knights and users who already hold a claim;
    otherwise records the claim and saves the data store.
    """
    data = utility.load()
    if name not in data['knights']:
        await ctx.send("I know not this Sir " + name)
        return
    claimant = ctx.author.name
    if claimant in data['claims']:
        await ctx.send("Thou hath already claimed Sir " + data['claims'][claimant])
    else:
        data['claims'][claimant] = name
        await ctx.send(claimant + " has claimed Sir " + name)
        utility.save(data)
async def uncheck(ctx, skill):
    """Clear the experience-check flag on `skill` for the caller's knight.

    Searches the knight's trait categories in the original order
    (personality, passions, statistics, skills); the first category
    containing `skill` has its 'check' flag reset and the data is saved.
    Suggests the closest known skill name when nothing matches.
    """
    data = utility.load()
    if ctx.author.name not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][ctx.author.name]
    knight = data['knights'][name]
    # The original repeated this block four times, once per category;
    # a single loop over the categories (same lookup order) replaces it.
    for category in ('personality', 'passions', 'statistics', 'skills'):
        if skill in knight[category]:
            knight[category][skill]['check'] = False
            utility.save(data)
            await ctx.send("Unchecked " + skill + " for Sir " + name)
            return
    # Consistency fix: add the missing space before the quoted suggestion,
    # matching the wording used by the `set` command.
    await ctx.send("Sir " + name + " does not have " + skill +
                   ". Did you mean " + '\'' + closestSkill(skill) + "\'?")
async def remove_passion(ctx, passion):
    """Remove `passion` from the caller's claimed knight and save.

    Reports an error if the caller has no claim or the knight lacks
    the passion.
    """
    data = utility.load()
    if ctx.author.name not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][ctx.author.name]
    knight = data['knights'][name]
    if passion in knight['passions']:
        knight['passions'].pop(passion)
        utility.save(data)
        await ctx.send("Removed " + passion + " from Sir " + name)
    else:
        # BUG FIX: ctx.send is a coroutine; the original omitted `await`,
        # so this error message was never actually sent.
        await ctx.send("Sir " + name + " does not have the passion " + passion)
async def remove_note(ctx, note):
    """Delete note `note` from the caller's claimed knight and save.

    Reports an error if the caller has no claim or the note does not exist.
    """
    data = utility.load()
    if ctx.author.name not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][ctx.author.name]
    knight = data['knights'][name]
    if note in knight['notes']:
        knight['notes'].pop(note)
        utility.save(data)
        await ctx.send("Removed note " + str(note) + " from Sir " + name)
    else:
        # BUG FIX: ctx.send is a coroutine; the original omitted `await`,
        # so this error message was never actually sent.
        await ctx.send("Sir " + name + " does not have that note")
async def note(ctx, note, value):
    """Add or overwrite a note on the caller's claimed knight and save."""
    data = utility.load()
    if ctx.author.name not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][ctx.author.name]
    knight = data['knights'][name]
    # The original branched on `note in knight['notes']` but both branches
    # performed the identical assignment; a single statement covers both.
    knight['notes'][note] = str(value)
    utility.save(data)
    await ctx.send("Note added for Sir " + name + " - " + str(note) + ": " + str(value))
async def add_passion(ctx, passion, value):
    """Add `passion` (or update its value) on the caller's claimed knight.

    New passions start unchecked; existing ones keep their check flag.
    """
    data = utility.load()
    player = ctx.author.name
    if player not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][player]
    knight = data['knights'][name]
    entry = knight['passions'].get(passion)
    if entry is None:
        knight['passions'][passion] = {'check': False, 'value': int(value)}
    else:
        entry['value'] = int(value)
    utility.save(data)
    await ctx.send("Sir " + name + " has " + str(value) + " " + passion)
def test_network(training_file, testing_file, k=10, verbose=False, save=False):
    """Run one kmeans train/test cycle and time it.

    Returns (test_time_seconds, (mse, mss, ent, seed), c_matrix); when
    `save` is true, also persists the run's statistics via helper.save.
    """
    print(f"Beginning training and testing of {training_file} and {testing_file}...")
    start = time()
    mse, mss, ent, seed, c_matrix, non_convergence_inst = kmeans(
        training_file, testing_file, k, verbose, save)
    result = (mse, mss, ent, seed)
    test_time = time() - start
    print(f'...ending training and testing of {training_file} and {testing_file}, process completed'
          f' in {helper.translate_seconds(test_time)} (HH:MM:SS).\n')
    if save:
        helper.save(helper.translate_seconds(test_time), k, mse, mss, ent,
                    seed, c_matrix, non_convergence_inst)
    return test_time, result, c_matrix
async def set(ctx, skill, value):
    """Set a value on the caller's claimed knight.

    `skill` may be a personality trait, skill, statistic, or an existing
    passion. Personality traits come in opposed pairs that always sum to
    20, so setting one side also updates its mirror.

    NOTE(review): the name shadows the builtin `set`, but it is the bot
    command's public name, so it is kept.
    """
    data = utility.load()
    if ctx.author.name not in data['claims']:
        await ctx.send("Thou must first claim a knight")
        return
    name = data['claims'][ctx.author.name]
    knight = data['knights'][name]
    if skill in personality_mirror:
        # Initialize both sides of the trait pair on first use.
        if skill not in knight['personality']:
            knight['personality'][skill] = {'check': False, 'value': 10}
            knight['personality'][personality_mirror[skill]] = {
                'check': False,
                'value': 10
            }
        knight['personality'][skill]['value'] = int(value)
        knight['personality'][personality_mirror[skill]]['value'] = 20 - int(value)
        utility.save(data)
        await ctx.send("Sir " + name + " has " + str(value) + " " + skill +
                       " and " +
                       str(knight['personality'][personality_mirror[skill]]['value']) +
                       " " + personality_mirror[skill])
        return
    # The skills/statistics/passions branches in the original were
    # copy-pasted; resolve the category once and share the update logic.
    if skill in skills:
        category = 'skills'
    elif skill in statistics:
        category = 'statistics'
    elif skill in knight['passions']:
        category = 'passions'
    else:
        await ctx.send(
            skill +
            " is not a valid trait, skill, passion, or statistic. Did you mean "
            + '\'' + closestSkill(skill) + "\'?")
        return
    if skill not in knight[category]:
        # Only reachable for skills/statistics; passions matched by presence.
        knight[category][skill] = {'check': False, 'value': 10}
    knight[category][skill]['value'] = int(value)
    utility.save(data)
    await ctx.send("Sir " + name + " has " + str(value) + " " + skill)
async def glorify(ctx, *argv):
    """Append a glory event to a knight's history.

    Expects at least (name, glory) in argv; the event description is taken
    from the last argument. NOTE(review): arguments between the second and
    the last are ignored, as in the original — confirm whether multi-word
    events should instead be joined.
    """
    # Robustness fix: the original indexed argv blindly and raised
    # IndexError when fewer than two arguments were supplied.
    if len(argv) < 2:
        await ctx.send("Usage: !glorify <name> <glory> <event>")
        return
    # Original quirk preserved: with exactly two args, event == glory.
    name, glory, event = argv[0], argv[1], argv[-1]
    data = utility.load()
    if name in data['knights']:
        data['knights'][name]['history'].append({
            'glory': int(glory),
            "reason": event
        })
        utility.save(data)
        await ctx.send("May the deeds of Sir " + name +
                       " be celebrated for countless generations")
    else:
        await ctx.send(
            "I could not find that name. Use !knight to add a new knight or check thine spelling"
        )
def train(trainloader, generator, discriminator, loss, optimizer_g, optimizer_d):
    """Train a GAN for 50 epochs, logging losses and evaluation scores.

    Alternates a discriminator update (real + detached-fake batches) and a
    generator update per minibatch. After each epoch it evaluates inception
    and MMD scores, appends them to logs/eval.csv, plots samples from a
    fixed noise grid, and saves losses and model checkpoints.
    Relies on module globals: z_dim, cuda_available, sample_noise,
    inception_score, eval_mmd, utility.
    """
    ctr = 0
    minibatch_disc_losses = []
    minibatch_gen_losses = []
    # Fixed noise so the per-epoch sample grid (8x8) is comparable over time.
    fixed_noise = Variable(torch.FloatTensor(8 * 8, z_dim, 1, 1).normal_(0, 1),
                           volatile=True)
    if cuda_available:
        print("CUDA is available!")
        # BUG FIX: .cuda() is not in-place — the original discarded its
        # return value, leaving fixed_noise on the CPU.
        fixed_noise = fixed_noise.cuda()
    # BUG FIX: the original wrote via print(file=open(..., "a")), leaking a
    # file handle on every write; use a context manager instead.
    with open("logs/eval.csv", "a") as log_file:
        print("Epoch, Inception Score, MMD Score", file=log_file)
    for epoch in range(50):
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            ctr += 1
            if cuda_available:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            zeros = Variable(torch.zeros(inputs.size(0)))
            ones = Variable(torch.ones(inputs.size(0)))
            if cuda_available:
                zeros, ones = zeros.cuda(), ones.cuda()
            # ---- Discriminator update ----
            minibatch_noise = sample_noise(inputs.size(0), z_dim)
            optimizer_d.zero_grad()
            # Train with real examples.
            d_real = discriminator(inputs)
            if discriminator.model_name == 'DCGAN':
                d_real_loss = loss(d_real, ones)
            else:
                # Least-squares (LSGAN-style) real loss.
                d_real_loss = 0.5 * torch.mean((d_real - ones) ** 2)
            d_real_loss.backward()
            # Train with fakes; detach so gradients do not flow into the
            # generator during the discriminator update.
            fake = generator(minibatch_noise).detach()
            d_fake = discriminator(fake)
            # NOTE(review): unlike d_real_loss, this does not branch on
            # model_name — confirm the non-DCGAN path should not use
            # 0.5 * mean(d_fake ** 2) here.
            d_fake_loss = loss(d_fake, zeros)
            d_fake_loss.backward()
            minibatch_disc_losses.append(d_real_loss.data[0] + d_fake_loss.data[0])
            optimizer_d.step()
            # ---- Generator update ----
            optimizer_g.zero_grad()
            minibatch_noise = sample_noise(inputs.size(0), z_dim)
            d_fake = discriminator(generator(minibatch_noise))
            if generator.model_name == 'DCGAN':
                # Train the generator to fool the discriminator.
                g_loss = loss(d_fake, ones)
            else:
                g_loss = 0.5 * torch.mean((d_fake - ones) ** 2)
            g_loss.backward()
            optimizer_g.step()
            minibatch_gen_losses.append(g_loss.data[0])
            if ctr % 10 == 0:
                print("Iteration {} of epoch {}".format(ctr, epoch))
        # ---- End-of-epoch reporting and checkpointing ----
        print('Generator loss : %.3f' % (np.mean(minibatch_gen_losses)))
        print('Discriminator loss : %.3f' % (np.mean(minibatch_disc_losses)))
        inc_score = inception_score.evaluate(generator, z_dim, cuda=cuda_available)
        mmd_score = eval_mmd(generator, z_dim)
        print('MMD score : {}'.format(mmd_score))
        print('Inception score: {}'.format(inc_score))
        with open("logs/eval.csv", "a") as log_file:
            print("{}, {}, {}".format(epoch, inc_score, mmd_score), file=log_file)
        utility.plot_result(generator, fixed_noise, epoch)
        loss_name = "{0}_epoch{1}".format(generator.model_name, epoch)
        utility.save_losses(minibatch_disc_losses, minibatch_gen_losses, loss_name)
        utility.save(discriminator, generator, epoch)
async def knight(ctx, name):
    """Create a new knight record with Pendragon default values.

    Refuses to overwrite an existing knight. Defaults: every personality
    trait at 10, the four core passions at 15, statistics at 10, and the
    standard skill list at its book values; history and notes start empty.
    """
    data = utility.load()
    if name in data['knights']:
        await ctx.send("Sir " + name + " is already in the annals of history")
        return

    # Each trait/skill entry is an independent {'check', 'value'} dict.
    def entry(value):
        return {'check': False, 'value': value}

    personality_traits = [
        'chaste', 'lustful', 'energetic', 'lazy', 'forgiving', 'vengeful',
        'generous', 'selfish', 'honest', 'deceitful', 'just', 'arbitrary',
        'merciful', 'cruel', 'modest', 'proud', 'prudent', 'reckless',
        'spiritual', 'worldly', 'temperate', 'indulgent', 'trusting',
        'suspicious', 'valorous', 'cowardly',
    ]
    default_passions = ['fealty(lord)', 'love(family)', 'hospitality', 'honor']
    default_statistics = ['siz', 'dex', 'str', 'con', 'app']
    default_skill_values = {
        "battle": 10, "horsemanship": 10, "sword": 10, "lance": 10,
        "spear": 6, "dagger": 5, "awareness": 5, "boating": 1,
        "compose": 1, "courtesy": 3, "dancing": 2, "faerie lore": 1,
        "falconry": 3, "first aid": 10, "flirting": 3, "folklore": 2,
        "gaming": 3, "heraldry": 3, "hunting": 2, "intrigue": 3,
        "orate": 3, "play": 3, "read": 0, "recognize": 3,
        "religion": 2, "romance": 2, "singing": 2, "stewardship": 2,
        "swimming": 2, "tourney": 2,
    }
    data['knights'][name] = {
        'personality': {trait: entry(10) for trait in personality_traits},
        'passions': {passion: entry(15) for passion in default_passions},
        'statistics': {stat: entry(10) for stat in default_statistics},
        'skills': {skill: entry(v) for skill, v in default_skill_values.items()},
        'history': [],
        'notes': {},
    }
    utility.save(data)
    await ctx.send("Thus marks the chapter of Sir " + name +
                   " in the annals of history")
# Copyright (C) 2020 Andreas Pentaliotis # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Generate Module # Module to generate images using a trained generator. from keras.models import load_model import utility input_arguments = utility.parse_input_arguments(module="generate") generator = load_model(input_arguments.generator_path) images = utility.generate_images(generator, input_arguments.image_number) utility.save(images, input_arguments.output_path)
# SBDownloader entry script: prompt for a SpaceBattles thread URL, scrape
# the story, format it as HTML, and save it under the configured path.

# BUG FIX: `os` and `easygui` were used below but never imported, which
# raised NameError at runtime.
import os

import easygui
from bs4 import BeautifulSoup

import utility
from scraper import Story, HTMLFormatter
from utility import Settings

settings_file = "settings.dat"
settings = Settings(settings_file)

# Example threads:
# http://forums.spacebattles.com/threads/survival-of-the-fittest-worm-si.297753/
# http://forums.spacebattles.com/threads/to-go-a-viking-asoiaf-au.294304/
url = easygui.enterbox("URL:", "SBDownloader")

# Fetch the live page when the site is up; otherwise fall back to a local copy.
if not utility.is_site_down(url):
    doc = utility.get_page(url)
else:
    doc = utility.load("test.html")
soup = BeautifulSoup(doc, "html.parser")
story = Story.parse(soup)
story.download_messages()
fmt = HTMLFormatter()
doc = fmt.export_story(story, settings.author_only)
utility.mkdir(settings.download_path)
utility.save(doc, os.path.join(settings.download_path, story.clean_title + ".html"))
settings.store()
# Extract audio features and word/gender labels from every WAV file in the
# data set, with a progress bar, then persist the three arrays.
if __name__ == "__main__":
    files = os.listdir(DATA_SET_PATH)
    bar = progressbar.ProgressBar(
        maxval=len(files),
        widgets=[progressbar.Bar('=', '[', ']'), ' ',
                 progressbar.Percentage()])
    bar.start()
    print("Extacting features ...")
    features = []
    words_labels = []
    gender_lables = []
    # Filename convention (from the indexing below): first character encodes
    # the word label, second character the gender label, via Label_Map.
    for index, wav_name in enumerate(files):
        _, sig = wav.read(os.path.join(DATA_SET_PATH, wav_name))
        features.append(extract(sig))
        words_labels.append(Label_Map[wav_name[0]])
        gender_lables.append(Label_Map[wav_name[1]])
        bar.update(index + 1)
    save(features, WORDS_FEATURES, MODELS_PATH)
    save(words_labels, WORDS_LABLES, MODELS_PATH)
    save(gender_lables, GENDER_LABLES, MODELS_PATH)
    bar.finish()
    print("Done.")
def train(self, images, epochs, batch_size, saving_frequency, output_path):
    """Train the GAN on `images` for `epochs` epochs.

    Per minibatch: the discriminator is trained on half a batch of real
    images and half a batch of generated fakes, then the adversarial
    (stacked) model trains the generator on a full batch of noise with
    real labels. Per-epoch averages of the training statistics are
    accumulated and printed; every `saving_frequency` epochs a sample of
    generated images, the models, and training plots are saved under
    output_path/epoch-N.
    """
    batches = int(images.shape[0] / batch_size)
    # Augmented real-image stream; each draw yields half a batch.
    training_generator = self._data_generator.flow(images,
                                                   batch_size=int(
                                                       batch_size / 2))
    discriminator_history_real = []
    discriminator_history_fake = []
    generator_history = []
    for epoch in range(1, epochs + 1):
        discriminator_statistics_real = []
        discriminator_statistics_fake = []
        generator_statistics = []
        for _ in range(batches):
            # Select a mini batch of real images randomly, with size half of batch size. Account for the
            # case where the size of images is not divisible by batch size.
            real_images = training_generator.next()
            if real_images.shape[0] != int(batch_size / 2):
                # Short final batch from the generator — draw again so the
                # discriminator always sees exactly half a batch.
                real_images = training_generator.next()
            real_labels = np.ones((int(batch_size / 2), 1))
            # Generate fake images from noise, with size half of batch size.
            noise = np.random.normal(0, 1, (int(batch_size / 2), 100))
            fake_images = self._generator.predict(noise)
            fake_labels = np.zeros((int(batch_size / 2), 1))
            # Train the discriminator.
            discriminator_statistics_real.append(
                self._discriminator.train_on_batch(real_images, real_labels))
            discriminator_statistics_fake.append(
                self._discriminator.train_on_batch(fake_images, fake_labels))
            # Sample data points from the noise distribution, with size of batch size and create
            # real labels for them.
            noise = np.random.normal(0, 1, (batch_size, 100))
            real_labels = np.ones((batch_size, 1))
            # Train the generator (through the frozen-discriminator stack).
            generator_statistics.append(
                self._adversarial.train_on_batch(noise, real_labels))
        # Epoch-level averages of the per-minibatch statistics.
        discriminator_history_real.append(
            np.average(discriminator_statistics_real, axis=0))
        discriminator_history_fake.append(
            np.average(discriminator_statistics_fake, axis=0))
        generator_history.append(np.average(generator_statistics, axis=0))
        # Print the statistics for the current epoch.
        print()
        print("Epoch %d/%d" % (epoch, epochs))
        utility.print_line()
        print(
            "Discriminator: [Loss real: %f | Accuracy real: %.2f%% | Loss fake: %f | Accuracy fake: %.2f%%]"
            % (discriminator_history_real[-1][0],
               100 * discriminator_history_real[-1][1],
               discriminator_history_fake[-1][0],
               100 * discriminator_history_fake[-1][1]))
        print("Generator: [Loss: %f]" % generator_history[-1])
        if epoch % saving_frequency == 0:
            # Save a sample of fake images, the generator, the discriminator and the training history up
            # to the current epoch.
            saving_directory_path = "{}/epoch-{}".format(
                output_path, str(epoch))
            # NOTE(review): this rebinds the `images` parameter; the real
            # training data is no longer reachable after the first save.
            images = utility.generate_images(self._generator, 10)
            utility.save(images, saving_directory_path)
            self.save_models(saving_directory_path)
            self._save_training_plots(saving_directory_path,
                                      discriminator_history_real,
                                      discriminator_history_fake,
                                      generator_history)