def load_generators(self, X_train, X_val, y_train, y_val):
    """Create and fully initialize the training and validation generators."""
    def _build(features, labels):
        # Every generator needs its indexes and genre binarizer loaded
        # before it can be used.
        gen = generator.Generator(features, labels)
        gen.load_indexes()
        gen.load_genre_binarizer()
        return gen

    self.train_generator = _build(X_train, y_train)
    self.val_generator = _build(X_val, y_val)
def main():
    """Center the game window, run the 30 FPS main loop, then shut pygame down."""
    user32 = ctypes.windll.user32
    screen_w, screen_h = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
    # Position the window centered (nudged 10 px up) before pygame creates it.
    os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (
        screen_w // 2 - WINDOW_SIZE[0] // 2,
        screen_h // 2 - WINDOW_SIZE[1] // 2 - 10)
    pygame.init()
    pygame.mixer.init()
    clock = pygame.time.Clock()
    state = game_state.State(True, Music())
    screen = pygame.display.set_mode(WINDOW_SIZE)
    level_gen = g.Generator()
    while state.running:
        clock.tick(30)
        controller.handle_events(state)
        if state.reset:
            # Carry the existing music object over into the fresh state.
            kept_music = state.music
            state = game_state.State(False, kept_music)
            level_gen = g.Generator()
        # Has to be before state.update()
        level_gen.update(state)
        state.update()
        graphics.update_screen(screen, state)
    pygame.quit()
def setUp(self):
    """Build two linear 1-D grids sharing one label struct: a SAMPLE grid
    (gdS, shift 5.0) and a BOUNDS grid (gdB, shift 0.0)."""
    n_cells = 4
    limit = 31
    label_params = cd.LbFixParams([n_cells], [limit])
    shift = 5.0
    bandwidth = 40.0
    step = bandwidth / n_cells

    # The label struct is built once and shared by both generators.
    label_gen = cd.LbGen(label_params)
    struct_lb = mst.MdimStruct([label_gen], [(0, 0)])

    def _make_grid(shift_val):
        # The two grids differ only in the point generator's shift.
        pt_params = cd.PtFixParams([step], [shift_val], [bandwidth])
        pt_gen = cd.PtGen(pt_params)
        struct_pt = mst.MdimStruct([pt_gen], [(0, 0)])
        return gd.Grid(gn.Generator(struct_lb, struct_pt))

    self.gdS = _make_grid(shift)          # SAMPLE grid
    self.gdB = _make_grid(shift - 5.0)    # BOUNDS grid (shift removed)
    return
def main():
    """Generate Holme-Kim graph instances over a sweep of edge parameter m
    and triangle probability p."""
    holme_basegen = generator.Generator(os.path.join(folder, 'holmefinal'))
    # NOTE(review): barabasi_basegen is created but never used below —
    # confirm whether Barabasi instances were also meant to be generated.
    barabasi_basegen = generator.Generator(os.path.join(folder, 'barabasidens'))
    # Maps edge parameter m to the density figure encoded in the file name.
    densities = {10: 20, 25: 40, 45: 60, 70: 80}
    for m in [10,25,45,70]:
        for p in range(0,10,2):
            for instance in range(1):
                for nodes in [90]:
                    # File name encodes nodes, density, probability, instance.
                    h_gen = holme_basegen.with_name('n%dd%02dp%02d.%03d' % (nodes, densities[m], p,instance))
                    # Third tuple element presumably a rewiring ratio m/nodes — TODO confirm.
                    h_gen.holme_kim(nodes,(m,float(p)/10.0,float(m)/float(nodes)))
def build_generator(self):
    """Build the generator network under its own TF variable scope."""
    n_nodes = self.graph.n_node
    init_embedding = self.node_embed_init_g
    with tf.compat.v1.variable_scope("generator"):
        self.generator = generator.Generator(n_nodes, init_embedding, config)
def resume(self):
    """Reopen all hardware handles (quenches, digitizer, RF generator,
    Faraday cup) and resume the monitor, updating self.progress per stage."""
    sys.stdout.write("Resuming.")
    self.progress = 'Opening quenches'
    self.qm = quench.QuenchManager()
    self.qm.open_quenches(self.open_quenches, \
                          atten_v = self.initial_atten_vs, \
                          is_on = self.quench_is_on)
    self.qm.cavities_off(self.off_quenches)
    self.progress = 'Opening digitizer'
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this lookup
    # presumably wants .loc; confirm the pandas version the project pins.
    self.digi = digitizer.Digitizer(self.run_dictionary \
                                    .ix['Digitizer Address for ' + \
                                        'Power Combiners'].Value,
                                    ch1_range = self.ch_range,
                                    ch2_range = self.ch_range,
                                    sampling_rate = self.sampling_rate,
                                    num_samples = self.num_samples)
    self.progress = 'Opening generator'
    self.gen = generator.Generator(offset_freq = self.offset_freq, \
                                   e_field = self.rf_e_field, \
                                   scan_range = self.rf_scan_range,
                                   calib = True)
    # Park the generator at 910.0 on offset channel 'A' (units presumably
    # MHz — TODO confirm) before resuming.
    self.gen.set_rf_frequency(910.0, offset_channel = 'A', change_power = True)
    self.fc = faradaycupclass.FaradayCup()
    self.progress = 'Resume complete'
    super(PhaseMonitor, self).resume()
    return
def test_generator():
    """Manual smoke test: stream batches from the Generator forever and
    print their shapes. Not an automated test — runs until interrupted."""
    synopses, genres = load_preprocessed_data(
        settings.INPUT_PREPROCESSED_FILMS)
    print(synopses[:20])
    print(genres[:20])
    X_train, X_val, y_train, y_val = train_test_split(
        synopses, genres, test_size=settings.VALIDATION_SPLIT,
        random_state=settings.SEED)
    c = generator.Generator(X_train, y_train)
    c.load_genre_binarizer()
    c.load_indexes()
    #a = g.generate().__next__()
    #g.get_train_val_generators()
    from time import sleep
    while 1:
        for a, b in c.generate():
            print(a[0].shape, a[1].shape, b.shape)
            # NOTE(review): this `continue` short-circuits the loop, making
            # the detailed decode/print block below unreachable (dead code).
            continue
            for i in range(a[0].shape[0]):
                s = str(c.to_synopsis(a[1][i]))
                # Long synopses are printed raw and skipped.
                if len(s) > 100:
                    print(s)
                    continue
                print(c.to_genre(a[0][i]), a[0][i].shape)
                print(c.to_synopsis(a[1][i]), len(a[1][i]), type(a[1][i][0]))
                print(c.index_to_word[b[i]], type(b[i]))
                print('_______________________________________')
def build_generator(self):
    """Instantiate the relational generator network from stored init state."""
    #with tf.variable_scope("generator"):
    gen_kwargs = {
        'n_node': self.n_node,
        'n_relation': self.n_relation,
        'node_emd_init': self.node_embed_init_g,
        'relation_emd_init': None,
    }
    self.generator = generator.Generator(**gen_kwargs)
def __init__(self, cfg): ''' Store the configuration object, create a L{Generator} object, and set the internal trace number to 0. @param cfg: The configuration object to use @type cfg: L{Config} object ''' # The config object used for setup info self.cfg = cfg # The logging object self.log = logging.getLogger(__name__) # The progress bar object self.pr = None # The current trace number self.tracenum = 0 # The generator object for creating fuzzed values self.generator = generator.Generator(self.cfg) # The Monitor object for testing fuzzed traces self.monitor = None # Boolean indicating if pointers are fair game self.fuzz_pointers = True # String indicating if snapshots should have their tags fuzzed # one by one ("sequential") or all at once ("simultaneous") self.snapshot_mode = None # String indicating if traces should have their snapshots fuzzed # one by one ("sequential") or all at once ("simultaneous") self.trace_mode = None
def main():
    """Generate two labelled Gaussian point clouds (outer sphere label 1,
    inner sphere label 0), split them 80/20 into train/test, and train the
    network for 50 passes.

    Command-line args: dimensionality, points-per-sphere, outer mu/sigma,
    inner mu/sigma (all ints).
    """
    g = generator.Generator()
    Dimensionality = int(sys.argv[1])
    Num_Points = int(sys.argv[2])
    OS_mu = int(sys.argv[3])
    OS_sigma = int(sys.argv[4])
    IS_mu = int(sys.argv[5])
    IS_sigma = int(sys.argv[6])

    # Generating the outer sphere points (label 1) and inner sphere points (label 0)
    OuterSphereArray = g.generate(OS_mu, OS_sigma, Dimensionality, Num_Points, 1)
    InnerSphereArray = g.generate(IS_mu, IS_sigma, Dimensionality, Num_Points, 0)

    # Shuffle and split 80/20. Features are the first `Dimensionality`
    # columns of each row; the one-hot label is the last two columns.
    AllData = np.append(OuterSphereArray, InnerSphereArray, axis=0)
    np.random.shuffle(AllData)
    TrainingRows = int(math.floor((Num_Points * 2) * 0.8))
    # Vectorized slicing replaces the original per-row copy loop; .copy()
    # detaches each split from AllData, matching the old copy semantics.
    TrainingDataPoints = AllData[:TrainingRows, :Dimensionality].copy()
    TrainingLabels = AllData[:TrainingRows, -2:].copy()
    TestingDataPoints = AllData[TrainingRows:, :Dimensionality].copy()
    TestingLabels = AllData[TrainingRows:, -2:].copy()

    BatchSize = 100
    num_Nodes_HL1 = 100
    num_Output_Nodes = 2
    Tester = Tester_np_Array.Tester_np_Array()
    Trainer = Trainer_np_Array.Trainer_np_Array(Dimensionality, num_Nodes_HL1,
                                                num_Output_Nodes)
    # NOTE(review): the layer dicts (and Tester above) are currently unused —
    # kept in case construction has side effects; confirm and prune.
    HiddenLayer1Dict, OutputLayerDict = Trainer.get_Layers()
    for _ in range(50):
        Trainer.Train(TrainingDataPoints, TrainingLabels, BatchSize,
                      TestingDataPoints, TestingLabels)
def __init__(self, channels, width, heigth, num_classes: int,
             embed_size: int, latent_dim: int = 100, lr: float = 0.002,
             b1: float = 0.5, b2: float = 0.999, batch_size: int = 1024,
             **kwargs):
    """Build the conditional GAN module: generator, discriminator, and a
    fixed validation noise batch.

    NOTE(review): `heigth` is a misspelling of `height`, but renaming it
    would break keyword callers — flagging only.
    """
    super().__init__()
    # Lightning records all constructor args into self.hparams.
    self.save_hyperparameters()

    # networks
    data_shape = (channels, width, heigth)
    self.generator = generator.Generator(
        num_classes=num_classes,
        embed_size=embed_size,
        latent_dim=self.hparams.latent_dim,
        img_shape=data_shape,
        output_dim=int(np.prod(data_shape)))
    self.discriminator = discriminator.Discriminator(
        img_shape=data_shape,
        output_dim=int(np.prod(data_shape)),
        num_classes=num_classes,
    )

    # Fixed noise used for validation-time sampling; example input for tracing.
    self.validation_z = torch.rand(batch_size, self.hparams.latent_dim)
    self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
def __init__(self, vocab_size, batch_size, pre_gen_epochs, pre_dis_epochs,
             gan_epochs, generate_sum, sequence_len, lr, real_file, fake_file,
             eval_file, update_rate):
    """Store training hyper-parameters and build the three networks —
    discriminator, generator, and oracle target LSTM — moving each onto
    CUDA when available.
    """
    super(Solver, self).__init__()
    # Bug fix: the attribute was misspelled `vocal_size`. The correctly
    # spelled attribute is added; the old name is kept as an alias so any
    # existing reader of `self.vocal_size` keeps working.
    self.vocab_size = vocab_size
    self.vocal_size = vocab_size
    self.batch_size = batch_size
    self.pre_gen_epochs = pre_gen_epochs
    self.pre_dis_epochs = pre_dis_epochs
    self.gan_epochs = gan_epochs
    self.generate_sum = generate_sum
    self.sequence_len = sequence_len
    self.lr = lr
    self.real_file = real_file
    self.fake_file = fake_file
    self.eval_file = eval_file
    self.update_rate = update_rate
    self.discriminator = discriminator.Discriminator(
        sequence_len, vocab_size, DisParams.emb_dim, DisParams.filter_sizes,
        DisParams.num_filters, DisParams.dropout)
    self.generator = generator.Generator(vocab_size, GenParams.emb_dim,
                                         GenParams.hidden_dim,
                                         GenParams.num_layers)
    # The target LSTM acts as the fixed oracle the generator is trained against.
    self.target_lstm = target_lstm.TargetLSTM(vocab_size, GenParams.emb_dim,
                                              GenParams.hidden_dim,
                                              GenParams.num_layers)
    self.discriminator = util.to_cuda(self.discriminator)
    self.generator = util.to_cuda(self.generator)
    self.target_lstm = util.to_cuda(self.target_lstm)
def build_generator(self):
    """Create the generator network inside the "generator" variable scope."""
    init_kwargs = {
        'n_node': self.n_node,
        'node_emd_init': self.node_embed_init_g,
        'node_features': self.node_feature_init,
    }
    with tf.variable_scope("generator"):
        self.generator = generator.Generator(**init_kwargs)
def post(file_name, file_path, length):
    """Build a Markov chain from file_path and submit one generated sentence."""
    # TODO: Error Checking if sentence > 300 characters, put sentence in arg text=sentence
    chain = Gen.Generator(file_path)
    chain.makeChain()
    generated = chain.makeSentence(length)
    # Title is "[<file_name>]" followed by the generated sentence.
    title = '[{0}]'.format(file_name) + generated
    r.submit('uljon', title, text='')
    return
def __init__(self, data, rules, grammar, use_memory=False, memory={}):
    # NOTE(review): mutable default `memory={}` is shared across calls —
    # confirm callers never mutate it; a None sentinel would be safer.
    self.generator = generator.Generator(data, rules)
    self.interpreter = interpreter.Interpreter(grammar)
    if use_memory:
        self.interpreter.use_memory()
        # NOTE(review): `iteritems` is Python 2 only, and the statement
        # below is truncated in the source (`self.interpreter.`) — it does
        # not parse. The intended memory-seeding call must be recovered
        # from version control.
        for k, v in memory.iteritems():
            self.interpreter.
def generate(self):
    """Populate every table with generated rows, then start."""
    # (table name, row count) in the exact order the schema requires.
    tables = [
        ('band', 100),
        ('song', 100),
        ('instrument', 100),
        ('band_participants', 100),
        ('band_song', 10),
        ('song_tab', 100),
        ('tab_instrument', 10),
        ('tab_part', 10),
    ]
    for table, count in tables:
        generator.Generator().generate(table, count)
    self.start()
def interface():
    """Load the trained network and generator utilities, then start the
    interactive prediction loop."""
    settings.logger.info("Starting user interface...")
    net = model.Network()
    net.build()
    net.load_weights()
    # No data is needed here — the generator is used only for its
    # index/binarizer lookups.
    gen = generator.Generator(None, None)
    gen.load_indexes()
    gen.load_genre_binarizer()
    get_predictions(gen, net)
def handle_read(self):
    """Read one request from the socket and reply with a generated response."""
    raw = self.recv(8192)
    if not raw:
        # Nothing received (connection closing) — nothing to answer.
        return
    tokens = parser.Parser().parse_request(raw)
    print(tokens)
    response = generator.Generator(tokens).gen_response()
    self.send(response)
def compile(self, sexp, stl=None):
    '''
    Return the content of a .pyc file: magic number, compile timestamp,
    then the marshalled code object generated from `sexp`.

    NOTE(review): `'%s%s%s' %` over marshal.dumps output only concatenates
    bytes correctly on Python 2; under Python 3 it would embed a b'...'
    repr. Confirm the target interpreter.
    '''
    g = generator.Generator()
    code = g.generate_module(sexp, stl=stl)
    return '%s%s%s' % (self.MAGIC_PYTHON, self.compile_timestamp(), marshal.dumps(code))
def __init__(self, address, in_dir, out_dir):
    """Set up the dev HTTP server: resolve directories, create the site
    Generator, and build the site once before serving."""
    BaseHTTPServer.HTTPServer.__init__(self, address, DevHTTPRequestHandler)
    self._out_dir = os.path.abspath(out_dir)
    in_dir = os.path.abspath(in_dir)
    # Generator renders the input tree with the named plugins.
    self._generator = generator.Generator(in_dir, ['HtmlJinja', 'CssYaml'])
    # Timestamp of the last generation; 0 presumably forces the first
    # RefreshSite call to rebuild — verify against RefreshSite.
    self._last_generation = 0
    self._previous_cwd = os.getcwd()
    self.RefreshSite()
def assemble(self, asms):
    """Run the full assembly pipeline over asms: tokenize, parse, build the
    symbol table, and generate machine codes."""
    self.tokenizer = tokenizer.Tokenizer()
    tokenized = [self.tokenizer.tokenize(line) for line in asms]
    self.parser = parser.Parser()
    parsed = self.parser.parse(tokenized)
    self.symbol_table = symbol_table.SymbolTable()
    self.symbol_table.generate(parsed)
    self.generator = generator.Generator()
    return self.generator.generate(parsed, self.symbol_table)
def run_gen_basic(domain_raw, size, verbose=False):
    """Generate one individual of `size` for the raw domain; optionally
    print its expression string and raw fitness."""
    goal, gamma, raw_fitness, count_evals, cache = domain_raw()
    builder = generator.Generator(gamma)
    # Fixed seed keeps the generated individual reproducible.
    random.seed(5)
    individual = builder.gen_one(size, goal)
    assert individual is not None
    expr = individual.eval_str()
    fitness = raw_fitness(individual)
    if not verbose:
        return
    print(expr)
    print(fitness)
def testGetUnionsAddsOrdinals(self):
    """Fields without explicit ordinals get consecutive values, continuing
    from the last explicitly assigned ordinal."""
    module = mojom.Module()
    union = module.AddUnion('a')
    union.AddField('a', mojom.BOOL)
    union.AddField('b', mojom.BOOL)
    union.AddField('c', mojom.BOOL, ordinal=10)
    union.AddField('d', mojom.BOOL)
    gen = generator.Generator(module)
    union = gen.GetUnions()[0]
    ordinals = [field.ordinal for field in union.fields]
    # Fix: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual([0, 1, 10, 11], ordinals)
def generate_(model_file_name):
    """Interactively prompt for a prime string, lyric length and temperature,
    then generate lyrics with the model loaded from model_file_name.

    NOTE(review): `answer` is read once and never updated inside the loop, so
    unless the very first reply is 'exit' this loops forever — confirm
    whether a re-prompt at the end of each iteration was intended.
    """
    print('>>> Start generating lyrics? [y/n]')
    answer = input()
    gen = generator.Generator(model_file_name)
    while answer != 'exit':
        print('>>> To generate a lyrics, please define:')
        print('>>> a prime text for the lyrics')
        p_str = input()
        print('>>> the length of the lyrics you want')
        # Fix: renamed from `len`, which shadowed the builtin.
        predict_len = int(input())
        print('>>> temperature? [higher is more chaotic, the default is 0.8]')
        temp = float(input())
        gen.generate(prime_str=p_str,
                     predict_len=predict_len,
                     temperature=temp,
                     cuda=False)
def test_empty_init(self):
    """Test the default generator creation"""
    # 1. Create a generator with no arguments
    gen = generator.Generator()
    # 2. Verify every attribute carries its documented default
    for attr, expected in (('factor', generator.FACTOR_A),
                           ('modulo', generator.MODULO),
                           ('start', None),
                           ('value', None),
                           ('count', 0),
                           ('name', None),
                           ('part2', False)):
        self.assertEqual(getattr(gen, attr), expected)
def main(argv):
    """Jointly train the rationale model: the generator selects input tokens,
    the encoder predicts the label from the masked input, and selection /
    continuity costs regularize the mask."""
    del argv
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    embeddings, word_to_indx = read_embedding(FLAGS.embedding)
    x_train, y_train = read_data(FLAGS.train_data, word_to_indx)
    # NOTE(review): the dev split is loaded but never evaluated below.
    x_dev, y_dev = read_data(FLAGS.dev_data, word_to_indx)
    loader = DataLoader(BeersReviewDataSet(x_train, y_train),
                        batch_size=32,
                        shuffle=True)
    enc = encoder.Encoder(embeddings, FLAGS.hidden_dim_encoder, len(y_train[0]),
                          FLAGS.drop_out_prob_encoder)
    enc.to(device)
    gen = generator.Generator(embeddings, FLAGS.hidden_dim_generator,
                              FLAGS.drop_out_prob_generator)
    gen.to(device)
    # One optimizer over both networks so the generator learns through the
    # encoder's prediction loss.
    optimizer = torch.optim.Adam([{
        'params': enc.parameters()
    }, {
        'params': gen.parameters()
    }], lr=FLAGS.learning_rate)
    for i in range(FLAGS.epochs):
        print('-------------\nEpoch {}:\n'.format(i))
        losses = []
        obj_losses = []
        selection_costs = []
        continuity_costs = []
        for _, batch in enumerate(loader):
            x, labels = batch[0].to(device), batch[1].to(device)
            optimizer.zero_grad()
            # Token-selection mask produced by the generator (shape/binary
            # semantics assumed — TODO confirm against generator.select).
            selection = gen.select(gen(x))
            selection_cost, continuity_cost = gen.loss(selection, x)
            selection_costs.append(selection_cost.tolist())
            continuity_costs.append(continuity_cost.tolist())
            selection = torch.squeeze(selection)
            # Mask the input so the encoder only sees selected tokens.
            x = x * selection
            logit = torch.squeeze(enc(x))
            loss = F.mse_loss(logit, labels.float())
            obj_losses.append(loss.item())
            # Add the two regularizers before backprop.
            loss += FLAGS.lambda_selection_cost * selection_cost
            loss += FLAGS.lambda_continuity_cost * continuity_cost
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
        print('Loss: ', sum(losses) / len(losses))
        print('Loss for prediction: ', sum(obj_losses) / len(obj_losses))
        print('Selection cost: ', sum(selection_costs) / len(selection_costs))
        print('Continuity cost: ', sum(continuity_costs) / len(continuity_costs))
def test_values_init(self):
    """Test the generator creation with values"""
    # 1. Create a generator with explicit values
    gen = generator.Generator(factor=12345,
                              modulo=67890,
                              start=START_A,
                              name='George')
    # 2. Verify each attribute reflects the constructor arguments
    #    (value mirrors start; count and part2 keep their defaults)
    for attr, expected in (('factor', 12345),
                           ('modulo', 67890),
                           ('start', START_A),
                           ('value', START_A),
                           ('count', 0),
                           ('name', 'George'),
                           ('part2', False)):
        self.assertEqual(getattr(gen, attr), expected)
def __init__(self):
    """Initialize timer/counter state, the external process pipe, and the
    generator."""
    self.remotestate = 0
    # Presumably the line delimiter for remote commands — verify protocol.
    self.delimiter = '\r'
    self.mode = 'timer'
    self.preset = .0
    self.starttime = time.time()
    self.endtime = time.time()
    self.counting = False
    self.mypaused = False
    self.pausestart = 0
    # Accumulated paused duration (seconds, float).
    self.pausedTime = 0.
    self.threshold = 0
    self.thresholdcounter = 1
    self.proc = MyPIPE()
    self.generator = generator.Generator()
def main():
    """Scan, parse and compile the 'signaltest' source, printing each
    stage's diagnostics along the way."""
    scanner = sc.Scanning()
    scanner.scan("signaltest")
    # scan.scan("testerrors")
    # Dump all lexer output tables in the original order.
    for report in (scanner.print_lexemes, scanner.print_key_words,
                   scanner.print_consts, scanner.print_idents,
                   scanner.print_errors):
        report()
    syntax = parser.Parser(scanner.get_lexemes(), scanner.get_key_words(),
                           scanner.get_consts(), scanner.get_idents(),
                           scanner.get_complex())
    syntax.parsing()
    codegen = generator.Generator()
    codegen.compile(syntax.get_tree().get_root())
    print(codegen.text)
    print(codegen.errors)
def main(_):
    """Restore the generator from its checkpoint, run the input image
    through it, and render the stylized output."""
    image, _ = load_img(INPUT_DIR)
    image = tf.convert_to_tensor(image, dtype=tf.float32)
    # Add the batch dimension the network expects.
    image = tf.expand_dims(image, axis=0)
    with tf.Session() as sess:
        with tf.variable_scope('generator'):
            net = generator.Generator()
            net.build(tf.convert_to_tensor(image))
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, CKPT_DIR)
        result = sess.run(net.output)
        render(result, OUTPUT_DIR)