def run(self):
    if self.main_services.settings.generator:
        Generator.main(self)
    elif self.main_services.settings.trainer:
        Trainer.main(self)
    elif self.main_services.settings.analyzer:
        Analyzer.main(self)
    elif self.main_services.settings.load_simulator:
        simulator: Simulator = Simulator(self.main_services)
        simulator.start()

    if self.main_services.settings.clear_cache:
        self.main_services.resources.cache_dir.clear()
def test_sponsor_is_on_same_day_as_sponsored(self):
    sponsor = Member(first_name="sponsor", sos_percentage=50,
                     family=100, sponsor_for_family=200)
    sponsored = Member(first_name="sponsored", sos_percentage=50,
                       family=200, sponsored_by_family=100)
    members = self._large_list_of_members
    members.extend([sponsor, sponsored])
    generator = Generator(members, self._basic_mock_work_day_service, 10)
    generator.generate()
    sos_days = generator.sos_days

    found_sponsor = False
    found_sponsored = False
    for day in sos_days:
        if sponsor in day.members:
            found_sponsor = True
        if sponsored in day.members:
            found_sponsored = True
    self.assertTrue(found_sponsor, "Sponsor not in list")
    self.assertTrue(found_sponsored, "Sponsored not in list")

    # The sponsor and the sponsored member must always share the same day.
    for day in sos_days:
        if sponsor in day.members or sponsored in day.members:
            self.assertListEqual(sorted(day.members),
                                 sorted([sponsor, sponsored]))
            return
    self.fail("Sponsor was not in list of days")
def test_when_sponsor_has_only_50_percent_sos(self):
    for i in range(0, 10):  # This test is flaky so repeat it
        sponsor = Member(first_name="sponsor", sos_percentage=50,
                         family=100, sponsor_for_family=200)
        sponsored = Member(first_name="sponsored", sos_percentage=100,
                           family=200, sponsored_by_family=100)
        members = self._large_list_of_members
        members.extend([sponsor, sponsored])
        generator = Generator(members, self._basic_mock_work_day_service, 10)
        generator.generate()
        sos_days = generator.sos_days

        found_sponsor = 0
        found_sponsored = 0
        for day in sos_days:
            if sponsor in day.members:
                found_sponsor += 1
            if sponsored in day.members:
                found_sponsored += 1
        self.assertEqual(
            found_sponsor, 1,
            "Sponsor should only have 1 SOS since they have 50% SOS")
        self.assertEqual(
            found_sponsored, 1,
            "Sponsored should only have 1 SOS since they need to be with sponsor")
def __init__(self, batch_size, text_processor):
    # initialize variables
    self.batch_size = batch_size
    self.text_processor = text_processor
    self.image_processor = ImageProcessor()
    self.embedding = self.text_processor.embedding
    self.dictionary = self.text_processor.dictionary
    self.generator = Generator(self.batch_size, len(self.dictionary))
    self.generator.build_generator()
    infer_wrapper = InferenceWrapper(self.generator, self.embedding)
    self.caption_generator = CaptionGenerator(
        infer_wrapper,
        self.dictionary,
        beam_size=1,
        length_normalization_factor=0)

    # restore model from checkpoint
    self.saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    self.saver.restore(
        self.sess,
        os.path.join(GenInferencer.checkpoint_dir,
                     GenInferencer.checkpoint_file_name))
def get_generator(self, users, items, y_train):
    return Generator(users, items, y_train,
                     batch_size=self.batch_size,
                     n_item=self.n_items,
                     shuffle=False)
def main(argv: typing.List[str]) -> int:
    parser = Parser()
    groups = parser.parse()
    generator = Generator(groups)
    generator.generate(out_dir)
    return 0
def test_members_family_not_allowed_more_than_once_in_holy_period(self):
    m1 = Member(family=1)
    m2 = Member(family=1)
    m3 = Member(family=1)
    generator = Generator([m1, m2, m3], self._basic_mock_work_day_service, 10)
    generator.sos_days.append_member(m1)
    generator.sos_days.append_member(m2)
    self.assertTrue(generator._is_members_family_in_holy_period(m3))
def __init__(self, pid, probes, time_table):
    WorkerThread.__init__(self,
                          target=lambda: self._bpf.perf_buffer_poll(100),
                          on_die=lambda: self._bpf.cleanup())
    self._pid = pid
    self._probes = [Probe(probe) for probe in probes]
    self._generator = Generator()
    self._lost = dict()
    self.time_table = time_table
    self._init_bpf()
    WorkerThread.__init__(self, target=self._work_gen())
def test_member_is_not_allowed_to_have_sos_in_end_grace_period(self):
    m1 = Member(sos_percentage=50, family=1)
    m2 = Member(sos_percentage=50, family=2)
    m3 = Member(sos_percentage=50, family=3, end_date="2017-01-03")
    generator = Generator([m1, m2, m3], self._basic_mock_work_day_service, 0)
    generator.sos_days.append_member(m1)
    generator.sos_days.append_member(m2)
    generator.sos_days.append_member(m3)
    self.assertFalse(m3 in generator.sos_days.members)
def test_generator_retries_if_deadlock_occurs(self):
    m1 = Member(family=1)
    m2 = Member(family=1)
    generator = Generator([m1, m2], self._basic_mock_work_day_service, 10,
                          number_of_retries=10)
    with self.assertRaises(NotPossibleToGenerateSosError):
        generator.generate()
    self.assertEqual(generator.number_of_retries_done, 10)
def __init__(self, generator_spec, discriminator_reward, load_model=False,
             reward_to_go=False, use_discriminator_reward=True):
    self.gen_spec = generator_spec
    self.generator = Generator(generator_spec, "training_generator", load_model)
    self.old_generator = Generator(generator_spec, "old_generator", load_model)
    if load_model:
        self.load()
    else:
        self.setup_ppo()
    self._discriminator_reward = discriminator_reward
    self.reward_to_go = reward_to_go
    self.use_discriminator_reward = use_discriminator_reward
def __init__(self): print("initialize the model") self.data_loader = MSCOCODataLoader() self.dictionary = self.data_loader.dictionary self.embedding = self.data_loader.embedding self.global_step = tf.Variable(0, False, name="global-step") self.gen_learning_rate = 0.0005 self.dis_learning_rate = 0.0005 self.generator = Generator(AdversarialTrainer.batch_size, len(self.dictionary)) self.discriminator = Discriminator(2 * AdversarialTrainer.batch_size, len(self.dictionary))
def test_list_is_random(self):
    names_ordered = "ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ"
    members = []
    for index, name in enumerate(names_ordered):
        members.append(
            Member(first_name=name, sos_percentage=50, family=index))
    generator = Generator(members, self._basic_mock_work_day_service, 10)
    generator.generate()
    names = ""
    for day in generator.sos_days:
        for m in day.members:
            names += m.first_name
    self.assertNotEqual(names, names_ordered)
def main(): """ Parse arguments from command line """ print("Version 1.1") argv = sys.argv length = len(argv) if length != 2: print_help() exit() dependent_list_string = sys.argv[1] statement = Generator.generate_statements(dependent_list_string) print(statement)
def __init__(self, solver, size: tuple, screen: pygame.Surface):
    super().__init__((size[0], size[1] // 9), screen)
    self.__solver = solver
    self.__generator = Generator()

    # create control buttons
    controlsize = (self.size[0] - self.size[0] // 2 - 25, self.size[1] // 2)
    self.__buttons = [
        Button(*i, controlsize, self.screen) for i in (
            (self.solve_all, (), (14.25, -1.8), "all", 24, (20, 450)),
            (self.solve_selected, (), (-16, -1.8), "selected", 24, (145, 450)),
            (self.reset, (), (1.8, 0.7), "reset", 24, (20, 500)),
            (self.generate, (), (-18.3, 0.7), "generate", 24, (145, 500)),
        )
    ]
def __init__(self):
    # set main pygame screen size
    self.__screen_size = (1000, 720)
    self.__screen = pygame.display.set_mode(self.__screen_size[:2])
    # change display icon
    pygame.display.set_icon(pygame.image.load("../assets/icon.png"))
    self.__generator = Generator()
    self.__board = self.__generator.generate()
    # create board object
    self.__board_model = Board(self.__screen_size, self.__board, self.__screen)
    # create solver object
    self.__solver = Solver(self.__board_model, 500)
    # create left panel object
    self.__left_panel = LeftPanel(self.__solver, self.__screen_size, self.__screen)
    # set screen title
    pygame.display.set_caption("Sudoku")
def test_generate_statements(self):
    data = ' * @var Foo | Bar '
    statements = Generator.generate_statements(data)
    expected = '''
/** @var Foo */
private $foo;

/** @var Bar */
private $bar;

public function __construct(
    Foo $foo,
    Bar $bar
) {
    $this->foo = $foo;
    $this->bar = $bar;
}
'''
    self.assertEqual(expected, statements)
def get_generator(self, users, items, y_train, use_utility_loss):
    if use_utility_loss:
        return CoocurrenceGenerator(
            users, items, y_train,
            batch_size=self.batch_size,
            user_item_rating_map=self.user_item_rating_map,
            item_rating_map=self.item_rating_map,
            shuffle=True,
            c_size=self.c_size,
            s_size=self.s_size,
            n_item=self.n_items)
    else:
        return Generator(users, items, y_train,
                         batch_size=self.batch_size,
                         n_item=self.n_items,
                         shuffle=True)
def test_when_sponsored_starts_far_far_in_the_future(self):
    sponsor = Member(first_name="sponsor", sos_percentage=100,
                     family=100, sponsor_for_family=200)
    mock_work_days_service = self._basic_mock_work_day_service
    start_after_date = mock_work_days_service.start_after_date
    # 120 days is a reasonable number indicating far in the future.
    # The case is when generating SOS in July and the member starts in November.
    the_future = datetime.strptime(start_after_date, '%Y-%m-%d').date() + timedelta(days=120)
    sponsored = Member(first_name="sponsored", sos_percentage=100,
                       family=200, sponsored_by_family=100,
                       start_date=the_future)
    members = self._large_list_of_members
    members.extend([sponsor, sponsored])
    generator = Generator(members, mock_work_days_service, 10)
    generator.generate()
    sos_days = generator.sos_days

    found_sponsor = 0
    found_sponsored = 0
    for day in sos_days:
        if sponsor in day.members:
            found_sponsor += 1
        if sponsored in day.members:
            found_sponsored += 1
    self.assertEqual(found_sponsor, 2,
                     "Sponsor has all their SOS, just as usual")
    self.assertEqual(
        found_sponsored, 0,
        "Sponsored should have no SOS since they have not started on Nätet yet")
def test_short_run(self):
    token_data = os.path.join(os.path.dirname(sys.argv[0]), "test_parser_dataset.txt")
    freq_data = os.path.join(os.path.dirname(sys.argv[0]), "test_parser_result.txt")
    with open(token_data, encoding='utf8') as data:
        with open(freq_data, encoding='utf8') as freq:
            W = Word2Vec(data, freq)
            print("Made Word2Vec instance")
            w2v_save_file = os.path.join(os.path.dirname(sys.argv[0]), "test_word2vec.tfsav")
            gen_save_file = os.path.join(os.path.dirname(sys.argv[0]), "test_generator.tfsav")
            W.give_code()
            print("Gave code for tokens")
            W.tf_init(32, 192)
            print("Initialized")
            print("1st run (100 steps)....")
            W.tf_run(100, w2v_save_file, restore=True)
            E = W.Embeddings()
            print("Made Embeddings instance")
            del W
            G = Generator(E)
            print("Made Generator instance")
            G.nn_init(batch_size=32, timesteps=32, hidden_size=64)
            print("Initialized")
            print("1st run (5 steps)....")
            G.train_real_data(5, data, gen_save_file, restore=False)
            print("2nd run (5 steps)....")
            G.train_real_data(5, data, gen_save_file)
            print(G.generate(gen_save_file))
def done(self, form_list, **kwargs):
    data = self.get_all_cleaned_data()
    student = get_user(self.request).student
    courses = Course.objects.filter(pk__in=data.get('courses', []))
    loved_lecturers = Lecturer.objects.filter(
        pk__in=data.get('loved_lecturers', []))
    hated_lecturers = Lecturer.objects.filter(
        pk__in=data.get('hated_lecturers', []))
    hated_periods = data.get('hated_periods', {})
    try:
        generator = Generator(student,
                              courses=courses,
                              loved_lecturers=loved_lecturers,
                              hated_lecturers=hated_lecturers,
                              hated_periods=hated_periods)
        time_table = generator.generate()
        return redirect('generator:timetable_view', time_table_id=time_table.id)
    except AttributeError:
        # Polish message: "Could not find a valid plan for all the given
        # courses; reduce their number or try again."
        messages.add_message(
            self.request, messages.WARNING,
            'Nie udało się znaleźć poprawnego planu dla '
            'wszystkich podanych kursów, zmniejsz ich ilość '
            'lub spróbuj ponownie.')
        return redirect('generator:form_view')
    except Exception as e:
        logging.exception("generator error: ")
        print(str(e))
        # Polish message: "An unknown error occurred! Try again or contact
        # the site administrator."
        messages.add_message(
            self.request, messages.ERROR,
            'Wystąpił nieznany błąd! Spróbuj ponownie '
            'lub skontaktuj się z administratorem serwisu.')
        return redirect('generator:form_view')
def test_generator_proportion_0_gives_no_sos(self):
    m = Member()
    m.sos_percentage = 0
    generator = Generator([m], self._basic_mock_work_day_service, 10)
    self.assertEqual(len(generator.pot), 0)
def generate_with_predefined_sequences(opts, TR, sched_group,
                                       group='experimental'):
    """Generate schedule using sequences already defined."""
    # get config
    config = get_config()
    type_data = get_seq_types(opts.type_file)
    seq_file = \
        opts.seq_file + ".json".format(sched_group) \
        if opts.seq_file else "./scheduling/sequences.json"
    # opts.seq_file + "_{}.json".format(sched_group) \

    color_list = config["COLOR_LIST"]

    # create sequences
    row_list = []
    sess_num = 0  # 0 is baseline session

    np.random.seed(config["RND_SEED"] + 1000 * sched_group)

    for index, row in type_data.iterrows():
        seq_type, seq_length, max_chord_size, seq_keys, n_free_trials, \
            n_paced_trials, n_free_trials_testing, n_paced_trials_testing, \
            blocks, n_seqs_trained, n_seqs_untrained, n_seqs_fmri, \
            n_sess, testing_sessions, n_runs = row
        testing_session_list = \
            [int(x) for x in str(testing_sessions).split(",")]
        seq_keys = seq_keys.split(" ")
        blocks = [int(x) for x in blocks.split(",")]

        mygenerator = Generator(set=seq_keys,
                                size=seq_length,
                                maxchordsize=max_chord_size)
        trained_seqs, untrained_seqs = \
            mygenerator.read_grouped(seq_file, seq_type)

        if opts.cycle_offset:
            trained_seqs = trained_seqs[opts.cycle_offset:] + \
                trained_seqs[:opts.cycle_offset]
            untrained_seqs = untrained_seqs[opts.cycle_offset:] + \
                untrained_seqs[:opts.cycle_offset]

        n_trained = len(trained_seqs)
        n_untrained = len(untrained_seqs)

        reorder_trained = list(permutations(range(n_trained)))
        reorder_trained_fmri = list(combinations(range(n_trained), n_seqs_fmri))
        # reorder_untrained = list(combinations(range(n_untrained),
        #     n_seqs_untrained)) if not opts.no_untrained else []
        reorder_untrained = []
        untrained_list = range(n_untrained)
        # one = untrained_list[0]
        # twos = untrained_list[1:3]
        # rest = untrained_list[3:]
        untrained_groups = []
        for j in range(n_seqs_untrained):
            untrained_groups.append(untrained_list[j::n_seqs_untrained])

        for k in range(len(testing_session_list)):
            # mycombination = [one, twos[k % 2], rest[k % len(rest)]]
            mycombination = [x[k % len(x)] for x in untrained_groups]
            random.shuffle(mycombination)
            reorder_untrained.append(tuple(mycombination))

        # n_seqs: how many are presented
        # get colors
        seq_color = {}
        for myseq in trained_seqs:
            index = random.randint(0, len(color_list) - 1)
            seq_color[myseq[1]] = color_list[index]
            del color_list[index]
        for myseq in untrained_seqs:
            index = random.randint(0, len(color_list) - 1)
            seq_color[myseq[1]] = color_list[index]
            del color_list[index]

        # untrained_index = 0
        trained_comb_num = 0
        untrained_comb_num = 0

        for sess in range(n_sess):
            # controls the order across sessions
            trained_combination = list(
                reorder_trained[trained_comb_num % len(reorder_trained)])
            trained_fmri_combination = list(
                reorder_trained_fmri[trained_comb_num %
                                     len(reorder_trained_fmri)])
            trained_comb_num = trained_comb_num + 1

            for paced in range(2):
                myruns = n_runs if paced and \
                    sess_num in testing_session_list else 1  # sess+1

                if not sess_num in testing_session_list:  # training, sess + 1
                    sess_type = "training"
                    n_trials = n_free_trials if paced == 0 else n_paced_trials

                    for seq in range(n_seqs_trained):
                        instruct = 1 if seq == 0 else 0
                        seq_index = trained_combination[seq]
                        seq_train = "trained"
                        sequence, sequence_string = trained_seqs[seq_index]

                        if n_trials > 0 and group == 'experimental':
                            row_list.append([
                                sess_num, sess_type, n_trials,
                                " ".join(seq_keys), seq_type,
                                sequence_string, seq_train,
                                seq_color[sequence_string],
                                trained_combination, seq_index,
                                paced, instruct,
                                1,  # run
                                1   # block
                            ])

                else:  # testing / fmri
                    untrained_combination = \
                        list(reorder_untrained[untrained_comb_num %
                                               len(reorder_untrained)]) \
                        if not opts.no_untrained > 0 else []
                    # print(untrained_combination)
                    # print(reorder_untrained)

                    if paced == 0:
                        sess_type = "testing"
                        n_trials = n_free_trials_testing

                        # trained and untrained
                        for seq in range(n_seqs_trained + n_seqs_untrained):
                            instruct = 1 if seq == 0 else 0

                            # interleave trained/untrained
                            if seq % 2 == 1 and not opts.no_untrained:
                                seq_index = untrained_combination[(seq - 1) // 2]
                                shuffled_combination = untrained_combination
                                seq_train = "untrained"
                                sequence, sequence_string = \
                                    untrained_seqs[seq_index]
                            else:
                                seq_index = trained_combination[seq // 2]
                                shuffled_combination = trained_combination
                                seq_train = "trained"
                                sequence, sequence_string = \
                                    trained_seqs[seq_index]

                            if n_trials > 0:
                                row_list.append([
                                    sess_num, sess_type, n_trials,
                                    " ".join(seq_keys), seq_type,
                                    sequence_string, seq_train,
                                    seq_color[sequence_string],
                                    shuffled_combination, seq_index,
                                    paced, instruct,
                                    1,  # run
                                    1   # block
                                ])

                    else:
                        untrained_comb_num = untrained_comb_num + 1
                        sess_type = "fmri"
                        combination_index = trained_fmri_combination + \
                            untrained_combination
                        # same amount of trained and untrained
                        combination_type = \
                            len(trained_fmri_combination) * ["trained"] + \
                            len(trained_fmri_combination) * ["untrained"]
                        combination = list(zip(combination_type,
                                               combination_index))
                        print(combination)
                        n_trials = np.sum(np.array(blocks))

                        # compute run statistics
                        nbeats = config["MAX_CHORD_SIZE"] + \
                            config["EXTRA_BEATS"]
                        ITI = list(generate_ITIs(config["ITIMEAN_FMRI"],
                                                 config["ITIRANGE_FMRI"],
                                                 'exp'))
                        trial_duration = config["BEAT_INTERVAL"] * nbeats + \
                            config["BUFFER_TIME"] + config["FIXATION_TIME"] + \
                            np.mean(ITI)  # config["ITIMEAN_FMRI"]
                        run_duration = trial_duration * n_trials * \
                            len(combination) + config["START_TIME_FMRI"] + \
                            (len(combination) * n_trials /
                             config["STRETCH_TRIALS"] - 1) * \
                            config["STRETCH_TIME"]
                        total_duration = run_duration * n_runs
                        total_trials = n_runs * n_trials
                        print("Trial duration: %.2f s; " % (trial_duration) +
                              "Run duration: %.2f s (%.2f m, %d frames); " %
                              (run_duration, run_duration / 60,
                               np.ceil(run_duration / TR)) +
                              "Total duration: %.2f m; " %
                              (total_duration / 60) +
                              "Total trials per sequence: %d" % (total_trials))

                        for run in range(myruns):
                            shuffled_combination_run = \
                                shuffle_order(combination)
                            last_seq = 0
                            for block, n_group in enumerate(blocks):
                                shuffled_combination = \
                                    shuffle_order(shuffled_combination_run)
                                # avoid repetitions
                                while last_seq == shuffled_combination[0]:
                                    shuffled_combination = \
                                        shuffle_order(shuffled_combination_run)
                                last_seq = shuffled_combination[-1]

                                # shuffle trained and untrained
                                for seq in range(len(shuffled_combination)):
                                    instruct = 1 if seq == 0 and \
                                        block == 0 else 0
                                    combination_type, combination_index = \
                                        shuffled_combination[seq]
                                    if combination_type == "untrained":
                                        seq_train = "untrained"
                                        sequence, sequence_string = \
                                            untrained_seqs[combination_index]
                                    else:
                                        seq_train = "trained"
                                        sequence, sequence_string = \
                                            trained_seqs[combination_index]

                                    if n_trials > 0:
                                        row_list.append([
                                            sess_num, sess_type, n_group,
                                            " ".join(seq_keys), seq_type,
                                            sequence_string, seq_train,
                                            seq_color[sequence_string],
                                            shuffled_combination, seq_index,
                                            paced, instruct,
                                            run + 1,    # run
                                            block + 1   # block
                                        ])

            sess_num = sess_num + 1

    schedule = pd.DataFrame(
        row_list,
        columns=("sess_num", "sess_type", "n_trials", "seq_keys", "seq_type",
                 "sequence_string", "seq_train", "seq_color", "combination",
                 "seq_order", "paced", "instruct", "run", "block"))
    # schedule.loc[schedule["sess_num"] == 0, "sess_num"] = \
    #     np.max(schedule["sess_num"]) + 1
    # schedule.sort_values(by=["sess_num", "paced", "seq_train"],
    #                      inplace=True)

    if opts.schedule_file:
        schedulefilename = opts.schedule_file + "_s{}".format(sched_group)
    else:
        schedulefilename = "./scheduling/schedule{}".format(sched_group)

    if opts.split:
        schedule_home = schedule.loc[schedule["sess_type"] != "fmri", :]
        schedule_fmri = schedule.loc[schedule["sess_type"] == "fmri", :]
        schedule_home.to_csv(schedulefilename + ".csv", sep=";", index=False)
        schedule_fmri.to_csv(schedulefilename + "_fmri.csv", sep=";",
                             index=False)
    else:
        schedule.to_csv(schedulefilename + ".csv", sep=";", index=False)
def main():
    argdict = dict(zip(sys.argv, sys.argv[1:] + ['']))
    if "-h" in argdict:
        print(help_message)
        return

    # Set of filenames to data files.
    raw_filename = join_filenames("data", "tweets.csv")
    filtered_filename = join_filenames("data", "_tweets_filtered.txt")
    stat_filename = join_filenames("data", "tweets_stat.txt")
    tokenized_filename = join_filenames("data", "tweets_tokenized.txt")

    # Dimensions of the model
    session_config = configparser.ConfigParser()
    session_config.read('session.ini')
    word2vec_batch_size = 640
    embedding_size = int(session_config['dimension']['embedding_size'])
    gen_batch_size = 128
    gen_seq_length = int(session_config['dimension']['gen_seq_length'])
    gen_hidden_size = [int(x) for x in
                       session_config['dimension']['gen_hidden_size'].split(',')]

    # Hyper-parameters of the model
    learning_rate = 1E-06

    if "-i" in argdict:
        # Filter valid tweets from the data file, and use the nlp parser to tokenize tweets
        if os.path.isfile(tokenized_filename):
            proceed = (input("Erasing old data. OK to proceed? (Y/N)") == "Y")
        else:
            proceed = True
        if proceed:
            with open_utf8(raw_filename, "r") as raw_file_r:
                # Filter actual tweets
                preparser = Preparser(raw_file_r)
                preparser.extract(filter=True)
                with open_utf8(filtered_filename, "w") as filtered_file_w:
                    preparser.save(filtered_file_w)
            # Tokenize tweets
            with open_utf8(filtered_filename, "r") as filtered_file_r:
                parser = Parser(filtered_file_r)
                with open_utf8(stat_filename, "w") as stat_file_w:
                    parser.get_stats(stat_file_w)
                with open_utf8(tokenized_filename, "w") as tokenized_file_w:
                    parser.get_data(tokenized_file_w)

    if "-w" in argdict and int(argdict["-w"]) >= 0:
        # Start or continue word2vec optimization
        word2vec_num_step = int(argdict["-w"])
        if "-W" in argdict:
            word2vec_save_filename = join_filenames("saves", argdict["-W"])
        else:
            word2vec_save_filename = join_filenames(
                "saves", session_config['save_file']['word2vec_save'])
        word2vec_restore = os.path.isfile(word2vec_save_filename + ".meta")
        word2vec = Word2Vec(tokenized_filename, stat_filename)
        word2vec.give_code()
        word2vec.tf_init(embedding_size=embedding_size,
                         batch_size=word2vec_batch_size,
                         seed=None)
        word2vec.tf_run(word2vec_num_step, word2vec_save_filename,
                        restore=word2vec_restore)

    if "-g" in argdict and int(argdict["-g"]) >= 0:
        # Start or continue generator learning
        with open_utf8(stat_filename, "r") as stat_file_r, \
                open_utf8(tokenized_filename, "r") as tokenized_file_r:
            embeddings = word2vec.Embeddings()
            if "-G" in argdict:
                gen_save_filename = join_filenames("saves", argdict["-G"])
            else:
                gen_save_filename = join_filenames(
                    "saves", session_config['save_file']['generator_save'])
            gen_restore = os.path.isfile(gen_save_filename + ".meta")
            generator = Generator(embeddings)
            generator.nn_init(gen_batch_size, gen_seq_length, gen_hidden_size,
                              learning_rate=learning_rate, seed=None,
                              use_vector=("-V" in argdict))
            generator.train_real_data(int(argdict["-g"]), tokenized_file_r,
                                      gen_save_filename, restore=gen_restore)

    if "-s" in argdict and int(argdict["-s"]) >= 0:
        result_filename = join_filenames(argdict["-S"])
        unparser = Unparser(result_filename)
        sentences = generator.generate(gen_save_filename, int(argdict["-s"]))
        unparser.save(sentences)
def main():
    generator = Generator(config)
    generator.generate()
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from options.train_options import TrainOptions
from generator.generator import Generator
from models.models import creat_model

opt = TrainOptions().parse()

generator = Generator(opt)
train_generator = generator.synth_generator('train')
val_generator = generator.synth_generator('val')
# img_batch = val_generator.__next__()

model = creat_model(opt)
history = model.fit_generator(
    train_generator,
    steps_per_epoch=generator.num_train_samples // opt.batchSize,
    epochs=opt.epoch,
    validation_data=val_generator,
    validation_steps=generator.num_test_sample // opt.batchSize,
    class_weight='auto'
)

# save model
model.save(history)
#!/usr/bin/python3
from generator.generator import Generator
from os.path import abspath

import settings

if __name__ == '__main__':
    Generator(
        content_path=abspath('data'),
        templates_path=abspath('templates'),
        output_path=abspath('output')
    ).generate()
    # we're generating modules
    config = GeneratorConfig()
    config.output_dir = output_dir
    config.symptom_file = args.symptoms_json
    config.conditions_file = args.conditions_json
    config.config_file = args.config_file
    config.num_history_years = args.num_history_years
    config.min_symptoms = args.min_symptoms
    config.prefix = args.module_prefix
    config.generator_mode = args.generator_mode

    if not args.symptoms_json or not args.conditions_json:
        raise ValueError(
            "You must supply both the parsed symptoms.json and conditions.json file"
        )

    generator = Generator(config)
    generator.generate()
elif args.parse_symptoms:
    if not args.symptoms_csv:
        raise ValueError(
            "You must supply the symcat exported symptoms CSV file")
    symptoms = parse_symcat_symptoms(args.symptoms_csv)
    with open(os.path.join(output_dir, "symptoms.json"), "w") as fp:
        json.dump(symptoms, fp, indent=4)
elif args.parse_conditions:
    if not args.conditions_csv:
        raise ValueError(
            "You must supply the symcat exported conditions CSV file")
    conditions = parse_symcat_conditions(args.conditions_csv)
    with open(os.path.join(output_dir, "conditions.json"), "w") as fp:
        json.dump(conditions, fp, indent=4)
if __name__ == "__main__": from torch.utils import data from sklearn.model_selection import train_test_split from generator.generator import Generator from discriminator.discriminator_semi import SemiSupervisedDiscriminator from classifier.classifier import Classifier from data.data_loader import ImageDataset, ImageTransform, make_datapath_list z_dim = 20 image_size_g = 64 image_size_d = 12 num_classes = 10 G = Generator(image_size_g, z_dim) D = SemiSupervisedDiscriminator(image_size_d, num_classes) C = Classifier(image_size_d, num_classes) G.apply(weights_init) D.apply(weights_init) print("Finish initialization of the network") label_list = list(range(num_classes)) img_list, label_list = make_datapath_list(label_list) train_img_list, test_img_list, train_label_list, test_label_list = train_test_split( img_list, label_list, test_size=0.2) mean = (0.5, ) std = (0.5, )
def generate_example():
    generator = Generator(train_dir_path="results")
    text = generator.generate(src_text="Sing a song", n_chars=5)
    print(text)
def test_last_day_is_full(self):
    m = Member(sos_percentage=50, family=1)
    generator = Generator([m], self._basic_mock_work_day_service, 0)
    generator.generate()
    self.assertListEqual([], generator.sos_days)
def test_generator_proportion_50_gives_one_sos(self):
    m = Member()
    m.sos_percentage = 50
    generator = Generator([m], self._basic_mock_work_day_service, 10)
    generator._populate_pot()
    self.assertEqual(len(generator.pot), 1)