Example #1
    def __init__(self, _device):
        self.device = _device
        self.batch_size = 64
        self.resolution = 28
        self.d_criterion = None
        self.d_optimizer = None
        self.g_criterion = None
        self.g_optimizer = None

        self.discriminator = Discriminator(num_layers=5,
                                           activations=["relu", "relu", "relu",
                                                        "sigmoid"],
                                           device=_device,
                                           num_nodes=[1, 64, 128, 64, 1],
                                           kernels=[5, 5, 3],
                                           strides=[2, 2, 2],
                                           dropouts=[.25, .25, 0],
                                           batch_size=64)

        # pass one image through the network so as to initialize the output
        # layer
        self.discriminator(torch.rand(
            size=[self.batch_size, 1, self.resolution, self.resolution]))

        self.generator = Generator(num_layers=6,
                                   activations=["relu", "relu", "relu", "relu",
                                                "tanh"],
                                   num_nodes=[1, 64, 128, 64, 64, 1],
                                   kernels=[3, 3, 3, 3],
                                   strides=[1, 1, 1, 1],
                                   batch_norms=[1, 1, 1, 0],
                                   upsamples=[1, 1, 0, 0],
                                   dropouts=[.25, .25, 0])
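
The dummy forward pass above is a common way to materialize lazily-built layers before training. A minimal, self-contained sketch of the same idiom using only standard PyTorch (nn.LazyLinear infers its input size on the first call; the sizes mirror the snippet):

import torch
import torch.nn as nn

# A lazily-built model: LazyLinear does not yet know its input size.
model = nn.Sequential(nn.Flatten(), nn.LazyLinear(10))

# One dummy batch at the target resolution materializes the weights,
# just like the warm-up call through the discriminator above.
model(torch.rand(64, 1, 28, 28))
print(sum(p.numel() for p in model.parameters()))  # now well-defined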
Example #2
    def __init__(self, verbosity=True, latent_dim=100):
        img_shape = (128, 128, 3)

        the_disc = Discriminator()
        the_gen = Generator()
        self.discriminator = the_disc.define_discriminator(
            verb=verbosity, sample_shape=img_shape)
        self.generator = the_gen.define_generator(verb=verbosity,
                                                  sample_shape=img_shape,
                                                  latent_dim=latent_dim)
        optimizer = Adam(0.0002, 0.5)
        self.discriminator.compile(
            loss=['binary_crossentropy', 'categorical_crossentropy'],
            loss_weights=[0.5, 0.5],
            optimizer=optimizer,
            metrics=['accuracy'])

        # Freeze the discriminator only after compiling it: the compiled
        # discriminator still trains on its own, while the combined model
        # below sees it as fixed.
        self.discriminator.trainable = False

        noise = Input(shape=(latent_dim, ))
        img = self.generator(noise)

        valid, _ = self.discriminator(img)

        self.combined = Model(noise, valid)
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer)
Example #3
def main():
    # Loading Parameters
    parser = init_parameters()
    args, _ = parser.parse_known_args()

    # Updating Parameters (cmd > yaml > default)
    args = update_parameters(parser, args)

    # Setting save_dir
    save_dir = get_save_dir(args)
    U.set_logging(save_dir)
    with open('{}/config.yaml'.format(save_dir), 'w') as f:
        yaml.dump(vars(args), f)

    # Processing
    if args.generate_data or args.generate_label:
        g = Generator(args)
        g.start()

    elif args.extract or args.visualization:
        if args.extract:
            p = Processor(args, save_dir)
            p.extract()
        if args.visualization:
            v = Visualizer(args)
            v.start()

    else:
        p = Processor(args, save_dir)
        p.start()
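
For reference, one way the "cmd > yaml > default" precedence from the comment can be implemented. update_parameters is not shown in the snippet, so the --config attribute and the merge details below are assumptions, not the project's code:

import argparse
import yaml

def update_parameters(parser, args):
    # Defaults first, YAML on top, explicit command-line values last.
    defaults = vars(parser.parse_args([]))
    with open(args.config) as f:  # hypothetical --config argument
        from_yaml = yaml.safe_load(f) or {}
    merged = {**defaults, **from_yaml}
    merged.update({k: v for k, v in vars(args).items()
                   if v != defaults.get(k)})
    return argparse.Namespace(**merged)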
Example #4
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('read_path')
    arg_parser.add_argument('write_path')
    args = arg_parser.parse_args()

    with open(args.read_path, 'r') as source:
        text = source.read()

    lexer = Lexer(text)
    tokens = lexer.lex()

    parser = Parser(tokens)
    ast = parser.parse()

    symbolizer = Symbolizer(ast)
    symbolizer.symbolize()

    optimizer = Optimizer(ast)
    optimizer.optimize()

    grapher = Grapher(ast)
    grapher.graph()

    generator = Generator(ast)
    generator.generate(args.write_path)

    runner = Runner(ast)
    runner.run()
Example #5
    def __init__(self, data, generator=None):
        data_indicator = data.iloc[:, 9:]
        if generator is None:
            self._generator = Generator(data_indicator)
        else:
            self._generator = generator
        self._data = data
Example #6
    def test_generator(self):
        for path in glob.glob("test/grader/*/src.pas"):
            dir = os.path.dirname(path)
            should_fail = not dir.endswith('16')
            with open(path, 'r') as source:
                print(f"testing {path}")
                text = source.read()
                lexer = Lexer(text)
                tokens = lexer.lex()
                parser = Parser(tokens)
                ast = parser.parse()
                symbolizer = Symbolizer(ast)
                symbolizer.symbolize()
                generator = Generator(ast, symbolizer)
                generator.generate()
                sol = os.path.join(dir, 'src.c')
                out = os.path.join(dir, 'out')
                if os.path.exists(sol):
                    os.remove(sol)
                if os.path.exists(out):
                    os.remove(out)
                generator.write(sol)
                p = None
                try:
                    p = sp.Popen(['gcc', sol, '-o', out], stdout=sp.PIPE)
                    retCode = p.wait()
                    self.assertTrue(retCode == 0)
                    p.stdout.close()
                    #s = str(p.stdout.read())
                    #self.assertTrue(s == '')
                except Exception:
                    self.assertFalse(should_fail)
                for i in range(1, 5):
                    inFile = os.path.join(dir, str(i) + '.in')
                    outFile = os.path.join(dir, str(i) + '.out')
                    with open(inFile, 'r') as inText:
                        with open(outFile, 'r') as outText:
                            inText = inText.read()
                            outText = outText.read()
                            try:
                                of = sp.Popen([out],
                                              stdin=sp.PIPE,
                                              stdout=sp.PIPE)
                                of.stdin.write(inText.encode('utf-8'))
                                of.stdin.close()
                                rc = of.wait()
                                self.assertTrue(rc == 0)
                                b = of.stdout.read()
                                s = b.decode('utf-8')
                                of.stdout.close()
                                if (not should_fail):
                                    self.assertEqual(s, str(outText))
                            except Exception:
                                self.assertFalse(should_fail)

        self.assertTrue(True)
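
The compile-and-compare steps above can be condensed with subprocess.run; a sketch, not the project's code (check=True raises CalledProcessError on a non-zero exit instead of asserting):

import subprocess

def compile_and_run(c_file, exe, stdin_text):
    # Compile the generated C, feed the test input on stdin, and return
    # the program's stdout for comparison against the expected .out file.
    subprocess.run(['gcc', c_file, '-o', exe], check=True)
    proc = subprocess.run([exe], input=stdin_text.encode('utf-8'),
                          stdout=subprocess.PIPE, check=True)
    return proc.stdout.decode('utf-8')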


#Tests().test_grapher()
Example #7
def __collect_data(bca_inputs='bee_colony_inputs.txt', generator_inputs='settings.txt'):
    settings = input_output.read(bca_inputs)
    data = Generator(generator_inputs)
    intelligence = Engine(data)
    scouts = [Bee(intelligence) for _ in range(settings[0])]
    workers = [Bee(intelligence) for _ in range(settings[1])]
    steps = settings[2]
    hive = []
    return steps, scouts, workers, hive
Example #8
    def test_generator(self):
        g = Generator()
        w1 = Website('test1', num_pages=20)
        w2 = Website('test2', num_pages=20)
        g.generate(sites=[w1, w2], writeFile=True)
        self.assertEqual(len(g.sites), 2)
        self.assertEqual(len(g.sites[0].pages), 20)
        for i, page in enumerate(g.sites[0].pages):
            self.assertIn(f'<title>{w1.domain} - Page{i}</title>', page.html)
Example #9
    def setUp(self):
        self.batch_size = 32

        self.generator = Generator(
            csv_file=f"{gc['DATA_DIR']}/src/train.csv",
            images_src_dir=f"{gc['DATA_DIR']}/src/train_images",
            batch_size=self.batch_size,
            target_image_size=(224, 224),
            image_augmentation_options=default_image_augmenation_options,
            cache_dir=gc["DATA_DIR"] + "/images_cache",
        )
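
A sketch of how such a generator is typically consumed in a test; the Sequence-style indexed access is an assumption, not a documented API of this Generator:

    def test_batch_shape(self):
        # Hypothetical: assumes the generator yields (images, labels) batches.
        x_batch, y_batch = self.generator[0]
        self.assertEqual(x_batch.shape[0], self.batch_size)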
Example #10
    def createNew(self, species, length, gamutLength, cf):
        if species == 'cf':
            cf = ()
        cf = tuple(cf)

        key = (species, length, gamutLength, cf)
        if key in self.generators:
            return self.generators[key].createNew()
        else:
            if species == 'cf':
                g = Generator(cantusSpec(length, gamutLength, 'cf'))
            elif species == 's1':
                g = Generator(firstSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's1'))
            elif species == 's2':
                g = Generator(secondSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's2'))
            else:
                g = Generator(thirdSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's3'))

            self.generators[key] = g
            return g.createNew()
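
The caching idiom above, in isolation: build an expensive object once per parameter tuple and reuse it afterwards. A generic sketch (the factory callable stands in for the Generator/spec plumbing):

class GeneratorPool:
    def __init__(self, factory):
        self._factory = factory
        self._cache = {}

    def get(self, *key):
        # Build on the first request for this key, then reuse.
        if key not in self._cache:
            self._cache[key] = self._factory(*key)
        return self._cache[key]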
Example #11
def main():
    args = parse_args()
    device = torch.device("cuda")

    generator = Generator().to(device)
    if args.freeze:
        for name, param in generator.named_parameters():
            if ("shared" not in name) and ("decoder.block.5" not in name):
                param.requires_grad = False

    train_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "train/dialogues_train.txt"),
        tokenizer=generator.tokenizer,
    )
    valid_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "validation/dialogues_validation.txt"),
        tokenizer=generator.tokenizer,
    )

    print(len(train_dataset), len(valid_dataset))

    optimizer = AdamW(generator.parameters(), lr=args.lr)

    best_loss = np.float("inf")

    for epoch in tqdm(range(args.num_epochs)):
        train_loss, valid_loss = [], []
        generator.train()
        for ind in np.random.permutation(len(train_dataset)):
            optimizer.zero_grad()
            context, reply = train_dataset.sample_dialouge(ind)
            context, reply = context.to(device), reply.to(device)
            loss = generator.get_loss(input_ids=context, labels=reply)
            loss.backward()
            optimizer.step()

            train_loss.append(loss.item())

        generator.eval()
        for ind in range(len(valid_dataset)):
            context, reply = valid_dataset[ind]
            context, reply = context.to(device), reply.to(device)
            with torch.no_grad():
                loss = generator.get_loss(input_ids=context, labels=reply)
            valid_loss.append(loss.item())

        train_loss, valid_loss = np.mean(train_loss), np.mean(valid_loss)
        print(
            f"Epoch {epoch + 1}, Train Loss: {train_loss:.2f}, Valid Loss: {valid_loss:.2f}"
        )
        if valid_loss < best_loss:
            best_loss = valid_loss
            torch.save(generator.state_dict(), args.output_path)
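
The selective freeze at the top of main() is a reusable pattern; here it is as a helper (the substring matching mirrors the snippet, but the function itself is ours, not the project's):

import torch.nn as nn

def freeze_except(model: nn.Module, keep=("shared", "decoder.block.5")):
    # Gradients flow only through parameters whose names contain a kept fragment.
    for name, param in model.named_parameters():
        param.requires_grad = any(fragment in name for fragment in keep)
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"trainable parameters: {trainable}")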
Example #12
    def __init__(self, window_height, window_width, button_height, screen):

        # Window parameters
        self.window_height = window_height
        self.window_width = window_width
        self.button_height = button_height
        self.screen = screen
        self.buttons = None
        self.clock = pg.time.Clock()

        # Maze parameters
        self.maze_size = 10
        self.scale = (self.window_width / self.maze_size)
        self.maze = Maze(self.maze_size, self.scale)
        self.maze_list = [self.maze.grid]
        self.build_maze = copy.deepcopy(self.maze.grid)

        # Initialize searcher, generator, learner objects
        self.generator = Generator()  # Used to generate mazes
        self.searcher = Searcher()  # Used to search mazes
        #self.learner = Learner()        # Used to simulate learning on mazes

        # Paths searched
        self.paths = {}
        self.shown_path = {}
        # Number of cells visited in each search
        self.visits = {"BFS": "-", "A*": "-", "DFS": "-"}
        # Length of the path found from the start to the end
        self.lengths = {"BFS": "-", "A*": "-", "DFS": "-"}

        # Reinforcement Learning parameters
        self.num_actions = 4
        self.num_states = self.maze.size * self.maze.size
        self.step_size = 0.5
        self.epsilon = 0.1
        self.discount = 0.9
        self.num_episodes = 50
        self.num_runs = 5
        self.all_states_visited = {}
        self.all_state_visits = {}
        self.all_reward_sums = {}

        # Colors
        self.path_colors = {
            "BFS": RED,
            "A*": ORANGE,
            "DFS": PINK,
            "Explore_BFS": DARK_GREEN,
            "Explore_A*": DARK_ORANGE,
            "Explore_DFS": PURPLE
        }
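
For context, step_size, epsilon, and discount above drive a standard tabular update. A one-step Q-learning sketch using the same names (the Learner class itself is commented out above, so this is illustrative only):

import numpy as np

def q_update(Q, s, a, reward, s_next, step_size=0.5, discount=0.9):
    # Q is a (num_states, num_actions) array; one temporal-difference step.
    target = reward + discount * np.max(Q[s_next])
    Q[s, a] += step_size * (target - Q[s, a])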
Example #13
def main():

    global window, ceilingtexture, floortexture, walltexture, map

    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(200, 200)

    window = glutCreateWindow('Experimental Maze')

    # Generate map.
    generator = Generator()
    map = generator.generateMap(16)

    # Represents a top-down view of the maze.
    # map = [
    #     [1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1]
    # ]

    # Load texture.
    texture = Texture()
    ceilingtexture = texture.loadImage('tex/ceiling.bmp')
    floortexture = texture.loadImage('tex/floor.bmp')
    walltexture = texture.loadImage('tex/wall.bmp')

    glutIgnoreKeyRepeat(1)
    glutKeyboardFunc(input.registerKeyDown)
    glutKeyboardUpFunc(input.registerKeyUp)

    glutDisplayFunc(drawScene)
    glutIdleFunc(drawScene)
    initGL(640, 480)
    glutMainLoop()
Example #14
    def __init__(self):
        self.logging = True

        # Game logic
        self.players = {}
        self.bots = {}
        self.botsInactive = {}

        self.bullets = {}
        self.bulletsIndex = 0

        self.geysers = {}
        self.geysersInactive = {}
        self.geysersIndex = 0

        self.doors = {}
        self.doorsIndex = 0

        self.fortified = {}
        self.fortifyIndex = 0

        # Game management
        self.titlesToUpdate = []
        self.deadConnections = []
        self.mapDimensions = MAPDIMENSIONS
        gen = Generator(self.mapDimensions[0], self.mapDimensions[1], -200)

        self.map = [gen.getUnderworld(), gen.getOverworld()]
        self.bouldersMap = gen.getBoulders()

        # Create initial boulders
        for floor in [0, 1]:
            for boulder in self.bouldersMap[floor]:
                boulderx = boulder[0]
                bouldery = boulder[1]
                self.map[floor][bouldery][boulderx] = BOULDER_CHAR

        # Create initial bots
        botId = 0
        for x in range(0, round(BOTAMOUNT)):

            if x % 2 == 0:  # 1/2 of bot amount in overworld
                self.botsInactive[botId] = Bot(self, self.getSpawnPosition(),
                                               1)
                botId += 1

            self.botsInactive[botId] = Bot(self,
                                           self.getSpawnPosition(floor=0), 0)
            botId += 1
Example #15
    def __init__(self, master_key, clipboard, keys_and_emojis=None):
        """
        @param master_key: See the documentation in the Listener class.

        @param clipboard: See the documentation in the Generator class.

        @param keys_and_emojis: Dictionary mapping keys to emojis.
        """

        self.__master_key = master_key
        self.__clipboard = clipboard
        # Avoid the shared-mutable-default pitfall of keys_and_emojis={}.
        self.__keys_and_emojis = keys_and_emojis if keys_and_emojis is not None else {}

        self.__emoji_generator = Generator()
        self.__listener = None
Example #16
def test_run_generator():
    g = Generator()
    z_code = torch.randn(BATCH, D_Z)
    sent_emb = torch.randn(BATCH, D_HIDDEN)
    word_embs = torch.randn(BATCH, D_WORD, CAP_MAX_LEN)
    gen, att, mu, logvar = g(z_code, sent_emb, word_embs, None)
    print('Generated shape:')
    for k in gen:
        print(gen[k].size())
    print('Attention shape:')
    for a in att:
        print(att[a].size())

    print(f'Mu shape: {mu.size()}    logvar shape: {logvar.size()}')
    return gen, att, mu, logvar
Example #17
def main(args):
    mode = args.mode
    amount = args.number_amount
    bits_number = args.bits

    if mode not in range(3):
        raise Exception('Mode ' + str(mode) + ' not allowed.')

    if amount < 0:
        raise Exception('Amount ' + str(amount) + ' not allowed.')

    if bits_number < 0:
        raise Exception('Number of bits ' + str(bits_number) + ' not allowed.')

    qrangen = Generator(mode, amount, bits_number)
    print(qrangen.generate_number())
Example #18
    def generate(self, event):
        from src.generator import Generator
        import os
        import traceback

        schoolName = self.inputs["schoolName"].get()
        examName = self.inputs["examName"].get()
        authorityName = self.inputs["authorityName"].get()

        if (not schoolName) and (not examName):
            messagebox.showwarning("Required",
                                   "School Name and Exam Name are required!!")
            return
        elif not schoolName:
            messagebox.showwarning("Required", "School Name is required!!")
            return
        elif not examName:
            messagebox.showwarning("Required", "Exam Name is required!!")
            return

        try:
            self.waitLabel["text"] = "Wait..."
            factory = Generator(
                schoolName,
                examName,
                self.file,
                self.signatureFile.name if self.signatureFile else None,
                authorityName,
            )

            if not os.path.isdir("pdfs"):
                os.mkdir("pdfs", 0o666)

            print("Starting to generate PDFs")

            for student in factory.getCSV():
                factory.generatePDF(student["Roll Number"], log=True)
            self.waitLabel["text"] = ""
            messagebox.showinfo("Success!!",
                                "Generated PDFs!! Check the 'pdfs' folder")
        except Exception as e:
            traceback.print_exc()  # e.with_traceback() requires a traceback argument
            self.waitLabel["text"] = ""
            messagebox.showerror("Error!!",
                                 "Something went wrong!! Please try again!!")
Example #19
    def __init__(self, damsm, device=DEVICE):
        self.gen = Generator(device)
        self.disc = Discriminator(device)
        self.damsm = damsm.to(device)
        self.damsm.txt_enc.eval()
        self.damsm.img_enc.eval()
        freeze_params_(self.damsm.txt_enc)
        freeze_params_(self.damsm.img_enc)

        self.device = device
        self.gen.apply(init_weights)
        self.disc.apply(init_weights)

        self.gen_optimizer = torch.optim.Adam(self.gen.parameters(),
                                              lr=GENERATOR_LR,
                                              betas=(0.5, 0.999))

        self.discriminators = [self.disc.d64, self.disc.d128, self.disc.d256]
        self.disc_optimizers = [
            torch.optim.Adam(d.parameters(),
                             lr=DISCRIMINATOR_LR,
                             betas=(0.5, 0.999)) for d in self.discriminators
        ]
Example #20
def generateCanti(powersOfTwo):
    lineLengths = powersOfTwo.copy()
    gamutSizes = powersOfTwo.copy()
    tasks = list(itertools.product(lineLengths, gamutSizes))
    results = []  # (lineLength, gamutSize, time)
    causes = []  # (lineLength, gamutSize, result)
    cfs = []  # (lineLength, gamutSize, line)
    print(tasks)
    for (length, gamut) in tasks:
        print((length, gamut))

        try:
            g = Generator(cantusSpec(length, gamut, ''))
        except Exception:
            causes.append((length, gamut, 'Invalid'))
            continue

        i = 0
        while True:
            if i == 5:
                causes.append((length, gamut, 'over5'))
                break
            i += 1
            res, runtime = timeWithTimeout(lambda: g.createNew(), 10)

            # No more unique output for task
            if res == []:
                if i == 1:
                    causes.append((length, gamut, 'No possible lines'))
                else:
                    causes.append((length, gamut, 'All outputs found'))
                break
            # timeout
            elif res is None:
                causes.append((length, gamut, 'timeout'))
                break
            else:
                out = (length, gamut, i, runtime)
                cfs.append((length, gamut, res))
                results.append(out)

    success = pd.DataFrame(columns=['length', 'gamut', 'iteration', 'times'],
                           data=results)
    fails = pd.DataFrame(columns=['length', 'gamut', 'result'], data=causes)
    lines = pd.DataFrame(columns=['length', 'gamut', 'line'], data=cfs)
    print(success)
    print(fails)
    print(cfs)
    dt = str(datetime.datetime.now())
    success.to_csv(
        'output/generator/cf/generateCFSuccess_Max:' + str(powersOfTwo[-1]) +
        "_Time:" + dt + '.csv',
        index=False,
    )
    fails.to_csv(
        'output/generator/cf/generateCFFails_Max:' + str(powersOfTwo[-1]) +
        "_Time:" + dt + '.csv',
        index=False,
    )
    lines.to_csv(
        'output/generator/cf/generateCFCFs_Max:' + str(powersOfTwo[-1]) +
        '.csv',
        index=False,
    )
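
timeWithTimeout is used but never defined in this snippet. One minimal sketch that matches how the loop interprets its return value, (result, runtime) normally and (None, ...) on timeout; note the worker thread is abandoned rather than killed:

import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import TimeoutError as FutureTimeout

def timeWithTimeout(fn, seconds):
    executor = ThreadPoolExecutor(max_workers=1)
    start = time.time()
    future = executor.submit(fn)
    try:
        result = future.result(timeout=seconds)
    except FutureTimeout:
        executor.shutdown(wait=False)
        return None, seconds
    executor.shutdown(wait=False)
    return result, time.time() - start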
Example #21
    type=float,
    default=1,
    help="Zoom factor",
)

args = parser.parse_args()
print(f"* Arguments:\n{pformat(vars(args))}")
# endregion

os.chdir(c["WORK_DIR"])
df = pd.read_csv(args.in_csv)

g = Generator(
    df=df,
    batch_size=1,
    shuffle=False,
    zoom=args.zoom,
    augmentation_options=None,
    image_output_size=tuple(args.size),
)


def _mapping(i):
    g[i]  # equivalent to g.__getitem__(i); only the caching side effect matters


with Pool(cpu_count()) as pool:
    list(
        tqdm(
            pool.imap(
                _mapping,
                range(df.shape[0]),
Example #22
parser.add_argument(
    "--size",
    type=int,
    default=[224, 224],
    nargs="+",
    help="Target image size (WxH)",
)

args = parser.parse_args()
print(f"* Arguments:\n{pformat(vars(args))}")
# endregion

g = Generator(
    csv_file=f"{gc['DATA_DIR']}/src/{args.set}.csv",
    images_src_dir=f"{gc['DATA_DIR']}/src/{args.set}_images",
    target_image_size=tuple(args.size),
    image_augmentation_options=None,
    cache_dir=gc["DATA_DIR"] + "/images_cache",
)


def _mapping(x):
    x, y = g.get_one(x,
                     use_cached=False,
                     write_cache=True,
                     normalize=False,
                     augment=False)


with Pool(cpu_count()) as pool:
    list(tqdm(
Example #23
    def __init__(self, description, path_to_settings):
        self.description = description
        self.path_to_generator = path_to_settings
        data = Generator(self.path_to_generator)
        self.intelligence = Engine(data)
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda',
                        default=False,
                        action='store_true',
                        help='Enable CUDA')
    args = parser.parse_args()
    use_cuda = args.cuda and torch.cuda.is_available()

    random.seed(SEED)
    np.random.seed(SEED)

    netG = Generator(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, G_LR, use_cuda)
    netD = Discriminator(VOCAB_SIZE, D_EMB_SIZE, D_NUM_CLASSES, D_FILTER_SIZES,
                         D_NUM_FILTERS, DROPOUT, D_LR, D_L2_REG, use_cuda)
    oracle = Oracle(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, use_cuda)

    # generating synthetic data
    # print('Generating data...')
    # generate_samples(oracle, BATCH_SIZE, GENERATED_NUM, REAL_FILE)

    # pretrain generator
    gen_set = GeneratorDataset(REAL_FILE)
    genloader = DataLoader(dataset=gen_set,
                           batch_size=BATCH_SIZE,
                           shuffle=True)

    print('\nPretraining generator...\n')
    for epoch in range(PRE_G_EPOCHS):
        loss = netG.pretrain(genloader)
        print('Epoch {} pretrain generator training loss: {}'.format(
            epoch + 1, loss))

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} pretrain generator val loss: {}'.format(
            epoch + 1, loss))

    # pretrain discriminator
    print('\nPretraining discriminator...\n')
    for epoch in range(D_STEPS):
        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
        dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
        disloader = DataLoader(dataset=dis_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)

        for _ in range(K_STEPS):
            loss = netD.dtrain(disloader)
            print('Epoch {} pretrain discriminator training loss: {}'.format(
                epoch + 1, loss))

    # adversarial training
    rollout = Rollout(netG,
                      update_rate=ROLLOUT_UPDATE_RATE,
                      rollout_num=ROLLOUT_NUM)
    print('\n#####################################################')
    print('Adversarial training...\n')

    for epoch in range(TOTAL_EPOCHS):
        for _ in range(G_STEPS):
            netG.pgtrain(BATCH_SIZE, SEQUENCE_LEN, rollout, netD)

        for d_step in range(D_STEPS):
            # train discriminator
            generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
            dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
            disloader = DataLoader(dataset=dis_set,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True)

            for k_step in range(K_STEPS):
                loss = netD.dtrain(disloader)
                print(
                    'D_step {}, K-step {} adversarial discriminator training loss: {}'
                    .format(d_step + 1, k_step + 1, loss))
        rollout.update_params()

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} adversarial generator val loss: {}'.format(
            epoch + 1, loss))
Example #25
    elif "generatortest.py" in argv[1]:
        FILE_PATH = argv[2]
    else:
        error_msg = ("Unexpected call from the command line: {}")
        raise SyntaxError(error_msg.format(" ".join(argv)))
else:
    error_msg = ("Please pass an arg for the path to a flair program.")
    raise SyntaxError(error_msg)

# store program into string variable
with open(FILE_PATH, "r") as flr:
    flairProgram = flr.read()

scanner = Scanner(flairProgram)
parser = Parser(scanner)
ast = parser.parse()
analyzer = Analyzer(ast)
symbolTable = analyzer.getSymbolTable()
generator = Generator(ast, symbolTable)
code = generator.generateCode()
if "/" in FILE_PATH:
    fileName = FILE_PATH[FILE_PATH.rindex("/") + 1:]
else:
    fileName = FILE_PATH
if "." in fileName:
    fileName = fileName[:fileName.rindex(".")]
fileName += ".tm"
with open(fileName, 'w+') as f:
    f.write(code)
print("TM code saved to file {}".format(fileName))
Example #26
############################################


pred = Predictor(weights_path, meta_path)
type(pred)
pred.predict('This is incredible! I love it, this is the best chicken I have ever had.')
pred.predict('god is love')

top_labels = pred.predict_top_k('god is love', top_k_n=5)


############################################
# Get generated text similar to as above
############################################

gen = Generator(weights_path, meta_path)

gen.generate(
    cond_text="In the beginning",
    bag_of_words='data/bow_confucianism.tsv',
    class_label=None,
)



############################################
# Recommendations
############################################

attr_labels = {
    "None": None,
def generateOtherLines(powersOfTwo, specMaker, name):
    lineLengths = powersOfTwo.copy()
    gamutSizes = powersOfTwo.copy()
    tasks = list(itertools.product(lineLengths, gamutSizes))
    results = []  # (lineLength, gamutSize, time)
    causes = []  # (lineLength, gamutSize, result)
    lines = []
    print(tasks)
    for (length, gamut) in tasks:
        print(length, gamut)
        try:
            cf = Generator(cantusSpec(length, gamut, '')).createNew()
            if cf == []:
                causes.append((length, gamut, 'Invalid cf'))
        except Exception:
            causes.append((length, gamut, 'Invalid cf'))
            continue

        try:
            g = Generator(specMaker([ConstPitch(x) for x in cf], gamut, ''))
        except Exception:
            causes.append((length, gamut, 'Invalid species'))
            continue

        i = 0
        while True:
            if i == 5:
                causes.append((length, gamut, 'over5'))
                break
            i += 1
            res, runtime = timeWithTimeout(lambda: g.createNew(), 10)
            # No more unique output for task
            if res == []:
                if i == 1:
                    causes.append((length, gamut, 'No possible species'))
                else:
                    causes.append((length, gamut, 'All outputs found'))
                break
            # timeout
            elif res is None:
                causes.append((length, gamut, 'timeout'))
                break
            else:
                out = (length, gamut, i, runtime)
                lines.append((length, gamut, res))
                results.append(out)

    success = pd.DataFrame(columns=['length', 'gamut', 'iteration', 'times'],
                           data=results)
    fails = pd.DataFrame(columns=['length', 'gamut', 'result'], data=causes)
    linesDF = pd.DataFrame(columns=['length', 'gamut', 'line'], data=lines)
    print(success)
    print(fails)
    print(lines)
    dt = str(datetime.datetime.now())
    success.to_csv(
        'output/generator/' + name + '/generate' + name + 'Success_Max:' +
        str(powersOfTwo[-1]) + "_Time:" + dt + '.csv',
        index=False,
    )
    fails.to_csv(
        'output/generator/' + name + '/generate' + name + 'Fails_Max:' +
        str(powersOfTwo[-1]) + "_Time:" + dt + '.csv',
        index=False,
    )
    linesDF.to_csv(
        'output/generator/' + name + '/generate' + name + 'Lines_Max:' +
        str(powersOfTwo[-1]) + '.csv',
        index=False,
    )
Example #28
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda',
                        default=False,
                        action='store_true',
                        help='Enable CUDA')
    args = parser.parse_args()
    use_cuda = args.cuda and torch.cuda.is_available()

    netG = Generator(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, use_cuda)
    netD = Discriminator(VOCAB_SIZE, D_EMB_SIZE, D_NUM_CLASSES, D_FILTER_SIZES,
                         D_NUM_FILTERS, DROPOUT, use_cuda)
    oracle = Oracle(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, use_cuda)

    if use_cuda:
        netG, netD, oracle = netG.cuda(), netD.cuda(), oracle.cuda()

    netG.create_optim(G_LR)
    netD.create_optim(D_LR, D_L2_REG)

    # generating synthetic data
    print('Generating data...')
    generate_samples(oracle, BATCH_SIZE, GENERATED_NUM, REAL_FILE)

    # pretrain generator
    gen_set = GeneratorDataset(REAL_FILE)
    genloader = DataLoader(dataset=gen_set,
                           batch_size=BATCH_SIZE,
                           shuffle=True)

    print('\nPretraining generator...\n')
    for epoch in range(PRE_G_EPOCHS):
        loss = netG.pretrain(genloader)
        print('Epoch {} pretrain generator training loss: {}'.format(
            epoch + 1, loss))

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} pretrain generator val loss: {}'.format(
            epoch + 1, loss))

    # pretrain discriminator
    print('\nPretraining discriminator...\n')
    for epoch in range(PRE_D_EPOCHS):
        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
        dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
        disloader = DataLoader(dataset=dis_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)

        for k_step in range(K_STEPS):
            loss = netD.dtrain(disloader)
            print(
                'Epoch {} K-step {} pretrain discriminator training loss: {}'.
                format(epoch + 1, k_step + 1, loss))

    print('\nStarting adversarial training...')
    for epoch in range(TOTAL_EPOCHS):

        nets = [copy.deepcopy(netG) for _ in range(POPULATION_SIZE)]
        population = [(net, evaluate(net, netD)) for net in nets]
        for g_step in range(G_STEPS):
            t_start = time.time()
            population.sort(key=lambda p: p[1], reverse=True)
            rewards = [p[1] for p in population[:PARENTS_COUNT]]
            reward_mean = np.mean(rewards)
            reward_max = np.max(rewards)
            reward_std = np.std(rewards)
            print(
                "Epoch %d step %d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f, time=%.2f s"
                % (epoch, g_step, reward_mean, reward_max, reward_std,
                   time.time() - t_start))

            elite = population[0]
            # generate next population
            prev_population = population
            population = [elite]
            for _ in range(POPULATION_SIZE - 1):
                parent_idx = np.random.randint(0, PARENTS_COUNT)
                parent = prev_population[parent_idx][0]
                net = mutate_net(parent, use_cuda)
                fitness = evaluate(net, netD)  # score the mutated child, not its parent
                population.append((net, fitness))

        netG = elite[0]

        for d_step in range(D_STEPS):
            # train discriminator
            generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
            dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
            disloader = DataLoader(dataset=dis_set,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True)

            for k_step in range(K_STEPS):
                loss = netD.dtrain(disloader)
                print(
                    'D_step {}, K-step {} adversarial discriminator training loss: {}'
                    .format(d_step + 1, k_step + 1, loss))

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} adversarial generator val loss: {}'.format(
            epoch + 1, loss))
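
mutate_net and evaluate are assumed by this snippet but not shown. A common evolution-strategies mutation is additive Gaussian noise on every parameter, sketched here (sigma is a guess, not the project's value):

import copy
import torch

def mutate_net(parent, use_cuda, sigma=0.01):
    # Copy the parent and perturb each parameter with Gaussian noise.
    child = copy.deepcopy(parent)
    for param in child.parameters():
        param.data.add_(torch.randn_like(param) * sigma)
    if use_cuda:
        child = child.cuda()
    return child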
Example #29
    def setUp(self):
        self.batch_size = 32
        self.df = pd.read_csv(f"{c['WORK_DIR']}/work.csv")
        self.generator = Generator(df=self.df)
Example #30
    def generate_qrangen_data(self):
        qrangen = Generator(self.mode, self.iterations, self.bits)
        data = qrangen.generate_number()
        self.save_data_to_disk(data, 'Qrangen')

        return data