Example no. 1
    def __init__(self, verbosity=True, latent_dim=100):
        img_shape = (128, 128, 3)

        the_disc = Discriminator()
        the_gen = Generator()
        self.discriminator = the_disc.define_discriminator(
            verb=verbosity, sample_shape=img_shape)
        self.generator = the_gen.define_generator(verb=verbosity,
                                                  sample_shape=img_shape,
                                                  latent_dim=latent_dim)
        optimizer = Adam(0.0002, 0.5)
        self.discriminator.compile(
            loss=['binary_crossentropy', 'categorical_crossentropy'],
            loss_weights=[0.5, 0.5],
            optimizer=optimizer,
            metrics=['accuracy'])

        # Freeze the discriminator only after compiling it: it still trains
        # on its own, but stays fixed inside the combined model below.
        self.discriminator.trainable = False

        noise = Input(shape=(latent_dim, ))
        img = self.generator(noise)

        valid, _ = self.discriminator(img)

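        # Stacked model: noise -> generator -> frozen discriminator; only
        # the generator's weights update when training on the validity output.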
        self.combined = Model(noise, valid)
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer)
Example no. 2
    def __init__(self, _device):
        self.device = _device
        self.batch_size = 64
        self.resolution = 28
        self.d_criterion = None
        self.d_optimizer = None
        self.g_criterion = None
        self.g_optimizer = None

        self.discriminator = Discriminator(num_layers=5,
                                           activations=["relu", "relu", "relu",
                                                        "sigmoid"],
                                           device=_device,
                                           num_nodes=[1, 64, 128, 64, 1],
                                           kernels=[5, 5, 3],
                                           strides=[2, 2, 2],
                                           dropouts=[.25, .25, 0],
                                           batch_size=64)

        # pass one image through the network so as to initialize the output
        # layer
        self.discriminator(torch.rand(
            size=[self.batch_size, 1, self.resolution, self.resolution]))

        self.generator = Generator(num_layers=6,
                                   activations=["relu", "relu", "relu", "relu",
                                                "tanh"],
                                   num_nodes=[1, 64, 128, 64, 64, 1],
                                   kernels=[3, 3, 3, 3],
                                   strides=[1, 1, 1, 1],
                                   batch_norms=[1, 1, 1, 0],
                                   upsamples=[1, 1, 0, 0],
                                   dropouts=[.25, .25, 0])
Example no. 3
def main():
    # Loading Parameters
    parser = init_parameters()
    args, _ = parser.parse_known_args()

    # Updating Parameters (cmd > yaml > default)
    args = update_parameters(parser, args)

    # Setting save_dir
    save_dir = get_save_dir(args)
    U.set_logging(save_dir)
    with open('{}/config.yaml'.format(save_dir), 'w') as f:
        yaml.dump(vars(args), f)

    # Processing
    if args.generate_data or args.generate_label:
        g = Generator(args)
        g.start()

    elif args.extract or args.visualization:
        if args.extract:
            p = Processor(args, save_dir)
            p.extract()
        if args.visualization:
            v = Visualizer(args)
            v.start()

    else:
        p = Processor(args, save_dir)
        p.start()
Example no. 4
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('read_path')
    arg_parser.add_argument('write_path')
    args = arg_parser.parse_args()

    with open(args.read_path, 'r') as source:
        text = source.read()

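        # Full pipeline: lex -> parse -> symbolize -> optimize -> graph ->
        # generate code -> run.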
        lexer = Lexer(text)
        tokens = lexer.lex()

        parser = Parser(tokens)
        ast = parser.parse()

        symbolizer = Symbolizer(ast)
        symbolizer.symbolize()

        optimizer = Optimizer(ast)
        optimizer.optimize()

        grapher = Grapher(ast)
        grapher.graph()

        generator = Generator(ast)
        generator.generate(args.write_path)

        runner = Runner(ast)
        runner.run()
Example no. 5
    def __init__(self, data, generator=None):
        # Columns 9 onward hold the indicator series consumed by the
        # generator.
        data_indicator = data.iloc[:, range(9, len(data.columns), 1)]
        if generator is None:
            self._generator = Generator(data_indicator)
        else:
            self._generator = generator
        self._data = data
Example no. 6
    def test_generator(self):
        for path in glob.glob("test/grader/*/src.pas"):
            dir = os.path.dirname(path)
            should_fail = not dir.endswith('16')
            with open(path, 'r') as source:
                print(f"testing {path}")
                text = source.read()
                lexer = Lexer(text)
                tokens = lexer.lex()
                parser = Parser(tokens)
                ast = parser.parse()
                symbolizer = Symbolizer(ast)
                symbolizer.symbolize()
                generator = Generator(ast, symbolizer)
                generator.generate()
                sol = os.path.join(dir, 'src.c')
                out = os.path.join(dir, 'out')
                if os.path.exists(sol):
                    os.remove(sol)
                if os.path.exists(out):
                    os.remove(out)
                generator.write(sol)
                p = None
                try:
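                    # Compile the generated C with gcc; a nonzero exit code
                    # fails the test.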
                    p = sp.Popen(['gcc', sol, '-o', out], stdout=sp.PIPE)
                    retCode = p.wait()
                    self.assertTrue(retCode == 0)
                    p.stdout.close()
                    #s = str(p.stdout.read())
                    #self.assertTrue(s == '')
                except Exception:
                    self.assertFalse(should_fail)
                for i in range(1, 5):
                    inFile = os.path.join(dir, str(i) + '.in')
                    outFile = os.path.join(dir, str(i) + '.out')
                    with open(inFile, 'r') as inText:
                        with open(outFile, 'r') as outText:
                            inText = inText.read()
                            outText = outText.read()
                            try:
                                of = sp.Popen([out],
                                              stdin=sp.PIPE,
                                              stdout=sp.PIPE)
                                of.stdin.write(inText.encode('utf-8'))
                                of.stdin.close()
                                rc = of.wait()
                                self.assertTrue(rc == 0)
                                b = of.stdout.read()
                                s = b.decode('utf-8')
                                of.stdout.close()
                                if (not should_fail):
                                    self.assertEqual(s, str(outText))
                            except Exception:
                                self.assertFalse(should_fail)

        self.assertTrue(True)


#Tests().test_grapher()
Example no. 7
def __collect_data(bca_inputs='bee_colony_inputs.txt', generator_inputs='settings.txt'):
    # settings holds [number of scouts, number of workers, number of steps].
    settings = input_output.read(bca_inputs)
    data = Generator(generator_inputs)
    intelligence = Engine(data)
    scouts = [Bee(intelligence) for _ in range(settings[0])]
    workers = [Bee(intelligence) for _ in range(settings[1])]
    steps = settings[2]
    hive = []
    return steps, scouts, workers, hive
Example no. 8
    def test_generator(self):
        g = Generator()
        w1 = Website('test1', num_pages=20)
        w2 = Website('test2', num_pages=20)
        g.generate(sites=[w1, w2], writeFile=True)
        self.assertEqual(len(g.sites), 2)
        self.assertEqual(len(g.sites[0].pages), 20)
        for i, page in enumerate(g.sites[0].pages):
            self.assertIn(f'<title>{w1.domain} - Page{i}</title>', page.html)
Example no. 9
    def setUp(self):
        self.batch_size = 32

        self.generator = Generator(
            csv_file=f"{gc['DATA_DIR']}/src/train.csv",
            images_src_dir=f"{gc['DATA_DIR']}/src/train_images",
            batch_size=self.batch_size,
            target_image_size=(224, 224),
            image_augmentation_options=default_image_augmenation_options,
            cache_dir=gc["DATA_DIR"] + "/images_cache",
        )
Example no. 10
    def createNew(self, species, length, gamutLength, cf):
        # A cantus firmus is generated from scratch, so its cache key must
        # not depend on an existing cf line.
        if species == 'cf':
            cf = ()
        cf = tuple(cf)

        # Cache one Generator per (species, length, gamut, cf) combination.
        key = (species, length, gamutLength, cf)
        if key in self.generators:
            return self.generators[key].createNew()
        else:
            if species == 'cf':
                g = Generator(cantusSpec(length, gamutLength, 'cf'))
            elif species == 's1':
                g = Generator(firstSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's1'))
            elif species == 's2':
                g = Generator(secondSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's2'))
            else:
                g = Generator(thirdSpeciesSpec([ConstPitch(x) for x in cf], gamutLength, 's3'))

            self.generators[key] = g
            return g.createNew()
Example no. 11
def main():
    args = parse_args()
    device = torch.device("cuda")

    generator = Generator().to(device)
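    # Optionally freeze everything except the shared embeddings and the
    # last decoder block.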
    if args.freeze:
        for name, param in generator.named_parameters():
            if ("shared" not in name) and ("decoder.block.5" not in name):
                param.requires_grad = False

    train_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "train/dialogues_train.txt"),
        tokenizer=generator.tokenizer,
    )
    valid_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "validation/dialogues_validation.txt"),
        tokenizer=generator.tokenizer,
    )

    print(len(train_dataset), len(valid_dataset))

    optimizer = AdamW(generator.parameters(), lr=args.lr)

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    best_loss = float("inf")

    for epoch in tqdm(range(args.num_epochs)):
        train_loss, valid_loss = [], []
        generator.train()
        for ind in np.random.permutation(len(train_dataset)):
            optimizer.zero_grad()
            context, reply = train_dataset.sample_dialouge(ind)
            context, reply = context.to(device), reply.to(device)
            loss = generator.get_loss(input_ids=context, labels=reply)
            loss.backward()
            optimizer.step()

            train_loss.append(loss.item())

        generator.eval()
        for ind in range(len(valid_dataset)):
            context, reply = valid_dataset[ind]
            context, reply = context.to(device), reply.to(device)
            with torch.no_grad():
                loss = generator.get_loss(input_ids=context, labels=reply)
            valid_loss.append(loss.item())

        train_loss, valid_loss = np.mean(train_loss), np.mean(valid_loss)
        print(
            f"Epoch {epoch + 1}, Train Loss: {train_loss:.2f}, Valid Loss: {valid_loss:.2f}"
        )
        if valid_loss < best_loss:
            best_loss = valid_loss
            torch.save(generator.state_dict(), args.output_path)
Example no. 12
    def __init__(self, window_height, window_width, button_height, screen):

        # Window parameters
        self.window_height = window_height
        self.window_width = window_width
        self.button_height = button_height
        self.screen = screen
        self.buttons = None
        self.clock = pg.time.Clock()

        # Maze parameters
        self.maze_size = 10
        self.scale = (self.window_width / self.maze_size)
        self.maze = Maze(self.maze_size, self.scale)
        self.maze_list = [self.maze.grid]
        self.build_maze = copy.deepcopy(self.maze.grid)

        # Initialize searcher, generator, learner objects
        self.generator = Generator()  # Used to generate mazes
        self.searcher = Searcher()  # Used to search mazes
        #self.learner = Learner()        # Used to simulate learning on mazes

        # Paths searched
        self.paths = {}
        self.shown_path = {}
        # Number of cells visited in each search
        self.visits = {"BFS": "-", "A*": "-", "DFS": "-"}
        # Length of the path found from the start to the end
        self.lengths = {"BFS": "-", "A*": "-", "DFS": "-"}

        # Reinforcement Learning parameters
        self.num_actions = 4
        self.num_states = self.maze.size * self.maze.size
        self.step_size = 0.5
        self.epsilon = 0.1
        self.discount = 0.9
        self.num_episodes = 50
        self.num_runs = 5
        self.all_states_visited = {}
        self.all_state_visits = {}
        self.all_reward_sums = {}

        # Colors
        self.path_colors = {
            "BFS": RED,
            "A*": ORANGE,
            "DFS": PINK,
            "Explore_BFS": DARK_GREEN,
            "Explore_A*": DARK_ORANGE,
            "Explore_DFS": PURPLE
        }
Example no. 13
def main():

    global window, ceilingtexture, floortexture, walltexture, map

    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(200, 200)

    window = glutCreateWindow('Experimental Maze')

    # Generate map.
    generator = Generator()
    map = generator.generateMap(16)

    # Represents a top-down view of the maze.
    # map = [
    #     [1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1],
    #     [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
    #     [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1]
    # ]

    # Load texture.
    texture = Texture()
    ceilingtexture = texture.loadImage('tex/ceiling.bmp')
    floortexture = texture.loadImage('tex/floor.bmp')
    walltexture = texture.loadImage('tex/wall.bmp')

    glutIgnoreKeyRepeat(1)
    glutKeyboardFunc(input.registerKeyDown)
    glutKeyboardUpFunc(input.registerKeyUp)

    glutDisplayFunc(drawScene)
    glutIdleFunc(drawScene)
    initGL(640, 480)
    glutMainLoop()
Example no. 14
    def __init__(self):
        self.logging = True

        # Game logic
        self.players = {}
        self.bots = {}
        self.botsInactive = {}

        self.bullets = {}
        self.bulletsIndex = 0

        self.geysers = {}
        self.geysersInactive = {}
        self.geysersIndex = 0

        self.doors = {}
        self.doorsIndex = 0

        self.fortified = {}
        self.fortifyIndex = 0

        # Game management
        self.titlesToUpdate = []
        self.deadConnections = []
        self.mapDimensions = MAPDIMENSIONS
        gen = Generator(self.mapDimensions[0], self.mapDimensions[1], -200)

        self.map = [gen.getUnderworld(), gen.getOverworld()]
        self.bouldersMap = gen.getBoulders()

        # Create initial boulders
        for floor in [0, 1]:
            for boulder in self.bouldersMap[floor]:
                boulderx = boulder[0]
                bouldery = boulder[1]
                self.map[floor][bouldery][boulderx] = BOULDER_CHAR

        # Create initial bots
        botId = 0
        for x in range(0, round(BOTAMOUNT)):

            if x % 2 == 0:  # 1/2 of bot amount in overworld
                self.botsInactive[botId] = Bot(self, self.getSpawnPosition(),
                                               1)
                botId += 1

            self.botsInactive[botId] = Bot(self,
                                           self.getSpawnPosition(floor=0), 0)
            botId += 1
Example no. 15
    def __init__(self, master_key, clipboard, keys_and_emojis={}):
        """
        @param master_key: See the documentation in the Listener class.

        @param clipboard: See the documentation in the Generator class.

        @param keys_and_emojis: Dictionary mapping keys to emojis.
        """

        self.__master_key = master_key
        self.__clipboard = clipboard
        self.__keys_and_emojis = keys_and_emojis

        self.__emoji_generator = Generator()
        self.__listener = None
Example no. 16
def test_run_generator():
    g = Generator()
    z_code = torch.randn(BATCH, D_Z)
    sent_emb = torch.randn(BATCH, D_HIDDEN)
    word_embs = torch.randn(BATCH, D_WORD, CAP_MAX_LEN)
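    # z_code: latent noise, sent_emb: sentence embedding, word_embs:
    # per-word embeddings (AttnGAN-style conditioning inputs).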
    gen, att, mu, logvar = g(z_code, sent_emb, word_embs, None)
    print('Generated shape:')
    for k in gen:
        print(gen[k].size())
    print('Attention shape:')
    for a in att:
        print(att[a].size())

    print(f'Mu shape: {mu.size()}    logvar shape: {logvar.size()}')
    return gen, att, mu, logvar
Example no. 17
def test_get_neighbours_0(param):
    inp1, inp2, answer = param

    cells = [
        generator._cells[3][3], generator._cells[2][2], generator._cells[1][3]
    ]

    for cell in cells:
        cell.accepted = True

    neighbours = generator._get_neighbours(
        Generator.Cell(Position(inp1, inp2)), lambda c: c.accepted)
    generator.debug()

    assert len(neighbours['roads']) == answer
Example no. 18
def main(args):
    mode = args.mode
    amount = args.number_amount
    bits_number = args.bits

    if mode not in range(3):
        raise Exception('Mode ' + str(mode) + ' not allowed.')

    if amount < 0:
        raise Exception('Amount ' + str(amount) + ' not allowed.')

    if bits_number < 0:
        raise Exception('Number of bits ' + str(bits_number) + ' not allowed.')

    qrangen = Generator(mode, amount, bits_number)
    print(qrangen.generate_number())
Example no. 19
    def generate(self, event):
        from src.generator import Generator
        import os
        import traceback

        schoolName = self.inputs["schoolName"].get()
        examName = self.inputs["examName"].get()
        authorityName = self.inputs["authorityName"].get()

        if (not schoolName) and (not examName):
            messagebox.showwarning("Required",
                                   "School Name and Exam Name are required!!")
            return
        elif not schoolName:
            messagebox.showwarning("Required", "School Name is required!!")
            return
        elif not examName:
            messagebox.showwarning("Required", "Exam Name is required!!")
            return

        try:
            self.waitLabel["text"] = "Wait..."
            factory = Generator(
                schoolName,
                examName,
                self.file,
                self.signatureFile.name if self.signatureFile else None,
                authorityName,
            )

            if not os.path.isdir("pdfs"):
                # Mode 0o666 would leave the directory without the execute
                # bit, so files could not be created inside it.
                os.mkdir("pdfs", 0o777)

            print("Starting to generate PDFs")

            for student in factory.getCSV():
                factory.generatePDF(student["Roll Number"], log=True)
            self.waitLabel["text"] = ""
            messagebox.showinfo("Success!!",
                                "Generated PDFs!! Check the 'pdfs' folder")
        except Exception:
            # e.with_traceback() requires a traceback argument and would
            # raise a TypeError here; print the full traceback instead.
            traceback.print_exc()
            self.waitLabel["text"] = ""
            messagebox.showerror("Error!!",
                                 "Something went wrong!! Please try again!!")
Example no. 20
    def __init__(self, damsm, device=DEVICE):
        self.gen = Generator(device)
        self.disc = Discriminator(device)
        self.damsm = damsm.to(device)
        # Use the DAMSM encoders as fixed feature extractors.
        self.damsm.txt_enc.eval()
        self.damsm.img_enc.eval()
        freeze_params_(self.damsm.txt_enc)
        freeze_params_(self.damsm.img_enc)

        self.device = device
        self.gen.apply(init_weights)
        self.disc.apply(init_weights)

        self.gen_optimizer = torch.optim.Adam(self.gen.parameters(),
                                              lr=GENERATOR_LR,
                                              betas=(0.5, 0.999))

        self.discriminators = [self.disc.d64, self.disc.d128, self.disc.d256]
        self.disc_optimizers = [
            torch.optim.Adam(d.parameters(),
                             lr=DISCRIMINATOR_LR,
                             betas=(0.5, 0.999)) for d in self.discriminators
        ]
Example no. 21
    def setUp(self):
        self.batch_size = 32
        self.df = pd.read_csv(f"{c['WORK_DIR']}/work.csv")
        self.generator = Generator(df=self.df)
Example no. 22
parser.add_argument(
    "--size",
    type=int,
    default=[224, 224],
    nargs="+",
    help="Target image size (WxH)",
)

args = parser.parse_args()
print(f"* Arguments:\n{pformat(vars(args))}")
# endregion

g = Generator(
    csv_file=f"{gc['DATA_DIR']}/src/{args.set}.csv",
    images_src_dir=f"{gc['DATA_DIR']}/src/{args.set}_images",
    target_image_size=tuple(args.size),
    image_augmentation_options=None,
    cache_dir=gc["DATA_DIR"] + "/images_cache",
)


def _mapping(x):
    # get_one() writes the processed image to the cache as a side effect;
    # the returned pair is deliberately unused.
    g.get_one(x,
              use_cached=False,
              write_cache=True,
              normalize=False,
              augment=False)


with Pool(cpu_count()) as pool:
    # Assumption: the pool maps _mapping over every sample index (len(g)),
    # mirroring Example no. 30 below.
    list(tqdm(pool.imap(_mapping, range(len(g)))))
Example no. 23
    def __init__(self, description, path_to_settings):
        self.description = description
        self.path_to_generator = path_to_settings
        data = Generator(self.path_to_generator)
        self.intelligence = Engine(data)
Example no. 24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cuda',
                        default=False,
                        action='store_true',
                        help='Enable CUDA')
    args = parser.parse_args()
    use_cuda = args.cuda and torch.cuda.is_available()

    random.seed(SEED)
    np.random.seed(SEED)

    netG = Generator(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, G_LR, use_cuda)
    netD = Discriminator(VOCAB_SIZE, D_EMB_SIZE, D_NUM_CLASSES, D_FILTER_SIZES,
                         D_NUM_FILTERS, DROPOUT, D_LR, D_L2_REG, use_cuda)
    oracle = Oracle(VOCAB_SIZE, G_EMB_SIZE, G_HIDDEN_SIZE, use_cuda)

    # generating synthetic data
    # print('Generating data...')
    # generate_samples(oracle, BATCH_SIZE, GENERATED_NUM, REAL_FILE)

    # pretrain generator
    gen_set = GeneratorDataset(REAL_FILE)
    genloader = DataLoader(dataset=gen_set,
                           batch_size=BATCH_SIZE,
                           shuffle=True)

    print('\nPretraining generator...\n')
    for epoch in range(PRE_G_EPOCHS):
        loss = netG.pretrain(genloader)
        print('Epoch {} pretrain generator training loss: {}'.format(
            epoch + 1, loss))

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} pretrain generator val loss: {}'.format(
            epoch + 1, loss))

    # pretrain discriminator
    print('\nPretraining discriminator...\n')
    for epoch in range(D_STEPS):
        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
        dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
        disloader = DataLoader(dataset=dis_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)

        for _ in range(K_STEPS):
            loss = netD.dtrain(disloader)
            print('Epoch {} pretrain discriminator training loss: {}'.format(
                epoch + 1, loss))

    # adversarial training
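    # Rollout estimates rewards for partially generated sequences via
    # Monte Carlo search, as in SeqGAN-style policy-gradient training.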
    rollout = Rollout(netG,
                      update_rate=ROLLOUT_UPDATE_RATE,
                      rollout_num=ROLLOUT_NUM)
    print('\n#####################################################')
    print('Adversarial training...\n')

    for epoch in range(TOTAL_EPOCHS):
        for _ in range(G_STEPS):
            netG.pgtrain(BATCH_SIZE, SEQUENCE_LEN, rollout, netD)

        for d_step in range(D_STEPS):
            # train discriminator
            generate_samples(netG, BATCH_SIZE, GENERATED_NUM, FAKE_FILE)
            dis_set = DiscriminatorDataset(REAL_FILE, FAKE_FILE)
            disloader = DataLoader(dataset=dis_set,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True)

            for k_step in range(K_STEPS):
                loss = netD.dtrain(disloader)
                print(
                    'D_step {}, K-step {} adversarial discriminator training loss: {}'
                    .format(d_step + 1, k_step + 1, loss))
        rollout.update_params()

        generate_samples(netG, BATCH_SIZE, GENERATED_NUM, EVAL_FILE)
        val_set = GeneratorDataset(EVAL_FILE)
        valloader = DataLoader(dataset=val_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True)
        loss = oracle.val(valloader)
        print('Epoch {} adversarial generator val loss: {}'.format(
            epoch + 1, loss))
Example no. 25
############################################


pred = Predictor(weights_path, meta_path)
pred.predict('This is incredible! I love it, this is the best chicken I have ever had.')
pred.predict('god is love')

top_labels = pred.predict_top_k('god is love', top_k_n=5)


############################################
# Generate text, similar to the predictor usage above
############################################

gen = Generator(weights_path, meta_path)

gen.generate(
    cond_text="In the beginning",
    bag_of_words='data/bow_confucianism.tsv',
    class_label=None,
)



############################################
# Recommendations
############################################

attr_labels = {
    "None": None,
Example no. 26
def generateOtherLines(powersOfTwo, specMaker, name):
    lineLengths = powersOfTwo.copy()
    gamutSizes = powersOfTwo.copy()
    tasks = list(itertools.product(lineLengths, gamutSizes))
    results = []  # (lineLength, gamutSize, iteration, time)
    causes = []  # (lineLength, gamutSize, result)
    lines = []
    print(tasks)
    for (length, gamut) in tasks:
        print(length, gamut)
        try:
            cf = Generator(cantusSpec(length, gamut, '')).createNew()
            if cf == []:
                causes.append((length, gamut, 'Invalid cf'))
        except Exception:
            causes.append((length, gamut, 'Invalid cf'))
            continue

        try:
            g = Generator(specMaker([ConstPitch(x) for x in cf], gamut, ''))
        except Exception:
            causes.append((length, gamut, 'Invalid species'))
            continue

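        # Request up to 5 unique lines, giving each attempt a 10-second
        # budget via timeWithTimeout.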
        i = 0
        while True:
            if i == 5:
                causes.append((length, gamut, 'over5'))
                break
            i += 1
            res, runtime = timeWithTimeout(lambda: g.createNew(), 10)
            # No more unique output for task
            if res == []:
                if i == 1:
                    causes.append((length, gamut, 'No possible species'))
                else:
                    causes.append((length, gamut, 'All outputs found'))
                break
            #timeout
            elif res is None:
                causes.append((length, gamut, 'timeout'))
                break
            else:
                out = (length, gamut, i, runtime)
                lines.append((length, gamut, res))
                results.append(out)

    success = pd.DataFrame(columns=['length', 'gamut', 'iteration', 'times'],
                           data=results)
    fails = pd.DataFrame(columns=['length', 'gamut', 'result'], data=causes)
    linesDF = pd.DataFrame(columns=['length', 'gamut', 'line'], data=lines)
    print(success)
    print(fails)
    print(lines)
    dt = str(datetime.datetime.now())
    success.to_csv(
        'output/generator/' + name + '/generate' + name + 'Success_Max:' +
        str(powersOfTwo[-1]) + "_Time:" + dt + '.csv',
        index=False,
    )
    fails.to_csv(
        'output/generator/' + name + '/generate' + name + 'Fails_Max:' +
        str(powersOfTwo[-1]) + "_Time:" + dt + '.csv',
        index=False,
    )
    linesDF.to_csv(
        'output/generator/' + name + '/generate' + name + 'Lines_Max:' +
        str(powersOfTwo[-1]) + '.csv',
        index=False,
    )
Example no. 27
def main():
    args = parse_args()
    device = torch.device("cuda")

    generator = Generator.from_file(args.generator_path).to(device)
    if args.freeze:
        for name, param in generator.named_parameters():
            if ("shared" not in name) and ("decoder.block.5" not in name):
                param.requires_grad = False
    discriminator = Discriminator.from_file(
        args.discriminator_path, tokenizer=generator.tokenizer
    ).to(device)
    if args.freeze:
        for name, param in discriminator.named_parameters():
            if ("shared" not in name) and ("decoder.block.5" not in name):
                param.requires_grad = False
    train_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "train/dialogues_train.txt"),
        tokenizer=generator.tokenizer,
        debug=args.debug,
    )
    valid_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "validation/dialogues_validation.txt"),
        tokenizer=generator.tokenizer,
        debug=args.debug,
    )

    print(len(train_dataset), len(valid_dataset))

    generator_optimizer = AdamW(generator.parameters(), lr=args.lr)
    discriminator_optimizer = AdamW(discriminator.parameters(), lr=args.lr)

    rewards = deque([], maxlen=args.log_every * args.generator_steps)
    rewards_real = deque([], maxlen=args.log_every * args.generator_steps)
    generator_loss = deque([], maxlen=args.log_every * args.generator_steps)
    discriminator_loss = deque([], maxlen=args.log_every * args.discriminator_steps)
    best_reward = 0

    generator.train()
    discriminator.train()

    for iteration in tqdm(range(args.num_iterations)):
        for _ in range(args.discriminator_steps):
            discriminator_optimizer.zero_grad()
            context, real_reply = train_dataset.sample()
            context, real_reply = (
                context.to(device),
                real_reply.to(device),
            )
            fake_reply = generator.generate(context, do_sample=True)

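            # With --regs, both replies are randomly truncated so the
            # discriminator also learns to score partial sequences.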
            if args.regs:
                split_real = random.randint(1, real_reply.size(1))
                real_reply = real_reply[:, :split_real]
                split_fake = random.randint(1, fake_reply.size(1))
                fake_reply = fake_reply[:, :split_fake]

            loss, _, _ = discriminator.get_loss(context, real_reply, fake_reply)
            loss.backward()
            discriminator_optimizer.step()

            discriminator_loss.append(loss.item())

        for _ in range(args.generator_steps):
            generator_optimizer.zero_grad()
            context, real_reply = train_dataset.sample()
            context, real_reply = (
                context.to(device),
                real_reply.to(device),
            )
            fake_reply = generator.generate(context, do_sample=True)

            logprob_fake = generator.get_logprob(context, fake_reply)
            reward_fake = discriminator.get_reward(context, fake_reply)

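            # REINFORCE baseline: subtract the running mean of recent
            # rewards to reduce policy-gradient variance.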
            baseline = 0 if len(rewards) == 0 else np.mean(list(rewards))

            if args.regs:
                partial_rewards = torch.tensor(
                    [
                        discriminator.get_reward(context, fake_reply[:, :t])
                        for t in range(1, fake_reply.size(1) + 1)
                    ]
                ).to(device)
                loss = -torch.mean(partial_rewards * logprob_fake)

            else:
                loss = -(reward_fake - baseline) * torch.mean(logprob_fake)

            if args.teacher_forcing:
                logprob_real = generator.get_logprob(context, real_reply)
                reward_real = discriminator.get_reward(context, real_reply)
                loss -= torch.mean(logprob_real)
                rewards_real.append(reward_real)

            loss.backward()
            generator_optimizer.step()

            generator_loss.append(loss.item())
            rewards.append(reward_fake)

        if iteration % args.log_every == 0:
            mean_reward = np.mean(list(rewards))
            mean_reward_real = np.mean(list(rewards_real))

            if args.discriminator_steps > 0:
                print(f"Discriminator Loss {np.mean(list(discriminator_loss))}")
            if args.generator_steps > 0:
                print(f"Generator Loss {np.mean(list(generator_loss))}")
                if args.teacher_forcing:
                    print(f"Mean real reward: {mean_reward_real}")
                print(f"Mean fake reward: {mean_reward}\n")

            context, real_reply = valid_dataset.sample()
            context, real_reply = (
                context.to(device),
                real_reply.to(device),
            )
            fake_reply = generator.generate(context, do_sample=True)
            reward_fake = discriminator.get_reward(context, fake_reply)

            print_dialogue(
                context=context,
                real_reply=real_reply,
                fake_reply=fake_reply,
                tokenizer=generator.tokenizer,
            )
            print(f"Reward: {reward_fake}\n")

            if mean_reward > best_reward:
                best_reward = mean_reward
                torch.save(discriminator.state_dict(), args.discriminator_output_path)
                torch.save(generator.state_dict(), args.generator_output_path)
            torch.save(
                discriminator.state_dict(), "all_" + args.discriminator_output_path
            )
            torch.save(generator.state_dict(), "all_" + args.generator_output_path)
Example no. 28
def main():
    args = parse_args()
    device = torch.device("cuda")

    generator = Generator.from_file(args.generator_path).to(device)
    generator.eval()
    discriminator = Discriminator(tokenizer=generator.tokenizer).to(device)
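    # The generator stays frozen in eval mode; only the discriminator is
    # trained to separate real from generated replies.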

    train_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "train/dialogues_train.txt"),
        tokenizer=generator.tokenizer,
    )
    valid_dataset = DailyDialogueDataset(
        path_join(args.dataset_path, "validation/dialogues_validation.txt"),
        tokenizer=generator.tokenizer,
    )

    print(len(train_dataset), len(valid_dataset))

    optimizer = AdamW(discriminator.parameters(), lr=args.lr)

    for epoch in tqdm(range(args.num_epochs)):
        train_loss, valid_loss = [], []
        rewards_real, rewards_fake, accuracy = [], [], []
        discriminator.train()
        for ind in np.random.permutation(len(train_dataset)):
            optimizer.zero_grad()
            context, real_reply = train_dataset.sample_dialouge(ind)
            context, real_reply = (
                context.to(device),
                real_reply.to(device),
            )
            fake_reply = generator.generate(context, do_sample=True)

            loss, _, _ = discriminator.get_loss(context, real_reply,
                                                fake_reply)
            loss.backward()
            optimizer.step()

            train_loss.append(loss.item())

        discriminator.eval()
        real_replies, fake_replies = [], []
        for ind in range(len(valid_dataset)):
            context, real_reply = valid_dataset[ind]
            context, real_reply = (
                context.to(device),
                real_reply.to(device),
            )
            fake_reply = generator.generate(context, do_sample=True)

            with torch.no_grad():
                loss, reward_real, reward_fake = discriminator.get_loss(
                    context, real_reply, fake_reply)
            valid_loss.append(loss.item())
            rewards_real.append(reward_real)
            rewards_fake.append(reward_fake)
            accuracy.extend([reward_real > 0.5, reward_fake < 0.5])

            real_reply, fake_reply = (
                generator.tokenizer.decode(real_reply[0]),
                generator.tokenizer.decode(fake_reply[0]),
            )
            real_replies.append(real_reply)
            fake_replies.append(fake_reply)

        train_loss, valid_loss = np.mean(train_loss), np.mean(valid_loss)
        print(
            f"Epoch {epoch + 1}, Train Loss: {train_loss:.2f}, Valid Loss: {valid_loss:.2f}, Reward real: {np.mean(rewards_real):.2f}, Reward fake: {np.mean(rewards_fake):.2f}, Accuracy: {np.mean(accuracy):.2f}"
        )
        print(f"Adversarial accuracy, {np.mean(accuracy):.2f}")
        for order in range(1, 5):
            print(
                f"BLEU-{order}: {bleuscore(real_replies, fake_replies, order=order)}"
            )
        print(f"DIST-1: {dist1(fake_replies)}")
        print(f"DIST-2: {dist2unbiased(fake_replies)}")
Example no. 29
def generateCanti(powersOfTwo):
    lineLengths = powersOfTwo.copy()
    gamutSizes = powersOfTwo.copy()
    tasks = list(itertools.product(lineLengths, gamutSizes))
    results = []  # (lineLength, gamutSize, iteration, time)
    causes = []  # (lineLength, gamutSize, result)
    cfs = []  # (lineLength, gamutSize, line)
    print(tasks)
    for (length, gamut) in tasks:
        print((length, gamut))

        try:
            g = Generator(cantusSpec(length, gamut, ''))
        except Exception:
            causes.append((length, gamut, 'Invalid'))
            continue

        i = 0
        while True:
            if i == 5:
                causes.append((length, gamut, 'over5'))
                break
            i += 1
            res, runtime = timeWithTimeout(lambda: g.createNew(), 10)

            # No more unique output for task
            if res == []:
                if i == 1:
                    causes.append((length, gamut, 'No possible lines'))
                else:
                    causes.append((length, gamut, 'All outputs found'))
                break
            #timeout
            elif res is None:
                causes.append((length, gamut, 'timeout'))
                break
            else:
                out = (length, gamut, i, runtime)
                cfs.append((length, gamut, res))
                results.append(out)

    success = pd.DataFrame(columns=['length', 'gamut', 'iteration', 'times'],
                           data=results)
    fails = pd.DataFrame(columns=['length', 'gamut', 'result'], data=causes)
    lines = pd.DataFrame(columns=['length', 'gamut', 'line'], data=cfs)
    print(success)
    print(fails)
    print(cfs)
    dt = str(datetime.datetime.now())
    success.to_csv(
        'output/generator/cf/generateCFSuccess_Max:' + str(powersOfTwo[-1]) +
        "_Time:" + dt + '.csv',
        index=False,
    )
    fails.to_csv(
        'output/generator/cf/generateCFFails_Max:' + str(powersOfTwo[-1]) +
        "_Time:" + dt + '.csv',
        index=False,
    )
    lines.to_csv(
        'output/generator/cf/generateCFCFs_Max:' + str(powersOfTwo[-1]) +
        '.csv',
        index=False,
    )
Example no. 30
# The flag is read below as args.zoom.
parser.add_argument(
    "--zoom",
    type=float,
    default=1,
    help="Zoom factor",
)

args = parser.parse_args()
print(f"* Arguments:\n{pformat(vars(args))}")
# endregion

os.chdir(c["WORK_DIR"])
df = pd.read_csv(args.in_csv)

g = Generator(
    df=df,
    batch_size=1,
    shuffle=False,
    zoom=args.zoom,
    augmentation_options=None,
    image_output_size=tuple(args.size),
)


def _mapping(i):
    # Building batch i fills the image cache as a side effect.
    g[i]


with Pool(cpu_count()) as pool:
    list(
        tqdm(
            pool.imap(
                _mapping,
                range(df.shape[0]),
            ),
        )
    )