Example 1
    def train(self,
              generator: StackedMNISTData,
              epochs: int = 10,
              batch_size: int = None):
        batch_size = generator.default_batch_size if batch_size is None else batch_size
        try:  # Try to load pretrained
            self.generator.load_weights(filepath=self.model_fp +
                                        ".generator.weights.h5")
            self.discriminator.load_weights(filepath=self.model_fp +
                                            ".discriminator.weights.h5")
            print(f"Loaded models successfully")
            trained = True
        except Exception:  # Otherwise we need to train
            print(
                f"Failed loading {self.model_fp + '(generator/discriminator).weights.h5'}"
            )
            trained = False

        if self.must_learn or not trained:

            for _ in tqdm(range(epochs), token="", chat_id=''):
                for index, (actual_images, _) in enumerate(
                        generator.batch_generator(batch_size=batch_size)):
                    # gen fakes
                    generated_images = self.generator.predict(
                        np.random.uniform(-1,
                                          1,
                                          size=(batch_size,
                                                self.encoding_dim)),
                        verbose=0)
                    # get all images on same scale (-1,1)
                    all_images = np.concatenate(
                        (2. * actual_images - 1., generated_images))
                    y = np.asarray([1] * actual_images.shape[0] +
                                   [0] * batch_size)

                    # train
                    _ = self.discriminator.train_on_batch(all_images, y)
                    self.discriminator.trainable = False
                    _ = self.gan.train_on_batch(
                        np.random.uniform(-1, 1,
                                          (batch_size, self.encoding_dim)),
                        [1] * batch_size)
                    self.discriminator.trainable = True

                self.generator.save_weights(
                    self.model_fp + ".generator.weights.h5", overwrite=True)
                self.discriminator.save_weights(
                    self.model_fp + ".discriminator.weights.h5", overwrite=True)
        return
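
A note on the pattern above: the discriminator first trains on a mixed batch of real images (label 1) and generated images (label 0), then stays frozen while the stacked GAN trains, so the generator learns to produce outputs the discriminator scores as real. A minimal runnable sketch of that freeze-then-compile idiom, with toy Keras models standing in for the example's generator and discriminator (all names below are hypothetical):

import numpy as np
from tensorflow import keras

latent_dim, img_dim, n = 8, 4, 16
gen = keras.Sequential([keras.layers.Dense(img_dim, input_shape=(latent_dim,))])
disc = keras.Sequential(
    [keras.layers.Dense(1, activation="sigmoid", input_shape=(img_dim,))])
disc.compile(optimizer="adam", loss="binary_crossentropy")

# Freeze the discriminator before compiling the stacked model; Keras
# snapshots the trainable flags at compile time.
disc.trainable = False
gan = keras.Sequential([gen, disc])
gan.compile(optimizer="adam", loss="binary_crossentropy")

z = np.random.uniform(-1, 1, size=(n, latent_dim))
real = np.random.uniform(-1, 1, size=(n, img_dim))  # stand-in for real data
fake = gen.predict(z, verbose=0)
disc.train_on_batch(np.concatenate([real, fake]),
                    np.asarray([1] * n + [0] * n))  # discriminator step
gan.train_on_batch(z, np.ones(n))                   # generator step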
Example 2
    def run(self, save_path_prefix, viz_game: list = None):
        # Avoid a mutable default argument
        viz_game = viz_game if viz_game is not None else []

        # Save initial ANET
        if self.save_agents:
            self.agent.anet.save_model(f"{save_path_prefix}_0")
        if self.rbuf_path is not None:
            self.agent.rbuf.load(self.rbuf_path)

        losses = []

        for gameNum in tqdm(range(self.actual_games), token="", chat_id=''):

            # root_node = Node(self.env.get_start_state(), None)
            # mcts = MCTS(env=self.env, root_node=root_node, e_greedy=self.e_greedy)
            """
            while not self.env.terminal(mcts.root.state):
                mcts.search_games(max_amount=self.search_games,
                                  max_time=self.max_search_time,
                                  default_policy=self.agent.default_policy,
                                  exploration_factor=1.)

                root_state, E = mcts.get_root_edge_visits()
                self.agent.retain(root_state, E)

                action = np.random.choice(list(E.keys()), p=(list(E.values()))/np.sum(list(E.values())))

                mcts.set_root(action)

                if gameNum in viz_game:
                    print(self.env.show(mcts.root.state))
            """
            history = self.agent.train_rbuf(verbose=False)
            losses.append(history.history["loss"])

            # If we hit an interval, save for TOPP. If game_count % M == 0: save ANET.
            if (self.save_agents >
                    1) and (not (gameNum + 1) % int(self.actual_games /
                                                    (self.save_agents - 1))):
                self.agent.anet.save_model(
                    f"{save_path_prefix}_{gameNum+3451}")

        # Extend the saved rbuf with new data
        if self.rbuf_path is not None:
            self.agent.rbuf.save(self.rbuf_path)

        plt.plot(losses)
        plt.show()
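
The commented-out search loop above picks the next move by sampling in proportion to the root edges' visit counts. Because np.random.choice rejects non-numeric candidates such as tuple actions, sampling an index is the safer form; a small sketch with hypothetical visit counts:

import numpy as np

E = {(0, 1): 10, (1, 1): 30, (2, 0): 60}  # hypothetical edge-visit counts
actions, counts = zip(*E.items())
p = np.asarray(counts, dtype=float)
p /= p.sum()
idx = np.random.choice(len(actions), p=p)  # sample an index, not the key itself
action = actions[idx]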
Example 3
    def run_episodes(self):
        for ep in tqdm(range(self.eps), token="", chat_id=''):
            self.run_episode()
            # if not ep % 100 and ep:
            #     self.visualize_learning_progression()

        # If the critic is a neural net, save it so we don't have to retrain (training takes ~20 minutes).
        if isinstance(self.agent.critic, CriticNN) and self.save_model:
            self.agent.critic.keras_model.save(f"models/{board_type}_{size}")
        # Now run single visualized run with e-greedy = 0
        self.agent.actor.e_greedy = 0
        self.run_episode()

        print(f"For the last episode we will use a \u03B5-greedy rate of {self.agent.actor.e_greedy}.")
        print(f"Amount of pegs remaining when using this configuration: {self.remaining_pegs_list[-1]} \n")

        self.visualize_learning_progression()

        if self.display_at_end:
            self.visualize_one_episode()
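
The run above ends with one greedy episode: setting the actor's ε to 0 disables exploration so the learned policy is followed exactly. A minimal sketch of the ε-greedy rule this toggles, assuming a value table q over actions (hypothetical names):

import random

def pick_action(actions, q, epsilon):
    # Explore with probability epsilon, otherwise exploit the best-known action.
    if random.random() < epsilon:
        return random.choice(actions)
    return max(actions, key=lambda a: q.get(a, 0.0))

With epsilon=0 the random branch never fires, matching the final visualized episode.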
Example 4
def download(episode, context) -> str:
    res = requests.get(episode.url, allow_redirects=True, stream=True)
    if res.status_code != 200:
        raise Exception(
            f"Error when downloading audio, status: {res.status_code}.")
    block_size = 1024  # 1 KiB
    if context.user_data:
        chat_id = context.user_data['chat_id']
        path = f"public/audio/{context.user_data['podcast']}/{episode.title}.mp3"
        validate_path(path)
        total = int(res.headers.get('content-length', 0))
        progress_bar = tqdm(total=total,
                            unit='iB',
                            token=bot_token,
                            chat_id=chat_id,
                            bar_format='{percentage:3.0f}% |{bar:6}|')
        with open(path, 'wb') as f:
            for data in res.iter_content(block_size):
                progress_bar.update(len(data))
                f.write(data)
            message_id = progress_bar.tgio.message_id
        context.bot.delete_message(chat_id, message_id)
        progress_bar.close()
        if total != 0 and progress_bar.n != total:
            raise Exception("Error: Something went wrong with progress bar.")
    else:
        path = f"public/audio/new/{episode.title}.mp3"
        validate_path(path)
        if not os.path.exists(os.path.dirname(path)):
            try:
                os.makedirs(os.path.dirname(path))
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        with open(path, 'wb') as f:
            for data in res.iter_content(block_size):
                f.write(data)
    return path
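
The progress bar in this example is the Telegram-enabled tqdm (tqdm.contrib.telegram), which mirrors the bar to a chat through a bot. A minimal, self-contained sketch of the same manual-update download pattern, assuming a valid bot token and chat id are passed in:

import requests
from tqdm.contrib.telegram import tqdm

def fetch(url: str, path: str, bot_token: str, chat_id: str) -> None:
    res = requests.get(url, stream=True)
    res.raise_for_status()
    total = int(res.headers.get("content-length", 0))
    # total=0 gives an indeterminate bar; update() still counts bytes
    with tqdm(total=total, unit="iB", unit_scale=True,
              token=bot_token, chat_id=chat_id) as bar, open(path, "wb") as f:
        for chunk in res.iter_content(1024):
            bar.update(len(chunk))
            f.write(chunk)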
Example 5
def main_worker(gpu, ngpus_per_node, args):

    args.gpu = gpu

    ## Load models
    s = SpeakerNet(**vars(args))

    if args.distributed:
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = args.port

        dist.init_process_group(backend='nccl',
                                world_size=ngpus_per_node,
                                rank=args.gpu)

        torch.cuda.set_device(args.gpu)
        s.cuda(args.gpu)

        s = torch.nn.parallel.DistributedDataParallel(
            s, device_ids=[args.gpu], find_unused_parameters=True)

        print('Loaded the model on GPU %d' % args.gpu)

    else:
        s = WrappedModel(s).cuda(args.gpu)

    it = 1

    ## Write args to scorefile
    scorefile = open(args.result_save_path + "/scores.txt", "a+")

    ## Initialise trainer and data loader
    trainLoader = get_data_loader(args.train_list, **vars(args))
    trainer = ModelTrainer(s, **vars(args))

    ## Load model weights
    modelfiles = glob.glob('%s/model0*.model' % args.model_save_path)
    modelfiles.sort()

    if len(modelfiles) >= 1:
        trainer.loadParameters(modelfiles[-1])
        print("Model %s loaded from previous state!" % modelfiles[-1])
        it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
    elif (args.initial_model != ""):
        trainer.loadParameters(args.initial_model)
        print("Model %s loaded!" % args.initial_model)

    ## Fast-forward the LR scheduler to the resumed epoch
    for ii in range(1, it):
        trainer.__scheduler__.step()

    ## Evaluation code - must run on single GPU
    if args.eval:

        pytorch_total_params = sum(p.numel()
                                   for p in s.module.__S__.parameters())

        print('Total parameters: ', pytorch_total_params)
        print('Test list', args.test_list)

        assert not args.distributed

        sc, lab, _ = trainer.evaluateFromList(**vars(args))
        result = tuneThresholdfromScore(sc, lab, [1, 0.1])

        p_target = 0.05
        c_miss = 1
        c_fa = 1

        fnrs, fprs, thresholds = ComputeErrorRates(sc, lab)

        sc_np = numpy.array(sc)
        lab_np = numpy.array(lab)
        print("len of thresholds {}".format(len(thresholds)))
        # Best raw accuracy achievable over the candidate thresholds
        m = -1
        for th in thresholds:
            temp = ((sc_np > th) == lab_np).mean()
            if m < temp:
                m = temp
        print("max predict {}".format(m))

        mindcf, threshold = ComputeMinDcf(fnrs, fprs, thresholds, p_target,
                                          c_miss, c_fa)

        print('EER %2.4f MinDCF %.5f' % (result[1], mindcf))
        quit()

    ## Save training code and params
    if args.gpu == 0:
        pyfiles = glob.glob('./*.py')
        strtime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

        zipf = zipfile.ZipFile(args.result_save_path + '/run%s.zip' % strtime,
                               'w', zipfile.ZIP_DEFLATED)
        for file in pyfiles:
            zipf.write(file)
        zipf.close()

        with open(args.result_save_path + '/run%s.cmd' % strtime, 'w') as f:
            f.write('%s' % args)

    ## Core training script
    for it in tqdm(range(it, args.max_epoch + 1),
                   token='1743850525:AAHSAuTeZHBF0Y0tNymhQBYJQvDKAhFEM9M',
                   chat_id='1494865372'):

        clr = [x['lr'] for x in trainer.__optimizer__.param_groups]

        print(
            time.strftime("%Y-%m-%d %H:%M:%S"), it,
            "Training epoch %d on GPU %d with LR %f " %
            (it, args.gpu, max(clr)))

        loss, traineer = trainer.train_network(trainLoader,
                                               verbose=(args.gpu == 0))

        if it % args.test_interval == 0 and args.gpu == 0:

            ## Perform evaluation only in single GPU training
            #             if not args.distributed:
            #                 sc, lab, _ = trainer.evaluateFromList(**vars(args))
            #                 result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

            #                 print("IT %d, VEER %2.4f"%(it, result[1]));
            #                 scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));

            trainer.saveParameters(args.model_save_path +
                                   "/model%09d.model" % it)

        print(time.strftime("%Y-%m-%d %H:%M:%S"),
              "TEER/TAcc %2.2f, TLOSS %f" % (traineer, loss))
        scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n" %
                        (it, traineer, loss))

        scorefile.flush()

    scorefile.close()
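
One detail worth noting in this example: after loading the latest checkpoint, the loop steps the scheduler once per completed epoch so the learning rate resumes where training stopped. A minimal sketch of that fast-forward with a plain PyTorch optimizer and StepLR (hypothetical values):

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.5)

start_epoch = 5  # e.g. parsed from the newest checkpoint filename
for _ in range(1, start_epoch):  # replay the completed epochs
    sched.step()
print(opt.param_groups[0]["lr"])  # LR as it stood at the end of epoch 4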
Example 6
        if layer_type.lower() == "recurrent":
            layer = RecurrentLayer
        elif layer_type.lower() == "dense":
            layer = DenseLayer
        else:
            raise ValueError(
                f"Layer type \"{layer_type}\", was not recognised.")

        Net.add_layer(layer(last_out, layer_dim, act, weights=weight_init))
        last_out = layer_dim

    losses = []
    i = 0
    print("Fitting data on single epoch ...")
    for sequence in tqdm(Data.generate_data(
            sequence_amount_per_pattern=data_size, patterns=patterns),
                         token="",
                         chat_id=''):
        i += 1
        # Be verbose only on every data_size-th sequence
        if i % data_size:
            this_loss = Net.train_model(sequence, vb=False)
        else:
            this_loss = Net.train_model(sequence, vb=True)

            # outputs = Net.predict(sequence=sequence[:-1])
            # this_loss, _ = Net.loss(outputs, sequence[1:])
            # print(this_loss)
            # print(outputs[-1])
            # guesses = [round(elem) for elem in outputs[-1]]
            # print(np.asarray(guesses))
            # print(sequence[-1])