Example #1
    def __init__(self,
                 ntokens,
                 nsampled,
                 nhid,
                 tied_weight,
                 n_proj,
                 sparse=True):
        super(SampledSoftmax, self).__init__()

        # Parameters
        self.ntokens = ntokens
        self.nsampled = nsampled

        self.sampler = LogUniformSampler(self.ntokens)

        self.n_proj = n_proj
        if n_proj != nhid:
            self.projection = nn.Linear(n_proj, nhid)
        else:
            self.projection = None

        if not sparse:
            self.params = nn.Linear(nhid, ntokens)
            if tied_weight is not None:
                self.params.weight = tied_weight
            else:
                util.initialize(self.params.weight)
        else:
            self.softmax_b = torch.nn.Embedding(ntokens, 1, sparse=True)
            self.softmax_w = torch.nn.Embedding(ntokens, nhid, sparse=True)
        self.sparse = sparse

        print("-- Using sampled softmax with " + str(self.nsampled) +
              " samples.")
Example #2
    def __init__(self, ntokens, nsampled, nhid, tied_weight):
        super(SampledSoftmax, self).__init__()

        # Parameters
        self.ntokens = ntokens
        self.nsampled = nsampled

        self.sampler = LogUniformSampler(self.ntokens)
        self.params = nn.Linear(nhid, ntokens)

        if tied_weight is not None:
            self.params.weight = tied_weight
        else:
            util.initialize(self.params.weight)
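
Both examples above delegate weight initialization to util.initialize. A minimal sketch of such a helper, assuming a simple uniform scheme (the project's real implementation is not shown here and may differ):

import torch

def initialize(weight, init_range=0.1):
    # Fill the tensor in-place from U(-init_range, init_range); the range is a guess.
    with torch.no_grad():
        weight.uniform_(-init_range, init_range)
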
Example #3
    def __init__(self, ntokens, ninp, nhid, nout, nlayers, proj, dropout):
        super(RNNModel, self).__init__()
        # Parameters
        self.nhid = nhid
        self.nlayers = nlayers

        # Create Layers
        self.drop = nn.Dropout(dropout)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)

        if proj:
            self.proj = nn.Linear(nhid, nout)
            util.initialize(self.proj.weight)
        else:
            self.proj = None
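
The constructor above only builds the layers. A forward pass consistent with it might look like the following sketch (an assumption, not the author's code; the encoder Embedding is attached externally in Examples #12 and #18, so the input here is assumed to be already embedded):

    def forward(self, emb, hidden):
        emb = self.drop(emb)                # dropout on the embedded input
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        if self.proj is not None:
            output = self.proj(output)      # project nhid down to nout
        return output, hidden
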
Example #4
def main():

    if len(sys.argv) > 1 and \
       sys.argv[1] == "--about":
        about()
        return

    pygame.init()
    screen = pygame.display.set_mode(LAUNCH_SCREEN_RECT.size, 0)
    util.initialize()
    pygame.display.set_caption("DVONN for One")
    icon = pygame.transform.scale(util.get_image("icon"), (32, 32))
    pygame.display.set_icon(icon)
    clock = pygame.time.Clock()

    l = Launcher(screen, clock)
    l.run()
Example #5
def main():

    if len(sys.argv) > 1 and \
       sys.argv[1] == "--about":
        about()
        return

    pygame.init()
    screen = pygame.display.set_mode(LAUNCH_SCREEN_RECT.size,0)
    util.initialize()
    pygame.display.set_caption("DVONN for One")
    icon = pygame.transform.scale(util.get_image("icon"),(32,32))
    pygame.display.set_icon(icon)
    clock = pygame.time.Clock()

    l = Launcher(screen,clock)
    l.run()
Example #6
    def viewBySelected(self):
        mlog.debug("> upcoming.Window.viewBySelected()")

        # switch to next view by
        self.viewBy += 1
        if self.viewBy >= self.VIEW_BY_MAX:
            self.viewBy = self.VIEW_BY_MIN + 1

        # store the setting change
        self.settings.put("upcoming_view_by",
                          "%d" % self.viewBy,
                          shouldCreate=1)

        # refresh the listing
        self.loadList()

        mlog.debug("< upcoming.Window.viewBySelected()")


# =============================================================================
if __name__ == "__main__":
    try:
        loadingWin = xbmcgui.DialogProgress()
        loadingWin.create("Upcoming Shows", "Loading Upcoming Shows",
                          "Please Wait...")
        util.initialize()
        showWindow(loadingWin)
    except Exception as ex:
        traceback.print_exc()
        ui.Dialog().ok('Shit blew up!', str(ex))
Example #7
def pretrain_manipulators(params):
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    describer.load_state_dict(torch.load(
        'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth'))
    describer.eval()

    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    reconstructor.load_state_dict(torch.load(
        'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth'))

    latent_forger_model = util.load_model(params.header + LATENT_FORGER_FOOTER)
    latent_forger = LatentForger(latent_forger_model)
    util.initialize(latent_forger)

    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        latent_forger = latent_forger.cuda()

    optim = torch.optim.Adam(latent_forger.parameters(), lr=params.lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    latent_loss_sum_batch = 0.0
    reconst_loss_sum_batch = 0.0
    loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            orig, orig_speaker = next(data_iterator)
            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)

            forgery_categ = speaker_categs[
                np.random.randint(num_speakers)].unsqueeze(0)

            orig_latent, metadata = describer.latent(orig)
            orig_latent = orig_latent.detach()
            pretend_latent = latent_forger.modify_latent(
                orig_latent, forgery_categ, orig_categ)
            pretend_reconst = reconstructor.reconst(pretend_latent, metadata)

            latent_loss = latent_forger.pretrain_loss(
                pretend_latent, orig_latent)
            reconst_loss = reconstructor.reconst_loss(pretend_reconst, orig)

            loss = latent_loss + reconst_loss

            latent_loss_sum_batch += latent_loss.item()
            reconst_loss_sum_batch += reconst_loss.item()
            loss_sum_batch = loss_sum_batch + loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                optim.step()

                print("(" + "|".join([
                    "%0.3f" % (latent_loss_sum_batch / num_in_batch),
                    "%0.3f" % (reconst_loss_sum_batch / num_in_batch)]) + ")",
                    end=' ', flush=True)

                latent_loss_sum_batch = 0.0
                reconst_loss_sum_batch = 0.0
                loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        torch.save(
            latent_forger.state_dict(),
            'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth')
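
Note that the loop above never calls backward() per sample: it keeps loss_sum_batch as a graph-attached tensor and runs one backward pass per batch_size samples. A minimal sketch of that accumulate-then-step pattern (all names hypothetical):

def accumulate_and_step(stream, compute_loss, optim, batch_size):
    loss_sum, n = 0.0, 0
    for sample in stream:
        loss_sum = loss_sum + compute_loss(sample)  # stays on the autograd graph
        n += 1
        if n >= batch_size:
            optim.zero_grad()
            (loss_sum / n).backward()  # one backward per effective batch
            optim.step()
            loss_sum, n = 0.0, 0
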
Example #8
def initialize(doc=__doc__):
    util.initialize(doc)

    blockify = Blockify(blocklist.Blocklist())

    return blockify
Example #9
    if not queue:  # Can't save an empty queue.
        error_msg(outbar, 'Queue is empty.')
        return

    if fn is None:  # No argument.
        error_msg(outbar, 'Missing argument to write.')
        return

    path = join(expanduser('~'), '.local', 'share', 'pmcli', 'playlists')
    if not exists(path):  # No playlists directory.
        error_msg(outbar, 'Path to playlists does not exist.')

    elif exists(join(path, fn)):
        error_msg(outbar, 'Playlist %s already exists.' % fn)

    else:  # Write the playlist.
        with open(join(path, fn), 'a') as f:
            json.dump(queue, f)
        addstr(outbar, 'Wrote queue to %s.' % fn)


if __name__ == '__main__':
    colour, main, inbar, infobar, outbar = initialize()
    addstr(infobar, 'Enter \'h\' or \'help\' if you need help.')
    queue = Queue()
    global content
    content = None

    while True:
        transition(get_input())
Example #10
def train(params):
    encoder = util.load_model(params.header + ENCODER_FOOTER)
    util.initialize(encoder)
    encoder.train()

    discriminator = util.load_model(params.header + DISCRIMINATOR_FOOTER)
    util.initialize(discriminator)
    discriminator.train()

    if example_tensor.is_cuda:
        encoder = encoder.cuda()
        discriminator = discriminator.cuda()

    optim = torch.optim.Adam(torch.nn.Sequential(encoder,
                                                 discriminator).parameters(),
                             lr=params.lr)
    mse = torch.nn.MSELoss()
    bce = torch.nn.BCEWithLogitsLoss()

    speaker_averages = {}
    average_decay = 1.0 - 1.0 / params.average_count
    data_loader = loader.VCTKLoader(
        params.data_path,
        example_tensor,
        features='log',
        speaker_take_count=params.speaker_take_count,
        utterance_take_count=params.utterance_take_count)

    def update_average(speaker, encoded):
        if speaker not in speaker_averages:
            speaker_averages[speaker] = encoded
        else:
            speaker_averages[speaker] = (
                average_decay * speaker_averages[speaker] +
                (1.0 - average_decay) * encoded)

    cluster_loss_sum_batch = 0.0
    discrim_loss_sum_batch = 0.0
    loss_sum_batch = 0.0
    num_in_batch = 0
    data_iterator = iter(data_loader)

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum_print = 0.0
        loss_count_print = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            utterance_1, speaker_1 = next(data_iterator)
            utterance_2, speaker_2 = next(data_iterator)

            encoded_1 = encoder(utterance_1)
            encoded_2 = encoder(utterance_2)

            update_average(speaker_1, encoded_1.detach())
            update_average(speaker_2, encoded_2.detach())

            discrim_1 = discriminator(torch.cat([encoded_1, encoded_1], dim=1))
            discrim_2 = discriminator(torch.cat([encoded_2, encoded_2], dim=1))
            discrim_3 = discriminator(torch.cat([encoded_1, encoded_2], dim=1))
            discrim_4 = discriminator(torch.cat([encoded_2, encoded_1], dim=1))

            compare = 1 if speaker_1 == speaker_2 else 0
            compare = example_tensor.new_tensor(compare).reshape(1, 1)
            same = example_tensor.new_tensor(1).reshape(1, 1)

            discrim_loss = (bce(discrim_1, same) + bce(discrim_2, same) + bce(
                discrim_3, compare) + bce(discrim_4, compare)) / 4.0

            cluster_loss = (mse(encoded_1, speaker_averages[speaker_1]) +
                            mse(encoded_2, speaker_averages[speaker_2])) / 2.0

            loss = params.cluster_term * cluster_loss + discrim_loss

            cluster_loss_sum_batch += cluster_loss.item()
            discrim_loss_sum_batch += discrim_loss.item()
            loss_sum_batch = loss_sum_batch + loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                optim.step()

                loss_sum_print += mean_loss.item()
                loss_count_print += 1

                print("(" + "|".join([
                    "%0.3f" % (cluster_loss_sum_batch / num_in_batch),
                    "%0.3f" % (discrim_loss_sum_batch / num_in_batch)
                ]) + ")",
                      end=' ',
                      flush=True)

                cluster_loss_sum_batch = 0.0
                discrim_loss_sum_batch = 0.0
                loss_sum_batch = 0.0
                num_in_batch = 0

        print('')
        loss_mean = loss_sum_print / loss_count_print

        metrics = [('period', period), ('loss', round(loss_mean, 3))]
        util.print_metrics(metrics)

        torch.save(encoder.state_dict(),
                   'snapshots/' + params.header + ENCODER_FOOTER + '.pth')

        torch.save(
            discriminator.state_dict(),
            'snapshots/' + params.header + DISCRIMINATOR_FOOTER + '.pth')
Example #11
                        s.subtitle()))
                
            self.controls['schedule_lbl'].control.setLabel(self.translator.get(84) % totRecs)
            self.controls['guidedata'].control.reset()
            self.controls['guidedata'].control.addLabel(self.conn.getGuideData())
        except Exception:
            log.exception("refresh")

#***************************************************************************#
#   End Window Class                                                        #
#***************************************************************************#

try:
    log.info("\n\t\tInitializing...\n\n")
    updateProgress("Initializing...")
    util.initialize()
    log.info(">> MythBox Version: %s <<" % __version__)
    
    updateProgress("Creating Main Window")

    import mythtv
    import mythdb
    import util
    
    updateProgress("Checking Settings")
    
    # TODO: Springify later...
    # Injected dependencies that get passed all over the place
    
    # 1
Example #12
nsampled = 8192

train_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "training-monolingual.tokenized.shuffled/*"))
test_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "heldout-monolingual.tokenized.shuffled/*"), deterministic=True)
print("load dataset - complete")
'''

###############################################################################
# Build the model
###############################################################################
eval_batch_size = 1
net = model.RNNModel(ntokens, args.emsize, args.nhid, args.nlayers,
                     args.dropout)

encoder = nn.Embedding(ntokens, args.emsize)
util.initialize(encoder, ntokens)

twht = None
if args.tied:
    if args.nhid != args.emsize:
        raise ValueError(
            'When using the tied flag, hidden must be equal to embedding size')
    twht = encoder.weight

ss = model.SampledSoftmax(ntokens, nsampled, args.nhid, tied_weight=twht)

net.add_module("encoder", encoder)
net.add_module("decoder", ss)

net.cuda(0)
encoder.cuda(0)
Example #13
def train_analysts(params):
    speaker_categs = torch.load(params.speaker_categs_path)
    speaker_feature_dim = speaker_categs.size()[1]

    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    util.initialize(describer)

    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    util.initialize(reconstructor)

    examiner_model = util.load_model(params.header + EXAMINER_FOOTER)
    distinguisher_model = util.load_model(params.header + DISTINGUISHER_FOOTER)
    discriminator = PairDiscriminator(examiner_model, distinguisher_model)
    util.initialize(discriminator)
    discriminator.train()

    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        discriminator = discriminator.cuda()

    optim = torch.optim.Adam(
        torch.nn.Sequential(describer, reconstructor).parameters(),
        lr=params.lr)
    advers_optim = torch.optim.Adam(
        discriminator.parameters(),
        lr=params.advers_lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    categ_loss_sum_batch = 0.0
    robustness_loss_sum_batch = 0.0
    reconst_loss_sum_batch = 0.0
    gen_loss_batch = 0.0
    advers_loss_batch = 0.0

    loss_sum_batch = 0.0
    discrim_loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            orig, orig_speaker = next(data_iterator)
            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)

            describer.eval()
            center_categ = describer.categ(orig).detach()
            describer.train()

            (latent, metadata, pred_categ) = describer.describe(orig)
            reconst = reconstructor.reconst(latent, metadata)

            truth = 0 if (np.random.random() < 0.5) else 1

            if truth == 0:
                decision = discriminator.discriminate(
                    orig_categ, orig, reconst)
            else:
                decision = discriminator.discriminate(
                    orig_categ, reconst, orig)

            categ_loss = describer.categ_loss(pred_categ, orig_categ)
            robustness_loss = describer.categ_loss(pred_categ, center_categ)
            reconst_loss = reconstructor.reconst_loss(reconst, orig)
            gen_loss = discriminator.gen_loss(decision, truth)
            advers_loss = discriminator.advers_loss(decision, truth)

            loss = (
                params.categ_term * categ_loss +
                params.robustness_term * robustness_loss +
                reconst_loss +
                params.advers_term * gen_loss)
            discrim_loss = advers_loss

            categ_loss_sum_batch += categ_loss.item()
            robustness_loss_sum_batch += robustness_loss.item()
            reconst_loss_sum_batch += reconst_loss.item()
            gen_loss_batch += gen_loss.item()
            advers_loss_batch += advers_loss.item()

            loss_sum_batch = loss_sum_batch + loss
            discrim_loss_sum_batch = discrim_loss_sum_batch + discrim_loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                mean_discrim_loss = discrim_loss_sum_batch / num_in_batch
                if gen_loss_batch / num_in_batch <= 10.0:
                    advers_optim.zero_grad()
                    mean_discrim_loss.backward(retain_graph=True)
                    advers_optim.step()

                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                optim.step()

                print("(" + "|".join([
                    "%0.3f" % (categ_loss_sum_batch / num_in_batch),
                    "%0.3f" % (robustness_loss_sum_batch / num_in_batch),
                    "%0.3f" % (reconst_loss_sum_batch / num_in_batch),
                    "%0.3f" % (gen_loss_batch / num_in_batch),
                    "%0.3f" % (advers_loss_batch / num_in_batch)]) + ")",
                    end=' ', flush=True)

                categ_loss_sum_batch = 0.0
                robustness_loss_sum_batch = 0.0
                reconst_loss_sum_batch = 0.0
                gen_loss_batch = 0.0
                advers_loss_batch = 0.0

                loss_sum_batch = 0.0
                discrim_loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        torch.save(
            describer.state_dict(),
            'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth')

        torch.save(
            reconstructor.state_dict(),
            'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth')
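
The adversarial update above calls backward(retain_graph=True) because mean_loss.backward() must traverse the same graph immediately afterwards; freeing it on the first pass would raise a runtime error. The pattern in isolation (names hypothetical):

def adversarial_step(discrim_loss, gen_loss, advers_optim, optim):
    advers_optim.zero_grad()
    discrim_loss.backward(retain_graph=True)  # keep the graph for the next pass
    advers_optim.step()
    optim.zero_grad()
    gen_loss.backward()  # second traversal of the shared graph
    optim.step()
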
Example #14
def train_manipulators(params):
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    describer.load_state_dict(torch.load(
        'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth'))
    describer.eval()

    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    reconstructor.load_state_dict(torch.load(
        'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth'))

    latent_forger_model = util.load_model(params.header + LATENT_FORGER_FOOTER)
    latent_forger = LatentForger(latent_forger_model)
    latent_forger.load_state_dict(torch.load(
        'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth'))
    latent_forger.train()

    examiner_model = util.load_model(params.header + EXAMINER_FOOTER)
    distinguisher_model = util.load_model(params.header + DISTINGUISHER_FOOTER)
    discriminator = PairDiscriminator(examiner_model, distinguisher_model)
    util.initialize(discriminator)
    discriminator.train()

    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        latent_forger = latent_forger.cuda()
        discriminator = discriminator.cuda()

    optim = torch.optim.Adam(
        latent_forger.parameters(),
        lr=params.lr)
    advers_optim = torch.optim.Adam(
        discriminator.parameters(),
        lr=params.advers_lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    forgery_categ_loss_sum_batch = 0.0
    activity_loss_sum_batch = 0.0
    pretend_latent_loss_sum_batch = 0.0
    pretend_reconst_loss_sum_batch = 0.0
    gen_loss_batch = 0.0
    advers_loss_batch = 0.0

    loss_sum_batch = 0.0
    discrim_loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            orig, orig_speaker = next(data_iterator)
            target, target_speaker = next(data_iterator)

            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)
            target_categ = speaker_categs[target_speaker].unsqueeze(0)

            target_latent, metadata = describer.latent(target)
            target_reconst = reconstructor.reconst(target_latent, metadata)
            target_reconst = target_reconst.detach()

            orig_latent, metadata = describer.latent(orig)
            orig_latent = orig_latent.detach()

            forgery_latent_raw = latent_forger.modify_latent(
                orig_latent, orig_categ, target_categ)
            forgery = reconstructor.reconst(forgery_latent_raw, metadata)

            (forgery_latent, metadata, pred_forgery_categ) = \
                describer.describe(forgery)

            activity_orig = torch.exp(orig).mean(dim=1)
            activity_forgery = torch.exp(forgery).mean(dim=1)

            pretend_latent = latent_forger.modify_latent(
                forgery_latent, target_categ, orig_categ)
            pretend_reconst = reconstructor.reconst(pretend_latent, metadata)

            truth = 0 if (np.random.random() < 0.5) else 1

            if truth == 0:
                decision = discriminator.discriminate(
                    target_categ, target_reconst, forgery)
            else:
                decision = discriminator.discriminate(
                    target_categ, forgery, target_reconst)

            forgery_categ_loss = describer.categ_loss(
                pred_forgery_categ, target_categ)
            activity_loss = ((activity_orig - activity_forgery) ** 2).mean(
                dim=list(range(activity_orig.dim())))
            pretend_latent_loss = describer.latent_loss(
                pretend_latent, orig_latent)
            pretend_reconst_loss = reconstructor.reconst_loss(
                pretend_reconst, orig)
            gen_loss = discriminator.gen_loss(decision, truth)
            advers_loss = discriminator.advers_loss(decision, truth)

            loss = (params.categ_term * forgery_categ_loss +
                    params.activity_term * activity_loss +
                    pretend_latent_loss +
                    pretend_reconst_loss +
                    params.advers_term * gen_loss)
            discrim_loss = advers_loss

            forgery_categ_loss_sum_batch += forgery_categ_loss.item()
            activity_loss_sum_batch += activity_loss.item()
            pretend_latent_loss_sum_batch += pretend_latent_loss.item()
            pretend_reconst_loss_sum_batch += pretend_reconst_loss.item()
            gen_loss_batch += gen_loss.item()
            advers_loss_batch += advers_loss.item()

            loss_sum_batch = loss_sum_batch + loss
            discrim_loss_sum_batch = discrim_loss_sum_batch + discrim_loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                mean_discrim_loss = discrim_loss_sum_batch / num_in_batch
                if gen_loss_batch / num_in_batch <= 10.0:
                    advers_optim.zero_grad()
                    mean_discrim_loss.backward(retain_graph=True)
                    advers_optim.step()

                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                if period >= 1:
                    optim.step()

                print("(" + "|".join([
                    "%0.3f" % (forgery_categ_loss_sum_batch / num_in_batch),
                    "%0.3f" % (activity_loss_sum_batch / num_in_batch),
                    "%0.3f" % (pretend_latent_loss_sum_batch / num_in_batch),
                    "%0.3f" % (pretend_reconst_loss_sum_batch / num_in_batch),
                    "%0.3f" % (gen_loss_batch / num_in_batch),
                    "%0.3f" % (advers_loss_batch / num_in_batch)
                ]) + ")", end=' ', flush=True)

                forgery_categ_loss_sum_batch = 0.0
                activity_loss_sum_batch = 0.0
                pretend_latent_loss_sum_batch = 0.0
                pretend_reconst_loss_sum_batch = 0.0
                gen_loss_batch = 0.0
                advers_loss_batch = 0.0

                loss_sum_batch = 0.0
                discrim_loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        torch.save(
            latent_forger.state_dict(),
            'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth')
Example #15
import kaldi_active_grammar

if __name__ == '__main__':
    import util
    compiler, decoder = util.initialize()

##### Set up a rule mixing strict commands with free dictation

rule = kaldi_active_grammar.KaldiRule(compiler, 'TestRule')
fst = rule.fst

dictation_nonterm = '#nonterm:dictation'
end_nonterm = '#nonterm:end'

# Optional preface
previous_state = fst.add_state(initial=True)
next_state = fst.add_state()
fst.add_arc(previous_state, next_state, 'cap')
fst.add_arc(previous_state, next_state,
            None)  # Optionally skip, with an epsilon (silent) arc

# Required free dictation
previous_state = next_state
extra_state = fst.add_state()
next_state = fst.add_state()
# These two arcs together (always use together) will recognize one or more words of free dictation (but not zero):
fst.add_arc(previous_state, extra_state, dictation_nonterm)
fst.add_arc(extra_state, next_state, None, end_nonterm)

# Loop repetition, alternating between a group of alternatives and more free dictation
previous_state = next_state
Example #16
    def __init__(self):

        self.client = carla.Client("127.0.0.1", 2000)
        self.client.set_timeout(20.0)

        change_to_Town06(self.client)
        self.world = self.client.get_world()
        self.world_snapshot = self.world.get_snapshot()
        self.time_step_count = 0
        self.time_step = 0.025  # Seconds. Have to fully divide 1
        self.curr_time = 0
        self.subject_path_time_res = 0.5
        self.warmup_time = 3
        # 0. Reset Scene
        initialize(self.world, self.client, self.time_step)

        self.lane_marker_linestring = (
            get_lane_marker_linestring_from_right_lane_road_and_lane_id(
                self.world, 15, -6))

        # 1. Spawn vehicles
        self.subject_behavior = "very_aggressive"
        (
            self.ego_vehicle,
            self.subject_vehicle,
            self.current_lane_waypoints,
            self.subject_agent,
        ) = setup_scenario(
            self.world,
            self.client,
            synchronous_master=True,
            subject_behavior=self.subject_behavior,
        )

        # 2. Get the path follower object
        self.path_follower = PathFollower(self.world, self.ego_vehicle,
                                          self.time_step)

        # 3. Coarse Trajectory Generation
        road = Road()
        actions = Actions()
        constraints = Constraints()
        termination_conditions = TerminationConditions(max_time=15,
                                                       max_position_x=180)
        start_state = State([
            self.ego_vehicle.get_location().x,
            self.ego_vehicle.get_location().y
        ], 0, 0)
        cost_calculator = CostCalculator(
            subject_path_time_res=self.subject_path_time_res)
        self.latticeGenerator = LatticeGenerator(
            road=road,
            actions=actions,
            constraints=constraints,
            cost_calculator=cost_calculator,
            termination_conditions=termination_conditions,
            start_state=start_state,
            ego=self.ego_vehicle,
            subject=self.subject_vehicle,
        )

        # 4. Get the controller object
        args_lateral = {
            "K_P": 1.0,
            "K_D": 0.0,
            "K_I": 0.0,
            "dt": self.time_step
        }
        args_longitudinal = {
            "K_P": 1.0,
            "K_D": 0.00,
            "K_I": 0.00,
            "dt": self.time_step,
        }
        self.controller = VehiclePIDController(self.ego_vehicle, args_lateral,
                                               args_longitudinal)

        # 5. Placeholder for the concatenated trajectory
        self.traj_to_track = []
        self.has_lane_change_happend = 0
        self.is_lane_change_happening = 0
        self.planned_path = None
        self.next_timestep = None
        self.latest_tracked_speed_ego = get_speed(self.ego_vehicle)

        # self.regenerate_traj_flag = False

        if self.subject_behavior == "manual":
            self.path_predictor = PathPredictor(self.world,
                                                self.subject_vehicle,
                                                self.subject_path_time_res)
        else:
            self.path_predictor = PathPredictor(self.world,
                                                self.subject_agent.vehicle,
                                                self.subject_path_time_res)
        self.subject_traj = []
        self.ego_traj = []
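
A stepping helper consistent with the synchronous, fixed-time-step setup above might look like this sketch (hypothetical; the class's actual tick logic is not shown in the example):

    def step(self):
        self.world.tick()  # advance the synchronous CARLA world one frame
        self.time_step_count += 1
        self.curr_time = self.time_step_count * self.time_step
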
Example #17
def initialize(doc=__doc__):
    util.initialize(doc)

    blockify = Blockify(blocklist.Blocklist())

    return blockify
Example #18
ntokens = len(vocabulary)
nsampled = 8192

train_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "training-monolingual.tokenized.shuffled/*"))
test_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "heldout-monolingual.tokenized.shuffled/*"), deterministic=True)
print("load dataset - complete")
'''

###############################################################################
# Build the model
###############################################################################
eval_batch_size = 1
net = model.RNNModel(ntokens, args.emsize, args.nhid, args.emsize, args.nlayers, args.proj, args.dropout)

encoder = nn.Embedding(ntokens, args.emsize)
util.initialize(encoder.weight)

twht = None
if args.tied:
    if args.nhid != args.emsize and not args.proj:
        raise ValueError('When using the tied flag, hidden must be equal to embedding size')
    twht = encoder.weight

D = args.emsize if args.proj else args.nhid
ss = model.SampledSoftmax(ntokens, nsampled, D, tied_weight=twht)

net.add_module("encoder", encoder)
net.add_module("decoder", ss)
net.cuda()

criterion = nn.CrossEntropyLoss()
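
The tied_weight hand-off above makes the decoder reuse the encoder's embedding matrix instead of allocating its own (see Example #2, where it is assigned to params.weight). The same trick as a standalone illustration:

import torch.nn as nn

embedding = nn.Embedding(10000, 256)
decoder = nn.Linear(256, 10000, bias=False)
decoder.weight = embedding.weight           # one shared Parameter object
assert decoder.weight is embedding.weight   # gradients accumulate into it
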
Example #19
def learn(env,
          total_timesteps,
          seed=None,
          nsteps=1024,
          ent_coef=0.01,
          lr=0.01,
          vf_coef=0.5,
          p_coef=1.0,
          max_grad_norm=None,
          gamma=0.99,
          lam=0.95,
          nminibatches=15,
          noptepochs=4,
          cliprange=0.2,
          save_interval=100,
          copeoperation=False,
          human_ent_coef=0.01,
          human_vf_coef=0.5,
          human_p_coef=1.0):

    set_global_seeds(seed)
    sess = get_session()
    global_summary = tf.summary.FileWriter(
        'summaries/' + 'feeding' +
        datetime.datetime.now().strftime('%d-%m-%y%H%M'), sess.graph)

    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliprange, float): cliprange = constfn(cliprange)
    else: assert callable(cliprange)

    # Get the nb of env
    nenvs = env.num_envs
    # Calculate the batch_size
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    if copeoperation == True:
        human_model = Model(env=env,
                            nbatch_act=nenvs,
                            nbatch_train=nbatch_train,
                            ent_coef=human_ent_coef,
                            vf_coef=human_vf_coef,
                            p_coef=human_p_coef,
                            max_grad_norm=max_grad_norm,
                            human=True,
                            robot=False)
        robot_model = Model(env=env,
                            nbatch_act=nenvs,
                            nbatch_train=nbatch_train,
                            ent_coef=ent_coef,
                            vf_coef=vf_coef,
                            p_coef=p_coef,
                            max_grad_norm=max_grad_norm,
                            human=False,
                            robot=True)

    if copeoperation == False:
        model = Model(env=env,
                      nbatch_act=nenvs,
                      nbatch_train=nbatch_train,
                      ent_coef=ent_coef,
                      vf_coef=vf_coef,
                      p_coef=p_coef,
                      max_grad_norm=max_grad_norm)
    initialize()

    # Instantiate the runner object
    if copeoperation == True:
        runner = Runner(env=env,
                        model=None,
                        nsteps=nsteps,
                        gamma=gamma,
                        lam=lam,
                        human_model=human_model,
                        robot_model=robot_model)
    if copeoperation == False:
        runner = Runner(env=env,
                        model=model,
                        nsteps=nsteps,
                        gamma=gamma,
                        lam=lam)

    epinfobuf = deque(maxlen=10)  # most recent 10 episodes
    pbar = tqdm(total=total_timesteps, dynamic_ncols=True)

    tfirststart = time.perf_counter()

    nupdates = total_timesteps // nbatch
    for update in range(1, nupdates + 1):
        assert nbatch % nminibatches == 0
        # Start timer
        frac = 1.0 - (update - 1.0) / nupdates
        # Calculate the learning rate
        lrnow = lr(frac)
        # Calculate the cliprange
        cliprangenow = cliprange(frac)

        # Get minibatch
        if copeoperation == False:
            obs, returns, masks, actions, values, neglogpacs, epinfos = runner.run(
            )
        if copeoperation == True:
            obs, human_returns, robot_returns, masks, human_actions, robot_actions, human_values, robot_values, human_neglogpacs, robot_neglogpacs, epinfos = runner.coop_run(
            )
        epinfobuf.extend(epinfos)
        mblossvals = []
        human_mblossvals = []
        robot_mblossvals = []
        inds = np.arange(nbatch)
        for _ in range(noptepochs):
            # Randomize the indexes
            np.random.shuffle(inds)
            for start in range(0, nbatch, nbatch_train):
                end = start + nbatch_train
                mbinds = inds[start:end]
                if copeoperation == True:
                    human_slices = (arr[mbinds]
                                    for arr in (obs[:, 24:], human_returns,
                                                human_actions, human_values,
                                                human_neglogpacs))
                    robot_slices = (arr[mbinds]
                                    for arr in (obs[:, :24], robot_returns,
                                                robot_actions, robot_values,
                                                robot_neglogpacs))
                    human_mblossvals.append(
                        human_model.train(lrnow, cliprangenow, *human_slices))
                    robot_mblossvals.append(
                        robot_model.train(lrnow, cliprangenow, *robot_slices))
                if copeoperation == False:
                    slices = (arr[mbinds] for arr in (obs, returns, actions,
                                                      values, neglogpacs))
                    mblossvals.append(model.train(lrnow, cliprangenow,
                                                  *slices))
        # Feedforward --> get losses --> update
        if copeoperation == True:
            human_lossvals = np.mean(human_mblossvals, axis=0)
            robot_lossvals = np.mean(robot_mblossvals, axis=0)
        if copeoperation == False:
            lossvals = np.mean(mblossvals, axis=0)
        summary = tf.Summary()
        if copeoperation == True:
            human_ev = explained_variance(human_values, human_returns)
            robot_ev = explained_variance(robot_values, robot_returns)
        if copeoperation == False:
            ev = explained_variance(values, returns)
        performance_r = np.mean([epinfo['r'] for epinfo in epinfobuf])
        performance_len = np.mean([epinfo['l'] for epinfo in epinfobuf])
        success_time = np.mean(
            [epinfo['success_time'] for epinfo in epinfobuf])
        fall_time = np.mean([epinfo['fall_time'] for epinfo in epinfobuf])
        summary.value.add(tag='Perf/Reward', simple_value=performance_r)
        summary.value.add(tag='Perf/episode_len', simple_value=performance_len)
        summary.value.add(tag='Perf/success_time', simple_value=success_time)
        summary.value.add(tag='Perf/fall_time', simple_value=fall_time)
        if copeoperation == True:
            summary.value.add(tag='Perf/human_explained_variance',
                              simple_value=float(human_ev))
            summary.value.add(tag='Perf/robot_explained_variance',
                              simple_value=float(robot_ev))
        if copeoperation == False:
            summary.value.add(tag='Perf/explained_variance',
                              simple_value=float(ev))
        if copeoperation == True:
            for (human_lossval, human_lossname) in zip(human_lossvals,
                                                       human_model.loss_names):
                if human_lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + human_lossname,
                                      simple_value=human_lossval)
                else:
                    summary.value.add(tag='human_loss/' + human_lossname,
                                      simple_value=human_lossval)
            for (robot_lossval, robot_lossname) in zip(robot_lossvals,
                                                       robot_model.loss_names):
                if robot_lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + robot_lossname,
                                      simple_value=robot_lossval)
                else:
                    summary.value.add(tag='robot_loss/' + robot_lossname,
                                      simple_value=robot_lossval)
        if copeoperation == False:
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                if lossname == 'grad_norm':
                    summary.value.add(tag='grad/' + lossname,
                                      simple_value=lossval)
                else:
                    summary.value.add(tag='loss/' + lossname,
                                      simple_value=lossval)

        global_summary.add_summary(summary, int(update * nbatch))
        global_summary.flush()
        print('finish one update')
        if update % 10 == 0:
            msg = 'step: {}, episode reward: {}, episode len: {}, success_time: {}, fall_time: {}'
            pbar.n = update * nbatch  # update() adds an increment; set absolute progress
            pbar.refresh()
            pbar.set_description(
                msg.format(update * nbatch, performance_r, performance_len,
                           success_time, fall_time))

        if update % save_interval == 0:
            tnow = time.perf_counter()
            print('consume time', tnow - tfirststart)
            if copeoperation == True:
                savepath = osp.join("my_model_cop/", '%.5i' % update)
            if copeoperation == False:
                savepath = osp.join("my_model/", '%.5i' % update)
            os.makedirs(savepath, exist_ok=True)
            savepath = osp.join(savepath, 'ppomodel')
            print('Saving to', savepath)
            save_state(savepath)
    pbar.close()

    return model
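
A hypothetical invocation, assuming env is a vectorized environment supplied by the surrounding project. Note that the cooperative branch above never binds model, so only copeoperation=False returns cleanly:

model = learn(env, total_timesteps=1_000_000, copeoperation=False)
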
Example #20
vocabulary = Vocabulary.from_file(os.path.join(args.data, "1b_word_vocab.txt"))

ntokens = len(vocabulary)
nsampled = 16384

train_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "training-monolingual.tokenized.shuffled/*"))
test_corpus = StreamGBWDataset(vocabulary, os.path.join(args.data, "heldout-monolingual.tokenized.shuffled/*"), deterministic=True)
print("load dataset - complete")
'''

###############################################################################
# Build the model
###############################################################################
embed = nn.Embedding(ntokens, args.emsize)
net = m.TransformerModel(m.DecoderPreprocessor(args, embed), m.TransformerDecoder(args, embed), nsampled)
util.initialize(embed.weight)
net.cuda()

print("Sampled Softmax:", nsampled, "Batch Size:", args.batch_size, "Initial LR:", args.lr)
#optimizer = Adam(net.parameters(), args.lr, betas=(0.0, 0.999))
#optimizer = optim.RMSprop(net.parameters(), args.lr)
optimizer = RMSprop(net.parameters(), args.lr)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=train_corpus.batch_num*args.epochs, eta_min=1e-8)
net, optimizer = amp.initialize(net, optimizer, opt_level="O1")

###############################################################################
# Training code
###############################################################################

def get_batch(item, device_id=0):
    data, target, wrd_cnt, batch_num = item
Example #21
#!/usr/bin/env python3

import dotfiles
import util
import shutil
import os

from pathlib import Path

util.initialize(Path(__file__).parent)

modules = dotfiles.Module.__subclasses__()
for i, module in enumerate(modules):
    progress = "({} out of {})".format(i + 1, len(modules))

    module_name = util.color(module.__name__, util.Color.green)
    module_source = util.color(module.__module__, util.Color.yellow)

    print("Installing module", module_name, "from", module_source, progress)
    instance = module()

    for message in instance.install():
        print(util.timestamp(message))

    print("Copying over files from module", module_name)

    default_path = Path(os.path.expanduser(module.default_dest))
    module_path = Path(module.__module__)
    valid_file_paths = [path for path in module_path.glob("*") if util.validate_path(path)]

    for path in valid_file_paths:
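
The example is cut off at this loop. Given the shutil import and the "Copying over files" message above, a plausible (entirely hypothetical) body would be:

        destination = default_path / path.name
        shutil.copy(path, destination)  # copy each validated file into place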