Example 1
import csv
import string

def create_data(file_name):
    # path was previously hard-coded to '../../consistency_ami_kim.csv'
    with open(file_name, newline='') as data_file:
        print('loading data')
        data_reader = csv.reader(data_file, delimiter="|")
        headers = next(data_reader)
        data = {}
        sg = SequenceGenerator()
        topics = sg.getTopicNames()
        data_topics = sg.generateSequences()
        eu_tags = {}
        old_meetingID = ""
        meetingID = ""
        data["eu_tag"] = []
        for topic in topics:
            eu_tags[topic] = convert_topic_das_euginio(data_topics[topic])
        for title in headers:
            data[title] = []
        index = 0
        count = 0
        for row in data_reader:
            # need to find the euginio tags for the group
            for header, value in zip(headers, row):
                if header == "Meeting ID":
                    meetingID = value
                if header == "words":
                    sentence = value.strip()
                    data[header].append(
                        value.translate(str.maketrans('', '', string.punctuation)).lower())
                else:
                    data[header].append(value)
            try:
                if meetingID != old_meetingID:
                    meeting_data = get_meeting_transcript(
                        meetingID, topics, data_topics)
                    index = 0
                if sentence in meeting_data[index]:
                    data["eu_tag"].append(eu_tags[topic][index])
                    count += 1
                    index += 1
                else:
                    data["eu_tag"].append("na")
            except Exception:
                # roll back the half-appended row so all columns stay aligned
                for header in headers:
                    data[header] = data[header][:-1]
            old_meetingID = meetingID
        print('data created with ' + str(len(data)) + ' columns and ' +
              str(len(data['words'])) + ' rows')
        print(count)
    return data
Example 2
def predict(test_data_loader, model, ntm_model, opt):
    if opt.delimiter_type == 0:
        delimiter_word = pykp.io.SEP_WORD
    else:
        delimiter_word = pykp.io.EOS_WORD
    generator = SequenceGenerator(
        model,
        ntm_model,
        opt.use_topic_represent,
        opt.topic_type,
        bos_idx=opt.word2idx[pykp.io.BOS_WORD],
        eos_idx=opt.word2idx[pykp.io.EOS_WORD],
        pad_idx=opt.word2idx[pykp.io.PAD_WORD],
        beam_size=opt.beam_size,
        max_sequence_length=opt.max_length,
        copy_attn=opt.copy_attention,
        coverage_attn=opt.coverage_attn,
        review_attn=opt.review_attn,
        length_penalty_factor=opt.length_penalty_factor,
        coverage_penalty_factor=opt.coverage_penalty_factor,
        length_penalty=opt.length_penalty,
        coverage_penalty=opt.coverage_penalty,
        cuda=opt.gpuid > -1,
        n_best=opt.n_best,
        block_ngram_repeat=opt.block_ngram_repeat,
        ignore_when_blocking=opt.ignore_when_blocking)

    evaluate_beam_search(generator, test_data_loader, opt, delimiter_word)
Example 3
def predict(test_data_loader, model, opt):
    if opt.delimiter_type == 0:
        delimiter_word = pykp.io.SEP_WORD
    else:
        delimiter_word = pykp.io.EOS_WORD
        
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1
                                  )
    """
    if opt.one2many and opt.one2many_mode > 1:
        prediction_by_sampling(generator, test_data_loader, opt, delimiter_word)
    else:
        evaluate_beam_search(generator, test_data_loader, opt, delimiter_word)
    """
    if opt.sampling:
        raise ValueError("Not support yet!")
        #prediction_by_sampling(generator, test_data_loader, opt, delimiter_word)
    else:
        evaluate_beam_search(generator, test_data_loader, opt, delimiter_word)
Example 4
    def generate_sequences(self, protein_sequence: str) -> None:
        sequences_limit = self.validate_limit()
        data = []

        try:
            self.generator = SequenceGenerator(protein_sequence)
            for i, sequence in enumerate(self.generator, start=1):
                if i > sequences_limit:
                    break
                data.append("".join(sequence))
        except Exception:
            data.append("Invalid sequence")

        self.sequences.insert(tk.END, *data)
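The counter-and-break pattern above can also be written with itertools.islice, which caps any iterable without manual bookkeeping; a minimal standalone equivalent (an illustration, not the snippet's actual code):

from itertools import islice

def take_joined(generator, limit):
    """Collect at most `limit` sequences from an iterable, joining each into a string."""
    return ["".join(seq) for seq in islice(generator, limit)]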
Example 5
def main(args):
    if os.path.exists(args.corpus[:-4]+'_processed.txt'):
        raw_text = load_data(args.corpus[:-4]+'_processed.txt', processed=True)
    else:
        raw_text = load_data(args.corpus)
        # raw_text = text_cleaner(raw_text)
        with open(args.corpus[:-4]+'_processed.txt', 'w', encoding='utf8') as f:
            f.write(raw_text)

    mapping = models.KeyedVectors.load('word2vec_skipgram.bin')
    ########################################
    raw_text = raw_text.split()
    # print(raw_text)
    vocab_size = 300  # repurposed here as the word-embedding size
    generic_lm = WordLM(vocab_size, mapping, seq_length=args.seq_length, multi_gpu=args.multi_gpu,
                batch_size=args.batch_size, ckpt_path=args.ckpt_path, model_path=args.model_path, mode_name=args.mode)
    ########################################

    if args.low_ram:
        if args.mode == 'right2left':
            raw_text = raw_text[::-1]

        model = generic_lm.get_model()
        continue_epoch = generic_lm.get_continue_epoch()

        optimizer = Adam(lr=5e-4, decay=5e-6)
        model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])

        # checkpoint = ModelCheckpoint(os.path.join(args.ckpt_path, 'WordLM_{epoch:03d}.h5'), period=args.ckpt_period)
        early_stop = EarlyStopping(monitor='loss', patience=50)
        save_model = SaveModel(ckpt_path=args.ckpt_path, model_path=args.model_path, mode_name=args.mode, ckpt_period=args.ckpt_period)
        sequence_generator = SequenceGenerator(raw_text, args.seq_length, mapping, vocab_size, batch_size=args.batch_size)

        model.fit_generator(generator=sequence_generator,
                                epochs=args.epochs + continue_epoch,
                                initial_epoch=continue_epoch,
                                callbacks=[save_model, early_stop])

        model.save(os.path.join(args.model_path, 'WordLM_%s.model'%args.mode))

    else:
        if args.mode == 'right2left':
            raw_text = raw_text[::-1]

        generic_lm.fit(raw_text, epochs=args.epochs, ckpt_period=args.ckpt_period)
Example 6
    def fit(self, corpus, epochs, ckpt_period=1):
        optimizer = Adam(lr=5e-4, decay=5e-6)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])

        # checkpoint = ModelCheckpoint(os.path.join(self.ckpt_path, 'GenericLM_{epoch:03d}.h5'), period=ckpt_period, save_weights_only=True)
        early_stop = EarlyStopping(monitor='loss', patience=12)
        save_model = SaveModel(ckpt_path=self.ckpt_path,
                               model_path=self.model_path,
                               mode_name=self.mode_name,
                               ckpt_period=ckpt_period)
        sequence_generator = SequenceGenerator(corpus,
                                               self.seq_length,
                                               self.mapping,
                                               batch_size=self.batch_size)

        self.model.fit_generator(generator=sequence_generator,
                                 epochs=epochs + self.continue_epoch,
                                 initial_epoch=self.continue_epoch,
                                 callbacks=[save_model, early_stop])
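Examples 5 and 6 hand a SequenceGenerator object to Keras's fit_generator, which expects something implementing the keras.utils.Sequence protocol: __len__ returning the number of batches per epoch and __getitem__ returning one (inputs, targets) batch. The snippets do not show that class, so the following is only a minimal sketch of the usual shape; the class name, fields, and windowing scheme are assumptions rather than the original implementation (and Example 6's categorical_crossentropy loss implies one-hot targets instead of the embedding targets used here):

import numpy as np
from tensorflow.keras.utils import Sequence  # plain keras.utils in older setups

class WindowedWordSequence(Sequence):  # hypothetical stand-in for SequenceGenerator
    def __init__(self, tokens, seq_length, mapping, batch_size=128):
        self.tokens = tokens          # list of words
        self.seq_length = seq_length  # context window size
        self.mapping = mapping        # word -> embedding vector lookup
        self.batch_size = batch_size

    def __len__(self):
        # number of full (window, next-word) batches per epoch
        return (len(self.tokens) - self.seq_length) // self.batch_size

    def __getitem__(self, idx):
        X, y = [], []
        start = idx * self.batch_size
        for i in range(start, start + self.batch_size):
            window = self.tokens[i:i + self.seq_length]
            X.append([self.mapping[w] for w in window])
            y.append(self.mapping[self.tokens[i + self.seq_length]])
        return np.asarray(X), np.asarray(y)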
Example 7
def predict(test_data_loader, model, opt):
    if opt.delimiter_type == 0:
        delimiter_word = pykp.io.SEP_WORD
    else:
        delimiter_word = pykp.io.EOS_WORD
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  beam_size=opt.beam_size,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  include_attn_dist=opt.include_attn_dist,
                                  length_penalty_factor=opt.length_penalty_factor,
                                  coverage_penalty_factor=opt.coverage_penalty_factor,
                                  length_penalty=opt.length_penalty,
                                  coverage_penalty=opt.coverage_penalty,
                                  cuda=opt.gpuid > -1,
                                  n_best=opt.n_best,
                                  block_ngram_repeat=opt.block_ngram_repeat,
                                  ignore_when_blocking=opt.ignore_when_blocking,
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD]
                                  )
    """
    if opt.one2many and opt.one2many_mode > 1:
        prediction_by_sampling(generator, test_data_loader, opt, delimiter_word)
    else:
        evaluate_beam_search(generator, test_data_loader, opt, delimiter_word)
    """
    if opt.sampling:
        raise ValueError("Not support yet!")
        #prediction_by_sampling(generator, test_data_loader, opt, delimiter_word)
    else:
        evaluate_beam_search(generator, test_data_loader, opt, delimiter_word)
Example 8
def predict(test_data_loader, model, opt):
    if opt.delimiter_type == 0:
        delimiter_word = pykp.io.SEP_WORD
    else:
        delimiter_word = pykp.io.EOS_WORD
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  beam_size=opt.beam_size,
                                  threshold=opt.threshold,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  include_attn_dist=opt.include_attn_dist,
                                  length_penalty_factor=opt.length_penalty_factor,
                                  coverage_penalty_factor=opt.coverage_penalty_factor,
                                  length_penalty=opt.length_penalty,
                                  coverage_penalty=opt.coverage_penalty,
                                  cuda=opt.gpuid > -1,
                                  n_best=opt.n_best,
                                  ignore_when_blocking=opt.ignore_when_blocking,
                                  )

    predict_with_beam_search(generator, test_data_loader, opt, delimiter_word)
Example 9
def main(opt):
    #print("agsnf efnghrrqthg")
    clip = 5
    start_time = time.time()
    train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(
        opt, load_train=True)
    load_data_time = time_since(start_time)
    logging.info('Time for loading the data: %.1f' % load_data_time)

    print("______________________ Data Successfully Loaded ______________")
    model = Seq2SeqModel(opt)
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(opt.model_path))
        model = model.to(opt.gpuid)
    else:
        model.load_state_dict(torch.load(opt.model_path, map_location="cpu"))

    print(
        "___________________ Generator Initialised and Loaded _________________________"
    )
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1)

    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode
    hidden_dim = opt.D_hidden_dim
    embedding_dim = opt.D_embedding_dim
    n_layers = opt.D_layers

    D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim,
                            n_layers, opt.word2idx[pykp.io.PAD_WORD])
    print("The Discriminator Description is ", D_model)

    PG_optimizer = torch.optim.Adagrad(model.parameters(),
                                       opt.learning_rate_rl)
    if torch.cuda.is_available():
        D_model.load_state_dict(torch.load(opt.Discriminator_model_path))
        D_model = D_model.to(opt.gpuid)
    else:
        D_model.load_state_dict(
            torch.load(opt.Discriminator_model_path, map_location="cpu"))

    # D_model.load_state_dict(torch.load("Discriminator_checkpts/D_model_combined1.pth.tar"))
    total_epochs = opt.epochs
    for epoch in range(total_epochs):

        total_batch = 0
        print("Starting with epoch:", epoch)
        for batch_i, batch in enumerate(train_data_loader):

            model.train()
            PG_optimizer.zero_grad()

            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (
                    init_perturb_std - final_perturb_std) * math.exp(
                        -1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(
                    perturb_decay_factor, math.floor((1 + total_batch) / 4000))

            avg_rewards = train_one_batch(D_model, batch, generator, opt,
                                          perturb_std)

            avg_rewards.backward()
            # clip only after backward(), once the gradients exist
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            PG_optimizer.step()

            if batch_i % 4000 == 0:
                print("Saving the file ...............----------->>>>>")
                print("The avg reward is", -avg_rewards.item())
                state_dfs = model.state_dict()
                torch.save(
                    state_dfs, "RL_Checkpoints/Attention_Generator_" +
                    str(epoch) + ".pth.tar")
Example 10
    def test(self):
        #prepare model
        path = self.args.transformer
        max_len = self.args.max_len
        state_dict = torch.load(path)['state_dict']
        vocab_size = self.data_utils.vocab_size

        self.transformer, self.sampler = self.make_model(
            src_vocab=vocab_size,
            tgt_vocab=vocab_size,
            N=self.args.num_layer,
            dropout=self.args.dropout,
            pointer_gen=self.args.pointer_gen,
            gdrop=self.args.gdrop,
            pred_pos_drop=self.args.pred_pos_drop,
            g_hidden=self.args.g_hidden)
        self.transformer.load_state_dict(state_dict)

        sampler_path = self.args.sampler
        sampler_state_dict = torch.load(sampler_path)['state_dict']
        self.sampler.load_state_dict(sampler_state_dict)

        pred_dir = make_save_dir(self.args.pred_dir)
        filename = self.args.filename

        # print('-----------')
        #start decoding
        data_yielder = self.data_utils.data_yielder(valid=False)
        total_loss = []
        start = time.time()

        #file
        f = open(os.path.join(pred_dir, filename), 'w')
        f_pos = open(os.path.join(pred_dir, self.args.pos_file), 'w')

        self.transformer.eval()
        self.sampler.eval()
        step = 0
        sampler_accs = []
        for batch in data_yielder:
            #print(batch['src'].data.size())
            pos_list = []
            step += 1
            # if step % 10 == 1:
            #     print('Step ', step)
            src = batch['src'].long()
            src_pos = src.clone()
            src_mask = batch['src_mask'].long()
            if not self.args.pred_pos:
                sampler_label = batch['sampler_label'].long()
                # tgt = batch['tgt'].long()
            pos = None

            # sampler model
            if self.args.pred_pos:
                pos_out = self.sampler.greedy_decode(src, src_mask,
                                                     self.data_utils.max_len,
                                                     self.data_utils.bos)
                src_pos = torch.cat((src, pos_out[:, 1:]), dim=1).detach()

            else:
                out = self.sampler.forward(src, src_mask)
                # choose top10 or top1
                selected_tag_logprob, sampled_idx = torch.max(out, dim=-1)
                # selected_tag_logprob, sampled_idx = torch.topk(out, 10, dim=-1)

                sampled_idx = sampled_idx.unsqueeze(1)
                # print('sampled_idx', sampled_idx.size())
                sample_acc = (sampler_label == sampled_idx[:, 0]
                              ).sum() / sampler_label.fill_(1).sum().float()
                sampler_accs.append(sample_acc.detach().cpu().numpy())

                # for j in range(sampled_idx.size(1)):
                sampled_idx = sampled_idx.squeeze(1)
                # create pos-informed data
                s = sampled_idx
                # print('s %d'%j, s)
                for i in range(sampled_idx.size(0)):
                    # pos = self.data_utils.pos_dict['idx2structure'][int(sampled_idx[i])]
                    pos = self.data_utils.pos_dict['idx2structure'][int(s[i])]
                    # print(pos)
                    f_pos.write(pos)
                    f_pos.write('\n')
                    pos = ['<' + l + '>' for l in pos.strip().split()]

                    pos_list.append(pos)
                    # print('pos', pos)
                    src_pos[i] = self.data_utils.addpos2src(
                        pos, src_pos[i], self.data_utils.src_max_len)

                if self.args.pos_masking:
                    posmask = torch.zeros(
                        (sampled_idx.size(0), self.args.max_len,
                         self.data_utils.vocab_size)).cuda()
                    posmask[:, :, self.data_utils.pad] = 1

                    if len(pos) > self.args.max_len:
                        pos = pos[:self.args.max_len]
                    ### pos_masking ###
                    for j, p in enumerate(pos):
                        posmask[i, j] = self.data_utils.pos2mask[p]
                    batch['posmask'] = posmask

            rl_src_mask = (src_pos != self.data_utils.pad).unsqueeze(1)

            if self.args.beam_size > 1:
                seq_gen = SequenceGenerator(
                    self.transformer,
                    self.data_utils,
                    beam_size=self.args.beam_size,
                    no_repeat_ngram_size=self.args.block_ngram)
                out = seq_gen._generate(batch,
                                        src_pos,
                                        rl_src_mask,
                                        pos_masking=self.args.pos_masking,
                                        bos_token=self.data_utils.bos)
            else:
                if self.args.pos_masking:
                    out = self.transformer.greedy_decode(
                        src_pos, rl_src_mask, self.data_utils.max_len,
                        self.data_utils.bos, batch['posmask'])
                else:
                    out = self.transformer.greedy_decode(
                        src_pos, rl_src_mask, self.data_utils.max_len,
                        self.data_utils.bos)

            # out = self.transformer.greedy_decode(batch['src'].long(), batch['src_mask'], max_len, self.data_utils.bos)
            for i, l in enumerate(out):
                # print(l)
                if self.args.beam_size > 1:
                    sentence = self.data_utils.id2sent(l[0]['tokens'][:-1],
                                                       True)
                else:
                    sentence = self.data_utils.id2sent(l[1:], True)
                # pos = pos_list[i]
                #print(l[1:])
                f.write(sentence)
                # f.write('\t')
                # pos_str = ""
                # for p in pos:
                #     pos_str += p
                #     pos_str += " "
                # f.write(pos_str.strip())
                f.write("\n")
            src_pos = src.clone()
            pos_list = []
            # out, _ = self.transformer.forward(src_pos, tgt,
            #                         batch['src_mask'], batch['tgt_mask'])
            # loss = self.transformer.loss_compute(out, batch['y'].long(), self.data_utils.pad)
            # total_loss.append(loss.item())
        if not self.args.pred_pos:
            print('sampler accs %f' % (sum(sampler_accs) / len(sampler_accs)))
Example 11
def train_model(model, optimizer_ml, optimizer_rl, criterion,
                train_data_loader, valid_data_loader, opt):
    total_batch = -1
    early_stop_flag = False

    report_train_reward_statistics = RewardStatistics()
    total_train_reward_statistics = RewardStatistics()
    report_train_reward = []
    report_valid_reward = []
    best_valid_reward = float('-inf')
    num_stop_increasing = 0
    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode

    if opt.train_from:
        # TODO: load the training state
        raise ValueError(
            "Loading from a trained model is not implemented yet")

    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1)

    model.train()

    for epoch in range(opt.start_epoch, opt.epochs + 1):
        if early_stop_flag:
            break

        # TODO: progress bar
        # progbar = Progbar(logger=logging, title='Training', target=len(train_data_loader), batch_size=train_data_loader.batch_size,total_examples=len(train_data_loader.dataset.examples))
        for batch_i, batch in enumerate(train_data_loader):
            total_batch += 1
            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (
                    init_perturb_std - final_perturb_std) * math.exp(
                        -1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(
                    perturb_decay_factor, math.floor((1 + total_batch) / 4000))

            batch_reward_stat, log_selected_token_dist = train_one_batch(
                batch, generator, optimizer_rl, opt, perturb_std)
            report_train_reward_statistics.update(batch_reward_stat)
            total_train_reward_statistics.update(batch_reward_stat)

            # Checkpoint, decay the learning rate if validation loss stop dropping, apply early stopping if stop decreasing for several epochs.
            # Save the model parameters if the validation loss improved.
            if total_batch % 4000 == 0:
                print("Epoch %d; batch: %d; total batch: %d" %
                      (epoch, batch_i, total_batch))
                sys.stdout.flush()

            if epoch >= opt.start_checkpoint_at:
                if (opt.checkpoint_interval == -1 and batch_i == len(train_data_loader) - 1) or \
                        (opt.checkpoint_interval > -1 and total_batch > 1 and total_batch % opt.checkpoint_interval == 0):

                    valid_reward_stat = evaluate_reward(
                        valid_data_loader, generator, opt)
                    model.train()
                    current_valid_reward = valid_reward_stat.reward()
                    print("Enter check point!")
                    sys.stdout.flush()

                    current_train_reward = report_train_reward_statistics.reward()
                    current_train_pg_loss = report_train_reward_statistics.loss()

                    if current_valid_reward > best_valid_reward:
                        print("Valid reward increases")
                        sys.stdout.flush()
                        best_valid_reward = current_valid_reward
                        num_stop_increasing = 0

                        check_pt_model_path = os.path.join(
                            opt.model_path,
                            '%s.epoch=%d.batch=%d.total_batch=%d' %
                            (opt.exp, epoch, batch_i, total_batch) + '.model')
                        # save model parameters
                        torch.save(model.state_dict(), check_pt_model_path)
                        logging.info('Saving checkpoint to %s' %
                                     check_pt_model_path)
                    else:
                        print("Valid reward does not increase")
                        sys.stdout.flush()
                        num_stop_increasing += 1
                        # decay the learning rate by the factor specified by opt.learning_rate_decay
                        if opt.learning_rate_decay_rl:
                            for i, param_group in enumerate(
                                    optimizer_rl.param_groups):
                                old_lr = float(param_group['lr'])
                                new_lr = old_lr * opt.learning_rate_decay
                                if old_lr - new_lr > EPS:
                                    param_group['lr'] = new_lr

                    logging.info(
                        'Epoch: %d; batch idx: %d; total batches: %d' %
                        (epoch, batch_i, total_batch))
                    logging.info(
                        'avg training reward: %.4f; avg training loss: %.4f; avg validation reward: %.4f; best validation reward: %.4f'
                        % (current_train_reward, current_train_pg_loss,
                           current_valid_reward, best_valid_reward))

                    report_train_reward.append(current_train_reward)
                    report_valid_reward.append(current_valid_reward)

                    if not opt.disable_early_stop_rl:
                        if num_stop_increasing >= opt.early_stop_tolerance:
                            logging.info(
                                'Have not increased for %d check points, early stop training'
                                % num_stop_increasing)
                            early_stop_flag = True
                            break
                    report_train_reward_statistics.clear()

    # export the training curve
    train_valid_curve_path = opt.exp_path + '/train_valid_curve'
    export_train_and_valid_reward(report_train_reward, report_valid_reward,
                                  opt.checkpoint_interval,
                                  train_valid_curve_path)
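The learning-rate decay inside the checkpoint branch uses the standard PyTorch idiom of mutating optimizer.param_groups in place. Pulled out as a helper for readability (an illustration, not part of the original code; the eps guard mirrors the snippet's EPS constant):

def decay_learning_rate(optimizer, factor, eps=1e-8):
    """Multiply every parameter group's learning rate by `factor`."""
    for param_group in optimizer.param_groups:
        old_lr = float(param_group['lr'])
        new_lr = old_lr * factor
        if old_lr - new_lr > eps:  # skip updates too small to matter
            param_group['lr'] = new_lr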
Example 12
import os
from os import listdir
from os.path import isfile, join

import numpy as np

if __name__ == "__main__":
    # delete current pkls
    data_path = os.path.join(PATH_TO_PROJECT, 'data')
    filenames = [f for f in listdir(data_path) if isfile(join(data_path, f))]
    pkl_filenames = [s for s in filenames if '.pkl' in s]
    for pkl_file in pkl_filenames:
        path_to_file = os.path.join(data_path, pkl_file)
        if os.path.isfile(path_to_file):
            os.remove(path_to_file)

    shapes = ["square", "sawtooth", "sinusoidal", "gaussian_pulses"]  # ["sinusoidal"]#
    # shapes = ["sinusoidal"]
    for shape in shapes:
        data = SequenceGenerator(sequence_shape=shape)
        # Cadence
        time_sample_noise = 0.1
        max_length = 100  # 50
        min_length = 100  # 20
        time_span = [0, 1]  # [10, 50]
        min_time_spam = 1  # 10

        data.set_cadence_params(time_sample_noise=time_sample_noise,
                                max_length=max_length,
                                min_length=min_length,
                                time_span=time_span,
                                min_time_spam=min_time_spam)

        # Signal
        amp_range = np.linspace(start=0.25, stop=1, num=4)  # i.e. [0.25, 0.5, 0.75, 1.0]
Example 13
def main():
    #print("agsnf efnghrrqthg")
    print("dfsgf")
    clip = 5
    start_time = time.time()
    train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(
        opt, load_train=True)
    load_data_time = time_since(start_time)
    logging.info('Time for loading the data: %.1f' % load_data_time)

    model = Seq2SeqModel(opt)
    model = model.to("cuda:2")  # hard-coded GPU id

    #model.load_state_dict(torch.load("model/kp20k.ml.one2many.cat.copy.bi-directional.20190704-170553/kp20k.ml.one2many.cat.copy.bi-directional.epoch=2.batch=264.total_batch=8000.model"))
    # model.load_state_dict(torch.load("Checkpoint_individual_3.pth.tar"))
    model.load_state_dict(
        torch.load(
            "model/kp20k.ml.one2many.cat.copy.bi-directional.20190715-132016/kp20k.ml.one2many.cat.copy.bi-directional.epoch=3.batch=26098.total_batch=108000.model"
        ))
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1)

    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode

    hidden_dim = opt.D_hidden_dim
    embedding_dim = opt.D_embedding_dim
    n_layers = opt.D_layers
    D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim,
                            n_layers, opt.word2idx[pykp.io.PAD_WORD])

    # D_model.load_state_dict(torch.load("Discriminator_checkpts/Checkpoint_Individual_Training_4.pth.tar"))

    PG_optimizer = torch.optim.Adagrad(model.parameters(), 0.00005)

    print("The Discriminator statistics are ", D_model)

    if torch.cuda.is_available():
        D_model = D_model.to("cuda:1")

    total_epochs = 5
    for epoch in range(total_epochs):

        total_batch = 0
        print("Starting with epoch:", epoch)
        for batch_i, batch in enumerate(valid_data_loader):
            total_batch += 1

            PG_optimizer.zero_grad()

            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (
                    init_perturb_std - final_perturb_std) * math.exp(
                        -1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(
                    perturb_decay_factor, math.floor((1 + total_batch) / 4000))

            avg_rewards = train_one_batch(D_model, batch, generator, opt,
                                          perturb_std)

            avg_rewards.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)

            PG_optimizer.step()

            if batch_i % 15 == 0:
                print("The avg reward is", -avg_rewards.item())
            if batch_i % 100 == 0:
                print("Saving the file ...............----------->>>>>")
                print("The avg reward is", -avg_rewards.item())
                state_dfs = model.state_dict()
                torch.save(
                    state_dfs, "RL_Checkpoints/Checkpoint_SeqGAN_" +
                    str(epoch) + ".pth.tar")

        print("Saving the file ...............----------->>>>>")
        state_dfs = model.state_dict()
        torch.save(
            state_dfs,
            "RL_Checkpoints/Checkpoint_SeqGAN_" + str(epoch) + ".pth.tar")
Example 14
from sequence_generator import SequenceGenerator

# how long should the entire sequence take (in ms)
SEQUENCE_DURATION_MS = 4000

# Noon, 3, Noon, 3, Noon, 9, Noon, 9
# SEQUENCE = [0, 90, 0, 90, 0, -90, 0, -90]
SEQUENCE = [0,90]

sg = SequenceGenerator(SEQUENCE_DURATION_MS, SEQUENCE)

print("time,\tstart,\tend, \t%, \tpos")

for x in range(8000):
    sg.position(x)
Example 15
def main():
    clip = 5
    start_time = time.time()
    train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(
        opt, load_train=True)
    load_data_time = time_since(start_time)
    logging.info('Time for loading the data: %.1f' % load_data_time)

    model = Seq2SeqModel(opt)
    # `devices` is assumed to be a torch.device defined at module level
    model = model.to(devices)

    # model.load_state_dict(torch.load("model/kp20k.ml.one2many.cat.copy.bi-directional.20190628-114655/kp20k.ml.one2many.cat.copy.bi-directional.epoch=2.batch=54573.total_batch=116000.model"))
    model.load_state_dict(
        torch.load(
            "model/kp20k.ml.one2many.cat.copy.bi-directional.20190715-132016/kp20k.ml.one2many.cat.copy.bi-directional.epoch=3.batch=26098.total_batch=108000.model"
        ))
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1)

    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode

    hidden_dim = opt.D_hidden_dim
    embedding_dim = opt.D_embedding_dim
    n_layers = opt.D_layers
    D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim,
                            n_layers, opt.word2idx[pykp.io.PAD_WORD])

    print("The Discriminator statistics are ", D_model)

    if torch.cuda.is_available():
        D_model = D_model.to(devices)

    D_model.train()

    D_optimizer = torch.optim.Adam(D_model.parameters(), lr=0.001)

    print("gdsf")
    total_epochs = 5
    for epoch in range(total_epochs):

        total_batch = 0
        print("Starting with epoch:", epoch)
        for batch_i, batch in enumerate(train_data_loader):
            total_batch += 1
            D_optimizer.zero_grad()

            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (
                    init_perturb_std - final_perturb_std) * math.exp(
                        -1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(
                    perturb_decay_factor, math.floor((1 + total_batch) / 4000))

            avg_batch_loss, real_r, fake_r = train_one_batch(
                D_model, batch, generator, opt, perturb_std)
            #            print("Currently loss is",avg_batch_loss.item())
            #            print("Currently real loss is",real_r.item())
            #            print("Currently fake loss is",fake_r.item())
            #            state_dfs = D_model.state_dict()
            #            torch.save(state_dfs,"Checkpoint_" + str(epoch) + ".pth.tar")
            #

            if batch_i % 350 == 0:
                print("Currently loss is", avg_batch_loss.item())
                print("Currently real loss is", real_r.item())
                print("Currently fake loss is", fake_r.item())

                print("Saving the file ...............----------->>>>>")
                state_dfs = D_model.state_dict()
                torch.save(
                    state_dfs, "Discriminator_checkpts/D_model_combined" +
                    str(epoch) + ".pth.tar")

            avg_batch_loss.backward()
            # clip only after backward(), once the gradients exist
            torch.nn.utils.clip_grad_norm_(D_model.parameters(), clip)
            D_optimizer.step()

        print("Saving the file ...............----------->>>>>")
        state_dfs = D_model.state_dict()
        torch.save(
            state_dfs, "Discriminator_checkpts/D_model_combined" + str(epoch) +
            ".pth.tar")
Example 16
def main(args):

    model_name = args.model_name
    assert model_name is not None
    if model_name == "gan":
        Model = LSTMModel
    elif model_name == "vae":
        Model = VarLSTMModel
    elif model_name == "mle":
        Model = LSTMModel
    else:
        raise Exception("Model name should be: gan|vae|mle")

    if len(args.gpuid) >= 1 and args.gpuid[0] >= 0:
        use_cuda = True
        cuda.set_device(args.gpuid[0])
        map_to = torch.device(f"cuda:{args.gpuid[0]}")
    else:
        use_cuda = False
        map_to = torch.device('cpu')

    # Load dataset
    if data.has_binary_files(args.data, ['test']):
        dataset = data.load_dataset(
            args.data,
            ['test'],
            args.src_lang,
            args.trg_lang,
        )
    else:
        dataset = data.load_raw_text_dataset(
            args.data,
            ['test'],
            args.src_lang,
            args.trg_lang,
        )

    if args.src_lang is None or args.trg_lang is None:
        # record inferred languages in args, so that it's saved in checkpoints
        args.src_lang, args.trg_lang = dataset.src, dataset.dst

    print('| [{}] dictionary: {} types'.format(dataset.src,
                                               len(dataset.src_dict)))
    print('| [{}] dictionary: {} types'.format(dataset.dst,
                                               len(dataset.dst_dict)))
    print('| {} {} {} examples'.format(args.data, 'test',
                                       len(dataset.splits['test'])))

    # Set model parameters
    args.encoder_embed_dim = 128
    args.encoder_layers = 2  # 4
    args.encoder_dropout_out = 0
    args.decoder_embed_dim = 128
    args.decoder_layers = 2  # 4
    args.decoder_out_embed_dim = 128
    args.decoder_dropout_out = 0
    args.bidirectional = False

    # Load model
    if args.model_file is None:
        g_model_path = 'checkpoint/VAE_2021-03-04 12:16:21/best_gmodel.pt'
    else:
        g_model_path = args.model_file

    def load_params():
        params = json.loads(
            open(os.path.join(os.path.dirname(g_model_path),
                              "params.json")).read())
        args.__dict__.update(params)

    load_params()

    assert os.path.exists(g_model_path), f"Path does not exist {g_model_path}"
    generator = Model(args,
                      dataset.src_dict,
                      dataset.dst_dict,
                      use_cuda=use_cuda)
    model_dict = generator.state_dict()
    model = torch.load(g_model_path, map_location=map_to)
    pretrained_dict = model.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    generator.load_state_dict(model_dict)
    generator.eval()

    print("Generator loaded successfully!")

    if use_cuda:
        generator.cuda()
    else:
        generator.cpu()

    max_positions = generator.encoder.max_positions()

    testloader = dataset.eval_dataloader(
        'test',
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test,
    )

    translator = SequenceGenerator(generator,
                                   beam_size=args.beam,
                                   stop_early=(not args.no_early_stop),
                                   normalize_scores=(not args.unnormalized),
                                   len_penalty=args.lenpen,
                                   unk_penalty=args.unkpen)

    if use_cuda:
        translator.cuda()

    with open('predictions.txt', 'w') as translation_writer:
        with open('real.txt', 'w') as ground_truth_writer:

            translations = translator.generate_batched_itr(
                testloader,
                maxlen_a=args.max_len_a,
                maxlen_b=args.max_len_b,
                cuda=use_cuda)

            for sample_id, src_tokens, target_tokens, hypos in translations:
                # Process input and ground truth
                target_tokens = target_tokens.int().cpu()
                src_str = dataset.src_dict.string(src_tokens, args.remove_bpe)
                target_str = dataset.dst_dict.string(target_tokens,
                                                     args.remove_bpe,
                                                     escape_unk=True)

                # Process top predictions
                for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
                    hypo_tokens = hypo['tokens'].int().cpu()
                    hypo_str = dataset.dst_dict.string(hypo_tokens,
                                                       args.remove_bpe)

                    hypo_str += '\n'
                    target_str += '\n'

                    translation_writer.write(hypo_str)
                    ground_truth_writer.write(target_str)
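The numbered filter/update/load steps above are a common recipe for loading a checkpoint into a model whose architecture only partially matches. Factored into a reusable helper (an illustrative sketch, with a shape check added that the original filter omits):

def load_matching_weights(model, checkpoint_state):
    """Copy over only the checkpoint entries whose names and shapes fit the model."""
    own_state = model.state_dict()
    compatible = {k: v for k, v in checkpoint_state.items()
                  if k in own_state and v.shape == own_state[k].shape}
    own_state.update(compatible)      # overwrite the matching entries
    model.load_state_dict(own_state)  # strict load now succeeds
    return sorted(set(checkpoint_state) - set(compatible))  # names that were skipped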
Example 17
    '''
        training_step = network.training_op

    with tf.variable_scope('loss'):
        loss_summary_op = tf.summary.merge([
            tf.summary.scalar('loss', network.loss),
            ])
    summary_full = tf.summary.merge_all()
    conv_var_list = [v for v in tf.trainable_variables() if 'conv' in v.name and 'weight' in v.name and
                    (v.get_shape().as_list()[0] != 1 or v.get_shape().as_list()[1] != 1)]
    for var in conv_var_list:
        tf_util.conv_variable_summaries(var, scope=var.name.replace('/', '_')[:-2])
    summary_with_images = tf.summary.merge_all()
    '''

    #sess = tf_util.Session()
    sequence_generator = SequenceGenerator(None)

    #sess.run(tf.global_variables_initializer())

    all_scene_types = [
        'retrieval_goal-', 'traversal_goal-', 'transferral_goal-'
    ]
    #scene_types = ['retrieval_goal-', 'traversal_goal-', 'transferral_goal-']
    #scene_types = ['retrieval_goal-', 'traversal_goal-']#, 'transferral_goal-']
    #scene_types = ['transferral_goal-']
    scene_types = ['traversal_goal-']
    #scene_types = ['retrieval_goal-']

    #scene_numbers = ['0933','0934','0935']
    scene_numbers = ['0058']  #,'0934','0935']
    #scene_numbers = create_scene_numbers(100)
Example 18
def main(args):
    assert args.path is not None, '--path required for generation!'
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert args.replace_unk is None or args.raw_text, \
        '--replace-unk requires a raw text dataset (--raw-text)'

    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 12000
    print(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    print('| {} {} {} examples'.format(args.data, args.gen_subset,
                                       len(task.dataset(args.gen_subset))))
    # args.data: the second positional argument

    # Set dictionaries
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary

    # Load ensemble
    print('| loading model(s) from {}'.format(args.path))
    models, _ = utils.load_ensemble_for_inference(args.path.split(':'),
                                                  task,
                                                  model_arg_overrides=eval(
                                                      args.model_overrides))

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

    # Load dataset (possibly sharded)
    # What data is being loaded here? dataset=task.dataset(args.gen_subset), i.e. data(train)
    # Why does it still look like a modification of the train split?
    itr = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            *[model.max_positions() for model in models]),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=8,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
    ).next_epoch_itr(shuffle=False)

    # Initialize generator
    gen_timer = StopwatchMeter()
    if args.score_reference:
        translator = SequenceScorer(models, task.target_dictionary)
    else:
        print(args.beam)
        translator = SequenceGenerator(
            models,
            task.target_dictionary,
            beam_size=args.beam,
            minlen=args.min_len,
            stop_early=(not args.no_early_stop),
            normalize_scores=(not args.unnormalized),
            len_penalty=args.lenpen,
            unk_penalty=args.unkpen,
            sampling=args.sampling,
            sampling_topk=args.sampling_topk,
            sampling_temperature=args.sampling_temperature,
            diverse_beam_groups=args.diverse_beam_groups,
            diverse_beam_strength=args.diverse_beam_strength)

    if use_cuda:
        translator.cuda()

    # Initialize fluency scorer (and language model)
    fluency_scorer = FluencyScorer(args.lang_model_path,
                                   args.lang_model_data,
                                   use_cpu=False)

    en_filename = os.path.join(args.out_dir, 'errorgen.en')
    gec_filename = os.path.join(args.out_dir, 'errorgen.gec')
    has_target = True
    with progress_bar.build_progress_bar(args, itr) as t, open(
            en_filename, 'w') as en_file, open(gec_filename, 'w') as gec_file:
        if args.score_reference:
            translations = translator.score_batched_itr(t,
                                                        cuda=use_cuda,
                                                        timer=gen_timer)
        else:
            translations = translator.generate_batched_itr(
                t,
                maxlen_a=args.max_len_a,
                maxlen_b=args.max_len_b,
                cuda=use_cuda,
                timer=gen_timer,
                prefix_size=args.prefix_size,
            )

        for sample_id, src_tokens, target_tokens, hypos in translations:
            # Process input and ground truth
            has_target = target_tokens is not None
            target_tokens = target_tokens.int().cpu() if has_target else None

            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(
                    args.gen_subset).src.get_original_text(sample_id)
                target_str = task.dataset(
                    args.gen_subset).tgt.get_original_text(sample_id)
            else:
                src_str = src_dict.string(src_tokens, args.remove_bpe)
                if has_target:
                    target_str = tgt_dict.string(target_tokens,
                                                 args.remove_bpe,
                                                 escape_unk=True)

            # Only consider sentences with at least four words.
            if len(src_tokens) < 5:
                continue

            # Calculate the fluency score for the source sentence
            source_fluency = fluency_scorer.score_sentence(src_str)

            # Process top predictions
            for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'].int().cpu()
                    if hypo['alignment'] is not None else None,
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=args.remove_bpe,
                )

                # Skip if this is the original sentence.
                if hypo_str == target_str:
                    continue

                # Score the hypothesis.
                hypo_fluency = fluency_scorer.score_sentence(hypo_str)

                # Save the hypothesis if it is sufficiently disfluent.
                if (source_fluency / hypo_fluency) > 1.05:
                    en_file.write('{}\n'.format(hypo_str))
                    gec_file.write('{}\n'.format(src_str))
Example 19
    def test(self):
        # Prepare model
        path = self.args.load_model
        state_dict = torch.load(path)['state_dict']

        self.model.load_state_dict(state_dict)

        # file path for prediction
        pred_dir = make_save_dir(self.args.pred_dir)
        filename = self.args.filename
        outfile = open(os.path.join(pred_dir, self.args.task, filename), 'w')

        # Start decoding
        data_yielder = self.data_utils.data_yielder()
        total_loss = []
        start = time.time()

        # If beam search, create sequence generator object
        self._beam_search = self.config['eval']['beam_size'] > 1
        # self._beam_search = True
        if self._beam_search:
            seq_gen = SequenceGenerator(
                self.model,
                self.data_utils,
                beam_size=self.config['eval']['beam_size'],
                no_repeat_ngram_size=self.config['eval']['block_ngram'])

        self.model.eval()
        step = 0

        # Run one batch
        for batch in data_yielder:
            step += 1
            if step % 10 == 1:
                print('Step ', step)

            # Decoding according to scheme
            if self._beam_search:
                out = seq_gen.generate(batch,
                                       pos_masking=self.config['pos_masking'],
                                       bos_token=self.data_utils.bos)
            else:
                max_length = self.config['max_len']
                if self.config['pos_masking']:
                    out = self.model.greedy_decode(batch['src'].long(),
                                                   batch['src_mask'],
                                                   max_length,
                                                   self.data_utils.bos,
                                                   batch['posmask'])
                else:
                    if self.args.task == 'joint_gen':
                        max_length = self.config['max_decode_step']
                    out = self.model.greedy_decode(batch['src'].long(),
                                                   batch['src_mask'],
                                                   max_length,
                                                   self.data_utils.bos)

            # Write sentences to file
            for l in out:
                if self._beam_search:
                    sentence = self.data_utils.id2sent(l[0]['tokens'][:-1],
                                                       True)
                else:
                    sentence = self.data_utils.id2sent(l[1:], True)
                outfile.write(sentence)
                outfile.write("\n")

        outfile.close()
Example 20
def explore_all_scenes():
    #sess = tf_util.Session()
    sequence_generator = SequenceGenerator(None, None)

    #sess.run(tf.global_variables_initializer())

    all_scene_types = ['retrieval_goal-', 'traversal_goal-', 'transferral_goal-']
    #scene_types = ['retrieval_goal-', 'traversal_goal-', 'transferral_goal-']
    #scene_types = ['retrieval_goal-', 'traversal_goal-']#, 'transferral_goal-']
    scene_types = ['transferral_goal-']
    #scene_types = ['traversal_goal-']
    #scene_types = ['retrieval_goal-']


    #scene_numbers = ['0933','0934','0935']
    #scene_numbers = ['0058']#,'0934','0935']
    scene_numbers = ['0007']
    #scene_numbers = create_scene_numbers('0100', '0201')
    print(scene_numbers)
    #exit()
    #scene_number = [i]
    all_data = {}
    training_data = {}
    exploration_data = {}
    actual_count_of_explored_scenes = {}
    total_goal_objects_found = {}
    actual_goal_data = {}
    for elem in scene_types:
        all_data[elem] = {"explored": [], "actual": [], "explored_total": 0, "actual_total": 0}
        training_data[elem] = {}
        exploration_data[elem] = {}
        actual_count_of_explored_scenes[elem] = 0
        total_goal_objects_found[elem] = 0
        actual_goal_data[elem] = 0


    total_actions = 0
    #env = game_util.create_ai2thor_env()


    for scene_type in scene_types:
        for scene_number in scene_numbers:
            
            current_explored = 0
            #new_data, bounds, goal_pose = sequence_generator.explore_scene(str(scene_type)+ scene_number + ".json")
            explore_scene(sequence_generator,event=None,scene_type=scene_type,scene_number=scene_number)
            #sequence_generator.explore_3d_scene(str(scene_type)+ scene_number + ".json",event=None)
            #exit()
            current_explored_objects = sequence_generator.agent.game_state.discovered_objects
            current_explored_uuids = sequence_generator.agent.game_state.discovered_explored
            current_explored = len(current_explored_objects)
            
            total_actions += sequence_generator.agent.game_state.number_actions
            #sequence_generator.agent.game_state.env.end_scene('', 0.0) 
            goal = sequence_generator.agent.game_state.event.goal
            goal_objects = []
            
            
            # Collect the ids of the goal object(s) from the scene's goal metadata
            for key in goal.metadata:
                if key in ("target", "target_1", "target_2"):
                    goal_objects.append(goal.metadata[key]["id"])
                    actual_goal_data[scene_type] += 1

            print("Total objects discovered =", current_explored)
            for elem in current_explored_uuids:
                print(elem)

            print("explored objects over, goal next")

            for elem in goal_objects:
                print(elem)

            for elem in goal_objects :
                if elem in current_explored_uuids:
                    total_goal_objects_found[scene_type] += 1 

            # (The actual object count could instead be obtained by resetting an
            # AI2Thor controller on the scene and reading event.metadata['objects'].)

            all_data[scene_type]['explored_total'] += current_explored
            exploration_data[scene_type][scene_number] = current_explored
            print("Total actions until now =", total_actions)

    actual_data = json.load(open('training_total_objects_data.json'))

    for key,value in exploration_data.items() :
        for key2, value2 in value.items() :
            actual_count_of_explored_scenes[key] += actual_data[key][key2] 

    #print ("Total explored = " , all_data.items)
    for key,items in all_data.items():
        print ("Total explored     for scenes in {} is {}".format(key, items['explored_total']))
        print ("Total actual       for scenes in {} is {}".format( key, actual_count_of_explored_scenes[key]))
        print ("Total goals found  for scenes in {} is {}".format( key, total_goal_objects_found[key]))
        print ("Total goal actual  for scenes in {} is {}".format( key, actual_goal_data[key]))


    print ("Total actions = ", total_actions)

Example n. 21
0
    parser.add_argument("--subset",
                        default="test",
                        help="subset on which to condition the model")
    args = parser.parse_args()

    temperatures = [1]
    batch_loader = DialogueBatchLoader(
        sources="dialogue movie_occurrences movieIds_in_target", batch_size=1)
    rec = Recommender(batch_loader.train_vocabulary,
                      batch_loader.n_movies,
                      params=test_params.recommender_params)
    load_model(rec, args.model_path)
    batch_loader.set_word2id(rec.encoder.word2id)
    generator = SequenceGenerator(rec.decoder,
                                  beam_size=args.beam_size,
                                  word2id=batch_loader.word2id,
                                  movie_id2name=batch_loader.id2name,
                                  max_sequence_length=40)
    batch_loader.batch_index[args.subset] = 0

    # START
    with open(args.save_path, "w") as f:
        f.write("")
    for _ in tqdm(range(args.n_examples)):
        # Load batch
        batch_index = batch_loader.batch_index[args.subset]
        batch = batch_loader.load_batch(subset=args.subset)
        if rec.cuda_available:
            batch["dialogue"] = batch["dialogue"].cuda()
            batch["target"] = batch["target"].cuda()
            batch["senders"] = batch["senders"].cuda()
Example n. 22
0
def main(opt):
    clip = 5
    start_time = time.time()
    train_data_loader, valid_data_loader, word2idx, idx2word, vocab = load_data_and_vocab(
        opt, load_train=True)
    load_data_time = time_since(start_time)
    logging.info('Time for loading the data: %.1f' % load_data_time)

    print(
        "Data Successfully Loaded __.__.__.__.__.__.__.__.__.__.__.__.__.__.")
    model = Seq2SeqModel(opt)

    if torch.cuda.is_available():
        model.load_state_dict(torch.load(opt.model_path))
        model = model.to(opt.gpuid)
    else:
        model.load_state_dict(torch.load(opt.model_path, map_location="cpu"))

    print(
        "___________________ Generator Initialised and Loaded _________________________"
    )
    generator = SequenceGenerator(model,
                                  bos_idx=opt.word2idx[pykp.io.BOS_WORD],
                                  eos_idx=opt.word2idx[pykp.io.EOS_WORD],
                                  pad_idx=opt.word2idx[pykp.io.PAD_WORD],
                                  peos_idx=opt.word2idx[pykp.io.PEOS_WORD],
                                  beam_size=1,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attention,
                                  coverage_attn=opt.coverage_attn,
                                  review_attn=opt.review_attn,
                                  cuda=opt.gpuid > -1)

    init_perturb_std = opt.init_perturb_std
    final_perturb_std = opt.final_perturb_std
    perturb_decay_factor = opt.perturb_decay_factor
    perturb_decay_mode = opt.perturb_decay_mode
    hidden_dim = opt.D_hidden_dim
    embedding_dim = opt.D_embedding_dim
    n_layers = opt.D_layers
    if torch.cuda.is_available():
        D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim,
                                n_layers, opt.word2idx[pykp.io.PAD_WORD],
                                opt.gpuid)
    else:
        D_model = Discriminator(opt.vocab_size, embedding_dim, hidden_dim,
                                n_layers, opt.word2idx[pykp.io.PAD_WORD],
                                "cpu")
    print("The Discriminator Description is ", D_model)
    if opt.pretrained_Discriminator:
        if torch.cuda.is_available():
            D_model.load_state_dict(torch.load(opt.Discriminator_model_path))
            D_model = D_model.to(opt.gpuid)
        else:
            D_model.load_state_dict(
                torch.load(opt.Discriminator_model_path, map_location="cpu"))
    elif torch.cuda.is_available():
        # no pretrained weights to restore: just move the freshly initialised
        # discriminator to the GPU
        D_model = D_model.to(opt.gpuid)
    D_optimizer = torch.optim.Adam(D_model.parameters(), opt.learning_rate)
    print("Beginning with training Discriminator")
    print(
        "########################################################################################################"
    )
    total_epochs = 5
    best_valid_loss = float('inf')  # best validation loss seen so far
    for epoch in range(total_epochs):
        total_batch = 0
        print("Starting with epoch:", epoch)
        for batch_i, batch in enumerate(train_data_loader):
            total_batch += 1
            D_model.train()
            D_optimizer.zero_grad()

            if perturb_decay_mode == 0:  # do not decay
                perturb_std = init_perturb_std
            elif perturb_decay_mode == 1:  # exponential decay
                perturb_std = final_perturb_std + (
                    init_perturb_std - final_perturb_std) * math.exp(
                        -1. * total_batch * perturb_decay_factor)
            elif perturb_decay_mode == 2:  # steps decay
                perturb_std = init_perturb_std * math.pow(
                    perturb_decay_factor, math.floor((1 + total_batch) / 4000))
            avg_batch_loss, _, _ = train_one_batch(D_model, batch, generator,
                                                   opt, perturb_std)
            avg_batch_loss.backward()
            # clip gradients after backward(), once they actually exist
            torch.nn.utils.clip_grad_norm_(D_model.parameters(), clip)

            D_optimizer.step()
            D_model.eval()

            if batch_i % 4000 == 0:
                total = 0
                valid_loss_total, valid_real_total, valid_fake_total = 0, 0, 0
                for batch_j, valid_batch in enumerate(valid_data_loader):
                    total += 1
                    valid_loss, valid_real, valid_fake = train_one_batch(
                        D_model, valid_batch, generator, opt, perturb_std)
                    valid_loss_total += valid_loss.cpu().detach().numpy()
                    valid_real_total += valid_real.cpu().detach().numpy()
                    valid_fake_total += valid_fake.cpu().detach().numpy()
                    D_optimizer.zero_grad()

                print("Currently loss is ", valid_loss_total.item() / total)
                print("Currently real loss is ",
                      valid_real_total.item() / total)
                print("Currently fake loss is ",
                      valid_fake_total.item() / total)

                if best_valid_loss > valid_loss_total.item() / total:
                    print(
                        "Loss Decreases so saving the file ...............----------->>>>>"
                    )
                    state_dfs = D_model.state_dict()
                    torch.save(
                        state_dfs,
                        "Discriminator_checkpts/Attention_Disriminator_" +
                        str(epoch) + ".pth.tar")
                    best_valid_loss = valid_loss_total.item() / total
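The three perturbation schedules above (constant, exponential decay towards final_perturb_std, and a stepped decay every 4000 batches) are easier to compare side by side. A small standalone sketch with made-up hyperparameter values:

import math

def perturb_std_at(step, mode, init_std=1.0, final_std=0.05, decay=1e-4):
    # mode 0: constant; mode 1: exponential decay; mode 2: step decay
    if mode == 0:
        return init_std
    if mode == 1:
        return final_std + (init_std - final_std) * math.exp(-step * decay)
    return init_std * math.pow(decay, math.floor((1 + step) / 4000))

# perturb_std_at(0, 1) == 1.0, decaying towards 0.05 as step grows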
Example n. 23
0
def train_model(model,
                optimizer_rl,
                train_data_loader,
                valid_data_loader,
                opt,
                lagrangian_params=None):
    total_batch = -1
    early_stop_flag = False

    report_train_reward_statistics = RewardStatistics()
    total_train_reward_statistics = RewardStatistics()
    report_train_reward = []
    report_valid_reward = []
    if opt.constrained_mdp:
        report_train_lagrangian_statistics = LagrangianStatistics()
        report_lagrangian_loss = []
        report_lagrangian_multipliers = []
        report_violate_amounts = []
        report_lagrangian_grad_norms = []
        lagrangian_model, optimizer_lagrangian = lagrangian_params
    best_valid_reward = float('-inf')
    num_stop_increasing = 0
    if opt.train_from:
        raise ValueError(
            "Loading from a trained model is not implemented")

    generator = SequenceGenerator(model,
                                  bos_idx=io.BOS,
                                  eos_idx=io.EOS,
                                  pad_idx=io.PAD,
                                  beam_size=1,
                                  max_sequence_length=opt.pred_max_len,
                                  cuda=opt.gpuid > -1,
                                  n_best=1)

    model.train()

    for epoch in range(opt.start_epoch, opt.epochs + 1):
        if early_stop_flag:
            break

        for batch_i, batch in enumerate(train_data_loader):
            total_batch += 1

            stat, log_selected_token_dist = train_one_batch(
                batch, generator, optimizer_rl, opt, lagrangian_params)
            if opt.constrained_mdp:
                batch_reward_stat, batch_lagrangian_stat = stat
            else:
                batch_reward_stat = stat

            report_train_reward_statistics.update(batch_reward_stat)
            total_train_reward_statistics.update(batch_reward_stat)
            if opt.constrained_mdp:
                report_train_lagrangian_statistics.update(
                    batch_lagrangian_stat)

            if total_batch % opt.checkpoint_interval == 0:
                print("Epoch %d; batch: %d; total batch: %d" %
                      (epoch, batch_i, total_batch))
                sys.stdout.flush()
            """
            if total_batch % 20 == 0:
                print("lagrangian loss: {:.5f}; grad_norm: {:.5f}; violate_amount: {:.5f}".format(report_train_lagrangian_statistics.loss(), report_train_lagrangian_statistics.grad_norm(), report_train_lagrangian_statistics.violate_amt()))
                print("lagrangian value: {}".format(lagrangian_model.get_lagrangian_multiplier_array()))
                report_train_lagrangian_statistics.clear()
                print("threshold: {}".format(lagrangian_model.cost_threshold.cpu().numpy()))
            """

            # Checkpoint: decay the learning rate if the validation reward stops
            # improving, and apply early stopping if it fails to improve for
            # several checkpoints. Save the model parameters when it improves.
            if epoch >= opt.start_checkpoint_at:
                if (opt.checkpoint_interval == -1 and batch_i == len(train_data_loader) - 1) or \
                        (opt.checkpoint_interval > -1 and total_batch > 1 and total_batch % opt.checkpoint_interval == 0):
                    print("Enter check point!")
                    sys.stdout.flush()
                    # log training reward and pg loss
                    current_train_reward = report_train_reward_statistics.reward()
                    current_train_pg_loss = report_train_reward_statistics.loss()
                    report_train_reward.append(current_train_reward)
                    # Run validation and log valid reward
                    valid_reward_stat = evaluate_reward(
                        valid_data_loader, generator, opt)
                    model.train()
                    current_valid_reward = valid_reward_stat.reward()
                    report_valid_reward.append(current_valid_reward)
                    # print out train and valid reward
                    logging.info(
                        'Epoch: %d; batch idx: %d; total batches: %d' %
                        (epoch, batch_i, total_batch))
                    logging.info(
                        'avg training reward: %.4f; avg training loss: %.4f; avg validation reward: %.4f; best validation reward: %.4f'
                        % (current_train_reward, current_train_pg_loss,
                           current_valid_reward, best_valid_reward))
                    # log lagrangian training loss and last lagrangian value
                    if opt.constrained_mdp:
                        current_lagrangian_loss = report_train_lagrangian_statistics.loss()
                        current_lagrangian_grad_norm = report_train_lagrangian_statistics.grad_norm()
                        current_violate_amount = report_train_lagrangian_statistics.violate_amt()
                        report_lagrangian_loss.append(current_lagrangian_loss)
                        report_violate_amounts.append(current_violate_amount)
                        report_lagrangian_grad_norms.append(current_lagrangian_grad_norm)
                        lagrangian_multipliers_array = lagrangian_model.get_lagrangian_multiplier_array()
                        report_lagrangian_multipliers.append(lagrangian_multipliers_array)
                        logging.info(
                            "Lagrangian_loss: %.5f; grad_norm: %.5f; violate_amount: %.5f"
                            % (current_lagrangian_loss,
                               current_lagrangian_grad_norm,
                               current_violate_amount))
                        logging.info(
                            "Value of lagrangian_multipliers: {}".format(
                                lagrangian_multipliers_array))

                    if epoch >= opt.start_decay_and_early_stop_at:
                        if current_valid_reward > best_valid_reward:  # update the best valid reward and save the model parameters
                            print("Valid reward increases")
                            sys.stdout.flush()
                            best_valid_reward = current_valid_reward
                            num_stop_increasing = 0

                            check_pt_model_path = os.path.join(
                                opt.model_path, 'ckpt',
                                '%s-epoch-%d-total_batch-%d-valid_reward-%.3f'
                                % (opt.exp, epoch, total_batch,
                                   current_valid_reward))
                            torch.save(  # save model parameters
                                model.state_dict(),
                                open(check_pt_model_path, 'wb'))
                            logging.info('Saving checkpoint to %s' %
                                         check_pt_model_path)
                        else:
                            print("Valid reward does not increase")
                            sys.stdout.flush()
                            num_stop_increasing += 1
                            # decay the learning rate by the factor specified by opt.learning_rate_decay
                            decay_learning_rate(optimizer_rl,
                                                opt.learning_rate_decay,
                                                opt.min_lr)

                        # decay the learning rate for lagrangian multiplier
                        if opt.constrained_mdp and opt.decay_multiplier_learning_rate:
                            logging.info(
                                "Decay learning rate of lagrangian multiplier...."
                            )
                            decay_learning_rate(optimizer_lagrangian, 0.5,
                                                1e-8)

                        if not opt.disable_early_stop:
                            if num_stop_increasing >= opt.early_stop_tolerance:
                                logging.info(
                                    'Have not increased for %d check points, early stop training'
                                    % num_stop_increasing)
                                early_stop_flag = True
                                break

                    report_train_reward_statistics.clear()
                    if opt.constrained_mdp:
                        report_train_lagrangian_statistics.clear()

    # export the training curve
    train_valid_curve_path = opt.exp_path + '/train_valid_curve'
    export_train_and_valid_reward(report_train_reward, report_valid_reward,
                                  opt.checkpoint_interval,
                                  train_valid_curve_path)
    if opt.constrained_mdp:
        export_lagrangian_stats(report_lagrangian_loss,
                                report_lagrangian_multipliers,
                                report_lagrangian_grad_norms,
                                report_violate_amounts,
                                opt.checkpoint_interval, opt.exp_path)

    # Only keep the highest three checkpoints
    remove_old_ckpts(opt.model_path, reverse=True)
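remove_old_ckpts is not shown above; given that each checkpoint name ends in valid_reward-%.3f and the call passes reverse=True, a minimal sketch of what it presumably does (an assumption, not the project's actual implementation):

import os
import re

def remove_old_ckpts(model_path, reverse=True, keep=3):
    # sort checkpoint files by the reward encoded in their names; with
    # reverse=True the highest rewards come first, and the rest are deleted
    ckpt_dir = os.path.join(model_path, 'ckpt')

    def reward_of(name):
        m = re.search(r'valid_reward-(-?\d+\.\d+)$', name)
        return float(m.group(1)) if m else float('-inf')

    ckpts = sorted(os.listdir(ckpt_dir), key=reward_of, reverse=reverse)
    for name in ckpts[keep:]:
        os.remove(os.path.join(ckpt_dir, name))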
Example n. 24
0
def predict_seq2seq(opt, model=None):
    print(
        '\n==================================Predict Seq2seq==================================='
    )
    opt.pred_path = 'pred/' + '_'.join(opt.model_path.split('/')[-2:]).replace(
        '.ckpt', '_seq2seq.txt')
    opt.test_src_fn = '../data/{}/test_src.txt'.format(opt.data_tag)

    # Load the vocabularies
    vocab_path = os.path.join(opt.data_path, 'vocab.pt')
    vocab, trg_class_vocab = torch.load(vocab_path)
    opt.pad_idx = vocab('<pad>')
    opt.bos_idx = vocab('<bos>')
    opt.eos_idx = vocab('<eos>')
    opt.unk_idx = vocab('<unk>')
    opt.vocab_size = len(vocab)
    opt.idx2word = vocab.idx2word
    opt.trg_class_vocab_size = len(trg_class_vocab)
    print(
        '\nLoad vocab from %s: token vocab size: %d, trg label vocab size: %d'
        % (vocab_path, opt.vocab_size, opt.trg_class_vocab_size))

    # default parameters:
    if model is not None:
        opt.max_length = 6
        opt.beam_size = 10
        opt.n_best = 5
        opt.replace_unk = True
    else:
        opt.combine_pred = 'combine' in opt.model_path
        opt.vocab = vocab
        opt.trg_class_vocab = trg_class_vocab

    # Create data loader
    opt.is_test = True
    opt.only_classifier = False
    opt.debug = 'debug' in opt.model_path
    test_data_loader = prepare_data_loader('test',
                                           vocab,
                                           trg_class_vocab,
                                           opt,
                                           is_shuffle=False)

    # Restore the models
    if model is None:
        model = MultimodalMixture(opt).to(opt.device)
        model.load_state_dict(
            torch.load(opt.model_path,
                       map_location=lambda storage, loc: storage))

    model.eval()
    # Construct the sequence generator based on the pretrained models
    generator = SequenceGenerator(model,
                                  beam_size=opt.beam_size,
                                  max_sequence_length=opt.max_length,
                                  copy_attn=opt.copy_attn,
                                  cuda=torch.cuda.is_available(),
                                  n_best=opt.n_best)

    # run beam search to obtain a ranked list of predictions
    run_beam_search(generator, test_data_loader, opt)

    # Evaluate the predictions
    try:
        return pred_evaluate(opt.pred_path, opt.test_src_fn, opt.res_fn)
    except ZeroDivisionError:
        print('ZeroDivisionError due to poor performance')
        return [0]
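The vocab object loaded above is used both as a callable (vocab('<pad>') returns a token's index) and with len(); a minimal sketch of a wrapper with that interface (an assumption about the class pickled in vocab.pt, which is not shown):

class Vocab:
    def __init__(self, word2idx, idx2word):
        self.word2idx = word2idx
        self.idx2word = idx2word

    def __call__(self, word):
        # unknown words fall back to the <unk> index
        return self.word2idx.get(word, self.word2idx['<unk>'])

    def __len__(self):
        return len(self.word2idx)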
Example n. 25
0
import math
import time
from tkinter import Tk, Canvas

# SEQUENCE_DURATION_MS and SEQUENCE (the servo sweep definition) are assumed to
# be defined earlier in the original script; they are not shown in this excerpt.
gui = Tk()

c = Canvas(gui, width=800, height=800)
c.pack()

start_point_x = 400
start_point_y = 700
line_length = 300
line = c.create_line(
  start_point_x,
  start_point_y,
  start_point_x,
  start_point_y - line_length,
  fill="black")

gui.title("Servo Waver")

sg = SequenceGenerator(SEQUENCE_DURATION_MS, SEQUENCE)

start_time = time.time()

def convert_rad_to_x_y(degrees, length):
  # despite the name, the input is in degrees; math.radians converts it
  x = math.sin(math.radians(degrees)) * length
  y = math.cos(math.radians(degrees)) * length
  return x, y

while True:
  elapsed_time = (time.time() - start_time) * 1000
  angle = sg.position(elapsed_time)
  x, y = convert_rad_to_x_y(angle, line_length)
  c.coords(
    line,
    start_point_x,
    start_point_y,
    # completion inferred (the source excerpt is cut off here): move the line's
    # free end to the new angle and refresh the window
    start_point_x + x,
    start_point_y - y)
  gui.update()
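The SequenceGenerator in this example maps elapsed milliseconds to a servo angle. A minimal sketch of such a class, assuming SEQUENCE is a list of (time_ms, angle) keyframes interpolated linearly and looped every SEQUENCE_DURATION_MS (the real class is not shown):

class SequenceGenerator:
    def __init__(self, duration_ms, keyframes):
        self.duration_ms = duration_ms
        self.keyframes = keyframes  # [(time_ms, angle), ...] sorted by time

    def position(self, elapsed_ms):
        t = elapsed_ms % self.duration_ms  # loop the sequence
        for (t0, a0), (t1, a1) in zip(self.keyframes, self.keyframes[1:]):
            if t0 <= t <= t1:
                # linear interpolation between the surrounding keyframes
                return a0 + (a1 - a0) * (t - t0) / (t1 - t0)
        return self.keyframes[-1][1]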
Example n. 26
0
import os
from prefix_span import PrefixSpan
from sequence_generator import SequenceGenerator

module_path = os.path.abspath('/src')

if __name__ == '__main__':
    seq = SequenceGenerator(csvfile='~/db/csvfile.csv', jsThreshold=0.01)
    data = seq.generate_sequence()
    print(len(data))

    prex = PrefixSpan()
    result_df = prex.prefix_span_display(dataset=data, minSupport=5)
    print(result_df)
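PrefixSpan mines frequent subsequences, so with minSupport=5 only patterns occurring in at least five of the generated sequences are reported. A toy illustration of the expected input/output shape (assumed format: a list of item sequences, reusing prex from above):

toy_data = [['a', 'b', 'c'],
            ['a', 'c'],
            ['a', 'b', 'd']]
# With minSupport=2, patterns such as ('a',), ('b',), ('c',) and ('a', 'b')
# qualify, since each occurs as a subsequence of at least two sequences.
toy_result = prex.prefix_span_display(dataset=toy_data, minSupport=2)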
Example n. 27
0
def main(args):

    use_cuda = (len(args.gpuid) >= 1)
    if args.gpuid:
        cuda.set_device(args.gpuid[0])

    # Load the dataset; it is needed on CPU as well, so it lives outside the
    # GPU branch (raw text when unknown-word replacement is requested, the
    # binarized dataset otherwise)
    if args.replace_unk is None:
        dataset = data.load_dataset(
            args.data,
            ['test'],
            args.src_lang,
            args.trg_lang,
        )
    else:
        dataset = data.load_raw_text_dataset(
            args.data,
            ['test'],
            args.src_lang,
            args.trg_lang,
        )

    if args.src_lang is None or args.trg_lang is None:
        # record inferred languages in args, so that it's saved in checkpoints
        args.src_lang, args.trg_lang = dataset.src, dataset.dst

    print('| [{}] dictionary: {} types'.format(dataset.src,
                                               len(dataset.src_dict)))
    print('| [{}] dictionary: {} types'.format(dataset.dst,
                                               len(dataset.dst_dict)))
    print('| {} {} {} examples'.format(args.data, 'test',
                                       len(dataset.splits['test'])))

    # Set model parameters
    args.encoder_embed_dim = 1000
    args.encoder_layers = 2
    args.encoder_dropout_out = 0
    args.decoder_embed_dim = 1000
    args.decoder_layers = 2
    args.decoder_out_embed_dim = 1000
    args.decoder_dropout_out = 0
    args.bidirectional = False

    # Load model
    g_model_path = args.model_dir  #'checkpoints/generator/numupdate2.997465464368014.data.nll_270000.0.pt'
    assert os.path.exists(g_model_path)
    generator = LSTMModel(args,
                          dataset.src_dict,
                          dataset.dst_dict,
                          use_cuda=use_cuda)
    model_dict = generator.state_dict()
    model = torch.load(g_model_path)
    pretrained_dict = model.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    generator.load_state_dict(model_dict)
    generator.eval()

    print("Generator loaded successfully!")

    if use_cuda:
        generator.cuda()
    else:
        generator.cpu()

    max_positions = generator.encoder.max_positions()

    testloader = dataset.eval_dataloader(
        'test',
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test,
    )

    translator = SequenceGenerator(generator,
                                   beam_size=args.beam,
                                   stop_early=(not args.no_early_stop),
                                   normalize_scores=(not args.unnormalized),
                                   len_penalty=args.lenpen,
                                   unk_penalty=args.unkpen)

    if use_cuda:
        translator.cuda()

    with open('predictions.txt', 'wb') as translation_writer:
        with open('real.txt', 'wb') as ground_truth_writer:
            with open('src.txt', 'wb') as src_writer:

                translations = translator.generate_batched_itr(
                    testloader,
                    maxlen_a=args.max_len_a,
                    maxlen_b=args.max_len_b,
                    cuda=use_cuda)

                for sample_id, src_tokens, target_tokens, hypos in translations:
                    # Process input and ground truth
                    target_tokens = target_tokens.int().cpu()
                    src_str = dataset.src_dict.string(src_tokens,
                                                      args.remove_bpe)
                    target_str = dataset.dst_dict.string(target_tokens,
                                                         args.remove_bpe,
                                                         escape_unk=True)

                    # Process top predictions
                    for i, hypo in enumerate(
                            hypos[:min(len(hypos), args.nbest)]):
                        hypo_tokens = hypo['tokens'].int().cpu()
                        hypo_str = dataset.dst_dict.string(
                            hypo_tokens, args.remove_bpe)

                        hypo_str += '\n'
                        target_str += '\n'
                        src_str += '\n'

                        translation_writer.write(hypo_str.encode('utf-8'))
                        ground_truth_writer.write(target_str.encode('utf-8'))
                        src_writer.write(src_str.encode('utf-8'))
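The three-step partial load above (filter the pretrained state dict, update the fresh one, load it back) is a reusable pattern; a minimal standalone sketch (hypothetical helper, not part of the example):

def load_matching_weights(model, pretrained_state):
    # keep only parameters whose names and shapes exist in the target model
    model_state = model.state_dict()
    matched = {k: v for k, v in pretrained_state.items()
               if k in model_state and v.shape == model_state[k].shape}
    model_state.update(matched)
    model.load_state_dict(model_state)
    return matched.keys()  # handy for logging what was actually restored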
Example n. 28
0
def main(args):
    if os.path.exists(args.corpus[:-4] + '_processed.txt'):
        raw_text = load_data(args.corpus[:-4] + '_processed.txt',
                             processed=True)
    else:
        raw_text = load_data(args.corpus)
        # raw_text = text_cleaner(raw_text)
        with open(args.corpus[:-4] + '_processed.txt', 'w',
                  encoding='utf8') as f:
            f.write(raw_text)

    if os.path.exists('generic_mapping.pkl'):
        mapping = pickle.load(open('generic_mapping.pkl', 'rb'))
    else:
        if args.low_ram:
            chars = sorted(list(set(raw_text)))
            mapping = dict((c, i) for i, c in enumerate(chars))

            # save the mapping
            pickle.dump(mapping, open('generic_mapping.pkl', 'wb'))
        else:
            mapping = generate_mapping(raw_text)

    ########################################
    vocab_size = len(mapping)
    generic_lm = GenericLM(vocab_size,
                           mapping,
                           seq_length=args.seq_length,
                           multi_gpu=args.multi_gpu,
                           batch_size=args.batch_size,
                           ckpt_path=args.ckpt_path,
                           model_path=args.model_path,
                           mode_name=args.mode)
    ########################################

    if args.low_ram:
        if args.mode == 'right2left':
            raw_text = raw_text[::-1]

        model = generic_lm.get_model()
        continue_epoch = generic_lm.get_continue_epoch()

        optimizer = Adam(lr=5e-4, decay=5e-6)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

        # checkpoint = ModelCheckpoint(os.path.join(args.ckpt_path, 'GenericLM_{epoch:03d}.h5'), period=args.ckpt_period)
        early_stop = EarlyStopping(monitor='loss', patience=12)
        save_model = SaveModel(ckpt_path=args.ckpt_path,
                               model_path=args.model_path,
                               mode_name=args.mode,
                               ckpt_period=args.ckpt_period)
        sequence_generator = SequenceGenerator(raw_text,
                                               args.seq_length,
                                               mapping,
                                               batch_size=args.batch_size)

        model.fit_generator(generator=sequence_generator,
                            epochs=args.epochs + continue_epoch,
                            initial_epoch=continue_epoch,
                            callbacks=[save_model, early_stop])

        model.save(
            os.path.join(args.model_path, 'GenericLM_%s.model' % args.mode))

    else:
        if args.mode == 'right2left':
            raw_text = raw_text[::-1]

        generic_lm.fit(raw_text,
                       epochs=args.epochs,
                       ckpt_period=args.ckpt_period)
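Here SequenceGenerator feeds fit_generator, so it presumably implements the keras Sequence batch interface over sliding character windows. A minimal sketch under that assumption (the real class is not shown; CharSequence is a hypothetical stand-in):

from tensorflow.keras.utils import Sequence, to_categorical

class CharSequence(Sequence):
    def __init__(self, text, seq_length, mapping, batch_size=128):
        self.ids = [mapping[c] for c in text]
        self.seq_length = seq_length
        self.vocab_size = len(mapping)
        self.batch_size = batch_size

    def __len__(self):
        # number of full batches of sliding windows per epoch
        return (len(self.ids) - self.seq_length) // self.batch_size

    def __getitem__(self, idx):
        start = idx * self.batch_size
        X, y = [], []
        for i in range(start, start + self.batch_size):
            window = self.ids[i:i + self.seq_length + 1]
            X.append(window[:-1])  # seq_length input characters
            y.append(window[-1])   # next character as the target
        return (to_categorical(X, num_classes=self.vocab_size),
                to_categorical(y, num_classes=self.vocab_size))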