示例#1
0
    def beam_decode(self, batch, max_len, oov_nums):
        """Beam-search decode one batch with the transformer decoder.

        :param batch: dict with 'src', 'src_mask' and 'src_extended'
            tensors; 'src_extended' presumably carries copy-mechanism ids
            -- TODO confirm against the data pipeline.
        :param max_len: maximum number of decoding time steps.
        :param oov_nums: out-of-vocabulary token count passed to the first
            decode call (copy mechanism).
        :return: single-element list holding ``beam.seq[0]``.
        """

        bos_token = self.data_utils.bos
        beam_size = self.args.beam_size
        vocab_size = self.data_utils.vocab_size

        src = batch['src'].long()
        src_mask = batch['src_mask']
        src_extended = batch['src_extended'].long()
        memory = self.model.encode(src, src_mask)
        batch_size = src.size(0)

        beam = Beam(self.data_utils.pad, bos_token, self.data_utils.eos,
                    beam_size, batch_size, self.args.n_best, True, max_len)

        # first decoding step: feed only the BOS token per batch element
        ys = torch.full((batch_size, 1), bos_token).type_as(src.data).cuda()
        log_prob = self.model.decode(
            memory, src_mask, Variable(ys),
            Variable(
                subsequent_mask(ys.size(1)).type_as(src.data).expand(
                    (ys.size(0), ys.size(1), ys.size(1)))), src_extended,
            oov_nums)

        # log_prob = [batch_size, 1, voc_size]
        # seed each beam with the top-k first tokens
        top_prob, top_indices = torch.topk(input=log_prob, k=beam_size, dim=-1)
        # print(top_indices)
        top_prob = top_prob.view(-1, 1)
        top_indices = top_indices.view(-1, 1)
        beam.update_prob(top_prob.detach().cpu(), top_indices.detach().cpu())
        # [batch_size, 1, beam_size]
        ys = top_indices
        top_indices = None
        # print(ys.size())
        ####### repeat var #######
        # expand encoder-side tensors so every beam hypothesis owns a row
        src = torch.repeat_interleave(src, beam_size, dim=0)
        src_mask = torch.repeat_interleave(src_mask, beam_size, dim=0)
        #[batch_size, src_len, d_model] -> [batch_size*beam_size, src_len, d_model]
        memory = torch.repeat_interleave(memory, beam_size, dim=0)
        # print('max_len', max_len)
        for t in range(1, max_len):
            # NOTE(review): this call passes `src` where the first step
            # passed `src_extended` plus `oov_nums` -- confirm intended.
            log_prob = self.model.decode(
                memory, src_mask, Variable(ys),
                Variable(
                    subsequent_mask(ys.size(1)).type_as(src.data).expand(
                        (ys.size(0), ys.size(1), ys.size(1)))), src)
            # print('log_prob', log_prob.size())
            # keep only the distribution of the newest position
            log_prob = log_prob[:, -1].unsqueeze(1)
            # print(beam.seq)
            real_top = beam.advance(log_prob.detach().cpu())
            # print(real_top.size())
            # print(ys.size())
            # print(real_top.size())
            ys = torch.cat((ys, real_top.view(-1, 1).cuda()), dim=-1)
            # print(ys.size())

        # print(ys.size())
        # print(beam.top_prob)
        # print(len(beam.seq))

        return [beam.seq[0]]
示例#2
0
 def _extend_beam_for_test(self, beam):
     """Expand every state in *beam* by one parsing action (test time)."""
     expanded = Beam(self.model.beam_size)
     for state in beam:
         pending, base_score, base_feats, deps, _ = self._extract_state(
             state)
         # every adjacent pair of pending tokens is a candidate attachment
         for idx, (left_tok, right_tok) in enumerate(
                 zip(pending, pending[1:])):
             local_feats = self.model.featex(pending, deps, idx)
             action_scores = self.model.get_score(local_feats)
             combined_feats = base_feats + local_feats
             for action_id, action_score in enumerate(action_scores):
                 arc = self._get_action(action_id, left_tok, right_tok)
                 next_pending, next_deps = self._apply_action(arc, state)
                 # the seed state carries -inf, which stands for "zero"
                 total = (action_score if base_score == float('-inf')
                          else base_score + action_score)
                 expanded.add(self._get_state(next_pending, combined_feats,
                                              total, action_id, next_deps))
     return expanded
示例#3
0
    def extend_beam_for_parse(self, beam):
        """Grow every beam state by one attachment action; return the new beam."""
        next_beam = Beam(beam_size=self.model.beam_size)
        for state in beam:
            pending = state['pending']
            base_features = state['features']
            base_score = state['score']
            arcs = state['arcs']
            for point in xrange(len(pending) - 1):
                # each attachment point spawns one candidate per action class
                local_features = features_extract(pending, arcs, point)
                scores = self.model.get_scores(local_features)
                merged_features = base_features + local_features
                for cls_id, score in scores.iteritems():
                    action = get_action(point, cls_id, self.model.label_hash)
                    new_pending, new_arcs = apply_action(pending, arcs, action)
                    # the seed state's -inf score stands for "score zero"
                    if base_score == float('-inf'):
                        total = score
                    else:
                        total = base_score + score
                    next_beam.add(get_state(new_pending, merged_features,
                                            total, cls_id, new_arcs))
        return next_beam
示例#4
0
 def parse(self, sent):
     """Parse *sent* with beam search and return its dependency arcs."""
     tokens = [ROOT] + sent
     beam = Beam(self.model.beam_size)
     beam.add(get_state(tokens))
     # one attachment per step until a single tree remains
     for _ in xrange(len(tokens) - 1):
         beam = self.extend_beam_for_parse(beam)
     return beam.top()['arcs']
示例#5
0
    def test_beam_init(self):
        """A beam from (0,0,0) to (10,10,0) has length sqrt(200)."""
        start = Node(0., 0., 0.)
        end = Node(10., 10., 0.)

        beam = Beam(start, end)
        beam.properties['Area'] = 0.1 * 0.1

        expected_length = sqrt(200.)
        self.assertAlmostEqual(beam.length(), expected_length)
        # volume = cross-section area * length
        self.assertAlmostEqual(beam.volume(), 0.01 * expected_length)
        self.assertTrue(beam.sizeOfEM() == 12)
示例#6
0
 def _extend_beam(self, beam, oracle):
     """Expand every state in *beam* by one parsing action (training time).

     :param beam: current Beam of parser states.
     :param oracle: Oracle used to check whether an arc is gold-consistent.
     :return: ``(new_beam, best_valid_state)`` where the second element is
         the top of a width-1 beam of oracle-consistent successors.
     """
     new_beam = Beam(self.model.beam_size)
     valid_action = Beam(beam_size=1)
     for state in beam:
         pending, prev_score, prev_feats, deps, stt = self._extract_state(
             state)
         for i, (tok1, tok2) in enumerate(zip(pending, pending[1:])):
             lc_feats = self.model.featex(pending, deps, i)
             scores = self.model.get_score(lc_feats)
             go_feats = prev_feats + lc_feats
             for clas, score in scores.iteritems():
                 arc = self._get_action(clas, tok1, tok2)
                 # stt means every action taken so far was valid; only then
                 # can the new arc still be oracle-consistent.
                 # BUG FIX: `is_valid` was unbound (NameError) or stale from
                 # a previous iteration when `stt` was falsy; default False.
                 is_valid = False
                 if stt:
                     is_valid = self._check_valid(arc, deps, oracle)
                 n_pending, n_deps = self._apply_action(arc, state)
                 # the seed state's -inf score stands for "score zero"
                 if prev_score == float('-inf'):
                     n_score = score
                 else:
                     n_score = prev_score + score
                 new_state = self._get_state(n_pending, go_feats, n_score,
                                             clas, n_deps, is_valid)
                 new_beam.add(new_state)
                 if is_valid:
                     valid_action.add(new_state)
     return new_beam, valid_action.top()
示例#7
0
def decode(model, opts, test_batcher, i2c, i2p):
    """Beam-decode every test example and write results to disk.

    Each output line is ``raw_input<TAB>prediction<TAB>pos_tags``.  After
    decoding, the external evaluation script is invoked on the result file.

    :param model: seq2seq model providing ``encode_once``.
    :param opts: options namespace (cuda flag, paths, beam size, ...).
    :param test_batcher: iterator yielding test batches via ``next()``.
    :param i2c: index-to-character mapping.
    :param i2p: index-to-POS-tag mapping.
    """

    logging.basicConfig(filename=opts.log, level=logging.INFO)

    if opts.use_cuda:
        model.cuda()
        torch.cuda.manual_seed(opts.seed)

    result_writer = codecs.open(opts.result_file, 'w', 'utf-8')

    progress = trange(opts.data_size, desc='DECODE')
    # FIX: loop variable was named `iter`, shadowing the builtin.
    for _ in progress:
        src, target, pos, target_length, input_length = test_batcher.next()
        input_tensor = Variable(torch.LongTensor(src))
        target_tensor = Variable(torch.LongTensor(target))
        pos_tensor = Variable(torch.LongTensor(pos))
        if opts.use_cuda:
            input_tensor = input_tensor.cuda()
            target_tensor = target_tensor.cuda()
            pos_tensor = pos_tensor.cuda()

        encoder_state, encoder_output, pos_feature, _ = model.encode_once(
            input_tensor, pos_tensor)
        # token id 2 is presumably the start-of-sequence symbol -- confirm
        start_decode = Variable(torch.LongTensor([2])).cuda().unsqueeze(1)

        beam = Beam(model, opts.beam_size, opts.max_target_len, encoder_state,
                    encoder_output, pos_feature, start_decode, src)
        hyper = beam.run()

        # build the output strings with str.join instead of quadratic `+=`
        result = ''.join(i2c[word] for word in hyper.word_list)
        raw_input_str = ''.join(i2c[word] for word in src[0])
        pos_tags = []
        for word in pos[0]:
            if word == 0:  # 0 pads the POS sequence
                break
            pos_tags.append(i2p[word])
        raw_pos = ';'.join(pos_tags)
        result_writer.write(raw_input_str + '\t' + result + '\t' + raw_pos + '\n')

    result_writer.close()
    os.system(('../evaluation/evalm.py --gold %s --guess %s --task 1') %
              (opts.test, opts.result_file))
    log_result('temp.txt', opts)
示例#8
0
文件: PyEPIC.py 项目: YueHao/PyEPIC
class PyEpic(object):
    """Container for the two colliding beams of an EPIC simulation."""

    def __init__(self, inputfile=''):
        """Create both beams; optionally load settings from *inputfile*."""
        self.beam1 = Beam()
        self.beam2 = Beam()
        # BUG FIX: `inputfile is not ''` compared object identity with a
        # literal (a SyntaxWarning on modern CPython and unreliable);
        # test truthiness instead.
        if inputfile:
            self.readfrom(inputfile)

    def readfrom(self, inputfile):
        """Parse *inputfile* and apply its per-beam config sections."""
        glbdict, b1dict, b2dict = epicIO.parse_file(inputfile)
        self.beam1.read_input(b1dict)
        self.beam2.read_input(b2dict)
示例#9
0
    def decode_beam(self,
                    beam_size,
                    batch_size,
                    encoder_states,
                    block_ngram=0,
                    expand_beam=1):
        """Batch beam-search decode.

        :param beam_size: hypotheses kept per batch element.
        :param batch_size: number of examples in the batch.
        :param encoder_states: encoder output; element [1] seeds the
            decoder's incremental state.
        :param block_ngram: if > 0, Beam blocks repeated n-grams.
        :param expand_beam: candidate-expansion factor passed to Beam.
        :return: (list of [prediction, score] pairs, list of Beam objects).
        """
        dev = self.opts['device']
        # one Beam per batch element
        # NOTE(review): device='cuda' is hard-coded while everything else
        # uses `dev` -- confirm this is intended.
        beams = [
            Beam(beam_size,
                 device='cuda',
                 block_ngram=block_ngram,
                 expand_beam=expand_beam) for _ in range(batch_size)
        ]
        decoder_input = self.sos_buffer.expand(batch_size * beam_size,
                                               1).to(dev)
        # row indices that tile each example beam_size times
        inds = torch.arange(batch_size).to(dev).unsqueeze(1).repeat(
            1, beam_size).view(-1)

        encoder_states = self.reorder_encoder_states(
            encoder_states, inds)  # not reordering but expanding
        incr_state = encoder_states[1]

        for ts in range(self.longest_label):
            # stop as soon as every beam has finished
            if all((b.done() for b in beams)):
                break
            score, incr_state, attn_w_log = self.decoder(
                decoder_input, incr_state, encoder_states)
            # keep only the newest position, then normalize per beam
            score = score[:, -1:, :]
            score = score.view(batch_size, beam_size, -1)
            score = F.log_softmax(score, dim=-1)

            for i, b in enumerate(beams):
                if not b.done():
                    b.advance(score[i])

            # reorder the incremental state to follow surviving hypotheses
            incr_state_inds = torch.cat([
                beam_size * i + b.get_backtrack_from_current_step()
                for i, b in enumerate(beams)
            ])
            incr_state = self.reorder_decoder_incremental_state(
                incr_state, incr_state_inds)
            selection = torch.cat([
                b.get_output_from_current_step() for b in beams
            ]).unsqueeze(-1)
            decoder_input = selection

        for b in beams:
            b.check_finished()

        beam_preds_scores = [list(b.get_top_hyp()) for b in beams]
        for pair in beam_preds_scores:
            pair[0] = Beam.get_pretty_hypothesis(pair[0])

        return beam_preds_scores, beams
示例#10
0
    def test_beam_transform(self):
        """calcT maps the beam endpoints onto the local x-axis."""
        origin = Node(0., 0., 0.)
        tip = Node(10., 10., 0.)
        beam = Beam(origin, tip)

        transform = beam.calcT()

        # the start node maps to the local origin
        self.almostEqual(np.dot(transform, (0., 0., 0.)), (0., 0., 0.))
        # the end node lands at (length, 0, 0) in local coordinates
        self.almostEqual(np.dot(transform, (10., 10., 0.)),
                         (sqrt(200.), 0., 0.))
示例#11
0
    def test_beam_transform(self):
        """The transformation matrix rotates global points into beam-local axes."""
        node_a = Node(0., 0., 0.)
        node_b = Node(10., 10., 0.)
        member = Beam(node_a, node_b)

        T = member.calcT()

        local_start = np.dot(T, (0., 0., 0.))
        self.almostEqual(local_start, (0., 0., 0.))

        # beam length is sqrt(10^2 + 10^2) = sqrt(200)
        local_end = np.dot(T, (10., 10., 0.))
        self.almostEqual(local_end, (sqrt(200.), 0., 0.))
示例#12
0
文件: ocr.py 项目: hedinang/ocr
 def translate_beam_search(self, img):
     """Beam-search OCR decoding of one image; returns the decoded string."""
     with torch.no_grad():
         memory = self.transformer(img)
         # beam of width 2; token ids 1/2 are start/end markers
         beam = Beam(beam_size=2,
                     min_length=0,
                     n_top=1,
                     ranker=None,
                     start_token_id=1,
                     end_token_id=2)
         for _ in range(128):  # hard cap on output length
             tgt_inp = beam.get_current_state().transpose(0, 1).to(
                 self.device)  # TxN
             decoder_outputs = self.transformer.transformer.forward_decoder(
                 tgt_inp, memory)
             # log-distribution over the last predicted position only
             log_prob = log_softmax(decoder_outputs[:, -1, :].squeeze(0),
                                    dim=-1)
             beam.advance(log_prob.cpu())
             if beam.done():
                 break
         scores, ks = beam.sort_finished(minimum=1)
         hypothesises = []
         for times, k in ks:
             hypothesis = beam.get_hypothesis(times, k)
             hypothesises.append(hypothesis)
         # prepend start token, drop the final token of the best hypothesis
         encode = [1] + [int(i) for i in hypothesises[0][:-1]]
         return self.vocab.decode(encode)
示例#13
0
    def predict_one(self, source, num_candidates=5):
        """Beam-search translate a single source sequence.

        :param source: raw input accepted by ``self.preprocess``.
        :param num_candidates: number of hypotheses to keep and return.
        :return: list of postprocessed hypotheses, best-first (the reversed
            order of ``sort_finished``'s output).
        """
        source_preprocessed = self.preprocess(source)
        # add a batch dimension of size 1
        source_tensor = torch.tensor(source_preprocessed).unsqueeze(
            0)  # why unsqueeze?
        length_tensor = torch.tensor(len(source_preprocessed)).unsqueeze(0)

        sources_mask = pad_masking(source_tensor, source_tensor.size(1))
        memory_mask = pad_masking(source_tensor, 1)
        memory = self.model.encoder(source_tensor, sources_mask)

        decoder_state = self.model.decoder.init_decoder_state()
        # print('decoder_state src', decoder_state.src.shape)
        # print('previous_input previous_input', decoder_state.previous_input)
        # print('previous_input previous_layer_inputs ', decoder_state.previous_layer_inputs)

        # Repeat beam_size times
        memory_beam = memory.detach().repeat(
            self.beam_size, 1, 1)  # (beam_size, seq_len, hidden_size)

        beam = Beam(beam_size=self.beam_size,
                    min_length=0,
                    n_top=num_candidates,
                    ranker=None)

        for _ in range(self.max_length):

            new_inputs = beam.get_current_state().unsqueeze(
                1)  # (beam_size, seq_len=1)
            decoder_outputs, decoder_state = self.model.decoder(
                new_inputs, memory_beam, memory_mask, state=decoder_state)
            # decoder_outputs: (beam_size, target_seq_len=1, vocabulary_size)
            # attentions['std']: (target_seq_len=1, beam_size, source_seq_len)

            # pull the attention of the last decoder layer for bookkeeping
            attention = self.model.decoder.decoder_layers[
                -1].memory_attention_layer.sublayer.attention
            beam.advance(decoder_outputs.squeeze(1), attention)

            # reorder the decoder state to follow surviving hypotheses
            beam_current_origin = beam.get_current_origin()  # (beam_size, )
            decoder_state.beam_update(beam_current_origin)

            if beam.done():
                break

        scores, ks = beam.sort_finished(minimum=num_candidates)
        hypothesises, attentions = [], []
        for i, (times, k) in enumerate(ks[:num_candidates]):
            hypothesis, attention = beam.get_hypothesis(times, k)
            hypothesises.append(hypothesis)
            attentions.append(attention)

        # stash raw results on the instance for later inspection
        self.attentions = attentions
        self.hypothesises = [[token.item() for token in h]
                             for h in hypothesises]
        hs = [self.postprocess(h) for h in self.hypothesises]
        return list(reversed(hs))
示例#14
0
def fire_random_beam(ai_settings, screen, aliens, beams):
    """Fire a beam from a randomly chosen alien, capped at beams_allowed."""
    shooter = random.choice(aliens.sprites())
    if len(beams) < ai_settings.beams_allowed:
        beam = Beam(ai_settings, screen, shooter)
        ai_settings.alien_channel.play(ai_settings.alien_fire_sound)
        beams.add(beam)
示例#15
0
    def configure(self):
        """
        Configure the beams from the config source. This is done whenever
        an instrument is instantiated.
        :raises RuntimeError: if beams were already configured.
        :raises ValueError: if two config sections name the same beam.
        :return:
        """
        if self.beams:
            raise RuntimeError("Beamformer ops already configured.")

        # get beam names from config
        beam_names = []
        self.beams = {}
        for k in self.corr.configd:
            if k.startswith("beam"):
                # BUG FIX: strip before the duplicate check -- previously
                # the check used the raw name while the list stored
                # stripped names, so names differing only in surrounding
                # whitespace slipped past the duplicate detection.
                bmnm = self.corr.configd[k]["output_products"].strip()
                if bmnm in beam_names:
                    raise ValueError(
                        "Cannot have more than one beam with " "the name %s. Please check the " "config file." % bmnm
                    )
                newbeam = Beam.from_config(k, self.hosts, self.corr.configd, self.corr.speadops)
                self.beams[newbeam.name] = newbeam
                beam_names.append(bmnm)
        self.logger.info("Found beams: %s" % beam_names)

        # configure the beams
        for beam in self.beams.values():
            beam.configure()

        # add the beam data streams to the instrument list
        for beam in self.beams.values():
            self.corr.register_data_stream(beam.data_stream)
示例#16
0
    def eval_step(self, batch, decoding_strategy='score', dump=False):
        """Evaluate one batch under the requested decoding strategy.

        :param batch: object with text/label vectors, lengths and a
            use_packed flag.
        :param decoding_strategy: 'score' (loss only), 'greedy', or a
            'beam:<size>' string.
        :param dump: for greedy decoding, also return attention logs.
        :return: depends on strategy -- None for 'score', (pred, score)
            tuples for 'greedy' (plus dump list when requested), or beam
            results for 'beam:<k>'.
        """
        xs, ys, use_packed = batch.text_vecs, batch.label_vecs, batch.use_packed
        xs_lens, ys_lens = batch.text_lens, batch.label_lens

        self.eval_mode()
        encoder_states = self.encoder(xs, xs_lens, use_packed=use_packed)

        if decoding_strategy == 'score':
            assert ys is not None
            _ = self.compute_loss(encoder_states, xs_lens, ys)

        if decoding_strategy == 'greedy':
            scores, preds, attn_w_log = self.decode_greedy(
                encoder_states, batch.text_vecs.size(0))
            preds = torch.stack(preds, dim=1)
            scores = torch.stack(scores, dim=1)
            #import ipdb; ipdb.set_trace()
            # prediction length = number of negative log-prob positions
            pred_lengths = (scores < 0).sum(dim=1).to(scores.device)
            length_penalties = torch.Tensor([
                Beam.get_length_penalty(i) for i in pred_lengths.tolist()
            ]).to(scores.device)
            # length-normalized sequence scores
            scores_length_penalized = scores.sum(dim=1) / length_penalties
            pred_scores = tuple(
                (p, s) for p, s in zip(preds, scores_length_penalized))
            if dump is True:
                _dump = [attn_w_log]
                return pred_scores, _dump
            else:
                return pred_scores

        if 'beam' in decoding_strategy:
            # strategy string encodes the beam size after the colon
            beams = self.decode_beam(int(decoding_strategy.split(':')[-1]),
                                     len(batch.text_lens), encoder_states)
            pred_scores = beams
            return pred_scores
示例#17
0
    def setup(self, static, Wcent):
        """Build the structural loading sub-model for the wing spar.

        :param static: static wing model providing N, E, I, b, S_y --
            presumably a GPkit model; confirm against the wing module.
        :param Wcent: center weight variable the loading scales with.
        :return: (beam sub-model, list of structural constraints).
        """

        Nmax = Variable("N_{max}", 5, "-", "max loading")
        cbar, _ = c_bar(0.5, static.N)
        sigmaai = Variable("\\sigma_{AI}", 207, "MPa", "aluminum max stress")
        kappa = Variable("\\kappa", 0.05, "-", "max tip deflection ratio")

        with Vectorize(static.N - 1):
            Mr = Variable("M_r", "N*m", "wing section root moment")

        with Vectorize(static.N):
            qbar = Variable("\\bar{q}", cbar, "-", "normalized loading")

        beam = Beam(static.N, qbar)

        constraints = [
            # dimensionalize moment of inertia and young's modulus
            beam["\\bar{EI}"] <=
            (8 * static["E"] * static["I"] / Nmax / Wcent / static["b"]**2),
            # dimensionalize the normalized section moments
            Mr == (beam["\\bar{M}"][:-1] * Wcent * Nmax * static["b"] / 4),
            # material stress limit
            sigmaai >= Mr / static["S_y"],
            # tip deflection bound
            beam["\\bar{\\delta}"][-1] <= kappa,
        ]

        return beam, constraints
示例#18
0
    def configure(self, *args, **kwargs):
        """
        Configure the beams from the config source. This is done whenever
        an instrument is instantiated.

        Extra *args/**kwargs are forwarded to ``Beam.from_config``.
        Raises RuntimeError if already configured, ValueError on duplicate
        beam names.
        :return:
        """
        if self.beams:
            raise RuntimeError('Beamformer ops already configured.')

        # get beam names from config
        beam_names = []
        self.beams = {}
        max_pkt_size = self.corr.b_stream_payload_len
        for section_name in self.corr.configd:
            if section_name.startswith('beam'):
                beam = Beam.from_config(section_name, self.hosts,
                                        self.corr.configd,
                                        self.corr.fops,
                                        self.corr.speadops,
                                        max_pkt_size=max_pkt_size,
                                        *args, **kwargs)
                # reject duplicate beam names across config sections
                if beam.name in beam_names:
                    raise ValueError('Cannot have more than one beam with '
                                     'the name %s. Please check the '
                                     'config file.' % beam.name)
                self.beams[beam.name] = beam
                beam_names.append(beam.name)
        self.logger.info('Found {} beams: {}'.format(len(beam_names),
                                                     beam_names))

        # add the beam data streams to the instrument list
        for beam in self.beams.values():
            self.corr.add_data_stream(beam)
def fire_random_beam(ai_settings, screen, aliens, beams):
    """Let a random alien shoot, respecting the beam cap and cooldown."""
    shooter = random.choice(aliens.sprites())
    if len(beams) < ai_settings.beams_allowed:
        last_shot = ai_settings.beam_stamp
        # a beam may fire if none has fired yet, or the cooldown elapsed
        if last_shot is None or \
                abs(pygame.time.get_ticks() - last_shot) > ai_settings.beam_time:
            beam = Beam(ai_settings, screen, shooter)
            shooter.fire_weapon()
            beams.add(beam)
示例#20
0
 def constructBeams(self, beamFile):
     """Read *beamFile* and build one Beam per data line.

     The first line of the file is a header and is skipped.

     :param beamFile: path of the beam definition file.
     :return: list of Beam objects, one per remaining line.
     """
     # `with` guarantees the file is closed even if Beam() raises,
     # unlike the original open()/close() pair.
     with open(beamFile, 'r') as f:
         next(f)  # skip the header line
         return [Beam(line) for line in f]
示例#21
0
 def parse(self, sent):
     """Beam-search parse of one sentence; returns its dependency set."""
     # prepend the artificial ROOT token
     tokens = [ROOT] + sent
     beam = Beam(self.model.beam_size)
     beam.add(self._get_state(tokens))
     # each step attaches one token, so n-1 steps leave a single tree
     for _ in range(len(tokens) - 1):
         beam = self._extend_beam_for_test(beam)
     # best remaining state holds the parse
     return beam.top()['deps']
示例#22
0
  def beam_generate(self,h,c,tembs,vembs,gembs,nerd,beamsz,k):
    """Beam-search generation over title/graph/entity attention contexts.

    Args (shapes are not verifiable from this file -- TODO confirm):
        h, c: encoder LSTM hidden/cell states.
        tembs, vembs, gembs: title / vertex(entity) / graph embeddings;
            any of them may be None and is then skipped.
        nerd: input to emb_w_vertex for vertex-word remapping.
        beamsz: beam width.
        k: top-k candidates considered per step.

    Returns:
        The Beam object holding the generated hypotheses.
    """
    #h,c,tembs,vembs,gembs,rembs = self.encode_inputs(title,entities,graph)
    #h,c,tembs,vembs,gembs = self.encode_inputs(title,entities,graph)
    # keep only the attention sources that were actually provided
    embs = [x for x in [(self.t_attn,tembs),(self.g_attn,gembs),(self.e_attn,vembs)] if x[1] is not None]

    outp = torch.LongTensor(vembs[0].size(0),1).fill_(self.starttok).cuda()
    last = h.transpose(0,1)
    outputs = []
    beam = None
    for i in range(self.maxlen):
      outp = self.emb_w_vertex(outp.clone(),nerd)
      enc = self.Embedding(outp)
      decin = torch.cat((enc,last),2)
      decout,(h,c) = self.dlstm(decin,(h,c))
      last, vweight, _ = self.hierattn(decout,embs)
      # switch gate mixes vocabulary and copy distributions
      scalar = torch.sigmoid(self.switch(h))
      outs = torch.cat((decout,last),2)
      decoded = self.outlin(outs.contiguous().view(-1, self.args.hsz*2))
      decoded = decoded.view(outs.size(0), outs.size(1), self.args.ntoks)
      decoded = torch.softmax(decoded,2)
      # never emit token ids 0 and 1 (presumably pad/unk -- confirm)
      decoded[:,:,0].fill_(0)
      decoded[:,:,1].fill_(0)
      scalars = scalar.transpose(0,1)
      decoded = torch.mul(decoded,1-scalars.expand_as(decoded))
      vweights = torch.mul(vweight,scalars.expand_as(vweight))
      decoded = torch.cat([decoded,vweights],2)

      # epsilon keeps log() finite for zeroed entries
      zero_vec = 1e-6*torch.ones_like(decoded)
      decoded += zero_vec
      decoded = decoded.log()
      scores, words = decoded.topk(dim=2,k=k)
      #scores = scores.transpose(0,1); words = words.transpose(0,1)
      if not beam:
        # first step: seed the beam and tile the attention contexts
        beam = Beam(words.squeeze(),scores.squeeze(),[h for i in range(beamsz)],
                  [c for i in range(beamsz)],[last for i in range(beamsz)],beamsz,k)
        beam.endtok = self.endtok
        newembs = []
        for a,x in embs:
          tmp = (x[0].repeat(len(beam.beam),1,1),x[1].repeat(len(beam.beam),1))
          newembs.append((a,tmp))
        embs = newembs
      else:
        # update returns falsy when all hypotheses have finished
        if not beam.update(scores,words,h,c,last):
          break
        # shrink the tiled contexts to the surviving hypotheses
        newembs = []
        for a,x in embs:
          tmp = (x[0][:len(beam.beam),:,:],x[1][:len(beam.beam)])
          newembs.append((a,tmp))
        embs = newembs
      outp = beam.getwords()
      h = beam.geth()
      c = beam.getc()
      last = beam.getlast()

    return beam
示例#23
0
 def train_sent(self, sent):
     """Run one perceptron update for a single training sentence.

     Early update: decoding stops and the weights are updated as soon as
     the gold-consistent state falls out of the beam.  Otherwise a full
     update happens at the end when the predicted tree differs from gold.
     """
     # take sent from corpus and update weight vector accordingly
     self.model.update_perceptron_counter()
     # initial state over the ROOT-prefixed sentence
     sent = [ROOT] + sent
     init_state = get_state(sent)
     # gold tree for validity checks and the final comparison
     gold_tree = get_tree(sent)
     beam = Beam(self.model.beam_size)
     top_valid_state = None
     beam.add(init_state)
     step = 0
     for i in xrange(len(sent) - 1):
         step += 1
         # extend current beam
         beam, top_valid_state = self.extend_beam_for_train(beam, gold_tree)
         # FIX: removed the original `try: ... except Exception: raise`
         # wrapper, which was a no-op that only obscured the logic.
         # early update: the gold-consistent state left the beam
         if not beam.has_element(top_valid_state.top()):
             self.update_paramaters(beam.top(), -1)
             self.update_paramaters(top_valid_state.top(), 1)
             break
     if step == len(sent) - 1:
         top_beam = beam.top()
         predict_arcs = top_beam['arcs']
         # full update when the final prediction disagrees with gold
         if not compare_arcs(predict_arcs, gold_tree):
             self.update_paramaters(top_beam, -1)
             self.update_paramaters(top_valid_state.top(), 1)
示例#24
0
 def train(self, sent):
     """Perceptron training on one sentence with early update."""
     # update parameters with one sentence
     # ROOT token at beginning of pending
     sent = [ROOT] + sent
     # oracle object to check valid actions
     oracle = Oracle(sent)
     # gold_deps for the full update at the end
     gold_deps = self._build_gold(sent)
     # create start state
     init_state = self._get_state(sent)
     # create beam
     beam = Beam(self.model.beam_size)
     # add state to beam
     beam.add(init_state)
     # correct action with highest score at one step
     valid_action = None
     for step in range(len(sent) - 1):
         beam, valid_action = self._extend_beam(beam, oracle)
         # early update: beam no longer contains the valid action
         if not beam.has_element(valid_action):
             beam_top = beam.top()
             self.model.update(beam_top, valid_action)
             break
     else:
         # for/else: runs only when the loop finished without break
         beam_top = beam.top()
         beam_deps = beam_top['deps']
         # if final deps differ from gold_deps, do a full update
         if not self._check_equal(gold_deps, beam_deps):
             self.model.update(beam_top, valid_action)
示例#25
0
def fire(ai_settings, screen, ship, bullets, beams):
    """Shoot: a beam when the ship is charged, otherwise a plain bullet."""
    if ship.charge():
        beams.add(Beam(ai_settings, screen, ship))
    else:
        bullets.add(Bullet(ai_settings, screen, ship))
    ship.fire()
示例#26
0
def fire_random_beam(ai_settings, screen, aliens, beams):
    """Pick a random alien and let it fire, honoring the cap and cooldown."""
    shooter = random.choice(aliens.sprites())
    if len(beams) < ai_settings.beams_limit:
        last_shot = ai_settings.beam_stamp
        # fire only when no stamp exists yet or the cooldown has elapsed
        if last_shot is None or \
                abs(pygame.time.get_ticks() - last_shot) > ai_settings.beam_time:
            beam = Beam(ai_settings, screen, shooter)
            shooter.fire_weapon()
            beams.add(beam)
示例#27
0
文件: bot.py 项目: intangere/Beam
class Iris(irc.IRCClient):
	"""IRC bot: dispatches dot-commands to the Irx core and feeds every
	other message into a Beam text chain used to generate replies when
	the bot is addressed by nickname."""

	nickname = irx.config.nickname
	realname = irx.config.realname
	username = irx.config.username

	def __init__(self):
		# Wire the Twisted sendLine callback and identity into the Irx
		# core, then load plugins and the command table.
		self.irx = Irx.Irx(self.sendLine, irx.config.nickname, irx.config.username, irx.config.realname)
		self.irx.loadPlugins("plugins")
		self.irx.buildCommandList()
		self.beam = Beam()

	def connectionMade(self):
		irc.IRCClient.connectionMade(self)

	def connectionLost(self, reason):
		# BUG FIX: 'reason' was missing from the signature, so calling
		# this raised NameError instead of forwarding the disconnect
		# reason to the base class (Twisted passes it as an argument).
		irc.IRCClient.connectionLost(self, reason)

	def signedOn(self):
		for channel in irx.config.channels:
			self.join(channel)

	def topicUpdated(self, user, channel, topic):
		# 'with' guarantees the file is closed even if write() fails
		# (the original leaked the handle on error).
		with open("data/topics/current_topic_%s.txt" % channel, "w+") as f:
			f.write(topic)

	def privmsg(self, user, channel, data):
		if data.startswith("."):
			# Dot-prefixed messages are bot commands.
			self.irx.doCommand(channel, user, data)
		elif not data.startswith('%s:' % irx.config.nickname):
			# Ordinary chatter: feed it into the text chain
			# (one sentence at a time when there are several).
			if 'headsplitter' not in user:
				if data.count('.') > 1:
					for line in data.split('.'):
						self.beam.addToChain(line)
				else:
					self.beam.addToChain(data)
		else:
			# Addressed directly: reply with generated text.
			# BUG FIX: this branch had broken mixed tab/space
			# indentation in the original.
			try:
				self.irx.send(channel, '%s: %s' % (user.split('!', 1)[0], self.beam.generateRandomText()))
			except IndexError:
				pass
示例#28
0
    def extend_beam_for_train(self, beam, gold_tree):
        """Expand every state in *beam* by every possible attachment.

        :param beam: current Beam of parser states
        :param gold_tree: gold dependencies, used to mark valid successors
        :return: (new_beam, top_valid_state) — the expanded beam of size
            self.model.beam_size and a size-1 beam holding the best
            successor the oracle marks as valid.
        """
        expanded = Beam(beam_size=self.model.beam_size)
        best_valid = Beam(beam_size=1)

        for state in beam:
            pending = state['pending']
            base_features = state['features']
            base_score = state['score']
            arcs = state['arcs']
            was_valid = state['valid']

            for point in xrange(len(pending) - 1):
                # Each attachment point yields one candidate per class.
                local_features = features_extract(pending, arcs, point)
                # NOTE: features extract function has not been built yet.
                scores = self.model.get_scores(local_features)
                combined_features = base_features + local_features

                for cls_id, score in scores.iteritems():
                    action = get_action(point, cls_id,
                                        self.model.label_hash)
                    # A successor can only be valid if its source state was.
                    is_valid = False
                    if was_valid:
                        is_valid = check_valid_action(
                            pending, arcs, action, gold_tree)
                    next_pending, next_arcs = apply_action(pending, arcs,
                                                           action)
                    # -inf marks an unscored start state; don't add to it.
                    if base_score == float('-inf'):
                        total = score
                    else:
                        total = base_score + score

                    successor = get_state(next_pending, combined_features,
                                          total, cls_id, next_arcs,
                                          is_valid)
                    expanded.add(successor)
                    if is_valid:
                        best_valid.add(successor)
        return expanded, best_valid
示例#29
0
文件: main.py 项目: xrisk/Mandalorian
 def generate_beams(self):
     """Scatter 30 randomly placed, randomly oriented beams on the board."""
     for _ in range(30):
         row = randint(4, self.__row - 10)
         col = randint(50, self.__col - 10)
         beam = Beam(row, col, self)
         beam.set_orientation(choice(["vert", "horiz", "diag"]))
         beam.render(self.__buf)
         self.add_entity(beam)
示例#30
0
    def test_beam_init(self):
        """A beam from (0,0,0) to (10,10,0) has the expected geometry."""
        start = Node(0., 0., 0.)
        end = Node(10., 10., 0.)

        beam = Beam(start, end)
        beam.properties['Area'] = 0.1 * 0.1

        expected_length = sqrt(200.)
        self.assertAlmostEqual(beam.length(), expected_length)
        self.assertAlmostEqual(beam.volume(), 0.01 * expected_length)
        self.assertTrue(beam.sizeOfEM() == 12)
示例#31
0
    def __init__(self,
                 lon,
                 lat,
                 antpos,
                 freqs,
                 df=40e3,
                 date=None,
                 driftMode=True,
                 beam=None):
        """Set up an interferometer observation model.

        :param lon: observer longitude (passed to ephem as a string)
        :param lat: observer latitude (passed to ephem as a string)
        :param antpos: antenna positions in n-s/e-w coordinates
        :param freqs: observing frequencies
        :param df: channel width in Hz (default 40 kHz)
        :param date: observation date; defaults to ephem.now() evaluated
            at call time (see BUG FIX note below)
        :param driftMode: when True, precompute unique baselines for a
            drift-scan model
        :param beam: primary beam model; defaults to a fresh
            Beam(mwabeam, [None, [0., 0.]]) per instance
        """
        # BUG FIX: the original defaults were ``date=ephem.now()`` and
        # ``beam=Beam(...)``.  Default expressions are evaluated once at
        # import time, so every instance shared one frozen timestamp and
        # one Beam object.  Use None sentinels and build per call.
        if date is None:
            date = ephem.now()
        if beam is None:
            beam = Beam(mwabeam, [None, [0., 0.]])
        self.driftMode = driftMode
        self.obs = ephem.Observer()
        self.lat = lat
        self.lon = lon
        self.obs.lat = str(lat)
        self.obs.lon = str(lon)
        self.obs.date = date
        self.antpos = antpos
        # Convert from n-s,e-w coords to XYZ coords where Y points to -6
        # hours, X to 0 hours and Z along the earth's rotation axis.
        self.lst = np.degrees(float(repr(self.obs.sidereal_time())))
        self.xyz = nsew2xyz(antpos, self.lst, lat)
        self.nant = self.xyz.shape[0]
        self.freqs = freqs
        nf = len(freqs)
        # Delay axis for nf channels of width df.
        self.delays = np.arange(-nf / 2, nf / 2) / (nf * df)
        # Number of baselines; ``//`` keeps this an int on Python 3
        # (plain ``/`` produced a float and np.zeros rejects float dims).
        # (nant-1)*nant is always even, so the division is exact.
        self.nvis = (self.nant - 1) * self.nant // 2
        self.model = np.zeros((len(freqs), self.nvis)).astype(complex)
        self.pcentre = np.array([self.lst, lat])
        if (self.driftMode):
            self.nunique, self.unique_map, self.unique_uvw, self.uvw = self.getUnique(
                self.pcentre)
            self.model_true = np.zeros(
                (len(freqs), self.nunique)).astype(complex)
        self.modelspace = 'freq'
        self.data = np.zeros((len(freqs), self.nvis)).astype(complex)
        self.datastate = 'freq'
        # fifteen degrees at lowest frequency (original author's note —
        # presumably the beam width; TODO confirm)
        self.beam = beam
        self.bandpass = [
            Bandpass(flatband, [1]) for mm in range(self.xyz.shape[0])
        ]  # instantiate all antennae to the same flat bandpass
示例#32
0
    def beam_generate(self, batch, beam_size, k) :
        """Beam-search decoding for one batch.

        Runs the encoder once, then repeatedly steps the decoder RNN,
        letting a Beam object track the top candidate sequences.

        :param batch: batch wrapper; ``batch.input`` holds the encoder
            inputs (source tensor and a second argument, presumably
            lengths — TODO confirm against the encoder)
        :param beam_size: number of hypotheses the Beam tracks
        :param k: how many top words to expand per step
        :return: the final Beam object holding the hypotheses
        """
        batch = batch.input
        encoder_output, context = self.encoder(batch[0], batch[1])
        # Fold the bidirectional encoder states: concatenate the even
        # (forward) and odd (backward) layers along the feature dim.
        hidden = []
        for i in range(len(context)) :
            each = context[i]
            hidden.append(torch.cat([each[0:each.size(0):2], each[1:each.size(0):2]], 2))
        hx = hidden[0]
        cx = hidden[1]
        # Start decoding from token id 2 — presumably the SOS symbol;
        # TODO confirm against the vocabulary.
        recent_token = torch.LongTensor(1, ).fill_(2).to(self.device)
        beam = None
        # Decode for at most 1000 steps or until the beam reports done.
        for i in range(1000) :
            embedded = self.decoder.embedding(recent_token.type(dtype = torch.long).to(self.device))
            #(beam_size, embedding_size)
            embedded = embedded.unsqueeze(0).permute(1, 0, 2)
            output, (hx, cx) = self.decoder.rnn(embedded, (hx.contiguous(), cx.contiguous()))
            hx = hx.permute(1, 0, -1)
            cx = cx.permute(1, 0, -1)
            output = self.decoder.out(output.contiguous()) #(beam_size, 1, target_vocab_size)
            output = self.softmax(output)
            # Zero the probabilities of ids 0-2 before the log so these
            # (special) tokens are never selected by topk.
            output[:, :, 0].fill_(0)
            output[:, :, 1].fill_(0)
            output[:, :, 2].fill_(0)
            decoded = output.log().to(self.device)
            scores, words = decoded.topk(dim = -1, k = k) #(beam_size, 1, k) (beam_size, 1, k)
            # NOTE(review): the results of these .to() calls are
            # discarded — Tensor.to is not in-place, so both lines are
            # no-ops as written.
            scores.to(self.device)
            words.to(self.device)

            if not beam :
                # First step: create the beam from the initial top-k.
                beam = Beam(words.squeeze(), scores.squeeze(), [hx] * beam_size, [cx] * beam_size, beam_size, k, self.decoder.output_vocab_size, self.device)
                beam.endtok = 5
                beam.eostok = 3
            else :
                # Beam.update returns falsy when the search is finished.
                if not beam.update(scores, words, hx, cx) : break
            
            # Feed the beam's chosen words and states into the next step.
            recent_token = beam.getwords().view(-1) #(beam_size, )
            hx = beam.get_h().permute(1, 0, -1)
            cx = beam.get_c().permute(1, 0, -1)
            #context = beam.get_context()
        
        return beam
示例#33
0
文件: engine.py 项目: poopas/yxkull
    def process_beams(self):
        """Create a damaging beam between every pair of nearby players,
        then apply beam damage to every entity a beam crosses."""
        self.beams = []
        # BUG FIX: the original double loop visited *ordered* pairs, so it
        # created a zero-length self-beam for every player (p1 is p2,
        # distance 0 < 120) and two beams per close pair (double damage).
        # Iterate unordered pairs instead.
        for i, p1 in enumerate(self.players):
            for p2 in self.players[i + 1:]:
                if p1.dist_to(p2.pos) < 120.0:
                    beam = Beam()
                    beam.pos1 = p1.pos
                    beam.pos2 = p2.pos
                    beam.damage = 3

                    self.beams.append(beam)

        # Make the beams take damage!
        for beam in self.beams:
            for entity in self.entities:
                if line_circle_intersect(beam.pos1, beam.pos2, entity.pos, entity.radius):
                    beam.make_damage(entity)
示例#34
0
    def parse_palkit(self, line):
        """Parse one beam ("palkki") line of a composition file.

        Expected format: ``<measure>,<idx>:<idx>:...`` — a measure number
        followed by colon-separated note indices within that measure.
        Builds a Beam from the referenced notes and adds it to the
        composition.

        :param line: one raw text line from the file
        :return: True when a beam was added; None for a blank line or on
            any parse error (the error is reported on stdout).
        """
        try:
            if line.strip() != "":
                parts = line.split(",")
                measure = int(parts[0].strip())
                measurenotes = self.sort_measurenotes(measure)
                notes = []

                note_parts = parts[1].split(":")
                for note in note_parts:
                    try:
                        notesort = int(note.strip())
                    except ValueError:
                        raise CorruptedCompositionFileError(
                            "Omituinen nuotti palkille")

                    notes.append(measurenotes[notesort])

                beam = Beam(notes)
                Composition.add_beam(self.comp, beam)
                return True
        # BUG FIX: was a bare ``except:``, which also swallows SystemExit
        # and KeyboardInterrupt.  Parse problems (including the
        # CorruptedCompositionFileError raised above) still just report a
        # bad beam, as before.
        except Exception:
            print("Huono palkki")
示例#35
0
        def findReaction(
            b: Beam, p: Union[Beam, None]
        ) -> Tuple[Vector3, float]:  # tuple float is the beam's angle
            # Recursively resolve the reaction force acting on beam *b*,
            # walking the structure away from the calling parent *p* so
            # each beam is visited once.  Closes over self.beams and
            # solution from the enclosing scope.
            v: Vector3 = Vector3(0, 0, 0)
            endFirst: bool
            # Find b's record index; self.beams[i][2] is used below as
            # the beam's angle — TODO confirm against where beams is built.
            for i in range(len(self.beams)):
                if b == self.beams[i][0]:
                    break

            # Leaf case: an end with no children takes its support
            # reaction (if a support exists), rotated into b's frame.
            if len(b.start[1]) == 0:
                endFirst = False
                if b.start[0] != None:
                    v = rotate(b.start[0].reaction, -self.beams[i][2])
            elif len(b.end[1]) == 0:
                endFirst = True
                if b.end[0] != None:
                    v = rotate(b.end[0].reaction, -self.beams[i][2])
            elif p != None:
                # Interior case: sum the children's reactions on the side
                # away from the parent, each rotated into b's local frame.
                if p in b.start[1]:
                    endFirst = True
                    for c in b.end[1]:
                        r: Tuple[Vector3, float] = findReaction(c, b)
                        v += rotate(r[0], r[1] - self.beams[i][2])
                elif p in b.end[1]:
                    endFirst = False
                    for c in b.start[1]:
                        r: Tuple[Vector3, float] = findReaction(c, b)
                        v += rotate(r[0], r[1] - self.beams[i][2])
                else:
                    raise Exception('Cannot find parent!')
            else:
                raise Exception('Parent not given!')

            # Solve b with the accumulated end load; record its stress in
            # the enclosing solution list and return (reaction, angle).
            v = b.solve(v, self.beams[i][2], endFirst)
            solution[i] = b.stress
            return (v, self.beams[i][2])
    def f_decode_text_beam(self, z, max_seq_len, n=4):
        """Beam-search decode text from latent codes *z*.

        :param z: latent tensor of shape (batch_size, hidden_size), used
            as the initial decoder hidden state
        :param max_seq_len: maximum number of decoding steps
        :param n: unused here; kept for interface compatibility
        :return: (all_hyp, all_scores) — the best hypothesis and its
            score for each sentence in the batch
        """
        # BUG FIX: the original guard was ``assert "z is none"`` — it
        # asserts a non-empty string, which always passes, so a None z
        # slipped through and crashed later at unsqueeze.  Fail fast.
        if z is None:
            raise ValueError("f_decode_text_beam: z must not be None")

        batch_size = self.m_batch_size

        beam_size = self.m_beam_size

        ### hidden size: batch_size*hidden_size
        hidden = z

        ### hidden size: 1*batch_size*hidden_size
        hidden = hidden.unsqueeze(0)

        hidden_size = hidden.size()[2]

        ### hidden_beam: 1*(batch_size*beam_size)*hidden_size
        hidden_beam = hidden.repeat(1, beam_size, 1)

        ### one Beam tracker per sentence in the batch
        beam = [
            Beam(beam_size, self.m_pad_idx, self.m_sos_idx, self.m_eos_idx,
                 self.m_device) for k in range(batch_size)
        ]

        batch_idx = list(range(batch_size))
        remaining_sents = batch_size

        print("max_seq_len", max_seq_len)
        for i in range(max_seq_len):

            ### dec_input: 1*(beam_size*remain_size)
            ### (renamed from ``input``, which shadows the builtin)
            dec_input = torch.stack([
                b.get_cur_state() for b in beam if not b.m_done
            ]).t().contiguous().view(1, -1).to(self.m_device)

            ### input_emb: (remain_size*beam_size)*1*embed_size
            input_emb = self.m_network.m_embedding(dec_input.transpose(1, 0))

            ### output: (remain_size*beam_size)*1*hidden_size
            ### hidden_beam: 1*(remain_size*beam_size)*hidden_size
            output, hidden_beam = self.m_network.m_decoder_rnn(
                input_emb, hidden_beam)

            ### logits: (remain_size*beam_size)*voc_size
            logits = self.m_network.m_linear_output(output.squeeze(1))

            ### pred_prob: (remain_size*beam_size)*voc_size
            pred_prob = F.log_softmax(logits, dim=-1)

            ### word_lk: remain_size*beam_size*voc_size
            word_lk = pred_prob.view(beam_size, remaining_sents,
                                     -1).transpose(0, 1).contiguous()

            active = []

            for b in range(batch_size):
                if beam[b].m_done:
                    continue

                idx = batch_idx[b]
                # advance() returns truthy when this sentence finished.
                if not beam[b].advance(word_lk[idx]):
                    active += [b]

                ### hidden_beam: 1*(remain_size*beam_size)*hidden_size
                ### b_hidden: 1*beam_size*1*hidden_size
                b_hidden = hidden_beam.view(-1, beam_size, remaining_sents,
                                            hidden_size)[:, :, idx]

                ### reorder this sentence's hidden states to follow the
                ### beam's surviving back-pointers (in-place copy)
                b_hidden.copy_(
                    b_hidden.index_select(1, beam[b].get_cur_origin()))

            if not active:
                break

            ### index of remaining sentences in last round
            active_idx = torch.LongTensor([batch_idx[k]
                                           for k in active]).to(self.m_device)

            ### re-index for remaining sentences
            batch_idx = {beam: idx for idx, beam in enumerate(active)}

            def update_active(t):
                # Drop finished sentences from a (1, remain*beam, hidden)
                # tensor, keeping only the active ones.

                ### t_tmp: beam_size*remain_size*hidden_size
                t_tmp = t.data.view(-1, remaining_sents, hidden_size)

                new_size = list(t.size())
                new_size[-2] = new_size[-2] * len(
                    active_idx) // remaining_sents

                ### new_t: beam_size*new_remain_size*hidden_size
                new_t = t_tmp.index_select(1, active_idx)
                new_t = new_t.view(*new_size)
                # detach().clone() instead of torch.tensor(tensor): the
                # latter copies with a UserWarning and is the
                # discouraged way to copy an existing tensor.
                new_t = new_t.detach().clone().to(self.m_device)

                return new_t

            ### hidden_beam: 1*(new_remain_size*beam_size)*hidden_size
            hidden_beam = update_active(hidden_beam)

            remaining_sents = len(active)

        all_hyp, all_scores = [], []
        n_best = 1

        # Collect the single best hypothesis and score per sentence.
        for b in range(batch_size):
            scores, ks = beam[b].sort_best()
            all_scores += [scores[:n_best]]

            k = ks[:n_best]
            hyps = beam[b].get_hyp(k)
            all_hyp += [hyps]

        return all_hyp, all_scores
示例#37
0
文件: bot.py 项目: intangere/Beam
	def __init__(self):
		# Hook the Twisted sendLine callback and the configured identity
		# into the Irx core, then load plugins and build the command
		# table before creating the Beam text generator.
		self.irx = Irx.Irx(self.sendLine, irx.config.nickname, irx.config.username, irx.config.realname)
		self.irx.loadPlugins("plugins")
		self.irx.buildCommandList()
		self.beam = Beam()
示例#38
0
文件: PyEPIC.py 项目: YueHao/PyEPIC
 def __init__(self, inputfile=''):
     """Create the two beams and optionally load settings from a file.

     :param inputfile: path to an input file; when non-empty it is
         passed to :meth:`readfrom`.
     """
     self.beam1 = Beam()
     self.beam2 = Beam()
     # BUG FIX: the original tested ``inputfile is not ''`` — an identity
     # comparison against a literal, which is implementation-dependent
     # and a SyntaxWarning on CPython 3.8+.  Compare by value instead.
     if inputfile != '':
         self.readfrom(inputfile)
示例#39
0
文件: ex01.py 项目: heczis/pybeam
    ConstantContinuousLoad(q, a, a+b),
    PointLoad(-F, a),
    PointLoad(-F, a+b)
]
# reactions - list of unknown loads.
#  All must have magnitude = 1, otherwise the results of
#  Beam.get_reactions would be wrong.
reactions = [PointLoad(1, 0), PointLoad(1, a+b+c)]

# Initialize the beam object.
# For now we don't need the actual value of Jz since we are
# going to compute the actual dimensions based on the values
# of reaction forces.
beam = Beam(loads, reactions,
            l = a+b+c,
            E = E,
            Jz = 1.,
)

# Solve for the two support reactions and report them.
Rs = beam.get_reactions()
print('reactions:\n R_A = {:.2e}N\n R_B = {:.2e}N'.format(Rs[0], Rs[1]))

### computation and plots of bending force and moment ###
# Sample the beam at 61 evenly spaced stations along its length.
n = 12*5+1
x = np.linspace(0, a+b+c, n)

# Shear force and bending moment diagrams.
plt.plot(x, [beam.force(xi) for xi in x], '.-', label='force')
Mo = np.array([beam.moment(xi) for xi in x])
plt.plot(x, Mo, '.-', label='moment')

plt.legend(loc='best')