def test_compound_wolter2_with_hole(): p = 0. #beam1 = Beam.initialize_as_person(10000) beam1 = Beam(100000) beam1.set_circular_spot(1.) #beam1.set_rectangular_spot(5 / 2 * 1e-5, -5 / 2 * 1e-5, 5 / 2 * 1e-5, -5 / 2 * 1e-5) beam1.x *= 10. beam1.z *= 10. beam1.set_point(p, 0., p) op_ax = Beam(1) op_ax.set_point(p, 0., p) beam = op_ax.merge(beam1) beam.set_divergences_collimated() beam.plot_xz(0) p = 20000. q = 30. z0 = 5. focal = 2 * z0 + q wolter2 = CompoundOpticalElement.initialiaze_as_wolter_2(p1=p, q1=q, z0=z0) #oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p=p, q=0., theta=0., alpha=0., infinity_location="p", focal=focal) #oe2 = Optical_element.initialize_my_hyperboloid(p=0., q=-q, theta=90*np.pi/180, alpha=0., wolter=2, z0=z0, distance_of_focalization=focal) #oe1.rotation_to_the_optical_element(beam) #oe1.translation_to_the_optical_element(beam) #[beam, t] = oe1.intersection_with_optical_element(beam) #oe1.output_direction_from_optical_element(beam) #[beam, t] = oe2.intersection_with_optical_element(beam) #oe2. output_direction_from_optical_element(beam) #oe2.theta = 0. #oe2.rotation_to_the_screen(beam) #oe2.translation_to_the_screen(beam) #oe2.intersection_with_the_screen(beam) beam = wolter2.trace_compound(beam) beam.plot_xz() print("mean(beam.x)=%f, mean(beam.y)=%f, mean(beam.z)=%f" % (np.mean(beam.x), np.mean(beam.y), np.mean(beam.z))) beam.retrace(10.) beam.plot_xz() plt.show()
def test_my_hyperbolic_mirror():
    """Focus a flat-divergence beam with a spherical mirror followed by a
    hyperboloid and assert that all rays converge on the origin.
    """
    rays = Beam()
    rays.set_flat_divergence(0.005, 0.0005)

    # First element: spherical mirror with source at 130 and image at 0.
    source_dist = 130.
    image_dist = 0.
    sphere = Optical_element.initialize_as_spherical_mirror(
        source_dist, image_dist, theta=0, alpha=0, R=130.)
    rays = sphere.trace_optical_element(rays)

    # Second element: hyperboloid placed 15 units downstream.
    hyp_p = 15
    hyp_q = source_dist - hyp_p
    hyp_theta = 0 * np.pi / 180
    hyperboloid = Optical_element.initialize_my_hyperboloid(hyp_p, hyp_q, hyp_theta)
    rays = hyperboloid.trace_optical_element(rays)

    rays.plot_xz()

    # All coordinates must vanish at the common focus.
    assert_almost_equal(rays.x, 0., 10)
    assert_almost_equal(rays.y, 0., 10)
    assert_almost_equal(rays.z, 0., 10)

    if do_plot:
        plt.show()
def test_compound_wolter1_with_hole():
    """Trace a 'person'-shaped source through a Wolter I telescope defined
    by two parameters (p1, R, theta) and plot only the surviving rays.
    """
    print(
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_compound_wolter1_with_hole"
    )
    p = 100.
    beam1 = Beam.initialize_as_person(100000)
    # Scale the figure up by a factor 50 in both transverse directions.
    beam1.x *= 50.
    beam1.z *= 50.
    beam1.set_point(p, 0., p)
    # Merge in a single on-axis ray so the optical axis itself is traced.
    op_ax = Beam(1)
    op_ax.set_point(p, 0., p)
    beam = op_ax.merge(beam1)
    beam.set_divergences_collimated()
    beam.plot_xz()
    p = 6 * 1e8  # effectively a source at infinity
    R = 100.
    theta = 0.001 * np.pi / 180  # grazing incidence
    wolter = CompoundOpticalElement.initialiaze_as_wolter_1_with_two_parameters(
        p1=p, R=R, theta=theta)
    #beam = wolter.trace_compound(beam)
    beam = wolter.trace_good_rays(beam)
    beam.plot_good_xz()
    beam.retrace(10.)
    beam.plot_good_xz()
    plt.show()
def test_plane_mirror():
    """Trace a divergent beam off a 45-degree plane mirror bounded by an
    (effectively unbounded) rectangular aperture, then plot position and
    divergence distributions.
    """
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_plane_mirror")
    beam1 = Beam(5000)
    beam1.set_point(0, 0, 0)
    beam1.set_flat_divergence(5e-3, 5e-2)

    p = 1.
    q = 1.
    theta = np.pi / 4
    alpha = 0

    plane_mirror = Optical_element.initialize_as_plane_mirror(
        p, q, theta, alpha)

    # BUG FIX: ymin/ymax were swapped (ymin was +1e5 and ymax -1e5), which
    # describes an inverted/empty acceptance window in y. The bound is meant
    # to be effectively unbounded in both axes, matching x.
    xmin = -10**5
    xmax = 10**5
    ymin = -10**5
    ymax = 10**5

    bound = BoundaryRectangle(xmax, xmin, ymax, ymin)
    plane_mirror.rectangular_bound(bound)

    beam1 = plane_mirror.trace_optical_element(beam1)
    beam1.plot_xz()
    beam1.plot_xpzp()

    if do_plot:
        plt.show()
def test_clean_wolter3():
    """Trace a 'person'-shaped collimated source through a Wolter III
    compound optic and plot the result before and after a 0.1 retrace.
    """
    p = 50.
    beam1 = Beam.initialize_as_person()
    beam1.set_point(p, 0., p)
    #beam1.set_rectangular_spot(5 / 2 * 1e-5, -5 / 2 * 1e-5, 5 / 2 * 1e-5, -5 / 2 * 1e-5)
    # Merge in a single on-axis ray so the optical axis itself is traced.
    op_ax = Beam(1)
    op_ax.set_point(p, 0., p)
    beam = op_ax.merge(beam1)
    beam.set_divergences_collimated()
    beam.plot_xz()
    distance_between_the_foci = 10.
    wolter3 = CompoundOpticalElement.initialize_as_wolter_3(
        20., 5., distance_between_the_foci)
    # Dump the conic coefficients of both surfaces for inspection.
    print(wolter3.oe1.ccc_object.get_coefficients())
    print(wolter3.oe2.ccc_object.get_coefficients())
    #beam = wolter3.trace_wolter3(beam, z0)
    beam = wolter3.trace_compound(beam)
    beam.plot_xz()
    beam.retrace(0.1)
    beam.plot_xz()
    plt.show()
def test_spherical_mirror():
    """Image a divergent point source with a spherical mirror (p=2, q=1,
    theta=30 deg) and assert the mean image position is on axis.
    """
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_spherical_mirror")
    beam1 = Beam(5000)
    beam1.set_point(0, 0, 0)
    beam1.set_flat_divergence(5e-3, 5e-2)
    p = 2.
    q = 1.
    theta = 30
    theta = theta * np.pi / 180  # degrees -> radians
    alpha = 0 * np.pi / 180
    spherical_mirror = Optical_element.initialize_as_spherical_mirror(
        p, q, theta, alpha)
    #spherical_mirror.set_spherical_mirror_radius_from_focal_distances()
    print(spherical_mirror.R)
    beam1 = spherical_mirror.trace_optical_element(beam1)
    beam1.plot_xz()
    beam1.plot_xpzp()
    print(np.mean(beam1.flag))
    if do_plot:
        plt.show()
    # Mean position must be (approximately) the origin to 2 decimals.
    assert_almost_equal(np.abs(beam1.x).mean(), 0.0, 2)
    assert_almost_equal(np.abs(beam1.y).mean(), 0.0, 2)
    assert_almost_equal(np.abs(beam1.z).mean(), 0.0, 2)
def predict(self, enc_hidden, context, context_lengths, batch, beam_size, max_code_length, generator, replace_unk, vis_params): decState = DecoderState( enc_hidden, Variable(torch.zeros(1, 1, self.opt.rnn_size).cuda(), requires_grad=False) ) # Repeat everything beam_size times. def rvar(a, beam_size): return Variable(a.repeat(beam_size, 1, 1), volatile=True) context = rvar(context.data, beam_size) context_lengths = context_lengths.repeat(beam_size) decState.repeat_beam_size_times(beam_size) beam = Beam(beam_size, cuda=True, vocab=self.vocabs['code']) for i in range(max_code_length): if beam.done(): break # Construct batch x beam_size nxt words. # Get all the pending current beam words and arrange for forward. # Uses the start symbol in the beginning inp = beam.getCurrentState() # Should return a batch of the frontier # Turn any copied words to UNKs if self.opt.copy_attn: inp['code'] = inp['code'].masked_fill_(inp['code'].gt(len(self.vocabs["code"]) - 1), self.vocabs["code"].stoi['<unk>']) # Run one step., decState gets automatically updated decOut, attn, copy_attn = self.forward(inp, context, context_lengths, decState) # decOut: beam x rnn_size decOut = decOut.squeeze(1) out = generator(decOut, copy_attn.squeeze(1) if copy_attn is not None else None, batch['src_map'], inp).data out = out.unsqueeze(1) if self.opt.copy_attn: out = generator.collapseCopyScores(out, batch) out = out.log() # beam x tgt_vocab beam.advance(out[:, 0], attn.data[:, 0]) decState.beam_update(beam.getCurrentOrigin(), beam_size) score, times, k = beam.getFinal() # times is the length of the prediction hyp, att = beam.getHyp(times, k) goldNl = self.vocabs['seq2seq'].addStartOrEnd(batch['raw_seq2seq'][0]) goldCode = self.vocabs['code'].addStartOrEnd(batch['raw_code'][0]) predSent = self.buildTargetTokens( hyp, self.vocabs, goldNl, att, batch['seq2seq_vocab'][0], replace_unk ) return Prediction(goldNl, goldCode, predSent, att)
def test_duplicate(self):
    """A duplicated Beam must carry identical positions and directions."""
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_duplicate")
    original = Beam()
    copy = original.duplicate()
    # Compare every per-ray array between the original and its copy.
    for attribute in ('x', 'y', 'z', 'vx', 'vy', 'vz'):
        assert_almost_equal(getattr(original, attribute),
                            getattr(copy, attribute), 9)
def test_gaussian_beam(self):
    """Gaussian divergences must average to zero in both x' and z'."""
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_gaussian_beam")
    gaussian_beam = Beam(5000)
    gaussian_beam.set_point(1., 1., 1.)
    gaussian_beam.set_gaussian_divergence(0.05, 0.0005)

    mean_vx = np.mean(gaussian_beam.vx)
    mean_vz = np.mean(gaussian_beam.vz)
    print(mean_vx)
    print(mean_vz)

    # The sampled divergence distributions are centred on zero.
    assert_almost_equal(mean_vx, 0.0, 1)
    assert_almost_equal(mean_vz, 0.0, 1)
def wordBeamSearch(mat, beamWidth, lm, useNGrams):
    """Decode a CTC output matrix with word beam search.

    mat is the RNN output of shape TxC (T time-steps, C = chars + blank,
    blank last); lm constrains extensions to characters allowed by the
    language model. Returns the text of the most probable beam.
    """
    chars = lm.getAllChars()
    blankIdx = len(
        chars)  # blank label is supposed to be last label in RNN output
    maxT, _ = mat.shape  # shape of RNN output: TxC
    genesisBeam = Beam(lm, useNGrams)  # empty string
    last = BeamList(
    )  # list of beams at time-step before beginning of RNN output
    last.addBeam(genesisBeam)  # start with genesis beam

    # go over all time-steps
    for t in range(maxT):
        curr = BeamList()  # list of beams at current time-step

        # go over best beams
        bestBeams = last.getBestBeams(beamWidth)  # get best beams
        for beam in bestBeams:
            # calc probability that beam ends with non-blank
            prNonBlank = 0
            if beam.getText() != '':
                # char at time-step t must also occur at t-1
                labelIdx = chars.index(beam.getText()[-1])
                prNonBlank = beam.getPrNonBlank() * mat[t, labelIdx]

            # calc probability that beam ends with blank
            prBlank = beam.getPrTotal() * mat[t, blankIdx]

            # save result: same text, updated blank/non-blank probabilities
            curr.addBeam(beam.createChildBeam('', prBlank, prNonBlank))

            # extend current beam with characters according to language model
            nextChars = beam.getNextChars()
            for c in nextChars:
                # extend current beam with new character
                labelIdx = chars.index(c)
                if beam.getText() != '' and beam.getText()[-1] == c:
                    prNonBlank = mat[t, labelIdx] * beam.getPrBlank(
                    )  # same chars must be separated by blank
                else:
                    prNonBlank = mat[t, labelIdx] * beam.getPrTotal(
                    )  # different chars can be neighbours

                # save result (new char can only end non-blank, so prBlank=0)
                curr.addBeam(beam.createChildBeam(c, 0, prNonBlank))

        # move current beams to next time-step
        last = curr

    # return most probable beam
    last.completeBeams(lm)
    bestBeams = last.getBestBeams(1)  # sort by probability
    return bestBeams[0].getText()
def beam_search(model, batch_x, max_trg_len=15, k=args.beam_width):
    """Batched beam-search decode with an RNN encoder/decoder.

    Active (unfinished) beams across the batch are packed into one decoder
    call per step for efficiency. Returns the best hypothesis per sample
    as a list of token-id lists.

    NOTE(review): the default `k=args.beam_width` is evaluated once at
    import time; later changes to args.beam_width won't affect it.
    """
    enc_outs, hidden = model.encode(batch_x)
    hidden = model.init_decoder_hidden(hidden)
    mask = batch_x.eq(model.vocab['<pad>']).unsqueeze(1)
    b_size = batch_x.shape[0]
    # One beam per batch element, each seeded with its own decoder state.
    beams = [Beam(k, model.vocab, hidden[:, i, :]) for i in range(b_size)]
    for _ in range(max_trg_len):
        not_finish = [j for j in range(b_size) if not beams[j].done]
        if len(not_finish) == 0:
            break
        # Pack all k hypotheses of every unfinished beam into one batch.
        _word_ = torch.cat([beams[j].get_current_word() for j in not_finish],
                           dim=0)
        _enc_outs_ = torch.cat(
            [enc_outs[j].unsqueeze(0).expand(k, -1, -1) for j in not_finish],
            dim=0)
        _hidden_ = torch.cat([beams[j].get_hidden_state() for j in not_finish],
                             dim=1)
        _mask_ = torch.cat(
            [mask[j].unsqueeze(0).expand(k, -1, -1) for j in not_finish],
            dim=0)
        logits, hidden = model.decode(_word_, _enc_outs_, _hidden_, _mask_)
        log_probs = torch.log(F.softmax(logits, -1))
        # Unpack step results back to each beam (k rows per beam).
        idx = 0
        for j in not_finish:
            beams[j].advance_(log_probs[idx:idx + k],
                              hidden[:, idx:idx + k, :])
            idx += k
    # Previous per-sample (unbatched) decode loop, kept for reference:
    # for j in range(b_size):
    #     word = beams[j].get_current_word()
    #     enc_outs_j = enc_outs[j].unsqueeze(0).expand(k, -1, -1)
    #     hidden = beams[j].get_hidden_state()
    #     mask_j = mask[j].unsqueeze(0).expand(k, -1, -1)
    #     logit, hidden = model.decode(word, enc_outs_j, hidden, mask_j)
    #     # logit: [k x V], hidden: [k x hid_dim]
    #     log_probs = torch.log(F.softmax(logit, -1))
    #     beams[j].advance_(log_probs, hidden)

    allHyp, allScores = [], []
    n_best = 1
    for b in range(batch_x.shape[0]):
        scores, ks = beams[b].sort_best()
        allScores += [scores[:n_best]]
        hyps = [beams[b].get_hyp(k) for k in ks[:n_best]]
        allHyp.append(hyps)
    # shape of allHyp: [batch, 1, list]
    allHyp = [[int(w.cpu().numpy()) for w in hyp[0]] for hyp in allHyp]

    return allHyp
def beamSearchV2(self, pre_words, beamK, param_lambda, maxToken):
    """Beam search over the word graph with sentence-length normalization.

    Scores are log(prob) divided by len(prefix)**param_lambda; search stops
    when the best hypothesis is complete or reaches maxToken tokens.
    Returns a StringDouble of (sentence, score).
    """
    frontier = Beam(beamK)
    frontier.add(0.0, 1.0, False, pre_words.split(" "))
    while True:
        expanded = Beam(beamK)
        for (score, prob, complete, prefix) in frontier:
            if complete:
                # Finished hypotheses are carried over unchanged.
                expanded.add(score, prob, complete, prefix)
                continue
            successors = self.graph.graph[prefix[-1]]
            for nxt in successors:
                if nxt == "</s>":
                    # Sentence end: mark complete, keep the prefix as-is.
                    norm = 1 / pow(len(prefix[:]), param_lambda)
                    expanded.add(norm * math.log(prob), prob, True, prefix[:])
                else:
                    extended = prefix[:] + [nxt]
                    norm = 1 / pow(len(extended), param_lambda)
                    expanded.add(norm * math.log(prob),
                                 prob * successors[nxt], False, extended)
        (best_score, best_prob, best_complete, best_prefix) = max(expanded)
        # Stop when the top hypothesis is finished or at the length cap.
        if best_complete or len(best_prefix) - 1 == maxToken:
            return StringDouble.StringDouble(' '.join(best_prefix), best_score)
        frontier = expanded
def test_boundary_condition():
    """Trace a Shadow-generated source through a bounded plane mirror and a
    bounded parabolic mirror, then compare ray positions against the same
    system traced with Shadow.
    """
    #beam1 = Beam(10000)
    #beam1.set_point(0, 0, 0)
    #beam1.set_flat_divergence(5e-3, 5e-2)
    shadow_beam = run_shadow_source()
    beam1 = Beam(10000)
    # Columns 1-6 are x,y,z,vx,vy,vz; column 10 is the ray flag.
    beam1.initialize_from_arrays(
        shadow_beam.getshonecol(1),
        shadow_beam.getshonecol(2),
        shadow_beam.getshonecol(3),
        shadow_beam.getshonecol(4),
        shadow_beam.getshonecol(5),
        shadow_beam.getshonecol(6),
        shadow_beam.getshonecol(10),
        0
    )

    bound1 = BoundaryRectangle(xmax=0.005, xmin=-0.005, ymax=0.05, ymin=-0.05)
    bound2 = BoundaryRectangle(xmax=0.01, xmin=-0.01, ymax=0.1, ymin=-0.1)

    plane_mirror = Optical_element.initialize_as_plane_mirror(2, 1, 65*np.pi/180, 0)
    parabolic_mirror = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(5, 2, 28*np.pi/180, 90*np.pi/180)

    plane_mirror.rectangular_bound(bound1)
    parabolic_mirror.rectangular_bound(bound2)

    beam1 = plane_mirror.trace_optical_element(beam1)
    beam1 = parabolic_mirror.trace_optical_element(beam1)

    beam1.plot_xz()
    plt.title("Total points plot")
    beam1.plot_good_xz()
    plt.title("Good points plot")

    print(beam1.flag)

    # Rays with flag > 0 survived both apertures.
    indices = np.where(beam1.flag > 0)
    print("The good number of ray are: %f" % (beam1.flag[indices].size))

    plt.show()

    shadow_beam = trace_shadow(shadow_beam)

    assert_almost_equal(beam1.x, shadow_beam.getshonecol(1), 8)
    assert_almost_equal(beam1.y, shadow_beam.getshonecol(2), 8)
    assert_almost_equal(beam1.z, shadow_beam.getshonecol(3), 8)
def beam_search(model, batch_x, vocab, max_trg_len=10, k=3):
    """Decode each sample of batch_x with beam search of width k.

    At every step the source row is tiled k times (once per hypothesis),
    the model is re-run on the partial sequences, and each beam advances
    on the step-i log-probabilities. Returns one hypothesis per sample.
    """
    beams = [Beam(k, vocab, max_trg_len) for _ in range(batch_x.shape[0])]
    for step in range(max_trg_len):
        for beam, src in zip(beams, batch_x):
            tiled_src = src.unsqueeze(0).expand(k, -1)
            logits = model(tiled_src, beam.get_sequence())
            # logits: [k, seqlen, V]; keep only the current step.
            step_log_probs = torch.log(F.softmax(logits[:, step, :], -1))
            beam.advance_(step_log_probs)
    return [b.get_hyp().cpu().numpy() for b in beams]
def rule_create_beam(self, frame, length, width, height, name):
    """Create a Beam object, register it and return it.

    The created beam is stored as self.new_beam and appended to self.beams.

    Parameters
    ----------
    frame: Compas Frame
    length: double
    width: double
    height: double
    name: UUID
    """
    created = Beam(frame, length, width, height, name)
    self.new_beam = created
    self.beams.append(created)
    return created
def test_montel_elliptical():
    """Trace a Gaussian-divergence beam through a bounded Montel ellipsoid
    pair and plot rays split by number of reflections (0, 1 or 2).
    """
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_montel_elliptical")
    beam = Beam(25000)
    beam.set_flat_divergence(25 * 1e-6, 25 * 1e-6)
    beam.set_rectangular_spot(xmax=25 * 1e-6, xmin=-25 * 1e-6,
                              zmax=5 * 1e-6, zmin=-5 * 1e-6)
    # Overrides the flat divergence set above with a Gaussian one.
    beam.set_gaussian_divergence(25 * 1e-4, 25 * 1e-4)

    beam.flag *= 0

    p = 5.
    q = 15.
    #theta = np.pi/2 - 0.15
    theta = 85. * np.pi / 180  # grazing angle of the Montel pair

    xmax = 0.
    xmin = -0.3
    ymax = 0.1
    ymin = -0.1
    zmax = 0.3
    zmin = 0.

    # Identical rectangular bounds for both mirrors of the pair.
    bound1 = BoundaryRectangle(xmax, xmin, ymax, ymin, zmax, zmin)
    bound2 = BoundaryRectangle(xmax, xmin, ymax, ymin, zmax, zmin)

    montel = CompoundOpticalElement.initialize_as_montel_ellipsoid(
        p=p, q=q, theta=theta, bound1=bound1, bound2=bound2)
    # trace_montel returns beams grouped by reflection count:
    # beam03[0] = no reflection, [1] = one, [2] = two (the useful output).
    beam03 = montel.trace_montel(beam)

    print(beam03[2].N / 25000)  # fraction of doubly-reflected rays

    plt.figure()
    plt.plot(beam03[0].x, beam03[0].z, 'ro')
    plt.plot(beam03[1].x, beam03[1].z, 'bo')
    plt.plot(beam03[2].x, beam03[2].z, 'go')
    plt.xlabel('x axis')
    plt.ylabel('z axis')
    plt.axis('equal')

    beam03[2].plot_xz(0)

    print("No reflection = %d\nOne reflection = %d\nTwo reflection = %d" %
          (beam03[0].N, beam03[1].N, beam03[2].N))

    plt.show()
def __init__(
        self,
        # for force_data
        force_filename,
        save_directory,
        filetypes,
        # for beam
        mass_ratio,
        top_tension,
        horizontal,
        tension_include_boyancy,
        length,
        diameter,
        bending_stiffness,
        fluid_density,
        fluid_velocity):
    """Load inline/crossflow force time series from force_filename, build
    the corresponding Beam model, and write modal shapes plus the first 20
    natural frequencies into save_directory.

    force_filename is read with numpy.loadtxt and must contain three
    columns: time, inline force, crossflow force.
    """
    time, inline_force, crossflow_force =\
        numpy.loadtxt(force_filename, unpack=True)
    self._fluid_density = fluid_density
    self._fluid_velocity = fluid_velocity
    self._inline_force_data = ForceData(
        time=time, force=inline_force)
    self._crossflow_force_data = ForceData(
        time=time, force=crossflow_force)
    self._beam = Beam(
        node_number=self._crossflow_force_data.node_number,
        mass_ratio=mass_ratio,
        top_tension=top_tension,
        horizontal=horizontal,
        tension_include_boyancy=tension_include_boyancy,
        length=length,
        diameter=diameter,
        bending_stiffness=bending_stiffness)
    # Normalize the save directory to always end with a slash.
    self._save_directory = save_directory
    if self._save_directory[-1] != '/':
        self._save_directory += '/'
    self._filetypes = filetypes
    self._beam.plot_modal_shapes(
        self._append_filetypes('modal_shapes'), max_order=4)
    # Persist the first 20 natural frequencies in scientific notation.
    with open(self._save_directory + 'natural_frequencies.txt',
              'wb') as file_:
        numpy.savetxt(
            file_, self._beam.natural_frequencies[:20], fmt='%.4e')
def test_kirk_patrick_baez():
    """Trace a Shadow-generated source through a bounded Kirkpatrick-Baez
    pair and assert the good rays focus on axis.
    """
    #beam=Beam.initialize_as_person()
    #beam.set_flat_divergence(1e-12, 1e-12)
    #beam.x = beam.x*1e-3
    #beam.z = beam.z*1e-3
    shadow_beam = shadow_source()
    beam = Beam()
    # Columns 1-6 are x,y,z,vx,vy,vz; column 10 is the ray flag.
    beam.initialize_from_arrays(shadow_beam.getshonecol(1),
                                shadow_beam.getshonecol(2),
                                shadow_beam.getshonecol(3),
                                shadow_beam.getshonecol(4),
                                shadow_beam.getshonecol(5),
                                shadow_beam.getshonecol(6),
                                shadow_beam.getshonecol(10), 0)

    bound1 = BoundaryRectangle(xmax=2.5, xmin=-2.5, ymax=2.5, ymin=-2.5)
    bound2 = BoundaryRectangle(xmax=1., xmin=-1., ymax=1., ymin=-1.)

    kirk_patrick_baez = CompoundOpticalElement.initialize_as_kirkpatrick_baez(
        p=10., q=5., separation=4., theta=89 * np.pi / 180,
        bound1=bound1, bound2=bound2)

    beam = kirk_patrick_baez.trace_compound(beam)

    beam.plot_good_xz(0)

    # Only rays that survived both apertures (flag > 0) must be focused.
    indices = np.where(beam.flag > 0)
    assert_almost_equal(beam.x[indices], 0., 4)
    assert_almost_equal(beam.z[indices], 0., 4)

    beam.retrace(50.)
    beam.plot_good_xz()

    print(kirk_patrick_baez.info())
    print("Number of good rays: %f" % (beam.number_of_good_rays()))

    #beam.histogram()

    if do_plot:
        plt.show()
def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None,
                      st="<s>", ed="</s>", k=3):
    """
    Decoder with beam search (test=True) or teacher forcing (test=False).

    :param enc_states: encoder outputs, shape [batch, src_len, hid_dim]
    :param hidden: encoder final hidden state (indexed at [1] below)
    :param test: if True, beam-decode and return hypotheses; if False,
        teacher-force against `sentence` and return logits
    :param sentence: gold target ids, required when test=False
    :param st: start-of-sentence token (unused here; kept for interface)
    :param ed: end-of-sentence token (unused here; kept for interface)
    :param k: beam width
    :return: list of hypotheses per sample (test=True) or logits tensor of
        shape [batch, max_seq_len-1, vocab_size] (test=False)

    NOTE(review): F.tanh is deprecated in modern PyTorch (use torch.tanh) —
    confirm the pinned torch version still provides it.
    """
    batch_size = enc_states.shape[0]
    hidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(
        1, batch_size, self.hid_dim)
    if test:
        # One beam per sample, each seeded with that sample's hidden state.
        beams = [Beam(k, self.vocab, hidden[:, i, :], self.device)
                 for i in range(batch_size)]

        for i in range(self.max_trg_len):
            for j in range(batch_size):
                # Tile this sample's encoder states k times (one per hypothesis).
                logits, hidden = self.decoderStep(
                    enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),
                    beams[j].get_hidden_state(),
                    beams[j].get_current_word())
                logLikelihood = torch.log(F.softmax(logits, dim=-1))
                beams[j].advance(logLikelihood, hidden)

        allHyp, allScores = [], []
        n_best = 1
        for b in range(batch_size):
            scores, ks = beams[b].sort_best()
            allScores += [scores[:n_best]]
            hyps = [beams[b].get_hyp(k) for k in ks[:n_best]]
            allHyp.append(hyps)

        return allHyp
        # return sentences
    else:
        max_seq_len = sentence.shape[1]
        logits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size,
                             device=self.device)
        for i in range(max_seq_len - 1):
            # logit: [batch, 1, vocab_size]; feed the gold token at step i.
            logit, hidden = self.decoderStep(enc_states, hidden,
                                             sentence[:, i])
            logits[:, i, :] = logit.squeeze()
        return logits
def translate_attall(model, x_data, x_mask, args): x_data = CudaVariable(torch.LongTensor(x_data)) # T B x_mask = CudaVariable(torch.LongTensor(x_mask)) # T B x_data = x_data.transpose(0, 1) # B T x_mask = x_mask.transpose(0, 1) # B T xm = (x_data.data.ne(Const.PAD)).type(torch.cuda.FloatTensor) Bs = args.beam_width Bn, Tx = x_data.size() encY = model.encoder(x_data, x_mask) * xm.unsqueeze(2) #-- Repeat data for beam search n_inst, Ts, d_h = encY.size() x_data = x_data.repeat(1, Bs).view(n_inst * Bs, Ts) encY = encY.repeat(1, Bs, 1).view(n_inst * Bs, Ts, d_h) #-- Prepare beams inst_dec_beams = [Beam(Bs, device=device) for _ in range(n_inst)] #-- Bookkeeping for active or not active_inst_idx_list = list(range(n_inst)) inst_idx_to_position_map = get_inst_idx_to_tensor_position_map( active_inst_idx_list) #-- Decode for len_dec_seq in range(1, args.max_length + 1): active_inst_idx_list = beam_decode_step(model, inst_dec_beams, len_dec_seq, x_data, encY, inst_idx_to_position_map, Bs) if not active_inst_idx_list: break # all instances have finished their path to <EOS> x_data, encY, inst_idx_to_position_map = collate_active_info( x_data, encY, inst_idx_to_position_map, active_inst_idx_list, Bs) n_best = 1 batch_hyp, batch_scores = collect_hypothesis_and_scores( inst_dec_beams, n_best) y_hat = batch_hyp[0][0] #cpu().numpy().flatten().tolist() return y_hat
def test_wolter2_good_rays(self):
    """Trace a scaled 'person' source through a Wolter II optic with
    trace_good_rays and plot only the surviving rays.
    """
    print(
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_wolter2_good_rays"
    )
    p = 100.  ##### if p=100 the trace_good_ray goes crazy
    beam1 = Beam.initialize_as_person(10000)
    #beam1 = Beam(100000)
    #beam1.set_circular_spot(1.)
    # beam1.set_rectangular_spot(5 / 2 * 1e-5, -5 / 2 * 1e-5, 5 / 2 * 1e-5, -5 / 2 * 1e-5)
    beam1.x *= 10.
    beam1.z *= 10.
    beam1.set_point(p, 0., p)
    # Merge in a single on-axis ray so the optical axis itself is traced.
    op_ax = Beam(1)
    op_ax.set_point(p, 0., p)
    beam = op_ax.merge(beam1)
    beam.set_divergences_collimated()
    beam.plot_xz(0)
    p = 20000.
    q = 30.
    z0 = 5.
    focal = 2 * z0 + q  # not used below; kept from the manual-trace variant
    wolter2 = CompoundOpticalElement.initialiaze_as_wolter_2(p1=p, q1=q, z0=z0)
    beam = wolter2.trace_good_rays(beam)
    beam.plot_good_xz()
    beam.retrace(10.)
    beam.plot_good_xz()
    plt.title("test_wolter2_good_rays")
    print(beam.flag)
    if do_plot:
        plt.show()
def test_ideal_lens_with_trace_optical_element():
    """An ideal lens imaging p=1 -> q=5 must refocus a divergent beam on
    axis (mean |x| and |z| approximately zero)."""
    print(
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_ideal_lens_with_trace_optical_element"
    )
    ray_bundle = Beam()
    ray_bundle.set_flat_divergence(0.05, 0.005)

    source_distance = 1.
    image_distance = 5.
    ideal = Optical_element.ideal_lens(source_distance, image_distance)
    ray_bundle = ideal.trace_optical_element(ray_bundle)

    ray_bundle.plot_xz()
    if do_plot:
        plt.show()

    assert_almost_equal(np.abs(ray_bundle.x).mean(), 0.0, 4)
    assert_almost_equal(np.abs(ray_bundle.z).mean(), 0.0, 4)
def test_spherical_mirror(self):
    """Trace a Shadow-generated source off a spherical mirror (p=2, q=1,
    theta=41 deg) and compare positions against Shadow's own trace.
    """
    print(">>>>>>>>>>>>>>> test_spherical_mirror")
    shadow_beam = run_shadow_source()
    beam1 = Beam()
    # Columns 1-6 are x,y,z,vx,vy,vz; column 10 is the ray flag.
    beam1.initialize_from_arrays(
        shadow_beam.getshonecol(1),
        shadow_beam.getshonecol(2),
        shadow_beam.getshonecol(3),
        shadow_beam.getshonecol(4),
        shadow_beam.getshonecol(5),
        shadow_beam.getshonecol(6),
        shadow_beam.getshonecol(10),
    )

    #beam1 = Beam(5000)
    #beam1.set_point(0, 0, 0)
    #beam1.set_flat_divergence(5e-3, 5e-2)

    p = 2.
    q = 1.
    theta = 41 * np.pi / 180

    # NOTE(review): run_shadow_source() is called a second time here,
    # replacing the beam used to initialize beam1 above — confirm the source
    # is deterministic, otherwise the comparison below compares two
    # different source realizations.
    shadow_beam = run_shadow_source()

    spherical_mirror = Optical_element.initialize_as_surface_conic_sphere_from_focal_distances(
        p, q, theta)

    beam1 = spherical_mirror.trace_optical_element(beam1)

    if do_plot:
        beam1.plot_xz()
        beam1.plot_xpzp()
        plt.title("Spherical mirror with p=2, q=1, theta=41")
        plt.show()

    shadow_beam = run_shadow_spherical_mirror(shadow_beam)

    assert_almost_equal(beam1.x, shadow_beam.getshonecol(1), 8)
    assert_almost_equal(beam1.y, shadow_beam.getshonecol(2), 8)
    assert_almost_equal(beam1.z, shadow_beam.getshonecol(3), 8)
def test_ellipsoidal_mirror(self): print(">>>>>>>>>>>>>>> test_ellipsoidal_mirror") #beam1=Beam(5000) #beam1.set_point(0,0,0) #beam1.set_flat_divergence(5e-3,5e-2) shadow_beam = run_shadow_source() beam1 = Beam() beam1.initialize_from_arrays( shadow_beam.getshonecol(1), shadow_beam.getshonecol(2), shadow_beam.getshonecol(3), shadow_beam.getshonecol(4), shadow_beam.getshonecol(5), shadow_beam.getshonecol(6), shadow_beam.getshonecol(10), ) p = 20. q = 10. theta = 50 * np.pi / 180 spherical_mirror = Optical_element.initialize_as_surface_conic_ellipsoid_from_focal_distances( p, q, theta) beam1 = spherical_mirror.trace_optical_element(beam1) if do_plot: beam1.plot_xz() beam1.plot_xpzp() plt.title("Ellipsoidal mirror with p=20, q=10, theta=50") plt.show() shadow_beam = run_shadow_elliptical_mirror(beam1) assert_almost_equal(beam1.vx, shadow_beam.getshonecol(4), 1) assert_almost_equal(beam1.vy, shadow_beam.getshonecol(5), 1) assert_almost_equal(beam1.vz, shadow_beam.getshonecol(6), 1)
def beam_search(model, batch_x, vocab, max_trg_len=18, k=3):
    """Batched beam-search decode for an encode/decode transformer-style
    model: unfinished beams are packed into one decoder call per step.
    Returns the best hypothesis per sample as numpy arrays.
    """
    beams = [Beam(k, vocab, max_trg_len) for _ in range(batch_x.shape[0])]
    enc_outputs = model.encode(batch_x)
    for i in range(max_trg_len):
        # Indices of samples whose beam has not finished yet.
        todo = [j for j in range(len(beams)) if not beams[j].done]
        # Tile each active sample's source and encoder output k times
        # (one row per hypothesis) and concatenate across samples.
        xs = torch.cat([batch_x[j].unsqueeze(0).expand(k, -1) for j in todo],
                       dim=0)
        ys = torch.cat([beams[j].get_sequence() for j in todo], dim=0)
        enc_outs = torch.cat(
            [enc_outputs[j].unsqueeze(0).expand(k, -1, -1) for j in todo],
            dim=0)
        logits, *_ = model.decode(enc_outs, xs, ys[:, :i + 1])
        log_probs = torch.log(F.softmax(logits[:, i, :], -1))
        # Unpack the packed step results back to each beam (k rows each).
        idx = 0
        for j in todo:
            beams[j].advance_v1(log_probs[idx:idx + k])
            idx += k
    allHyp = [b.get_hyp().cpu().numpy() for b in beams]
    return allHyp
def test_ideal_lens_collimated_beam():
    """Focus a collimated circular spot with an ideal lens and assert the
    mean image position is on axis.
    """
    print(
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_ideal_lens_collimated_beam")
    beam = Beam()
    beam.set_circular_spot(20 * 1e-9)
    beam.set_divergences_collimated()
    beam.plot_xz()

    p = 1.
    q = 5.

    # NOTE(review): the four-argument form differs from the two-argument
    # ideal_lens(p, q) used elsewhere in this file — presumably the extra
    # args are the focal distances fx, fz; confirm against the
    # Optical_element.ideal_lens signature.
    lens = Optical_element.ideal_lens(p, q, q, q)
    beam = lens.trace_optical_element(beam)

    beam.plot_xz()

    if do_plot:
        plt.show()

    assert_almost_equal(np.abs(beam.x).mean(), 0.0, 4)
    assert_almost_equal(np.abs(beam.z).mean(), 0.0, 4)
def test_optimezed_wolter1_good_rays(self):
    """Trace a scaled 'person' source through a Wolter I optic (source at
    effective infinity) and assert the good rays focus on axis.
    """
    print(
        ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> test_optimezed_wolter1_good_rays"
    )
    p = 100.
    beam1 = Beam.initialize_as_person()
    # Scale the figure up by a factor 50 in both transverse directions.
    beam1.x *= 50.
    beam1.z *= 50.
    beam1.set_point(p, 0., p)
    # Merge in a single on-axis ray so the optical axis itself is traced.
    op_ax = Beam(1)
    op_ax.set_point(p, 0., p)
    beam = op_ax.merge(beam1)
    beam.set_divergences_collimated()
    beam.plot_xz()
    p = 1e12  # effectively a source at infinity
    R = 100.
    theta = 1e-3 * np.pi / 180  # grazing incidence
    wolter1 = CompoundOpticalElement.initialiaze_as_wolter_1_with_two_parameters(
        p1=p, R=R, theta=theta)
    beam = wolter1.trace_good_rays(beam)
    beam.plot_good_xz()
    # Rays not rejected by the trace (flag >= 0) must sit at the focus.
    indices = np.where(beam.flag >= 0)
    assert_almost_equal(beam.x[indices], 0., 8)
    assert_almost_equal(beam.z[indices], 0., 8)
    beam.retrace(100.)
    beam.plot_good_xz()
    plt.title("optimezed_wolter1_good_rays")
    if do_plot:
        plt.show()
def test_rectangular_shape(self):
    """Check that a rectangular bound on a plane mirror clips the good rays
    exactly at the aperture edges.
    """
    beam = Beam(round(1e5))
    plane_mirror = Optical_element.initialize_as_surface_conic_plane(
        p=10., q=0., theta=0.)
    beam.set_flat_divergence(0.02, 0.1)

    xmax = 0.01
    xmin = -0.0008
    ymax = 1.
    ymin = -0.29

    bound = BoundaryRectangle(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin)
    plane_mirror.set_bound(bound)

    beam = plane_mirror.trace_optical_element(beam)
    beam.plot_xz()
    beam.plot_good_xz()

    # Rays with flag > 0 passed the aperture; their extent in x must match
    # the x bounds, and in z the mirror-frame y bounds (with flipped sign,
    # since the mirror at theta=0 maps y onto -z).
    indices = np.where(beam.flag > 0)

    assert_almost_equal(max(beam.x[indices]) - xmax, 0., 2)
    assert_almost_equal(-min(beam.x[indices]) + xmin, 0., 2)
    assert_almost_equal(max(beam.z[indices]) + ymin, 0., 2)
    assert_almost_equal(-min(beam.z[indices]) - ymax, 0., 2)

    print(max(beam.x[indices]), min(beam.x[indices]), max(beam.y[indices]),
          min(beam.y[indices]))

    if do_plot is True:
        plt.show()

######### BoundaryCircle has to be implemented in the code of intersection_with_optical_element ####################
def beam_decode(model, input, hidden, max_len_decode, beam_size, pad_id,
                sos_id, eos_id, tup_idx=4, batch_size=1, use_constraints=True,
                init_beam=False, roles=None):
    """Beam-search decode a single sequence (batch_size must be 1).

    Optionally applies schema constraints (via `ge`) on every step and,
    when args.emb_type is set, feeds a cycling role embedding
    [tup, v, s, o, prep]. Returns the n-best hypotheses via _from_beam.
    """
    # hidden [1, 1, hidden_size]
    assert beam_size > 0 and batch_size == 1, "Beam decoding batch size must be 1 and Beam size greater than 0."

    # Helper functions for working with beams and batches
    def var(a):
        # NOTE(review): volatile=True is the legacy no-grad flag; deprecated
        # in modern PyTorch (use torch.no_grad()).
        return Variable(a, volatile=True)

    def bottle(m):
        # Flatten (beam, batch, ...) into rows. Currently unused.
        return m.view(batch_size * beam_size, -1)

    def unbottle(m):
        return m.view(beam_size, batch_size, -1)

    def beam_update(e, idx, positions, beam_size):
        # Reorder the hidden state rows of beam `idx` according to the
        # back-pointers chosen by the last advance().
        sizes = e.size()  # [1, beam_size, hidden_size]
        br = sizes[1]
        if len(sizes) == 3:
            sent_states = e.view(sizes[0], beam_size, br // beam_size,
                                 sizes[2])[:, :, idx]
        else:
            sent_states = e.view(sizes[0], beam_size, br // beam_size,
                                 sizes[2], sizes[3])[:, :, idx]
        # [1, beam_size, hidden_size]
        # NOTE(review): indexed_before/indexed_after are computed but never
        # used — they look like leftover debugging probes.
        indexed_before = sent_states.data.index_select(1, positions)
        sent_states.data.copy_(sent_states.data.index_select(1, positions))
        indexed_after = sent_states.data.index_select(1, positions)

    # 1 beam object as we have batch_size 1 during decoding
    beam = [
        Beam(beam_size, n_best=args.n_best, cuda=use_cuda, pad=pad_id,
             eos=eos_id, bos=sos_id, min_length=10)
    ]
    if init_beam:
        # id of last element in seq to init the beam
        for b in beam:
            b.next_ys[0][0] = np.asscalar(input.data.numpy()[0])

    # [1, beam_size, hidden_size]
    hidden = hidden.repeat(1, beam_size, 1)
    # this comes from the known role id of the last seqence object.
    #if args.emb_type:
    #inp2 = role.repeat(1, beam_size)
    verb_list = [[]] * beam_size  #for constraints

    # run the decoder to generate the sequence
    for i in range(max_len_decode):
        # once all beams have EOS, break
        if all((b.done() for b in beam)):
            break

        # No need to explicitly set the input to previous output - beam advance does it. Make sure.
        inp = var(
            torch.stack([b.get_current_state() for b in beam
                         ]).t().contiguous().view(-1))  #[beam_size]
        # Tested that the last output is the input in the next time step.

        # Run one step of the decoder
        # dec_out: beam x rnn_size
        inp = inp.unsqueeze(1)
        if args.emb_type:
            curr_idx = i % 5  # this gives the index of the role type: [tup, v, s, o, prep]
            curr_role = roles[curr_idx]
            # wrap into a tensor and make a var. repeat beam times
            inp2 = var(torch.LongTensor([curr_role])).repeat(beam_size, 1)
            logit, hidden = model(inp, hidden, inp2)
        else:
            logit, hidden = model(inp, hidden)

        # [1, beam_size, hidden_size]
        logit = torch.unsqueeze(logit, 0)
        probs = F.log_softmax(logit, dim=2).data
        out = unbottle(probs)  # [beam_size, 1, vocab_size]
        # NOTE(review): the return value of out.log() is discarded, so this
        # line is a no-op (and probs are already log-probabilities).
        out.log()

        # Advance each beam. We have 1 beam object.
        for j, b in enumerate(beam):
            #print("OUT: {}".format(out[:, j]))
            # [beam_size, vocab_size]
            if use_constraints:
                b.advance(
                    ge.schema_constraint(out[:, j], b.next_ys[-1], verb_list))
            else:
                b.advance(out[:, j])
            beam_update(hidden, j, b.get_current_origin(), beam_size)
            if use_constraints:
                verb_list = ge.update_verb_list(verb_list, b, tup_idx)

    # extract sentences from beam and return
    ret = _from_beam(beam, args.n_best)
    return ret
def translate_batch(self, src_seq, src_pos):
    """Translate one batch with beam search.

    Encodes src once, tiles it beam_size times, then decodes step by step,
    dropping instances whose beam has finished. Returns (batch_hyp,
    batch_scores): the n-best hypotheses and their scores per instance.
    """

    def get_inst_idx_to_tensor_position_map(inst_idx_list):
        # Map still-active instance index -> its row position in the
        # packed (active-only) tensors.
        return {inst_idx: tensor_position
                for tensor_position, inst_idx in enumerate(inst_idx_list)}

    def beam_decoder_step(inst_dec_beams, len_dec, src_seq, enc_output,
                          indx2position_map, n_bm):
        # Run one decoding step for all active beams; returns the indices
        # of instances that are still unfinished afterwards.

        def prepare_beam_dec_seq(inst_dec_beams, len_dec):
            # Pack the partial hypotheses of every unfinished beam into
            # shape (n_active * n_bm, len_dec).
            dec_partial_seq = [b.get_current_state()
                               for b in inst_dec_beams if not b.done]
            dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
            dec_partial_seq = dec_partial_seq.view(-1, len_dec)
            return dec_partial_seq

        def prepare_beam_dec_pos(len_dec, n_active_insts, n_bm):
            # BUG FIX: 'len_dec+1.' had a '.' typo where ',' was intended,
            # which is a syntax error; dtype must be a keyword argument.
            dec_partial_pos = torch.arange(
                1, len_dec + 1, dtype=torch.long, device=self.device)
            dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(
                n_active_insts * n_bm, 1)
            return dec_partial_pos

        def predict_word(dec_seq, dec_pos, src_seq, enc_output,
                         n_active_insts, n_bm):
            # BUG FIX: decoding must call the decoder (the encoder neither
            # accepts the target sequence nor attends over enc_output).
            dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq,
                                                enc_output)
            dec_output = dec_output[:, -1, :]  # last time-step only
            word_prob = F.log_softmax(self.model.tgt_word_prj(dec_output),
                                      dim=1)
            # BUG FIX: reshape the projected log-probabilities, not the raw
            # decoder output (which silently dropped the projection).
            word_prob = word_prob.view(n_active_insts, n_bm, -1)
            return word_prob

        def collect_active_inst_idx_list(inst_dec_beams, word_prob,
                                         indx2position_map):
            active_insts_list = []
            # BUG FIX: iterate .items() — iterating the dict itself yields
            # only keys and cannot unpack into (idx, pos).
            for idx, pos in indx2position_map.items():
                is_done = inst_dec_beams[idx].advance(word_prob[pos])
                if not is_done:
                    active_insts_list += [idx]
            return active_insts_list

        n_active_insts = len(indx2position_map)
        dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec)
        dec_pos = prepare_beam_dec_pos(len_dec, n_active_insts, n_bm)
        word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output,
                                 n_active_insts, n_bm)
        activate_inst_idx_list = collect_active_inst_idx_list(
            inst_dec_beams, word_prob, indx2position_map)
        return activate_inst_idx_list

    def collect_active_part(beam_tensor, active_pos_list, n_pre_active_insts,
                            n_bm):
        # Keep only the rows belonging to still-active instances.
        _, *dim = beam_tensor.size()
        n_current_active_insts = len(active_pos_list)
        new_shape = (n_current_active_insts * n_bm, *dim)
        beam_tensor = beam_tensor.view(n_pre_active_insts, -1)
        beam_tensor = beam_tensor.index_select(0, active_pos_list)
        beam_tensor = beam_tensor.view(*new_shape)
        return beam_tensor

    def collate_active_info(src_seq, enc_output, indx2position_map,
                            activate_inst_list):
        # Shrink the packed tensors to the surviving instances and rebuild
        # the index -> position map.
        n_pre_active_insts = len(indx2position_map)
        active_pos_list = [indx2position_map[idx]
                           for idx in activate_inst_list]
        active_pos_list = torch.LongTensor(active_pos_list).to(self.device)
        src_seq = collect_active_part(src_seq, active_pos_list,
                                      n_pre_active_insts, n_bm)
        enc_output = collect_active_part(enc_output, active_pos_list,
                                         n_pre_active_insts, n_bm)
        indx2position_map = get_inst_idx_to_tensor_position_map(
            activate_inst_list)
        return src_seq, enc_output, indx2position_map

    def collate_hyp_and_scores(inst_dec_beams, n_best):
        # Gather the n-best hypotheses and scores from every beam.
        all_hyp, all_score = [], []
        for idx in range(len(inst_dec_beams)):
            score, tail = inst_dec_beams[idx].sort_scores()
            all_score += [score[:n_best]]
            # BUG FIX: index the beam list; the list object itself has no
            # hypothesis accessor.
            hyps = [inst_dec_beams[idx].get_hyp_stream_from_one_final_scores(i)
                    for i in tail[:n_best]]
            all_hyp += [hyps]
        return all_hyp, all_score

    with torch.no_grad():
        # Move to device and encode the source once.
        src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)
        enc_output, *_ = self.model.encoder(src_seq, src_pos)

        # Repeat data n_bm times for beam search, e.g.
        # a.repeat(1, 2).view(4, 3):
        #   [[1, 2, 3], [1, 2, 3], [45, 6, 7], [45, 6, 7]]
        n_bm = self.opt.beam_size
        n_inst, len_s, d_h = enc_output.size()
        # BUG FIX: Tensor.repeat returns a new tensor (not in-place) — the
        # results must be assigned back or the tiling is silently lost.
        src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
        enc_output = enc_output.repeat(1, n_bm, 1).view(
            n_inst * n_bm, len_s, d_h)

        # Prepare one beam per instance.
        inst_dec_beams = [Beam(self.opt.n_bm, self.device)
                          for i in range(n_inst)]

        # Track instances that have not emitted their full output yet.
        activate_inst_list = list(range(n_inst))
        indx2position_map = get_inst_idx_to_tensor_position_map(
            activate_inst_list)

        # Decode step by step up to the maximum target length, shrinking
        # the working tensors as instances finish.
        for len_dec in range(1, self.opt.max_token_seq_len + 1):
            activate_inst_list = beam_decoder_step(
                inst_dec_beams, len_dec, src_seq, enc_output,
                indx2position_map, n_bm)
            if not activate_inst_list:
                break
            src_seq, enc_output, indx2position_map = collate_active_info(
                src_seq, enc_output, indx2position_map, activate_inst_list)

    # Collect hypotheses and scores.
    batch_hyp, batch_scores = collate_hyp_and_scores(inst_dec_beams,
                                                     self.opt.n_best)
    return batch_hyp, batch_scores