    'attention_type': args.attention_type,
    'attention_alignment': args.attention_alignment,
    'encoder_type': args.encoder_type,
    'weights_init': w_init,
    'biases_init': b_init,
    'raw_output': args.raw_output,
    'name': 'parrot'}

# Build and initialize the model, then set up the symbolic training graph.
parrot = Parrot(**parrot_args)
parrot.initialize()

features, features_mask, labels, labels_mask, speaker, start_flag, raw_sequence = \
    parrot.symbolic_input_variables()

cost, extra_updates, attention_vars, cost_raw = parrot.compute_cost(
    features, features_mask, labels, labels_mask, speaker, start_flag,
    args.batch_size, raw_audio=raw_sequence)

cost_name = args.which_cost
cost.name = cost_name

# When raw waveform output is enabled, name the auxiliary sampleRNN cost so
# it can be monitored separately.
if parrot.raw_output:
    cost_raw.name = "sampleRNN_cost"

cg = ComputationGraph(cost)
model = Model(cost)
parameters = cg.parameters

# Clip gradient steps, then apply Adam.
step_rule = CompositeRule(
    [StepClipping(10. * args.grad_clip), Adam(args.learning_rate)])
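# The fragment above stops after defining the step rule. Below is a minimal
# sketch of how the cost, parameters and clipped-Adam step rule would
# typically be handed to a Blocks GradientDescent algorithm, with the extra
# Theano updates returned by compute_cost attached; it is an illustration,
# not necessarily the script's exact code.
from blocks.algorithms import GradientDescent

algorithm = GradientDescent(
    cost=cost, parameters=parameters, step_rule=step_rule)
# Attach the additional updates returned by compute_cost so they run on
# every training batch as well.
algorithm.add_updates(extra_updates)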
    'timing_coeff': args.timing_coeff,
    'encoder_type': saved_args.encoder_type,
    'raw_output': saved_args.raw_output,
    'name': 'parrot'}

# Rebuild the model with the saved hyperparameters and restore its weights.
parrot = Parrot(**parrot_args)

features, features_mask, labels, labels_mask, speaker, start_flag, raw_audio = \
    parrot.symbolic_input_variables()

cost, extra_updates, attention_vars, cost_raw = parrot.compute_cost(
    features, features_mask, labels, labels_mask, speaker, start_flag,
    args.num_samples, raw_audio=raw_audio)

model = Model(cost)
model.set_parameter_values(parameters)
print("Successfully loaded the parameters.")

# One-step sampling conditions on data from data_tr; otherwise sample freely
# from the model.
if args.sample_one_step:
    gen_x, gen_k, gen_w, gen_pi, gen_phi, gen_pi_att = \
        parrot.sample_using_input(data_tr, args.num_samples)
else:
    gen_x, gen_k, gen_w, gen_pi, gen_phi, gen_pi_att = parrot.sample_model(
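# Sketch of how the `parameters` dict consumed by set_parameter_values above
# might be obtained, assuming the training run saved a Blocks checkpoint;
# the path below is an illustrative placeholder, not the script's actual one.
from blocks.serialization import load_parameters

with open('saved_models/parrot_best.tar', 'rb') as checkpoint:
    parameters = load_parameters(checkpoint)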
    'attention_type': args.attention_type,
    'attention_alignment': args.attention_alignment,
    'encoder_type': args.encoder_type,
    'weights_init': w_init,
    'biases_init': b_init,
    'name': 'parrot'}

# Build and initialize the model, then set up the symbolic training graph.
parrot = Parrot(**parrot_args)
parrot.initialize()

features, features_mask, labels, labels_mask, speaker, start_flag = \
    parrot.symbolic_input_variables()

cost, extra_updates, attention_vars = parrot.compute_cost(
    features, features_mask, labels, labels_mask, speaker, start_flag,
    args.batch_size)

cost_name = args.which_cost
cost.name = cost_name

cg = ComputationGraph(cost)
model = Model(cost)
parameters = cg.parameters

# Clip gradient steps, then apply Adam.
step_rule = CompositeRule(
    [StepClipping(10. * args.grad_clip), Adam(args.learning_rate)])

algorithm = GradientDescent(cost=cost,
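# The fragment is cut off inside the GradientDescent call. Once that call is
# completed (parameters=parameters, step_rule=step_rule) and extra_updates
# are attached, a Blocks MainLoop typically ties training together as
# sketched below; `train_stream`, the epoch limit and the checkpoint path
# are illustrative placeholders, not the script's actual values.
from blocks.main_loop import MainLoop
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.extensions.saveload import Checkpoint

main_loop = MainLoop(
    algorithm=algorithm,
    data_stream=train_stream,  # a Fuel stream yielding training batches
    model=model,
    extensions=[
        TrainingDataMonitoring([cost], prefix='train', after_epoch=True),
        Checkpoint('parrot_checkpoint.tar', after_epoch=True),
        Printing(after_epoch=True),
        FinishAfter(after_n_epochs=100)])
main_loop.run()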