Example #1
    def init_test(self, testresult):
        '''Initialize the test case.
        '''
        super(iTestCase, self).init_test(testresult)
        # Make sure the attachments directory exists before the test runs.
        if not os.path.exists(self.attachments_path):
            os.makedirs(self.attachments_path)
        self._device_manager = DeviceManager()
Example #2
	def __init__(self):

		self.bytes_free = 0
		self.fail = 0

		config = ConfigParser()
		config.read(os.path.join(os.path.dirname(__file__), './config/settings_dev.cfg'))

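		# Copy every option from every config section onto the instance as attributes.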
		for s in config.sections():
			self.__dict__.update({key: value for key, value in config.items(s)})

		self.device = DeviceManager(hostname=self.device_hostname, username=self.ssh_username)
		self.music_path = os.path.join(self.music_base_path, self.music_folder)

		if not os.path.exists(self.music_path):
			logger.error("Music path %s does not exist." % self.music_path)
			exit(1)
Example #3
    def __init__(self,
                 mode,
                 iterator,
                 params,
                 rev_vocab_table=None,
                 scope=None,
                 log_trainables=True):

        print_out("# creating %s graph ..." % mode)
        self.dtype = tf.float32

        self.mode = mode
        self.embedding_size = params.embedding_size
        self.num_layers = params.num_layers
        self.iterator = iterator

        # self.scheduled_sampling_prob = scheduled_sampling_prob
        # self.num_samples_for_loss = num_samples_for_loss

        self.device_manager = DeviceManager()
        self.round_robin = RoundRobin(self.device_manager)
        self.num_gpus = self.device_manager.num_available_gpus()
        print_out("# number of gpus %d" % self.num_gpus)

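        # Build the full graph (embeddings, encoder, decoder, loss, and
        # training ops) under a single variable scope.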
        with tf.variable_scope(scope or 'ta_seq2seq_graph', dtype=self.dtype):
            self.init_embeddings(params.vocab_file,
                                 params.embedding_type,
                                 self.embedding_size,
                                 scope=scope)

            with tf.variable_scope(scope or "build_network"):
                with tf.variable_scope("output_projection") as output_scope:
                    if params.boost_topic_gen_prob:
                        self.output_layer = taware_layer.JointDenseLayer(
                            params.vocab_size,
                            params.topic_vocab_size,
                            scope=output_scope,
                            name="output_projection")
                    else:
                        self.output_layer = layers_core.Dense(
                            params.vocab_size,
                            # activation=tf.nn.tanh,
                            use_bias=False,
                            name="output_projection")

            encoder_keep_prob, decoder_keep_prob = self.get_keep_probs(
                mode, params)
            self.batch_size = tf.size(self.iterator.source_sequence_lengths)

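            # Encode the source sequence; the encoder outputs and final state
            # are fed to the decoder.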
            encoder_outputs, encoder_state = self.__build_encoder(
                params, encoder_keep_prob)

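            # Decode: logits are used for the loss, sample_id for inference.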
            logits, sample_id, final_decoder_state = self.__build_decoder(
                params, encoder_outputs, encoder_state, decoder_keep_prob)

            if mode != tf.contrib.learn.ModeKeys.INFER:
                with tf.device(self.device_manager.tail_gpu()):
                    loss = self.__compute_loss(logits)
            else:
                loss = None

            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.train_loss = loss
                self.word_count = tf.reduce_sum(
                    self.iterator.source_sequence_lengths) + tf.reduce_sum(
                        self.iterator.target_sequence_length)
            elif mode == tf.contrib.learn.ModeKeys.EVAL:
                self.eval_loss = loss
            elif mode == tf.contrib.learn.ModeKeys.INFER:
                self.sample_words = rev_vocab_table.lookup(
                    tf.to_int64(sample_id))

            if mode != tf.contrib.learn.ModeKeys.INFER:
                # Count the number of predicted words to compute perplexity (ppl).
                self.predict_count = tf.reduce_sum(
                    self.iterator.target_sequence_length)

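            # global_step counts the number of applied parameter updates.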
            self.global_step = tf.Variable(0, trainable=False)
            trainables = tf.trainable_variables()

            # Gradients and SGD update operation for training the model.
            # Arrange for the embedding vars to appear at the beginning.
            if mode == tf.contrib.learn.ModeKeys.TRAIN:
                self.learning_rate = tf.constant(params.learning_rate)
                # decay
                self.learning_rate = self._get_learning_rate_decay(
                    params, self.global_step, self.learning_rate)

                # Optimizer
                if params.optimizer.lower() == "sgd":
                    opt = tf.train.GradientDescentOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                elif params.optimizer.lower() == "adam":
                    opt = tf.train.AdamOptimizer(self.learning_rate)
                    tf.summary.scalar("lr", self.learning_rate)
                else:
                    raise ValueError('Unknown optimizer: ' + params.optimizer)

                # Gradients
                gradients = tf.gradients(self.train_loss,
                                         trainables,
                                         colocate_gradients_with_ops=True)

                clipped_grads, grad_norm = tf.clip_by_global_norm(
                    gradients, params.max_gradient_norm)
                grad_norm_summary = [tf.summary.scalar("grad_norm", grad_norm)]
                grad_norm_summary.append(
                    tf.summary.scalar("clipped_gradient",
                                      tf.global_norm(clipped_grads)))

                self.grad_norm = grad_norm

                self.update = opt.apply_gradients(zip(clipped_grads,
                                                      trainables),
                                                  global_step=self.global_step)

                # Summary
                self.train_summary = tf.summary.merge([
                    tf.summary.scalar("lr", self.learning_rate),
                    tf.summary.scalar("train_loss", self.train_loss),
                ] + grad_norm_summary)

            if mode == tf.contrib.learn.ModeKeys.INFER:
                self.infer_logits, self.sample_id = logits, sample_id
                self.infer_summary = tf.no_op()

            # Saver
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

            # Print trainable variables
            if log_trainables:
                print_out("# Trainable variables")
                for trainable in trainables:
                    print_out("  %s, %s, %s" %
                              (trainable.name, str(
                                  trainable.get_shape()), trainable.op.device))