Example #1
def single_nn_job():
    # Train one network on the current generation's song collection,
    # then generate the next generation's collection and store it.
    network_id = 'something_nn3'
    generation = sqlutil.check_generation()
    source_folder = 'generation_' + str(generation)
    file_hd5 = network_id + '_' + str(generation) + '_.hd5'

    # fetch the song collection produced by natural selection
    training_set = get_song_collection(source_folder)

    # train
    network = nnutil.train_network(training_set)

    # store training-set information
    sqlutil.save_training(network_id, training_set)

    # store network information
    sqlutil.save_nn(network_id, file_hd5)

    # advance to the next generation and create its folder name
    sqlutil.update_generation(generation + 1)
    generation = sqlutil.check_generation()
    destination_folder = 'generation_' + str(generation)

    # generate the new collection and store each song
    # (the original overwrote new_collection with training_set here,
    # which discarded the generated media; that dead assignment is removed)
    new_collection = nnutil.generate_media(destination_folder)
    for song_id in new_collection:
        store_song(song_id, generation, network_id)
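A minimal driver for the loop hinted at in the function's comment; nothing below is taken from the original project, and NUM_GENERATIONS is an assumed placeholder.

# Hypothetical driver: run one job per generation so each successive
# network trains on the collection the previous generation produced.
NUM_GENERATIONS = 10  # assumption, not from the original project

if __name__ == '__main__':
    for _ in range(NUM_GENERATIONS):
        single_nn_job()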
Example #2
File: hatn.py  Project: zqwq/HATN
    def create_train_op(self):

        with tf.name_scope('train'):

            self.opt = tf.train.MomentumOptimizer(self.lr, 0.9)
            self.train_joint_op = nn_utils.train_network(
                self.opt, self.pivot_loss + self.lab_aux_loss, self.nil_vars,
                self.max_grad_norm, "joint_train")
            self.train_aux_op = nn_utils.train_network(self.opt,
                                                       self.unlab_aux_loss,
                                                       self.nil_vars,
                                                       self.max_grad_norm,
                                                       "aux_train")
            self.train_dom_op = nn_utils.train_network(self.opt, self.dom_loss,
                                                       self.nil_vars,
                                                       self.max_grad_norm,
                                                       "dom_train")
Example #3
    def create_train_op(self):

        with tf.name_scope('train'):

            self.opt = tf.train.MomentumOptimizer(self.lr, 0.9)
            self.train_op = nn_utils.train_network(self.opt, self.pivot_loss,
                                                   self.nil_vars,
                                                   self.max_grad_norm,
                                                   "train_op")
Example #4
    def build_graph(self):

        with tf.variable_scope(self.name):
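            # During training, the input batch stacks source-domain rows
            # first and target-domain rows second; the tf.cond branches
            # below slice the two halves apart (at test time the full
            # batch is passed through unchanged).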

            src_reviews = tf.cond(
                self.train_flag, lambda: tf.slice(
                    self.reviews, [0, 0], [self.batch_size, self.max_len]),
                lambda: self.reviews)
            tar_reviews = tf.cond(
                self.train_flag,
                lambda: tf.slice(self.reviews, [self.batch_size, 0],
                                 [self.batch_size, self.max_len]),
                lambda: self.reviews)
            src_batch_length = tf.cond(
                self.train_flag,
                lambda: tf.slice(self.batch_length, [0], [self.batch_size]),
                lambda: self.batch_length)
            tar_batch_length = tf.cond(
                self.train_flag, lambda: tf.slice(
                    self.batch_length, [self.batch_size], [self.batch_size]),
                lambda: self.batch_length)

            asp_labels = tf.cond(
                self.train_flag, lambda: tf.slice(
                    self.asp_labels, [0, 0], [self.batch_size, self.max_len]),
                lambda: self.asp_labels)
            ts_labels = tf.cond(
                self.train_flag, lambda: tf.slice(
                    self.ts_labels, [0, 0], [self.batch_size, self.max_len]),
                lambda: self.ts_labels)
            asp_labels = tf.reshape(asp_labels, [-1])
            ts_labels = tf.reshape(ts_labels, [-1])
            opn_labels = tf.reshape(self.opn_labels, [-1])

            src_weights_mask = tf.cast(tf.sign(src_reviews), tf.float32)
            weights_mask = tf.cast(tf.sign(self.reviews), tf.float32)
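            # tf.sign turns token ids into a 0/1 mask (assuming id 0 is
            # the padding token), used below to mask per-token losses.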

            asp_h_s, ts_h_s, asp_pred_s, opn_pred_s, ts_pred_s, ma_s, mo_s, alpha_a_s, alpha_o_s = self.SuperNet(
                src_reviews,
                src_batch_length,
                self.ma,
                self.mo,
                self.dropout_rate,
                reuse=False)
            asp_h_t, ts_h_t, asp_pred_t, opn_pred_t, ts_pred_t, ma_t, mo_t, alpha_a_t, alpha_o_t = self.SuperNet(
                tar_reviews,
                tar_batch_length,
                self.ma,
                self.mo,
                self.dropout_rate,
                reuse=True)
            opn_pred = tf.concat([opn_pred_s, opn_pred_t], 0)
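            # Opinion predictions are concatenated across both domains:
            # the opinion loss below is computed on the full (src + tar)
            # batch, while the asp/ts losses use source predictions only.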

            asp_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.one_hot(asp_labels, self.dim_ote_y),
                logits=asp_pred_s,
                name='ote_tagger')  #(b*m, dim_ote_y)
            opn_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.one_hot(opn_labels, self.dim_lm_y),
                logits=opn_pred,
                name='opn_tagger')  #(b*m, dim_lm_y)
            ts_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.one_hot(ts_labels, self.dim_ts_y),
                logits=ts_pred_s,
                name='ts_tagger')  #(b*m, dim_ts_y)

            asp_attention = tf.concat([alpha_a_s, alpha_a_t], 0)
            self.ote_transfer_loss = self.add_adv_loss(
                asp_h_s,
                asp_h_t,
                adapt_rate=self.args.adapt_rate,
                attention=asp_attention,
                mask=weights_mask,
                batch_length=self.batch_length,
                selective=self.args.selective,
                weight=self.args.adv_weight,
                scope='asp_dann')

            asp_loss = tf.reshape(
                asp_loss, [-1, self.max_len]) * src_weights_mask  #(b, m)
            ts_loss = tf.reshape(
                ts_loss, [-1, self.max_len]) * src_weights_mask  #(b, m)
            opn_loss = tf.reshape(opn_loss,
                                  [-1, self.max_len]) * weights_mask  #(b, m)

            asp_loss = tf.reduce_sum(asp_loss, axis=-1) / tf.cast(
                src_batch_length, tf.float32)  #(b)
            ts_loss = tf.reduce_sum(ts_loss, axis=-1) / tf.cast(
                src_batch_length, tf.float32)  #(b)
            opn_loss = tf.reduce_sum(opn_loss, axis=-1) / tf.cast(
                self.batch_length, tf.float32)  #(b)

            self.asp_loss = tf.reduce_mean(asp_loss)
            self.ts_loss = tf.reduce_mean(ts_loss)
            self.opn_loss = tf.reduce_mean(opn_loss)

            self.loss = self.asp_loss + self.opn_loss + self.ts_loss

            asp_pred = tf.cond(self.domain_flag, lambda: asp_pred_s,
                               lambda: asp_pred_t)
            ts_pred = tf.cond(self.domain_flag, lambda: ts_pred_s,
                              lambda: ts_pred_t)
            self.asp_attentions = tf.cond(self.domain_flag, lambda: alpha_a_s,
                                          lambda: alpha_a_t)
            self.opn_attentions = tf.cond(self.domain_flag, lambda: alpha_o_s,
                                          lambda: alpha_o_t)

            self.asp_predictions = tf.reshape(
                tf.argmax(asp_pred, -1, name="asp_predictions"),
                [-1, self.max_len])
            self.ts_predictions = tf.reshape(
                tf.argmax(ts_pred, -1, name="ts_predictions"),
                [-1, self.max_len])

            # determine the optimizer
            if self.args.optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.lr)
            elif self.args.optimizer == "momentum":
                self.opt = tf.train.MomentumOptimizer(self.lr, 0.9)
            elif self.args.optimizer == "adadelta":
                self.opt = tf.train.AdadeltaOptimizer(self.lr)
            elif self.args.optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.lr)
            else:
                raise ValueError("Unsupported optimizer type: %s" %
                                 self.args.optimizer)

            var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

            self.train_op = nn_utils.train_network(self.opt, self.loss,
                                                   var_list, self.nil_vars,
                                                   self.clip_grad, "train_op")
            self.ote_transfer_op = nn_utils.train_network(
                self.opt, self.ote_transfer_loss, var_list, self.nil_vars,
                self.clip_grad, "ote_dann_op")
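This project passes an explicit variable list as well, with self.clip_grad playing the role of max_grad_norm. A sketch of how the six-argument variant could extend the five-argument one above, reusing zero_nil_slot from that sketch (again an assumption, not the repo's actual code):

def train_network(opt, loss, var_list, nil_vars, max_grad_norm, name):
    # Same pattern as the five-argument sketch, but gradients are computed
    # only for the explicitly supplied variable list.
    grads_and_vars = [(g, v)
                      for g, v in opt.compute_gradients(loss, var_list=var_list)
                      if g is not None]
    grads, variables = zip(*grads_and_vars)
    grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
    clipped = [(zero_nil_slot(g), v) if v.name in nil_vars else (g, v)
               for g, v in zip(grads, variables)]
    return opt.apply_gradients(clipped, name=name)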