Example #1
    def cluster(self, vectors, assign_clusters=False, ClusterNum=None, DisType='euc', Stype='mean', trace=False):
        # stores the merge order

        #-------------------------------------------------
        self._distMap.clear()   # must be cleared before clustering a new sample set
        #-------------------------------------------------

        l = len(vectors)
        if (0 == l):
            return []

        if ('cos' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = cosine_distance(vectors[i], vectors[j])
        elif ('euc' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = euclidean_distance(vectors[i], vectors[j])
        result = VectorSpaceClusterer.cluster(self, vectors, assign_clusters,
                                              ClusterNum, Stype, trace)

        #/////////////////////// debug: print the pairwise distances /////////////////
        # m = 0
        # for k, v in self._distMap.items():
        #     m += 1
        #     print(v, "\t", end="")
        #     if (m % 7 == 0):
        #         print()
        #/////////////////////////////////////////////////////

        if (2 == len(vectors[0])):  # for 2-D samples, show the clustering result visually
            draw_2D_cluster(vectors, result)

        return result
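
The cosine_distance and euclidean_distance helpers that fill self._distMap are not shown in these snippets; since VectorSpaceClusterer and Dendrogram come from nltk.cluster, they most likely behave like the helpers in nltk.cluster.util. A minimal sketch of that behaviour, assuming plain NumPy vectors:

import numpy

def cosine_distance(u, v):
    # 1 minus the cosine similarity of u and v (assumes non-zero vectors)
    return 1.0 - numpy.dot(u, v) / (
        numpy.sqrt(numpy.dot(u, u)) * numpy.sqrt(numpy.dot(v, v)))

def euclidean_distance(u, v):
    # straight-line distance between u and v
    diff = u - v
    return numpy.sqrt(numpy.dot(diff, diff))
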
Example #2
    def cluster(self,
                vectors,
                assign_clusters=False,
                ClusterNum=None,
                DisType='cos',
                Stype='avg',
                trace=False):
        # stores the merge order

        #-------------------------------------------------
        self._distMap.clear()  # must be cleared before clustering a new sample set
        #-------------------------------------------------

        l = len(vectors)
        if ('cos' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = cosine_distance(
                        vectors[i], vectors[j])
        elif ('euc' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = euclidean_distance(
                        vectors[i], vectors[j])
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors])
        result = VectorSpaceClusterer.cluster(self, vectors, assign_clusters,
                                              ClusterNum, Stype, trace)

        if (2 == len(vectors[0])):  # for 2-D samples, show the clustering result visually
            self.draw_2D(vectors, result)

        return result
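
A hypothetical driver for this method (the enclosing class is not shown in the snippet; GroupAverageClusterer below is a stand-in name for whatever VectorSpaceClusterer subclass defines it), clustering four 2-D points into two groups:

import numpy

# GroupAverageClusterer is a placeholder name for the subclass shown above
points = [numpy.array(p, numpy.float64)
          for p in [(1.0, 1.0), (1.0, 2.0), (8.0, 8.0), (9.0, 8.0)]]
clusterer = GroupAverageClusterer()
labels = clusterer.cluster(points, assign_clusters=True, ClusterNum=2,
                           DisType='euc', Stype='avg')
print(labels)  # expected along the lines of [0, 0, 1, 1]
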
Example #3
File: model.py  Project: IMLHF/DRUnet-SE
    def __init__(self, noisy_wav_batch, clean_wav_batch, is_training, hparams,
                 **kwargs):
        self.noisy_wav_batch = noisy_wav_batch
        self.clean_wav_batch = clean_wav_batch
        self.is_training = is_training
        self.hparams = hparams

        self.enhanced_wav_batch = self._forward()

        self.save_variables = []

        if clean_wav_batch is not None:
            # cosine distance
            height = self.hparams.selection
            half_in = tf.reshape(self.noisy_wav_batch, [-1, height // 16])
            half_clean = tf.reshape(self.clean_wav_batch, [-1, height // 16])
            half_out = tf.reshape(self.enhanced_wav_batch, [-1, height // 16])
            self.loss = cosine_distance(half_clean, half_out, half_in)

            if self.is_training:
                # TRAINING OPTIMIZER
                with tf.variable_scope('not_train_vars', reuse=tf.AUTO_REUSE):
                    self.global_step = tf.compat.v1.get_variable(
                        'global_step',
                        dtype=tf.int32,
                        initializer=tf.constant(1),
                        trainable=False)
                    self.lr = tf.compat.v1.get_variable(
                        'lr',
                        dtype=tf.float32,
                        trainable=False,
                        initializer=tf.constant(self.hparams.lr))
                with tf.variable_scope('optimizer', reuse=tf.AUTO_REUSE):
                    opt = tf.compat.v1.train.AdamOptimizer(
                        learning_rate=self.lr,
                        beta1=0.,
                        beta2=0.999,
                        epsilon=1e-8)
                self.save_variables.append(self.global_step)
                self.save_variables.append(self.lr)
                grads = opt.compute_gradients(
                    self.loss,
                    var_list=[
                        var for var in tf.compat.v1.trainable_variables()
                    ])
                self.optimizer_op = opt.apply_gradients(
                    grads, global_step=self.global_step)

        # Model saver
        self.save_variables.extend(
            [var for var in tf.compat.v1.trainable_variables()])
        self.saver = tf.compat.v1.train.Saver(self.save_variables,
                                              max_to_keep=25,
                                              save_relative_paths=True)
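
A construction sketch for the class in model.py above. Only the constructor signature and the hparams fields it actually reads (selection and lr) come from the snippet; the Model name, the placeholder shapes, and the SimpleNamespace config are assumptions:

from types import SimpleNamespace
import tensorflow as tf

# hypothetical hparams; `selection` and `lr` are the fields read in __init__
hparams = SimpleNamespace(selection=16384, lr=1e-4)

noisy = tf.compat.v1.placeholder(tf.float32, [None, hparams.selection], name='noisy_wav')
clean = tf.compat.v1.placeholder(tf.float32, [None, hparams.selection], name='clean_wav')

# `Model` is a placeholder name for the class defined in model.py
model = Model(noisy, clean, is_training=True, hparams=hparams)
train_op, loss = model.optimizer_op, model.loss
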
Example #4
    def cluster(self,
                vectors,
                assign_clusters=False,
                ClusterNum=None,
                DisType='euc',
                Stype='mean',
                trace=False):
        # stores the merge order

        #-------------------------------------------------
        self._distMap.clear()  # must be cleared before clustering a new sample set
        #-------------------------------------------------

        l = len(vectors)
        if (0 == l):
            return []

        if ('cos' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = cosine_distance(
                        vectors[i], vectors[j])
        elif ('euc' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = euclidean_distance(
                        vectors[i], vectors[j])
        result = VectorSpaceClusterer.cluster(self, vectors, assign_clusters,
                                              ClusterNum, Stype, trace)

        #/////////////////////// debug: print the pairwise distances /////////////////
        # m = 0
        # for k, v in self._distMap.items():
        #     m += 1
        #     print(v, "\t", end="")
        #     if (m % 7 == 0):
        #         print()
        #/////////////////////////////////////////////////////

        if (2 == len(vectors[0])):  # for 2-D samples, show the clustering result visually
            draw_2D_cluster(vectors, result)

        return result
Example #5
    def cluster(self, vectors, assign_clusters=False, DisType='cos', Stype='avg', trace=False):
        # stores the merge order

        #-------------------------------------------------
        self._distMap.clear()   # must be cleared before clustering a new sample set
        #-------------------------------------------------

        l = len(vectors)
        if ('cos' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = cosine_distance(vectors[i], vectors[j])
        elif ('euc' == DisType):
            for i in range(l):
                for j in range(i + 1, l):
                    self._distMap[(i, j)] = euclidean_distance(vectors[i], vectors[j])
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors])
        result = VectorSpaceClusterer.cluster(self, vectors, assign_clusters,
                                              Stype, trace)

        return result
Example #6
def train(d):
    """train"""
    trainSpeechNames = tf.placeholder(tf.string,
                                      shape=[None],
                                      name="train_speech_names")
    batch_size = d.batch_size
    height = d.selection
    # TRAINING OPTIMIZER
    global_step = tf.Variable(0, trainable=False, name='global_step')
    lr = tf.Variable(d.lr, trainable=False)
    opt = tf.train.AdamOptimizer(learning_rate=lr,
                                 beta1=0.,
                                 beta2=0.999,
                                 epsilon=1e-8)
    #
    log_file = open(os.path.join(d.workdir, "logfile.txt"), 'w+')
    log_device_file = open(os.path.join(d.workdir, "devicefile.log"), 'w+')
    # Model save path
    model_path = os.path.join(d.workdir, "models")
    dp.create_folder(model_path)
    # initialize dataset
    with tf.name_scope('dataset'):
        dataset = tf.data.Dataset.from_tensor_slices(trainSpeechNames) \
                               .map(func).batch(16)
        iterator = dataset.make_initializable_iterator()
        Ref, Input = iterator.get_next()
    Output = end_to_end(Input, True, d)
    # cosine distance
    half_in = tf.reshape(Input, [-1, height // 16])
    half_clean = tf.reshape(Ref, [-1, height // 16])
    half_out = tf.reshape(Output, [-1, height // 16])
    loss_fn = cosine_distance(half_clean, half_out, half_in)
    grads = opt.compute_gradients(
        loss_fn, var_list=[var for var in tf.trainable_variables()])
    optimizer_op = opt.apply_gradients(grads, global_step=global_step)
    tf.summary.scalar('loss', loss_fn)
    merged = tf.summary.merge_all()
    # INITIALIZE GPU CONFIG
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #config.log_device_placement = True
    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(os.path.join(d.workdir, "log/train"),
                                         sess.graph)
    sess.run(tf.global_variables_initializer())
    sess.run(iterator.initializer,
             feed_dict={trainSpeechNames: train_speech_names})
    # Model save
    saver = tf.train.Saver(max_to_keep=25)
    #saver.restore(sess, os.path.join(d.workdir, "models/se_model16_15000.ckpt"))
    #sess.run(tf.assign(lr, d.lr))

    loss_train = np.zeros(10000)
    train_batchs = len(
        train_speech_names) // batch_size  # Training set batch number
    val_batchs = math.ceil(len(val_speech_names) /
                           batch_size)  # Verification set batch number
    loss_val = np.zeros(val_batchs)

    while True:
        # TRAINING ITERATION
        try:
            summary, _, loss_vec, gs = sess.run(
                [merged, optimizer_op, loss_fn, global_step])
        except tf.errors.OutOfRangeError:
            np.random.seed()
            np.random.shuffle(train_speech_names)
            sess.run(iterator.initializer,
                     feed_dict={trainSpeechNames: train_speech_names})
            continue
        loss_train[gs % 5000] = loss_vec

        if gs % 50 == 0:
            train_writer.add_summary(summary, gs)
        if gs % 5000 == 0:
            val_dataset = tf.data.Dataset.from_tensor_slices(val_speech_names) \
                                         .map(func).batch(16)
            val_iterator = val_dataset.make_one_shot_iterator()
            val_Ref, val_Input = val_iterator.get_next()
            val_Output = end_to_end(val_Input, False, d)
            val_half_in = tf.reshape(val_Input, [-1, height // 16])
            val_half_clean = tf.reshape(val_Ref, [-1, height // 16])
            val_half_out = tf.reshape(val_Output, [-1, height // 16])
            # cosine distance
            val_loss_fn = cosine_distance(val_half_clean, val_half_out,
                                          val_half_in)
            for i in range(0, val_batchs):
                val_loss = sess.run(val_loss_fn)
                loss_val[i] = val_loss
            val_loss_mean = np.mean(loss_val)
            mean_loss_train = np.mean(loss_train[:5000])
            print(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                "\tbatch: %d\ttrain loss: %.4f\tvalidation loss: %.4f\n" %
                (gs, mean_loss_train, val_loss_mean))
            log_file.write(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                "\tbatch: %d\ttrain loss: %.4f\tvalidation loss: %.4f\n" %
                (gs, mean_loss_train, val_loss_mean))
            log_file.flush()
            saver.save(sess, os.path.join(model_path, "se_model%d.ckpt" % gs))
        if gs == 200000:
            break
    log_file.close()
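
train() also relies on module-level names that are outside this excerpt (train_speech_names, val_speech_names, func, end_to_end, dp). The config object d only needs the four fields the function reads; a hedged call sketch with placeholder values:

from types import SimpleNamespace

# hypothetical config; the field names are the attributes train() reads,
# the values are placeholders
d = SimpleNamespace(
    batch_size=16,        # used for train/validation batch counts
    selection=16384,      # frame length; signals are reshaped to [-1, selection // 16]
    lr=1e-4,              # initial Adam learning rate
    workdir='./workdir',  # logs, summaries and checkpoints are written here
)
train(d)
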
Example #7
    def add_prediction_op(self):
        """Adds the unrolled RNN:
            h_0 = 0
            for t in 1 to T:
                o_t, h_t = cell(x_t, h_{t-1})
                o_drop_t = Dropout(o_t, dropout_rate)
                y_t = o_drop_t U + b_2
        TODO: There are quite a few things you'll need to do in this function:
            - Define the variables U, b_2.
            - Define the vector h as a constant and initialize it with
              zeros. See tf.zeros and tf.shape for information on how
              to initialize this variable to be of the right shape.
              https://www.tensorflow.org/api_docs/python/constant_op/constant_value_tensors#zeros
              https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#shape
            - In a for loop, begin to unroll the RNN sequence. Collect
              the predictions in a list.
            - When unrolling the loop, from the second iteration
              onwards, you will HAVE to call
              tf.get_variable_scope().reuse_variables() so that you do
              not create new variables in the RNN cell.
              See https://www.tensorflow.org/versions/master/how_tos/variable_scope/
            - Concatenate and reshape the predictions into a predictions
              tensor.
        Hint: You will find the function tf.pack (similar to np.asarray)
              useful to assemble a list of tensors into a larger tensor.
              https://www.tensorflow.org/api_docs/python/array_ops/slicing_and_joining#pack
        Hint: You will find the function tf.transpose and the perms
              argument useful to shuffle the indices of the tensor.
              https://www.tensorflow.org/api_docs/python/array_ops/slicing_and_joining#transpose
        Remember:
            * Use the Xavier initialization for matrices.
            * Note that tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.
            The keep probability should be set to the value of self.dropout_placeholder
        Returns:
            pred: tf.Tensor of shape (batch_size, max_length, n_classes)
        """
        x1, x2 = self.add_embedding()
        dropout_rate = self.dropout_placeholder

        # choose cell type
        if self.config.cell == "rnn":
            cell = RNNCell(self.config.embed_size, self.config.hidden_size)
        elif self.config.cell == "gru":
            cell = GRUCell(self.config.embed_size, self.config.hidden_size)
        elif self.config.cell == "lstm":
            cell = LSTMCell(self.config.embed_size, self.config.hidden_size)
        else:
            raise ValueError("Unsuppported cell type: " + self.config.cell)

        # Initialize hidden states to zero vectors of shape (num_examples, hidden_size)
        h1 = tf.zeros((tf.shape(x1)[0], self.config.hidden_size), tf.float32)
        h2 = tf.zeros((tf.shape(x2)[0], self.config.hidden_size), tf.float32)

        with tf.variable_scope("RNN1") as scope:
            for time_step in range(self.helper.max_length):
                if time_step != 0:
                    scope.reuse_variables()
                o1_t, h1 = cell(x1[:, time_step, :], h1, scope)
        with tf.variable_scope("RNN2") as scope:
            for time_step in range(self.helper.max_length):
                if time_step != 0:
                    scope.reuse_variables()
                o2_t, h2 = cell(x2[:, time_step, :], h2, scope)

        # h_drop1 = tf.nn.dropout(h1, dropout_rate)
        # h_drop2 = tf.nn.dropout(h2, dropout_rate)

        # use L2-regularization: sum of squares of all parameters

        if self.config.distance_measure == "l2":
            # perform logistic regression on l2-distance between h1 and h2
            distance = norm(h1 - h2 + 0.000001)
            logistic_a = tf.Variable(0.0, dtype=tf.float32, name="logistic_a")
            logistic_b = tf.Variable(0.0, dtype=tf.float32, name="logistic_b")
            self.regularization_term = tf.square(logistic_a) + tf.square(
                logistic_b)
            preds = tf.sigmoid(logistic_a * distance + logistic_b)

        elif self.config.distance_measure == "cosine":
            # perform logistic regression on cosine distance between h1 and h2
            distance = cosine_distance(h1 + 0.000001, h2 + 0.000001)
            logistic_a = tf.Variable(1.0, dtype=tf.float32, name="logistic_a")
            logistic_b = tf.Variable(0.0, dtype=tf.float32, name="logistic_b")
            self.regularization_term = tf.square(logistic_a) + tf.square(
                logistic_b)
            preds = tf.sigmoid(logistic_a * distance + logistic_b)

        elif self.config.distance_measure == "custom_coef":
            # perform logistic regression on the vector |h1-h2|,
            # equivalent to logistic regression on the (scalar) weighted Manhattan distance between h1 and h2,
            # ie. weighted sum of |h1-h2|
            logistic_a = tf.get_variable(
                "coef", [self.config.hidden_size], tf.float32,
                tf.contrib.layers.xavier_initializer())
            logistic_b = tf.Variable(0.0, dtype=tf.float32, name="logistic_b")
            self.regularization_term = tf.reduce_sum(
                tf.square(logistic_a)) + tf.square(logistic_b)
            preds = tf.sigmoid(
                tf.reduce_sum(logistic_a * tf.abs(h1 - h2), axis=1) +
                logistic_b)

        elif self.config.distance_measure == "concat":
            # use softmax for prediction
            U = tf.get_variable(
                "U", (4 * self.config.hidden_size, self.config.n_classes),
                tf.float32, tf.contrib.layers.xavier_initializer())
            b = tf.get_variable("b", (self.config.n_classes, ), tf.float32,
                                tf.constant_initializer(0))
            v = tf.nn.relu(tf.concat([h1, h2, tf.square(h1 - h2), h1 * h2], 1))
            self.regularization_term = tf.reduce_sum(
                tf.square(U)) + tf.reduce_sum(tf.square(b))
            preds = tf.matmul(v, U) + b

        elif self.config.distance_measure == "concat_steroids":
            # use softmax for prediction
            W1 = tf.get_variable(
                "W1", (4 * self.config.hidden_size, self.config.hidden_size),
                tf.float32, tf.contrib.layers.xavier_initializer())
            b1 = tf.get_variable("b1", (self.config.hidden_size, ), tf.float32,
                                 tf.constant_initializer(0))

            W2 = tf.get_variable(
                "W2", (self.config.hidden_size, self.config.n_classes),
                tf.float32, tf.contrib.layers.xavier_initializer())
            b2 = tf.get_variable("b2", (self.config.n_classes, ), tf.float32,
                                 tf.constant_initializer(0))

            v1 = tf.nn.relu(tf.concat(
                [h1, h2, tf.square(h1 - h2), h1 * h2], 1))
            v2 = tf.nn.relu(tf.matmul(v1, W1) + b1)

            self.regularization_term = tf.reduce_sum(
                tf.square(W1)) + tf.reduce_sum(tf.square(b1)) + tf.reduce_sum(
                    tf.square(W2)) + tf.reduce_sum(tf.square(b2))
            preds = tf.matmul(v2, W2) + b2

        else:
            raise ValueError("Unsuppported distance type: " +
                             self.config.distance_measure)

        return preds
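
The two-argument cosine_distance(h1, h2) used in the "cosine" branch above is not defined in this snippet; a minimal TensorFlow 1.x sketch of the conventional definition (an assumption, not necessarily the project's own helper), returning one distance per row:

import tensorflow as tf

def cosine_distance(a, b):
    # row-wise 1 - cosine similarity for two (batch, hidden) tensors;
    # the caller above already adds a small epsilon to avoid zero vectors
    num = tf.reduce_sum(a * b, axis=1)
    den = tf.norm(a, axis=1) * tf.norm(b, axis=1)
    return 1.0 - num / den
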