import random as rn
from operator import itemgetter

import numpy as np
from sklearn.metrics.cluster import normalized_mutual_info_score as mi

# afc (the feature extractor) and get_batch are provided by the surrounding project.


def bkm(fn, prefix, k, b, n_classes, maxiter=10):

    with open(fn) as fx:
        content = fx.readlines()
    glen1 = len(content)
    seq1 = list(range(glen1))
    rn.shuffle(seq1)
    ls = seq1[0:k]
    cidx = itemgetter(*ls)(content)
    centers, c_lab = get_batch(cidx, prefix, n_classes)
    print("Get centers' features!")
    cfeat = afc.getFeat(centers, True, o_layer="fcb")
    print(cfeat.shape)
    rn.shuffle(seq1)

    predicts = np.zeros((glen1))
    groundtruths = np.zeros((glen1))
    iters = 0
    step = (glen1 // b) // 10
    print("Step is " + str(step))

    while iters < maxiter:
        count1 = np.zeros((n_classes))
        for i in range(0, glen1, b):
            ls = seq1[i:i + b]
            X = itemgetter(*ls)(content)
            data, lab1 = get_batch(X, prefix, n_classes)
            groundtruths[i:i + b] = lab1.argmax(axis=1)
            # find nearest center and record the count
            fea1 = afc.getFeat(data, False, o_layer="fcb")
            p_lab = np.zeros((b))
            for j in range(b):
                # Euclidean distance from sample j to every center
                diff1 = np.sqrt(((fea1[j, :] - cfeat) ** 2).sum(axis=1))
                p_lab[j] = diff1.argmin()
                count1[int(p_lab[j])] += 1
            predicts[i:i + b] = p_lab

            # update the assigned center by SGD with a per-cluster learning rate
            for j in range(b):
                idx1 = int(p_lab[j])
                lr = 1.0 / count1[idx1]
                cfeat[idx1, :] = (1 - lr) * cfeat[idx1, :] + lr * (fea1[j, :])
        print(".")
        iters += 1
        nmi = mi(predicts, groundtruths)
        print("Iter " + str(iters) + " where NMI is " + str(nmi))

    return nmi
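
A minimal usage sketch (the file name, prefix, and sizes below are hypothetical; afc and get_batch must be initialized as in the surrounding project):

# hypothetical call: "val_list.txt" lists one image filename per line
# found under prefix; k initial centers, batches of b, n_classes clusters
nmi = bkm("val_list.txt", "/Data/places/val/", k=100, b=256,
          n_classes=100, maxiter=10)
print("Final NMI: " + str(nmi))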
Example #2
import random as rn
from operator import itemgetter

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score as mi


def bkm(fn, prefix, k, b, n_classes, maxiter=10):
    # Variant: extract all features once, then run full-batch k-means.
    with open(fn) as fx:
        content = fx.readlines()
    glen1 = len(content)
    seq1 = list(range(glen1))
    rn.shuffle(seq1)
    ls = seq1[0:k]
    cidx = itemgetter(*ls)(content)
    centers, c_lab = get_batch(cidx, prefix, n_classes)
    print("Get centers' features!")
    cfeat = afc.getFeat(centers, True, o_layer="out")
    print(cfeat.shape)
    rn.shuffle(seq1)

    predicts = np.zeros((glen1))
    groundtruths = np.zeros((glen1))

    allfea = np.zeros((glen1, 1000))
    print("Extracting features....")
    for i in range(0, glen1, b):
        ls = seq1[i:i + b]
        X = itemgetter(*ls)(content)
        data, lab1 = get_batch(X, prefix, n_classes)
        groundtruths[i:i + b] = lab1.argmax(axis=1)
        # extract features for this batch
        fea1 = afc.getFeat(data, False, o_layer="out")
        allfea[i:i + b] = fea1
        if (i % round(glen1 / 100)) < b:
            print("Progress in extracting features: " +
                  str(i / float(glen1) * 100) + "%")
    np.save("kmeans_places_val.fea", allfea)  # written as kmeans_places_val.fea.npy
    #~ allfea = np.load("kmeans_places_val.fea.npy")
    print("Start to cluster images...")
    # seed k-means with the CNN-feature centers; n_init=1 since init is fixed
    k_means = KMeans(init=cfeat, n_clusters=n_classes, n_init=1)
    k_means.fit(allfea)
    predicts = k_means.labels_
    nmi = mi(predicts, groundtruths)

    return nmi
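
The key design choice above is seeding scikit-learn's KMeans with precomputed centers. A self-contained sketch of that pattern on synthetic data (all names are illustrative stand-ins):

import numpy as np
from sklearn.cluster import KMeans

feats = np.random.rand(500, 1000)   # stand-in for allfea
init_centers = feats[:10].copy()    # stand-in for cfeat, k = 10
km = KMeans(init=init_centers, n_clusters=10, n_init=1)  # n_init=1: init is fixed
km.fit(feats)
labels = km.labels_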
Example #3
import random as rn
import threading
import time

import numpy as np
import tensorflow as tf

# The model module al, the data pipeline (load_data, val_batch, data_queue),
# and hyper-parameters (batch_size, n_classes, keep_prob, dropout,
# display_step, vlen1, learning_rate) are defined elsewhere in the script.


def trainer():
    global load_thread
    global seq1
    global step
    global data_queue
    global lr
    tii = time.time()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5

    X = tf.placeholder("float", [None, 224, 224, 3])
    sX = tf.placeholder("float", [None, 7, 7])
    Y = tf.placeholder("float", [None, n_classes])
    X2 = tf.placeholder(tf.float32, [None, 224, 224, 3])

    # Build the model
    oldpred = al.getalex(X, keep_prob)
    sess2 = tf.Session(config=config)
    sess2.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    saver.restore(sess2, "/Data/jess/tf_pretrained.ckpt")
    xx = tf.trainable_variables()
    sess2.close()

    import alexccnnmodel as al2
    biasesInd = 44
    weightsInd = 34
    B1 = tf.placeholder("float", al2.biases['bc9'].get_shape().as_list())
    updateB = al2.biases['bc9'].assign(B1)

    W1 = tf.placeholder("float", al2.weights['wc9'].get_shape().as_list())
    updateW = al2.weights['wc9'].assign(W1)

    sess2 = tf.Session(config=config)
    sess2.run(tf.initialize_all_variables())
    fc8 = al2.getccnn(X, keep_prob, xx, xx)
    saver.restore(sess2,
                  "/Data/jess/tf_alex_pre_ccnn_model_iter285001.ckpt-285001")
    xx2 = tf.trainable_variables()
    sess2.close()

    with tf.Session(config=config) as sess:

        w10 = tf.Variable(tf.random_normal([1000, n_classes], stddev=0.01))
        b10 = tf.Variable(tf.random_normal([n_classes], stddev=0.01))
        pred = al2.alex_ccnn10(X, xx2, xx2, keep_prob, w10, b10, o_layer="out")
        fcb = al2.alex_ccnn10(X, xx2, xx2, keep_prob, w10, b10, o_layer="fcb")

        cost = -tf.reduce_sum(Y * tf.log(pred))
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=learning_rate).minimize(cost)
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        #==========Get Init Center====================@
        #~ centers, lab1=val_batch(n_classes, ss=0)
        #~ rn.shuffle(seq1)
        saver = tf.train.Saver()
        gtlab = np.zeros((vlen1))
        predicts = np.zeros((vlen1))

        lr = 1e-3
        step = 1
        len2 = len(xx2)
        myvarlist = [xx2[weightsInd], xx2[biasesInd]]
        myvar = tf.trainable_variables()
        for kk in range(len2):
            print(str(kk) + "th variable has shape " +
                  str(xx2[kk].get_shape().as_list()) +
                  " with " + str(type(xx2[kk])))

        varxx = tf.gradients(cost, myvarlist)

        print(varxx)
        sess.run(tf.initialize_all_variables())
        lastgrad = []
        allRuntime = 0
        saver.restore(sess, '/Data/tf_place_val_step_454791.ckpt')
        step = 454791
        vst = int(np.floor((step * batch_size) / vlen1))
        centers, lab1 = val_batch(n_classes, ss=0)

        # resume training and run up to the total epoch budget (1000 epochs)
        for v in range(vst, 1000):
            ts = time.time()

            rn.shuffle(seq1)
            load_thread = threading.Thread(target=load_data)
            load_thread.daemon = True
            load_thread.start()
            cfeat = sess.run(fcb, feed_dict={X: centers, keep_prob: 1.})

            featLen = cfeat.shape[1]
            count1 = np.zeros((n_classes))

            acc_batch_x = np.zeros((batch_size, 224, 224, 3))
            acc_batch_y = np.zeros((batch_size, n_classes))
            bct = 0
            for k in range(0, vlen1, batch_size):
                #============Extract feature==================
                batch_sx, batch_ys = data_queue.get()
                fea1 = sess.run(fcb, feed_dict={X: batch_sx, keep_prob: 1.})
                gtlab[k:k + batch_size] = batch_ys.argmax(axis=1)
                b = batch_size
                b2 = 2
                p_lab = np.zeros((b))
                p_lab2 = np.zeros((b, n_classes))
                p_err = np.zeros((b))
                for j in range(b):
                    # Euclidean distance from sample j to every center
                    diff1 = np.sqrt(((fea1[j, :] - cfeat) ** 2).sum(axis=1))
                    #~ pdb.set_trace()
                    p_lab[j] = diff1.argmin()
                    p_lab2[j, int(p_lab[j])] = 1
                    count1[int(p_lab[j])] += 1
                    p_err[j] = diff1.min()

                predicts[k:k + b] = p_lab
                # use the most reliable samples (smallest center distance)
                # to update the network parameters
                acc_ind = sorted(range(len(p_err)), key=lambda t: p_err[t])
                myidn = acc_ind[0:b2]
                acc_batch_x[bct:bct + b2, :, :, :] = batch_sx[myidn, :, :, :]
                acc_batch_y[bct:bct + b2, :] = p_lab2[myidn, :]

                #~ acc_batch_x = batch_sx
                #~ acc_batch_y = p_lab2
                bct = bct + b2
                perrave = p_err.sum(axis=0) / batch_size

                # once enough reliable samples are collected (>= batch_size),
                # update the network
                if (bct >= batch_size):
                    bct = 0
                    #===========Update network====================
                    sess.run(optimizer,
                             feed_dict={
                                 X: acc_batch_x,
                                 Y: acc_batch_y,
                                 keep_prob: dropout,
                                 learning_rate: lr
                             })

                    #===========Update center======================
                    #~ hh=input("Input any word to continue...")
                    var_grad = sess.run(varxx,
                                        feed_dict={
                                            X: acc_batch_x,
                                            Y: acc_batch_y,
                                            keep_prob: 1.
                                        })
                    allvar = sess.run(myvar,
                                      feed_dict={
                                          X: acc_batch_x,
                                          Y: acc_batch_y,
                                          keep_prob: 1.
                                      })
                    # temporarily shift wc9/bc9 by the previous gradient,
                    # recompute the center features, then restore the weights
                    if lastgrad:
                        bct = 0
                        sess.run(updateB,
                                 feed_dict={
                                     B1: allvar[biasesInd] + lr * lastgrad[1]
                                 })
                        sess.run(updateW,
                                 feed_dict={
                                     W1: allvar[weightsInd] + lr * lastgrad[0]
                                 })
                        fea1 = sess.run(fcb,
                                        feed_dict={
                                            X: acc_batch_x,
                                            keep_prob: 1.
                                        })
                        sess.run(updateB, feed_dict={B1: allvar[biasesInd]})
                        sess.run(updateW, feed_dict={W1: allvar[weightsInd]})
                        #print  str(k) + "th ==> " + str(lastgrad[1][1:5])
                    lastgrad = var_grad

                # move each selected sample's center toward it with a
                # per-cluster learning rate
                for j in myidn:
                    idx1 = int(p_lab[j])
                    lr2 = 1.0 / count1[idx1]
                    cfeat[idx1, :] = (1 -
                                      lr2) * cfeat[idx1, :] + lr2 * fea1[j, :]

                #for j in range(n_classes):
                #cfeat[j, :] = cfeat[j, :] + lr*var_grad

                if step % display_step == 1:
                    # Compute the loss
                    loss = sess.run(cost,
                                    feed_dict={
                                        X: batch_sx,
                                        Y: batch_ys,
                                        keep_prob: 1.
                                    })
                    acc = sess.run(accuracy,
                                   feed_dict={
                                       X: batch_sx,
                                       Y: batch_ys,
                                       keep_prob: 1.
                                   })

                    print("Iter=" + str(step * batch_size) + "/epcho=" +
                          str(np.floor((step * batch_size) / vlen1)) +
                          ", Loss= " + "{:.6f}".format(loss) +
                          ", Training Accuracy=" + "{:.5f}".format(acc) +
                          ", lr=" + str(lr) + ", Sec.=" +
                          "{:.5f}".format(time.time() - tii))
                    #print "This stage has " + str(acc_batch_y.shape[0]) + " samples to be used to update the network."
                    tii = time.time()
                step += 1
            #input()
            nmi = mi(predicts, gtlab)
            td = time.time() - ts
            allRuntime = allRuntime + td
            print(
                str(v) + "th epoch's NMI to mini-batch kmeans is " + str(nmi) +
                ". Time: " + str(td) + " sec.")
            save_path = saver.save(
                sess, "/Data/tf_place_val_step_" + str(step) + ".ckpt")
            print("Model saved in file: %s" % save_path)
            # forcibly stop the loader thread (Python 2 private API)
            threading.Thread._Thread__stop(load_thread)

    threading.Thread._Thread__stop(load_thread)
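
The loop above keeps only the b2 samples per batch whose features lie closest to their nearest center before updating the network. A self-contained NumPy sketch of that filter (names and shapes are illustrative):

import numpy as np

def select_reliable(fea1, cfeat, b2):
    # pairwise Euclidean distances, shape (batch, n_centers)
    d = np.sqrt(((fea1[:, None, :] - cfeat[None, :, :]) ** 2).sum(axis=2))
    p_lab = d.argmin(axis=1)        # nearest center per sample
    p_err = d.min(axis=1)           # distance to that center
    myidn = np.argsort(p_err)[:b2]  # indices of the b2 most reliable samples
    return myidn, p_lab

idx, labs = select_reliable(np.random.rand(8, 16), np.random.rand(4, 16), b2=2)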
            if step % display_step == 1:
                # Compute the loss
                loss = sess.run(cost, feed_dict={X: acc_batch_x[0:bct, :, :, :],
                                                 Y: acc_batch_y[0:bct], keep_prob: 1.})
                acc = sess.run(accuracy, feed_dict={X: acc_batch_x[0:bct, :, :, :],
                                                    Y: acc_batch_y[0:bct], keep_prob: 1.})
                print("Iter=" + str(step * batch_size) + "/epoch=" +
                      str(np.floor((step * batch_size) / vlen1)) +
                      ", Loss= " + "{:.6f}".format(loss) +
                      ", Training Accuracy=" + "{:.5f}".format(acc) +
                      ", lr=" + str(lr) + ", time=" + str(time.time() - ttk))
                ttk = time.time()
                print(str(copp) + "/" + str(copp_cc) +
                      " samples are estimated as correct!")

                losshist[lossct] = loss
                lossct = lossct + 1
                copp = 0
                copp_cc = 0
                pred_str1 = ""
                gt_str1 = ""

            step += 1
        if v > 300:
            if (v % 300) == 0 or (v % 3000) == 0:
                lr = lr / 10

        if (v % 100) == 0:
            nmi = mi(predicts[0:k + b], gtlab[0:k + b])
            td = time.time() - ts
            allRuntime = allRuntime + td
            print("\033[1;31m" + str(v) + "th epoch's NMI to mini-batch kmeans is " +
                  str(nmi) + ". Time: " + str(td) + " sec.\033[0m\n")

    save_path = saver.save(sess, "Data/tf_mnist_" + str(step) + ".ckpt")
threading.Thread._Thread__stop(load_thread)

Example #5
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering

X = cluster1  # cluster1 is the data array prepared earlier

points = X[:, [0, 1]]

# create dendrogram
dendrogram = sch.dendrogram(sch.linkage(points, method='ward'))
# create clusters
hc = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='ward')
# save clusters for chart
y_hc = hc.fit_predict(points)

# with n_clusters=2 only labels 0 and 1 occur; raise n_clusters for the
# blue/cyan series to plot any points
plt.scatter(points[y_hc == 0, 0], points[y_hc == 0, 1], s=100, c='red')
plt.scatter(points[y_hc == 1, 0], points[y_hc == 1, 1], s=100, c='black')
plt.scatter(points[y_hc == 2, 0], points[y_hc == 2, 1], s=100, c='blue')
#plt.scatter(points[y_hc == 3, 0], points[y_hc == 3, 1], s=100, c='cyan')

## Evaluation of the different clustering techniques.
## NOTE: the label variable changes per technique; y_km is the example
## variable for KMeans. The corresponding variables are:
##   DBSCAN                  -> labels
##   Hierarchical clustering -> y_hc
from sklearn.metrics import silhouette_score
f'Silhouette Score(n=2): {silhouette_score(X, y_km)}'

## X[:,2] is the base data column with the true class labels
from sklearn.metrics.cluster import normalized_mutual_info_score as mi
mi(X[:, 2], y_km)
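
The same metrics apply to the hierarchical result by substituting y_hc, as the comments describe (a minimal sketch using the variables defined above):

f'Silhouette Score(n=2): {silhouette_score(points, y_hc)}'
mi(X[:, 2], y_hc)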