'save_file': 'pen_10.pkl.gz', 'pretraining_epochs': 50, 'pretrain_lr': 0.01, 'mu': 0.9, 'finetune_lr': 0.01, 'training_epochs': 50, 'dataset': dataset, 'batch_size': 20, 'nClass': K, 'hidden_dim': [50, 16, 10], 'diminishing': False } results = [] for i in range(trials): res_metrics = test_SdC(**config) results.append(res_metrics) results_SAEKM = np.zeros((trials, 3)) results_DCN = np.zeros((trials, 3)) N = config['training_epochs'] / 5 for i in range(trials): results_SAEKM[i] = results[i][0] results_DCN[i] = results[i][N] SAEKM_mean = np.mean(results_SAEKM, axis=0) SAEKM_std = np.std(results_SAEKM, axis=0) DCN_mean = np.mean(results_DCN, axis=0) DCN_std = np.std(results_DCN, axis=0) print >> sys.stderr, (
# -- Collapsed script fragment from a Monte-Carlo toy-data experiment. It begins
#    mid-assignment (`ari_sc[n] = ...`; the enclosing loop over `n` is outside this
#    view), so it is left byte-identical. Visible behavior: records the spectral-
#    clustering ARI, pickles the toy dataset (the same train split is reused as
#    train/valid/test: `[train_set, train_set, train_set]`) to 'toy.pkl.gz' with
#    protocol 2, runs the non-joint SAE+KM baseline (test_SdC_NJ, lbd=0) and the
#    proposed joint method (test_SdC, lbd=0.2), then concatenates the per-method
#    mean NMI/ARI scores and pickles them to 'MC_results.pkl.gz'.
# NOTE(review): np.mean(..., keepdims=True) is used so each scalar mean stays a
#    1-element array that np.concatenate can join — intentional, not a bug.
ari_sc[n] = metrics.adjusted_rand_score(train_y, ypred) train_set = train_x, train_y dataset = [train_set, train_set, train_set] f = gzip.open('toy.pkl.gz','wb') cPickle.dump(dataset, f, protocol=2) f.close() ## Perform non-joint SAE+KM nmi_nj[n], ari_nj[n] = test_SdC_NJ(lbd = 0, finetune_lr= .01, mu = 0.9, pretraining_epochs=50, pretrain_lr=.01, training_epochs=100, dataset='toy.pkl.gz', batch_size=20, nClass = nClass, hidden_dim = [100, 50, 10, 2]) ## Perform proposed nmi_dc[n], ari_dc[n] = test_SdC(lbd = 0.2, finetune_lr= .01, mu = 0.9, pretraining_epochs=50, pretrain_lr=0.01, training_epochs=100, dataset='toy.pkl.gz', batch_size=20, nClass = nClass, hidden_dim = [100, 50, 10, 2]) result = np.concatenate((np.mean(nmi_km, keepdims=True), np.mean(ari_km, keepdims=True), np.mean(nmi_sc, keepdims=True), np.mean(ari_sc, keepdims=True), np.mean(nmi_nj, keepdims=True), np.mean(ari_nj, keepdims=True), np.mean(nmi_dc, keepdims=True), np.mean(ari_dc, keepdims=True)) ) f = gzip.open('MC_results.pkl.gz','wb') cPickle.dump(result, f, protocol=2) f.close()
from multi_layer_km import test_SdC

# Run the stacked deep-clustering experiment (test_SdC) on the MNIST toy
# subset and keep the returned metrics; results are also persisted by
# test_SdC itself under the given save_file.
_run_kwargs = {
    'dataset': '../data/MNIST/toy.pkl.gz',
    'save_file': 'toy_4.pkl.gz',
}
res_metrics = test_SdC(**_run_kwargs)
# -- Collapsed script fragment. It begins mid-assignment (`ari_km = ...`; the
#    K-means prediction that produced `ypred` is outside this view), so it is
#    left byte-identical. Visible behavior: prints K-means NMI/ARI, pickles the
#    toy dataset (train split reused as train/valid/test) to 'toy.pkl.gz', runs
#    the joint deep-clustering method (test_SdC, lbd=.1), then prints spectral
#    and deep-clustering scores; the non-joint test_SdC_NJ call is commented out.
# NOTE(review): mangling artifact — because the whole script is collapsed onto
#    one physical line, the inline `#` before the spectral-clustering print
#    comments out EVERYTHING after it (all subsequent prints and the test_SdC_NJ
#    block). In the original multi-line file only the `nmi_sc` print was
#    commented; restore line breaks before running this fragment.
ari_km = metrics.adjusted_rand_score(train_y, ypred) print >> sys.stderr, ('NMI for Kmeans: %.2f' % (nmi_km)) print >> sys.stderr, ('ARI for Kmeans: %.2f' % (ari_km)) train_set = train_x, train_y dataset = [train_set, train_set, train_set] f = gzip.open('toy.pkl.gz', 'wb') cPickle.dump(dataset, f, protocol=2) f.close() nmi_dc, ari_dc = test_SdC(lbd=.1, finetune_lr=.05, mu=0.9, pretraining_epochs=50, pretrain_lr=0.01, training_epochs=100, dataset='toy.pkl.gz', batch_size=20, nClass=nClass, hidden_dim=[100, 50, 10, 2]) # print >> sys.stderr, ('NMI for spectral clustering: %.2f' % (nmi_sc)) print >> sys.stderr, ('ARI for spectral clustering: %.2f' % (ari_sc)) print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi_dc)) print >> sys.stderr, ('ARI for deep clustering: %.2f' % (ari_dc)) #nmi_nj, ari_nj = test_SdC_NJ(lbd = 0, finetune_lr= .01, mu = 0.9, pretraining_epochs=50, # pretrain_lr=.01, training_epochs=100, # dataset='toy.pkl.gz', batch_size=20, nClass = nClass, hidden_dim = [100, 50, 10, 2]) #
# -- Collapsed script fragment (Pendigits experiment). It begins mid-`config`
#    dict literal and ends mid-`.format(` call (the remaining format arguments
#    and closing parens lie past the end of this chunk), so it is left
#    byte-identical. Visible behavior mirrors the other Pendigits fragment:
#    `trials` runs of test_SdC, SAE+KM row 0 vs DCN row N, mean/std over trials.
# NOTE(review): the report line formats `KM_mean[0]`, but no `KM_mean` is
#    assigned here — the computed names are SAEKM_mean/SAEKM_std/DCN_mean/
#    DCN_std. Unless `KM_mean` is defined in code not shown, this raises
#    NameError at report time; it likely should read `SAEKM_mean[0]`.
# NOTE(review): `config['training_epochs']/5` is Python 2 integer division used
#    as a list index — use `//` when porting to Python 3.
'output_dir': 'Pendigits', 'save_file': 'pen_10.pkl.gz', 'pretraining_epochs': 50, 'pretrain_lr': 0.01, 'mu': 0.9, 'finetune_lr': 0.01, 'training_epochs': 50, 'dataset': dataset, 'batch_size': 20, 'nClass': K, 'hidden_dim': [50, 16, 10], 'diminishing': False} results = [] for i in range(trials): res_metrics = test_SdC(**config) results.append(res_metrics) results_SAEKM = np.zeros((trials, 3)) results_DCN = np.zeros((trials, 3)) N = config['training_epochs']/5 for i in range(trials): results_SAEKM[i] = results[i][0] results_DCN[i] = results[i][N] SAEKM_mean = np.mean(results_SAEKM, axis = 0) SAEKM_std = np.std(results_SAEKM, axis = 0) DCN_mean = np.mean(results_DCN, axis = 0) DCN_std = np.std(results_DCN, axis = 0) print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],