def process(class_id):
    """Encode the training data of a single CIFAR class.

    Builds a fresh data manager and a Model configured from the module-level
    ``args`` (learning rate, L1 penalty, threshold), draws ``args.number``
    training samples belonging to ``class_id``, and feeds the images to
    ``model.encode_class_data``.

    Fixes vs. original: the unused ``train_labels`` local is discarded with
    ``_`` and a commented-out debug print was removed.
    """
    print("class", class_id)
    d = CifarDataManager()
    model = Model(learning_rate=args.learning_rate,
                  L1_loss_penalty=args.l1_loss_penalty,
                  threshold=args.threshold)
    # Labels are not needed for encoding, so they are deliberately discarded.
    train_images, _ = d.train.generateSpecializedData(class_id, args.number)
    model.encode_class_data(class_id, train_images)
# Fragment: tail of a loop that parses rows of a cluster file into
# classid_cluster; the enclosing `for line in ...:` header is outside this
# view. Each row presumably holds [cluster_id, class_label] as
# float-formatted strings — TODO confirm against the file writer.
        if len(line) < 2:
            continue  # skip malformed/short rows
        label = int(float(line[1]))
        cluster = int(float(line[0]))
        if label in classid_cluster:
            classid_cluster[label].append(cluster)
        else:
            classid_cluster[label] = [cluster]

# Replay the training stream and give each in-range sample its recorded
# cluster id, consuming classid_cluster[label] in order via class_counter.
class_counter = {}   # per-class index of the next unused cluster assignment
class_clusters = {}  # per-class list of assigned cluster ids
for class_id in range(args.start_class, args.end_class + 1):
    class_counter[class_id] = 0
    class_clusters[class_id] = []
data_loader = CifarDataManager()
batch_size = 100
clusters = []  # cluster id per kept sample, in stream order
# 500 * 100 samples total -> 500 batches of 100.
for _ in range(500 * 100 // batch_size):
    train_images, train_labels = data_loader.train.next_batch_without_onehot(
        batch_size)
    for i in range(batch_size):
        label = train_labels[i]
        # Only classes inside the [start_class, end_class] window are kept.
        if label < args.start_class or label > args.end_class:
            continue
        clusters.append(classid_cluster[label][class_counter[label]])
        class_clusters[label].append(
            classid_cluster[label][class_counter[label]])
        class_counter[label] += 1
# Smoke test for the trimmed VGG model: measure accuracy on one test batch
# before trimming, apply the trimmed weights, then measure accuracy again.
from vggTrimmedModel import TrimmedModel
from CIFAR_DataLoader import CifarDataManager
import numpy as np

d = CifarDataManager()
# 10 random class ids drawn from [0, 100) as the pruning targets.
model = TrimmedModel(target_class_id= np.random.randint(100,size=10),
                     multiPruning=True)
'''
Todo List:
1. Modify the accuracy in trimmed network (Done)
'''
for _ in range(50):
    test_images, test_labels = d.test.next_batch(200)
    model.test_accuracy_pretrim(test_images, test_labels)
    model.assign_weight()
    model.test_accuracy(test_images, test_labels)
    # NOTE(review): unconditional break — only the first of the 50 planned
    # batches ever runs; the range(50) is effectively dead.
    break
def test_cluster(centroid, test_images, test_labels):
    """Evaluate a trimmed model built for ``centroid`` on the given batch.

    Parameters:
        centroid: cluster/centroid spec passed straight to ``TrimmedModel``.
        test_images: batch of evaluation images.
        test_labels: matching labels for ``test_images``.

    Returns:
        Whatever ``TrimmedModel.test_cluster`` reports for the batch.

    Fix vs. original: removed a dead ``d = CifarDataManager()`` local that
    was constructed (potentially expensive — it loads the dataset) and
    never used.
    """
    model = TrimmedModel(centroid)
    model.assign_weight()
    return model.test_cluster(test_images, test_labels)
# Fragment: tail of an argparse `add_argument(...)` call whose opening (and
# the `parser` construction) is outside this view.
                    default=True, type=bool_string)
parser.add_argument("--save_folder", help="folder to save results",
                    default=None, type=str)
parser.add_argument("--mode", help="'train' or 'test' data",
                    default="train", type=str)
args = parser.parse_args()
# NOTE(review): `assert` is stripped under `python -O`; argument validation
# should raise explicitly (or use argparse `choices=`) instead.
assert args.dataset in ['imagenet', 'cifar'
                        ], "--dataset can only be 'imagenet' or 'cifar'."
d = CifarDataManager(load_cluster=False)
model = Model(learning_rate=args.learning_rate,
              L1_loss_penalty=args.l1_loss_penalty,
              threshold=args.threshold,
              entropy_penalty=args.entropy_penalty)
# Encode per-class data for every class id in [begin_class, end_class].
for i in range(args.begin_class, args.end_class + 1):
    print("Current class: {}".format(i))
    # NOTE(review): typo "Gnerating" in the runtime string below — left
    # unchanged here because it is program output, not a comment.
    print("Gnerating data...")
    if args.mode == "train":
        train_images, train_labels = d.train.generateSpecializedData(
            class_id=i, count=args.max_samples)
    else:
        train_images, train_labels = d.test.generateSpecializedData(
            class_id=i, count=args.max_samples)
    print("Encoding data...")
# Fragment: tail of an argparse `add_argument(...)` call whose opening is
# outside this view.
                    default="cluster", type=str)
parser.add_argument("--save_dir", help="directory to save trained classifiers",
                    default="classifiers", type=str)
parser.add_argument("--cluster_dir", help="directory saving clusters",
                    default="../data", type=str)
args = parser.parse_args()
mode = args.mode
assert mode in ["class", "cluster"]
data_loader = CifarDataManager(cluster_dir=args.cluster_dir)
# cluster_ids = [3]
# class_ids = [0]
# Hand-written correspondence table: each entry pairs a list of cluster ids
# with a list of class ids — presumably (clusters, classes); TODO confirm
# against how target_ids is consumed below this view.
corre = [([3], [0]), ([4], [3]), ([1], [1]), ([2], [2]), ([6], [4]),
         ([0], [4])]
# corre = [([2],[2,4,3]),([6],[0]),([1],[0]),([0],[3,4,1]),([4],[3,4]),([3],[1]),([5],[1])]
# corre = [([3],[0]),([4],[3])]
# test_images, test_labels = data_loader.test.next_batch_balance_without_onehot(200,class_ids)
for item in corre:
    cluster_ids = item[0]
    class_ids = item[1]
    print(cluster_ids, class_ids)
    if mode == "class":
        target_ids = class_ids
    else:  # "cluster" branch continues past this view — body cut off