def __init__(self, patch_size, num_parts, settings=None):
    """Configure a binary-tree parts model and build its internal PartsNet.

    Parameters
    ----------
    patch_size : tuple
        Spatial size of the part patches (presumably (h, w) — TODO confirm).
    num_parts : int
        Requested number of parts; stored on `self._num_parts`.
    settings : dict, optional
        Overrides merged on top of the defaults below. Recognized keys
        include 'patch_frame', 'threshold', 'samples_per_image',
        'min_probability', 'bedges', 'split_criterion', 'split_threshold',
        'tree_depth' and 'train_em_seed'.
    """
    # Fix: the original used a mutable default (`settings={}`), which is
    # shared across all calls; use None as the sentinel instead.
    if settings is None:
        settings = {}
    self.patch_size = patch_size
    self._num_parts = num_parts
    # Learned state, populated during training.
    self.parts = None
    self.unspread_parts = None
    self.unspread_parts_padded = None
    self.visparts = None

    # Defaults first, then caller overrides win via update() below.
    self.settings = {}
    self.settings['patch_frame'] = 1
    self.settings['threshold'] = 4
    self.settings['threaded'] = False
    self.settings['samples_per_image'] = 500
    self.settings['min_probability'] = 0.005
    # TODO: support a 'strides' setting.
    self.extra = {}
    self.settings['bedges'] = dict(k=5,
                                   radius=1,
                                   minimum_contrast=0.05,
                                   contrast_insensitive=True,
                                   max_edges=2)
    self.settings.update(settings)

    split_criterion = self.settings.get('split_criterion', 'IG')
    split_threshold = self.settings.get('split_threshold', 0.1)
    # Contrast-insensitive edges collapse opposite polarities: 4 channels
    # instead of 8.
    edge_count = [8, 4][self.settings['bedges']['contrast_insensitive']]

    import pnet  # local import, as in the original (avoids import cycles?)
    self._net = pnet.PartsNet([
        pnet.EdgeLayer(**self.settings['bedges']),
        pnet.BinaryTreePartsLayer(
            self.settings.get('tree_depth', 10),
            self.patch_size,
            settings=dict(
                outer_frame=self.settings['patch_frame'],
                em_seed=self.settings.get('train_em_seed', 0),
                threshold=self.threshold_in_counts(
                    self.settings['threshold'], edge_count),
                samples_per_image=self.settings['samples_per_image'],
                max_samples=1000000,
                train_limit=10000,
                min_prob=self.settings['min_probability'],
                #keypoint_suppress_radius=1,
                min_samples_per_part=50,
                split_criterion=split_criterion,
                split_entropy=split_threshold,
                min_information_gain=split_threshold,
            )),
        pnet.PoolingLayer(shape=(9, 9), strides=(4, 4)),
    ])
def trainGaussian():
    """Train an oriented-Gaussian parts network on mean-subtracted CIFAR-10.

    Builds a three-layer pipeline (Gaussian parts -> pooling -> linear SVM),
    trains it on the training split, and returns the trained net. Relies on
    module-level `pnet`, `np` and `load_mean_cifar10`.
    """
    seed = 1
    # Feature extractor: 32 parts x 4 orientations over 5x5 patches.
    feature_layer = pnet.OrientedGaussianPartsLayer(
        32, 4, (5, 5),
        settings=dict(
            seed=seed,
            n_init=2,
            samples_per_image=40,
            max_samples=100000,
            channel_mode='together'
            #covariance_type = ''
        ))
    net = pnet.PartsNet([
        feature_layer,
        pnet.PoolingLayer(shape=(8, 8), strides=(2, 2)),
        pnet.SVMClassificationLayer(C=1.0),
    ])
    # Test split is loaded but unused here; kept for the tuple unpacking.
    train_ims, train_labels, _test_ims, _test_labels = load_mean_cifar10()
    # Move channels to the last axis and scale pixel values to [0, 1].
    net.train(np.rollaxis(train_ims, 1, 4) / 255.0, train_labels)
    return net
# --- Build and train an unsupervised parts network on MNIST ------------------
# NOTE(review): relies on module-level `pnet` and `ag` imports defined
# outside this view.
sup_ims = []
sup_labels = []
net = None
layers = [
    #pnet.IntensityThresholdLayer(),
    pnet.EdgeLayer(k=5, radius=1, spread='orthogonal', minimum_contrast=0.05),
    # 100 parts of shape 6x6 learned from the edge maps.
    pnet.PartsLayer(100, (6, 6), settings=dict(outer_frame=0,
                                               threshold=40,
                                               samples_per_image=40,
                                               max_samples=1000000,
                                               min_prob=0.005,
                                               )),
    # Presumably extends each 6x6 part to 10 components over a 12x12 window
    # — TODO confirm against ExtensionPartsLayer.
    pnet.ExtensionPartsLayer(num_parts = 100, num_components = 10, part_shape = (12,12), lowerLayerShape = (6,6)),
    pnet.PoolingLayer(shape=(4,4), strides=(4, 4)),
    pnet.MixtureClassificationLayer(n_components=1, min_prob=0.0001,block_size=200),
    #pnet.SVMClassificationLayer(C=None)
]
net = pnet.PartsNet(layers)
digits = range(10)
# First 10k MNIST training images, unlabeled, for unsupervised training.
ims = ag.io.load_mnist('training', selection=slice(10000), return_labels=False)
net.train(ims)
#sup_ims = []
#sup_labels = []
classificationTraining = 10
# NOTE(review): this chunk begins mid-expression — inside the settings dict
# of a parts-layer constructor whose opening is outside this view.
        train_limit=20000,
        rotation_spreading_radius = 0,
        min_prob= 0.005,
        edge_type = 'colorYali',
        bedges = dict(k = 5,
                      minimum_contrast = 0.05,
                      spread = 'orthogonal',
                      radius = 1,
                      contrast_insensitive=False,
                      ),
        )),
    #pnet.PoolingLayer(shape=(4,4), strides=(4, 4)),
    #pnet.MixtureClassificationLayer(n_components=1, min_prob=0.0001,block_size=200),
    pnet.ExtensionPartsLayer(num_parts = 128, num_components = 5, part_shape = (12,12),lowerLayerShape = (6,6)),
    pnet.PoolingLayer(shape=(8,8), strides=(8, 8)),
    pnet.ExtensionPoolingLayer(n_parts = 640,
                               grouping_type = 'rbm',
                               pooling_type = 'distance',
                               pooling_distance = 5,
                               weights_file = None,
                               save_weights_file = None,
                               settings = {}),
    pnet.SVMClassificationLayer(C=None)
]
net = pnet.PartsNet(layers)
# First 10k CIFAR-10 training images; `label` is not used in this chunk.
ims, label = ag.io.load_cifar10('training', selection = slice(0,10000))
net.train(ims)
classes = range(10)
classificationTraining = 5000
# `training_seed` is defined outside this view — TODO confirm.
rs = np.random.RandomState(training_seed)
# Per-class loading; the loop body continues past the end of this chunk.
for d in classes:
    ims0,tmpLabel = ag.io.load_cifar10('training', classes = [d])
# NOTE(review): this chunk begins mid-statement — the tail of a classifier
# constructor call inside an if/elif chain over `classifier` whose start is
# outside this view.
    block_size=200)
elif classifier == 'svm':
    print('svm')
    cl = pnet.SVMClassificationLayer(C=None)
elif classifier == 'rot-mixture':
    # Rotation-aware mixture classifier; pooling is configured inside the
    # layer via pooling_settings, which is why the separate PoolingLayer
    # below is skipped for this branch.
    cl = pnet.RotationMixtureClassificationLayer(
        n_components=numOfClassModel,
        n_orientations=orientations,
        min_prob=0.0001,
        pooling_settings=dict(shape=(poolingSize, poolingSize),
                              strides=(poolingSize, poolingSize),
                              rotation_spreading_radius=0))
if (poolingSize != 0) and (classifier != 'rot-mixture'):
    layers = [
        pnet.PoolingLayer(shape=(poolingSize, poolingSize),
                          strides=(poolingSize, poolingSize)),
        cl
    ]
else:
    layers = [cl]
# Stack the pretrained feature net with the newly-built classifier layers.
net = pnet.PartsNet.load(modelFileName)
clnet = pnet.PartsNet([net] + layers)
digits = range(10)
sup_ims = []
sup_labels = []
# `i` presumably comes from an enclosing trial loop — TODO confirm.
rs = np.random.RandomState(i)
for d in digits:
    ims0, tmpLabel = ag.io.load_cifar10('training', [d])
    # NOTE(review): `indices` is computed but not used before this chunk ends.
    indices = np.arange(ims0.shape[0])
    sup_ims.append(ims0)
# --- CLI-driven setup: load a pretrained net, append extension pooling ------
# NOTE(review): `args`, `model`, `numParts`, `numExtensionParts` and
# `dataFileName` are defined outside this view.
saveFile = args.saveFile
weightsSaveFile = args.saveWeightsFile
extensionPatchSize = args.extensionPatchSize
data = np.load(dataFileName)
training_seed = args.seed
net = pnet.PartsNet.load(args.model)  # pretrained feature network
pooling_distance = args.poolingDistance
pooling_shape = args.poolingShape
print(model, numParts, numExtensionParts, dataFileName, saveFile, weightsSaveFile, extensionPatchSize, training_seed, pooling_distance, pooling_shape)
print("Inside")
extensionlayers = [
    #pnet.ExtensionPartsLayer(num_parts = numParts, num_components = numExtensionParts, part_shape = (extensionPatchSize,extensionPatchSize), lowerLayerShape = (6,6))
    pnet.PoolingLayer(shape=(pooling_shape, pooling_shape),
                      strides=(pooling_shape, pooling_shape)),
    # NOTE(review): weights_file is set from the *save* path argument here
    # while save_weights_file is None — looks swapped; verify intent.
    pnet.ExtensionPoolingLayer(n_parts=numParts * numExtensionParts,
                               grouping_type='rbm',
                               pooling_type='distance',
                               pooling_distance=pooling_distance,
                               save_weights_file=None,
                               weights_file=weightsSaveFile,
                               settings={})
]
clnet = pnet.PartsNet([net] + extensionlayers)
digits = range(10)
print('Extracting subsets...')
ims10k = data[:10000]  # first 10k samples for unsupervised training
print('Done.')
# --- Unsupervised training, then repeated few-shot supervised trials --------
# NOTE(review): `data`, `net`, `pnet`, `ag` and `np` come from outside this
# view.
ims10k = data[:10000]
print('Done.')
start0 = time.time()
print('Training unsupervised...')
net.train(ims10k)
print('Done.')
end0 = time.time()
error_rates = []
test_ims, test_labels = ag.io.load_mnist('testing',return_labels=True)
# 11 trials, each seeded differently, sampling 10 training images per digit.
for i in range(11):
    clnet = pnet.PartsNet([net] + [pnet.PoolingLayer(shape=(4,4),strides=(4,4)),
                                   pnet.SVMClassificationLayer(C=None)])
    digits = range(10)
    sup_ims = []
    sup_labels = []
    rs = np.random.RandomState(i)  # per-trial seed for reproducible sampling
    for d in digits:
        ims0 = ag.io.load_mnist('training',[d],return_labels=False)
        #indices = [k for k in range(len(label)) if label[k] in [d]]
        indices = np.arange(ims0.shape[0])
        print(indices[:10])
        rs.shuffle(indices)
        print(indices[:10])
        sup_ims.append(ims0[indices[:10]])  # 10 random images of digit d
        sup_labels.append(d * np.ones(10,dtype=np.int64))
    sup_ims = np.concatenate(sup_ims, axis = 0)
    sup_labels = np.concatenate(sup_labels, axis = 0)