def full_cv(base_dir): """Run the MNIST experiment. Iterate over each CV. @param base_dir: The full path to the base directory. This directory should contain the config as well as the pickled data. """ # Get the keyword arguments for the SP with open(os.path.join(base_dir, 'config.json'), 'r') as f: kargs = json.load(f) kargs['clf'] = LinearSVC(random_state=kargs['seed']) # Get the data (tr_x, tr_y), (te_x, te_y) = load_mnist() x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y)) # Get the CV splits with open(os.path.join(base_dir, 'cv.pkl'), 'rb') as f: cv = pickle.load(f) # Execute each run for tr, te in cv: clf = SPRegion(**kargs) clf.fit(x[tr], y[tr]) # Column accuracy clf.score(x[te], y[te]) # Probabilistic accuracy clf.score(x[te], y[te], tr_x=x[tr], score_method='prob') # Dimensionality reduction method clf.score(x[te], y[te], tr_x=x[tr], score_method='reduction') ndims = len(clf.reduce_dimensions(x[0])) clf._log_stats('Number of New Dimensions', ndims)
def full_mnist(base_dir, new_dir, auto_update=False):
    """
    Execute a full MNIST run using the parameters specified in the config.

    @param base_dir: The full path to the base directory. This directory
    should contain the config.

    @param new_dir: The full path of where the data should be saved.

    @param auto_update: If True, the permanence increment and decrement
    amounts will automatically be computed by the runner. If False, the ones
    specified in the config file will be used.
    """
    # Get the keyword arguments for the SP
    with open(os.path.join(base_dir, 'config.json'), 'r') as f:
        kargs = json.load(f)
    kargs['log_dir'] = new_dir
    kargs['clf'] = LinearSVC(random_state=kargs['seed'])

    # Get the data
    (tr_x, tr_y), (te_x, te_y) = load_mnist()

    # Manually compute the permanence update amounts
    if auto_update:
        # Compute the sum of active bits in each training instance
        avg_s = tr_x.sum(1)

        # Compute the total average sum
        avg_ts = avg_s.mean()

        # Compute the average active probability
        a_p = avg_ts / float(tr_x.shape[1])

        # Compute the scaling factor
        scaling_factor = 1 / avg_ts

        # Compute the update amounts
        pinc = scaling_factor * (1 / a_p)
        pdec = scaling_factor * (1 / (1 - a_p))

        # Update the config
        kargs['pinc'], kargs['pdec'] = pinc, pdec

    # Execute
    clf = SPRegion(**kargs)
    clf.fit(tr_x, tr_y)

    # Column accuracy
    clf.score(te_x, te_y)

    # Probabilistic accuracy
    clf.score(te_x, te_y, tr_x=tr_x, score_method='prob')

    # Dimensionality reduction method
    clf.score(te_x, te_y, tr_x=tr_x, score_method='reduction')
    ndims = len(clf.reduce_dimensions(tr_x[0]))
    clf._log_stats('Number of New Dimensions', ndims)

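# A worked example of the auto-update math above, as a sketch with toy binary
# data rather than MNIST. With an average of 3 active bits out of 10
# (avg_ts = 3, a_p = 0.3), the result is pinc = (1/3) / 0.3 ~= 1.111 and
# pdec = (1/3) / 0.7 ~= 0.476.
def _demo_auto_update():
    """Sketch: reproduce full_mnist's permanence auto-update on toy data."""
    tr_x = np.array([
        [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1, 1, 0, 0, 0, 0]])
    avg_ts = tr_x.sum(1).mean()          # average number of active bits (3.0)
    a_p = avg_ts / float(tr_x.shape[1])  # average active probability (0.3)
    scaling_factor = 1 / avg_ts
    pinc = scaling_factor * (1 / a_p)
    pdec = scaling_factor * (1 / (1 - a_p))
    return pinc, pdec                    # ~(1.111, 0.476)
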
def one_cv(base_dir, cv_split):
    """
    Run the MNIST experiment. Only the specified CV split is executed.

    @param base_dir: The full path to the base directory. This directory
    should contain the config as well as the pickled data.

    @param cv_split: The 1-based index of the CV split.
    """
    # Get the keyword arguments for the SP
    with open(os.path.join(base_dir, 'config-{0}.json'.format(cv_split)),
              'r') as f:
        kargs = json.load(f)
    kargs['clf'] = LinearSVC(random_state=kargs['seed'])

    # Get the data
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y))

    # Get the CV split
    with open(os.path.join(base_dir, 'cv.pkl'), 'rb') as f:
        cv = pickle.load(f)
    tr, te = cv[cv_split - 1]

    # Remove the split directory, if it exists
    shutil.rmtree(os.path.join(base_dir, str(cv_split)), True)

    # Execute
    clf = SPRegion(**kargs)
    clf.fit(x[tr], y[tr])

    # Column accuracy
    clf.score(x[te], y[te])

    # Probabilistic accuracy
    clf.score(x[te], y[te], tr_x=x[tr], score_method='prob')

    # Dimensionality reduction method
    clf.score(x[te], y[te], tr_x=x[tr], score_method='reduction')
    ndims = len(clf.reduce_dimensions(x[0]))
    clf._log_stats('Number of New Dimensions', ndims)

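# Sketch of how the 'cv.pkl' splits consumed by full_cv and one_cv above
# could be generated. The list-of-(train, test)-index-pairs format matches
# what those functions expect; the choice of StratifiedKFold here is an
# assumption, not necessarily how the original splits were built.
def make_cv_splits(base_dir, y, nsplits=10, seed=123456789):
    """Write nsplits stratified CV splits for the labels y to cv.pkl."""
    from sklearn.model_selection import StratifiedKFold

    skf = StratifiedKFold(n_splits=nsplits, shuffle=True, random_state=seed)
    cv = [(tr, te) for tr, te in skf.split(np.zeros((len(y), 1)), y)]
    with open(os.path.join(base_dir, 'cv.pkl'), 'wb') as f:
        pickle.dump(cv, f, pickle.HIGHEST_PROTOCOL)
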
def full_cv(base_dir): """ Run the MNIST experiment. Each CV split is executed sequentially. @param base_dir: The full path to the base directory. This directory should contain the config as well as the pickled data. """ # Get the keyword arguments for the SP with open(os.path.join(base_dir, 'config.json'), 'rb') as f: kargs = json.load(f) kargs['clf'] = LinearSVC(random_state=kargs['seed']) # Get the data (tr_x, tr_y), (te_x, te_y) = load_mnist() x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y)) # Get the CV splits with open(os.path.join(base_dir, 'cv.pkl'), 'rb') as f: cv = cPickle.load(f) # Execute each run for tr, te in cv: clf = SPRegion(**kargs) clf.fit(x[tr], y[tr]) # Column accuracy clf.score(x[te], y[te]) # Probabilistic accuracy clf.score(x[te], y[te], tr_x=x[tr], score_method='prob') # Dimensionality reduction method clf.score(x[te], y[te], tr_x=x[tr], score_method='reduction') ndims = len(clf.reduce_dimensions(x[0])) clf._log_stats('Number of New Dimensions', ndims)
def base_experiment(log_dir, seed=123456789):
    """
    The base experiment. Build an SP using SPDataset and see how it performs.

    @param log_dir: The full path to the log directory.

    @param seed: The random seed to use.

    @return: Tuple containing: SP uniqueness, input uniqueness, SP overlap,
    input overlap.
    """
    # Params
    nsamples, nbits, pct_active = 500, 100, 0.4
    kargs = {
        'ninputs': nbits,
        'ncolumns': 200,
        'nactive': 50,
        'global_inhibition': True,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': seed,
        'nsynapses': 75,
        'seg_th': 15,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'pwindow': 0.5,
        'random_permanence': True,
        'nepochs': 10,
        'log_dir': log_dir
    }

    # Seed numpy
    np.random.seed(seed)

    # Build items to store results
    npoints = 11
    pct_noises = np.linspace(0, 1, npoints)
    u_sp, u_ip = np.zeros(npoints), np.zeros(npoints)
    o_sp, o_ip = np.zeros(npoints), np.zeros(npoints)

    # Metrics
    metrics = SPMetrics()

    # Vary the input noise
    for i, pct_noise in enumerate(pct_noises):
        # Build the dataset
        ds = SPDataset(nsamples=nsamples, nbits=nbits, pct_active=pct_active,
                       pct_noise=pct_noise, seed=seed)
        x = ds.data

        # Get the dataset stats
        u_ip[i] = metrics.compute_uniqueness(x) * 100
        o_ip[i] = metrics.compute_overlap(x) * 100

        # Build the SP
        sp = SPRegion(**kargs)

        # Train the region
        sp.fit(x)

        # Get the SP's output SDRs
        sp_output = sp.predict(x)

        # Get the stats; the SP overlap averages the overlap of the active
        # and inactive bits, hence the factor of 50 instead of 100
        u_sp[i] = metrics.compute_uniqueness(sp_output) * 100
        o_sp[i] = (metrics.compute_overlap(sp_output) +
                   metrics.compute_overlap(np.logical_not(sp_output))) * 50

        # Log everything
        sp._log_stats('% Input Uniqueness', u_ip[i])
        sp._log_stats('% Input Overlap', o_ip[i])
        sp._log_stats('% SP Uniqueness', u_sp[i])
        sp._log_stats('% SP Overlap', o_sp[i])

    return u_sp, u_ip, o_sp, o_ip

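# Minimal usage sketch for the noise-sweep experiment above. The log
# directory is hypothetical; the four returned arrays hold one entry per
# noise level (0%, 10%, ..., 100%).
def _demo_noise_sweep():
    log_dir = os.path.join(os.path.expanduser('~'), 'scratch',
                           'noise_experiment')
    u_sp, u_ip, o_sp, o_ip = base_experiment(log_dir)
    for pct, usp, uip in zip(np.linspace(0, 1, 11), u_sp, u_ip):
        print('{0:3.0f}% noise: SP uniqueness {1:2.2f}%, '
              'input uniqueness {2:2.2f}%'.format(pct * 100, usp, uip))
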
def local_experiment(): """Run a single experiment, locally.""" seed = 123456789 config = { 'ninputs': 100, 'trim': 1e-4, 'disable_boost': True, 'seed': seed, 'pct_active': None, 'random_permanence': True, 'pwindow': 0.5, 'global_inhibition': True, 'ncolumns': 200, 'nactive': 50, 'nsynapses': 100, 'seg_th': 5, 'syn_th': 0.5, 'pinc': 0.001, 'pdec': 0.001, 'nepochs': 10, 'log_dir': os.path.join(os.path.expanduser('~'), 'scratch', 'param_experiments', '1-1') } # Get the data nsamples, nbits, pct_active, pct_noise = 500, 100, 0.4, 0.15 ds = SPDataset(nsamples, nbits, pct_active, pct_noise, seed) data = ds.data # Metrics metrics = SPMetrics() # Get the metrics for the dataset uniqueness_data = metrics.compute_uniqueness(data) overlap_data = metrics.compute_overlap(data) correlation_data = 1 - metrics.compute_distance(data) # Create the SP sp = SPRegion(**config) # Fit the SP sp.fit(data) # Get the SP's output sp_output = sp.predict(data) # Get the metrics for the SP's results sp_uniqueness = metrics.compute_uniqueness(sp_output) sp_overlap = metrics.compute_overlap(sp_output) sp_correlation = 1 - metrics.compute_distance(sp_output) # Log all of the metrics sp._log_stats('Input Uniqueness', uniqueness_data) sp._log_stats('Input Overlap', overlap_data) sp._log_stats('Input Correlation', correlation_data) sp._log_stats('SP Uniqueness', sp_uniqueness) sp._log_stats('SP Overlap', sp_overlap) sp._log_stats('SP Correlation', sp_correlation) print(f'Uniqueness:\t{uniqueness_data:2.4f}\t{sp_uniqueness:2.4f}') print(f'Overlap:\t{overlap_data:2.4f}\t{sp_overlap:2.4f}') print(f'Correlation:\t{correlation_data:2.4f}\t{sp_correlation:2.4f}') # Get a new random input ds2 = SPDataset(nsamples, nbits, pct_active, pct_noise, 123) print(f'\n% Overlapping old class to new: \ \t{(float(np.dot(ds.input, ds2.input)) / nbits) * 100:2.4f}%') # Test the SP on the new dataset sp_output2 = sp.predict(ds2.data) # Get average representation of first result original_result = np.mean(sp_output, 0) original_result[original_result >= 0.5] = 1 original_result[original_result < 1] = 0 # Get averaged results for each metric type sp_uniqueness2 = 0. sp_overlap2 = 0. sp_correlation2 = 0. for item in sp_output2: test = np.vstack((original_result, item)) sp_uniqueness2 = metrics.compute_uniqueness(test) sp_overlap2 = metrics.compute_overlap(test) sp_correlation2 = 1 - metrics.compute_distance(test) sp_uniqueness2 /= len(sp_output2) sp_overlap2 /= len(sp_output2) sp_correlation2 /= len(sp_output2) print(sp_uniqueness2, sp_overlap2, sp_correlation2)
def run_single_experiment(base_dir, ntrials=10, seed=123456789):
    """
    Run the actual experiment.

    @param base_dir: The directory containing the experiment to be run.

    @param ntrials: The number of trials to perform with different seeds.

    @param seed: The initial seed used to generate the other random seeds.
    """
    # Generate the requested number of seeds
    seeds = generate_seeds(ntrials, seed)

    # Get the configuration
    with open(os.path.join(base_dir, 'config.json'), 'r') as f:
        config = json.load(f)

    # Get the data and base metric data
    with open(os.path.join(base_dir, 'dataset.pkl'), 'rb') as f:
        data = pickle.load(f)
        uniqueness_data, overlap_data, correlation_data = pickle.load(f)

    # Metrics
    metrics = SPMetrics()

    # Execute each run
    for s in seeds:
        # Update the seed
        config['seed'] = s

        # Create the SP
        sp = SPRegion(**config)

        # Fit the SP
        sp.fit(data)

        # Get the SP's output
        sp_output = sp.predict(data)

        # Log all of the metrics
        sp._log_stats('Input Uniqueness', uniqueness_data)
        sp._log_stats('Input Overlap', overlap_data)
        sp._log_stats('Input Correlation', correlation_data)
        sp._log_stats('SP Uniqueness', metrics.compute_uniqueness(sp_output))
        sp._log_stats('SP Overlap', metrics.compute_overlap(sp_output))
        sp._log_stats('SP Correlation',
                      1 - metrics.compute_distance(sp_output))

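# run_single_experiment and some of the base_experiment variants below rely
# on a generate_seeds helper that is not defined in this file. A minimal
# sketch, assuming it simply draws ntrials reproducible seeds from a single
# master seed:
def generate_seeds(ntrials, seed):
    """Deterministically generate ntrials random seeds from seed."""
    rng = np.random.RandomState(seed)
    return rng.randint(0, 1000000, ntrials).tolist()
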
def base_experiment(pct_noise=0.15, noverlap_bits=0, exp_name='1-1',
                    ntrials=10, verbose=True, seed=123456789):
    """
    Run a single experiment, locally.

    @param pct_noise: The percentage of noise to add to the dataset.

    @param noverlap_bits: The number of bits the base class should overlap
    with the novelty class.

    @param exp_name: The name of the experiment.

    @param ntrials: The number of times to repeat the experiment.

    @param verbose: If True, print the results.

    @param seed: The random seed to use.

    @return: A tuple containing the percentage errors for the SP's training
    and testing results and the SVM's training and testing results,
    respectively.
    """
    # Base parameters
    ntrain, ntest = 800, 200
    nsamples, nbits, pct_active = ntest + ntrain, 100, 0.4
    clf_th = 0.5
    log_dir = os.path.join(os.path.expanduser('~'), 'scratch',
                           'novelty_experiments', exp_name)

    # Configure the SP
    config = {
        'ninputs': 100,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': seed,
        'pct_active': None,
        'random_permanence': True,
        'pwindow': 0.5,
        'global_inhibition': True,
        'ncolumns': 200,
        'nactive': 50,
        'nsynapses': 75,
        'seg_th': 15,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'nepochs': 10,
        'log_dir': log_dir
    }

    # Seed numpy
    np.random.seed(seed)

    # Create the base dataset
    x_ds = SPDataset(nsamples, nbits, pct_active, pct_noise, seed=seed)
    x_tr, x_te = x_ds.data[:ntrain], x_ds.data[ntrain:]

    # Create the outlier dataset
    base_indexes = set(np.where(x_ds.base_class == 1)[0])
    choices = [x for x in range(nbits) if x not in base_indexes]
    outlier_base = np.zeros(nbits, dtype='bool')
    outlier_base[np.random.choice(choices, x_ds.nactive - noverlap_bits,
                                  False)] = 1
    outlier_base[np.random.permutation(list(base_indexes))[:noverlap_bits]] = 1
    y_ds = SPDataset(ntest, nbits, pct_active, pct_noise, outlier_base, seed)
    y_te = y_ds.data

    if verbose:
        # 40 active bits per class (nbits * pct_active)
        print("\nBase class' test noise: {0:2.2f}".format(
            1 - (np.mean(x_te, 0) * x_ds.base_class.astype('i')).sum() / 40.))
        print("Outlier's class noise: {0:2.2f}".format(
            1 - (np.mean(y_te, 0) * outlier_base.astype('i')).sum() / 40.))
        print('Overlap between two classes: {0}'.format(
            np.dot(x_ds.base_class.astype('i'), outlier_base.astype('i'))))

    # Metrics
    metrics = SPMetrics()

    # Get the metrics for the datasets
    u_x_tr = metrics.compute_uniqueness(x_tr)
    o_x_tr = metrics.compute_overlap(x_tr)
    c_x_tr = 1 - metrics.compute_distance(x_tr)
    u_x_te = metrics.compute_uniqueness(x_te)
    o_x_te = metrics.compute_overlap(x_te)
    c_x_te = 1 - metrics.compute_distance(x_te)
    u_y_te = metrics.compute_uniqueness(y_te)
    o_y_te = metrics.compute_overlap(y_te)
    c_y_te = 1 - metrics.compute_distance(y_te)

    # Initialize the overall results
    sp_x_results = np.zeros(ntrials)
    sp_y_results = np.zeros(ntrials)
    svm_x_results = np.zeros(ntrials)
    svm_y_results = np.zeros(ntrials)

    # Iterate across the trials
    for i in range(ntrials):
        # Make a new seed
        seed2 = np.random.randint(1000000)
        config['seed'] = seed2
        config['log_dir'] = '{0}-{1}'.format(log_dir, i + 1)

        # Create the SP
        sp = SPRegion(**config)

        # Fit the SP
        sp.fit(x_tr)

        # Get the SP's output
        sp_x_tr = sp.predict(x_tr)
        sp_x_te = sp.predict(x_te)
        sp_y_te = sp.predict(y_te)

        # Get the metrics for the SP's results
        u_sp_x_tr = metrics.compute_uniqueness(sp_x_tr)
        o_sp_x_tr = metrics.compute_overlap(sp_x_tr)
        c_sp_x_tr = 1 - metrics.compute_distance(sp_x_tr)
        u_sp_x_te = metrics.compute_uniqueness(sp_x_te)
        o_sp_x_te = metrics.compute_overlap(sp_x_te)
        c_sp_x_te = 1 - metrics.compute_distance(sp_x_te)
        u_sp_y_te = metrics.compute_uniqueness(sp_y_te)
        o_sp_y_te = metrics.compute_overlap(sp_y_te)
        c_sp_y_te = 1 - metrics.compute_distance(sp_y_te)

        # Log all of the metrics
        sp._log_stats('Input Base Class Train Uniqueness', u_x_tr)
        sp._log_stats('Input Base Class Train Overlap', o_x_tr)
        sp._log_stats('Input Base Class Train Correlation', c_x_tr)
        sp._log_stats('Input Base Class Test Uniqueness', u_x_te)
        sp._log_stats('Input Base Class Test Overlap', o_x_te)
        sp._log_stats('Input Base Class Test Correlation', c_x_te)
        sp._log_stats('Input Novelty Class Test Uniqueness', u_y_te)
        sp._log_stats('Input Novelty Class Test Overlap', o_y_te)
        sp._log_stats('Input Novelty Class Test Correlation', c_y_te)
        sp._log_stats('SP Base Class Train Uniqueness', u_sp_x_tr)
        sp._log_stats('SP Base Class Train Overlap', o_sp_x_tr)
        sp._log_stats('SP Base Class Train Correlation', c_sp_x_tr)
        sp._log_stats('SP Base Class Test Uniqueness', u_sp_x_te)
        sp._log_stats('SP Base Class Test Overlap', o_sp_x_te)
        sp._log_stats('SP Base Class Test Correlation', c_sp_x_te)
        sp._log_stats('SP Novelty Class Test Uniqueness', u_sp_y_te)
        sp._log_stats('SP Novelty Class Test Overlap', o_sp_y_te)
        sp._log_stats('SP Novelty Class Test Correlation', c_sp_y_te)

        # Print the results
        fmt_s = '{0}:\t{1:2.4f}\t{2:2.4f}\t{3:2.4f}\t{4:2.4f}\t{5:2.4f}\t' \
                '{6:2.4f}'
        if verbose:
            print('\nDescription\tx_tr\tx_te\ty_te\tsp_x_tr\tsp_x_te\tsp_y_te')
            print(fmt_s.format('Uniqueness', u_x_tr, u_x_te, u_y_te,
                               u_sp_x_tr, u_sp_x_te, u_sp_y_te))
            print(fmt_s.format('Overlap', o_x_tr, o_x_te, o_y_te, o_sp_x_tr,
                               o_sp_x_te, o_sp_y_te))
            print(fmt_s.format('Correlation', c_x_tr, c_x_te, c_y_te,
                               c_sp_x_tr, c_sp_x_te, c_sp_y_te))

        # Get the average representation of the base class
        sp_base_result = np.mean(sp_x_tr, 0)
        sp_base_result[sp_base_result >= 0.5] = 1
        sp_base_result[sp_base_result < 1] = 0

        # Averaged results for each metric type
        u_sp_base_to_x_te = 0.
        o_sp_base_to_x_te = 0.
        c_sp_base_to_x_te = 0.
        u_sp_base_to_y_te = 0.
        o_sp_base_to_y_te = 0.
        c_sp_base_to_y_te = 0.
        for x, y in zip(sp_x_te, sp_y_te):
            # Refactor
            xt = np.vstack((sp_base_result, x))
            yt = np.vstack((sp_base_result, y))

            # Compute the sums
            u_sp_base_to_x_te += metrics.compute_uniqueness(xt)
            o_sp_base_to_x_te += metrics.compute_overlap(xt)
            c_sp_base_to_x_te += 1 - metrics.compute_distance(xt)
            u_sp_base_to_y_te += metrics.compute_uniqueness(yt)
            o_sp_base_to_y_te += metrics.compute_overlap(yt)
            c_sp_base_to_y_te += 1 - metrics.compute_distance(yt)
        u_sp_base_to_x_te /= ntest
        o_sp_base_to_x_te /= ntest
        c_sp_base_to_x_te /= ntest
        u_sp_base_to_y_te /= ntest
        o_sp_base_to_y_te /= ntest
        c_sp_base_to_y_te /= ntest

        # Log the results
        sp._log_stats('Base Train to Base Test Uniqueness', u_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Overlap', o_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Correlation',
                      c_sp_base_to_x_te)
        sp._log_stats('Base Train to Novelty Test Uniqueness',
                      u_sp_base_to_y_te)
        sp._log_stats('Base Train to Novelty Test Overlap', o_sp_base_to_y_te)
        sp._log_stats('Base Train to Novelty Test Correlation',
                      c_sp_base_to_y_te)

        # Print the results
        if verbose:
            print('\nDescription\tx_tr->x_te\tx_tr->y_te')
            print('Uniqueness:\t{0:2.4f}\t{1:2.4f}'.format(
                u_sp_base_to_x_te, u_sp_base_to_y_te))
            print('Overlap:\t{0:2.4f}\t{1:2.4f}'.format(
                o_sp_base_to_x_te, o_sp_base_to_y_te))
            print('Correlation:\t{0:2.4f}\t{1:2.4f}'.format(
                c_sp_base_to_x_te, c_sp_base_to_y_te))

        # Create an SVM
        clf = OneClassSVM(kernel='linear', nu=0.1, random_state=seed2)

        # Evaluate the SVM's performance
        clf.fit(x_tr)
        svm_x_te = len(np.where(clf.predict(x_te) == 1)[0]) / \
            float(ntest) * 100
        svm_y_te = len(np.where(clf.predict(y_te) == -1)[0]) / \
            float(ntest) * 100

        # Perform classification using overlap as the feature
        # -- The overlap must be above 50%
        clf_x_te = 0.
        clf_y_te = 0.
        for x, y in zip(sp_x_te, sp_y_te):
            # Refactor
            xt = np.vstack((sp_base_result, x))
            yt = np.vstack((sp_base_result, y))

            # Compute the accuracy
            xo = metrics.compute_overlap(xt)
            yo = metrics.compute_overlap(yt)
            if xo >= clf_th:
                clf_x_te += 1
            if yo < clf_th:
                clf_y_te += 1
        clf_x_te = (clf_x_te / ntest) * 100
        clf_y_te = (clf_y_te / ntest) * 100

        # Store the results as errors
        sp_x_results[i] = 100 - clf_x_te
        sp_y_results[i] = 100 - clf_y_te
        svm_x_results[i] = 100 - svm_x_te
        svm_y_results[i] = 100 - svm_y_te

        # Log the results
        sp._log_stats('SP % Correct Base Class', clf_x_te)
        sp._log_stats('SP % Correct Novelty Class', clf_y_te)
        sp._log_stats('SVM % Correct Base Class', svm_x_te)
        sp._log_stats('SVM % Correct Novelty Class', svm_y_te)

        # Print the results
        if verbose:
            print('\nSP Base Class Detection : {0:2.2f}%'.format(clf_x_te))
            print('SP Novelty Class Detection : {0:2.2f}%'.format(clf_y_te))
            print('SVM Base Class Detection : {0:2.2f}%'.format(svm_x_te))
            print('SVM Novelty Class Detection : {0:2.2f}%'.format(svm_y_te))

    return sp_x_results, sp_y_results, svm_x_results, svm_y_results

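# Usage sketch for the novelty experiment above: sweep the number of
# overlapping bits between the base and novelty classes and report the mean
# errors. The experiment names are hypothetical.
def _demo_overlap_sweep():
    for noverlap in (0, 10, 20):
        results = base_experiment(noverlap_bits=noverlap,
                                  exp_name='overlap-{0}'.format(noverlap),
                                  ntrials=3, verbose=False)
        sp_x_err, sp_y_err, svm_x_err, svm_y_err = [np.mean(r)
                                                    for r in results]
        print('{0} overlapping bits: SP error {1:2.2f}% / {2:2.2f}%, '
              'SVM error {3:2.2f}% / {4:2.2f}%'.format(
                  noverlap, sp_x_err, sp_y_err, svm_x_err, svm_y_err))
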
def base_experiment(config, pct_noise=0.15, noverlap_bits=0, ntrials=10,
                    verbose=False, seed=123456789):
    """
    Run a single experiment, locally.

    @param config: The configuration parameters.

    @param pct_noise: The percentage of noise to add to the dataset.

    @param noverlap_bits: The number of bits the base class should overlap
    with the novelty class.

    @param ntrials: The number of times to repeat the experiment.

    @param verbose: If True, print the results.

    @param seed: The random seed to use.
    """
    # Base parameters
    ntrain, ntest = 800, 200
    nsamples, nbits, pct_active = ntest + ntrain, 100, 0.4
    clf_th = 0.5

    # Build the directory, if needed
    base_dir = config['log_dir']
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)

    # Seed numpy
    np.random.seed(seed)

    # Create the base dataset
    x_ds = SPDataset(nsamples, nbits, pct_active, pct_noise, seed=seed)
    x_tr, x_te = x_ds.data[:ntrain], x_ds.data[ntrain:]

    # Create the outlier dataset
    base_indexes = set(np.where(x_ds.base_class == 1)[0])
    choices = [x for x in range(nbits) if x not in base_indexes]
    outlier_base = np.zeros(nbits, dtype='bool')
    outlier_base[np.random.choice(choices, x_ds.nactive - noverlap_bits,
                                  False)] = 1
    outlier_base[np.random.permutation(list(base_indexes))[:noverlap_bits]] = 1
    y_ds = SPDataset(ntest, nbits, pct_active, pct_noise, outlier_base, seed)
    y_te = y_ds.data

    if verbose:
        # 40 active bits per class (nbits * pct_active)
        print("\nBase class' test noise: {0:2.2f}".format(
            1 - (np.mean(x_te, 0) * x_ds.base_class.astype('i')).sum() / 40.))
        print("Outlier's class noise: {0:2.2f}".format(
            1 - (np.mean(y_te, 0) * outlier_base.astype('i')).sum() / 40.))
        print('Overlap between two classes: {0}'.format(
            np.dot(x_ds.base_class.astype('i'), outlier_base.astype('i'))))

    # Metrics
    metrics = SPMetrics()

    # Get the metrics for the datasets
    u_x_tr = metrics.compute_uniqueness(x_tr)
    o_x_tr = metrics.compute_overlap(x_tr)
    u_x_te = metrics.compute_uniqueness(x_te)
    o_x_te = metrics.compute_overlap(x_te)
    u_y_te = metrics.compute_uniqueness(y_te)
    o_y_te = metrics.compute_overlap(y_te)

    # Initialize the overall results
    sp_x_results = np.zeros(ntrials)
    sp_y_results = np.zeros(ntrials)
    svm_x_results = np.zeros(ntrials)
    svm_y_results = np.zeros(ntrials)

    # Iterate across the trials
    for i, seed2 in enumerate(generate_seeds(ntrials, seed)):
        # Create the SP
        config['seed'] = seed2
        sp = SPRegion(**config)

        # Fit the SP
        sp.fit(x_tr)

        # Get the SP's output
        sp_x_tr = sp.predict(x_tr)
        sp_x_te = sp.predict(x_te)
        sp_y_te = sp.predict(y_te)

        # Get the metrics for the SP's results
        u_sp_x_tr = metrics.compute_uniqueness(sp_x_tr)
        o_sp_x_tr = metrics.compute_overlap(sp_x_tr)
        u_sp_x_te = metrics.compute_uniqueness(sp_x_te)
        o_sp_x_te = metrics.compute_overlap(sp_x_te)
        u_sp_y_te = metrics.compute_uniqueness(sp_y_te)
        o_sp_y_te = metrics.compute_overlap(sp_y_te)

        # Log all of the metrics
        sp._log_stats('Input Base Class Train Uniqueness', u_x_tr)
        sp._log_stats('Input Base Class Train Overlap', o_x_tr)
        sp._log_stats('Input Base Class Test Uniqueness', u_x_te)
        sp._log_stats('Input Base Class Test Overlap', o_x_te)
        sp._log_stats('Input Novelty Class Test Uniqueness', u_y_te)
        sp._log_stats('Input Novelty Class Test Overlap', o_y_te)
        sp._log_stats('SP Base Class Train Uniqueness', u_sp_x_tr)
        sp._log_stats('SP Base Class Train Overlap', o_sp_x_tr)
        sp._log_stats('SP Base Class Test Uniqueness', u_sp_x_te)
        sp._log_stats('SP Base Class Test Overlap', o_sp_x_te)
        sp._log_stats('SP Novelty Class Test Uniqueness', u_sp_y_te)
        sp._log_stats('SP Novelty Class Test Overlap', o_sp_y_te)

        # Print the results
        fmt_s = '{0}:\t{1:2.4f}\t{2:2.4f}\t{3:2.4f}\t{4:2.4f}\t{5:2.4f}\t' \
                '{6:2.4f}'
        if verbose:
            print('\nDescription\tx_tr\tx_te\ty_te\tsp_x_tr\tsp_x_te\tsp_y_te')
            print(fmt_s.format('Uniqueness', u_x_tr, u_x_te, u_y_te,
                               u_sp_x_tr, u_sp_x_te, u_sp_y_te))
            print(fmt_s.format('Overlap', o_x_tr, o_x_te, o_y_te, o_sp_x_tr,
                               o_sp_x_te, o_sp_y_te))

        # Get the average representation of the base class
        sp_base_result = np.mean(sp_x_tr, 0)
        sp_base_result[sp_base_result >= 0.5] = 1
        sp_base_result[sp_base_result < 1] = 0

        # Averaged results for each metric type
        u_sp_base_to_x_te = 0.
        o_sp_base_to_x_te = 0.
        u_sp_base_to_y_te = 0.
        o_sp_base_to_y_te = 0.
        for x, y in zip(sp_x_te, sp_y_te):
            # Refactor
            xt = np.vstack((sp_base_result, x))
            yt = np.vstack((sp_base_result, y))

            # Compute the sums
            u_sp_base_to_x_te += metrics.compute_uniqueness(xt)
            o_sp_base_to_x_te += metrics.compute_overlap(xt)
            u_sp_base_to_y_te += metrics.compute_uniqueness(yt)
            o_sp_base_to_y_te += metrics.compute_overlap(yt)
        u_sp_base_to_x_te /= ntest
        o_sp_base_to_x_te /= ntest
        u_sp_base_to_y_te /= ntest
        o_sp_base_to_y_te /= ntest

        # Log the results
        sp._log_stats('Base Train to Base Test Uniqueness', u_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Overlap', o_sp_base_to_x_te)
        sp._log_stats('Base Train to Novelty Test Uniqueness',
                      u_sp_base_to_y_te)
        sp._log_stats('Base Train to Novelty Test Overlap', o_sp_base_to_y_te)

        # Print the results
        if verbose:
            print('\nDescription\tx_tr->x_te\tx_tr->y_te')
            print('Uniqueness:\t{0:2.4f}\t{1:2.4f}'.format(
                u_sp_base_to_x_te, u_sp_base_to_y_te))
            print('Overlap:\t{0:2.4f}\t{1:2.4f}'.format(
                o_sp_base_to_x_te, o_sp_base_to_y_te))

        # Create an SVM
        clf = OneClassSVM(kernel='linear', nu=0.1, random_state=seed2)

        # Evaluate the SVM's performance
        clf.fit(x_tr)
        svm_x_te = len(np.where(clf.predict(x_te) == 1)[0]) / \
            float(ntest) * 100
        svm_y_te = len(np.where(clf.predict(y_te) == -1)[0]) / \
            float(ntest) * 100

        # Perform classification using overlap as the feature
        # -- The overlap must be above 50%
        clf_x_te = 0.
        clf_y_te = 0.
        for x, y in zip(sp_x_te, sp_y_te):
            # Refactor
            xt = np.vstack((sp_base_result, x))
            yt = np.vstack((sp_base_result, y))

            # Compute the accuracy
            xo = metrics.compute_overlap(xt)
            yo = metrics.compute_overlap(yt)
            if xo >= clf_th:
                clf_x_te += 1
            if yo < clf_th:
                clf_y_te += 1
        clf_x_te = (clf_x_te / ntest) * 100
        clf_y_te = (clf_y_te / ntest) * 100

        # Store the results as errors
        sp_x_results[i] = 100 - clf_x_te
        sp_y_results[i] = 100 - clf_y_te
        svm_x_results[i] = 100 - svm_x_te
        svm_y_results[i] = 100 - svm_y_te

        # Log the results
        sp._log_stats('SP % Correct Base Class', clf_x_te)
        sp._log_stats('SP % Correct Novelty Class', clf_y_te)
        sp._log_stats('SVM % Correct Base Class', svm_x_te)
        sp._log_stats('SVM % Correct Novelty Class', svm_y_te)

        # Print the results
        if verbose:
            print('\nSP Base Class Detection : {0:2.2f}%'.format(clf_x_te))
            print('SP Novelty Class Detection : {0:2.2f}%'.format(clf_y_te))
            print('SVM Base Class Detection : {0:2.2f}%'.format(svm_x_te))
            print('SVM Novelty Class Detection : {0:2.2f}%'.format(svm_y_te))

    # Save the results
    with open(os.path.join(base_dir, 'results.pkl'), 'wb') as f:
        pickle.dump((sp_x_results, sp_y_results, svm_x_results,
                     svm_y_results), f, pickle.HIGHEST_PROTOCOL)

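# Companion sketch for collecting the results pickled above; the file name
# and tuple layout follow the code in base_experiment.
def load_results(base_dir):
    """Load the (SP base, SP novelty, SVM base, SVM novelty) error arrays."""
    with open(os.path.join(base_dir, 'results.pkl'), 'rb') as f:
        return pickle.load(f)
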
def base_experiment(config, ntrials=1, seed=123456789):
    """
    Run a single experiment, locally.

    @param config: The configuration parameters to use for the SP.

    @param ntrials: The number of times to repeat the experiment.

    @param seed: The random seed to use.

    @return: A tuple containing the percentage errors for the SP's training
    and testing results and the SVM's training and testing results,
    respectively. The novelty results have one column per novelty class.
    """
    # Base parameters
    ntrain, ntest = 800, 200
    clf_th = 0.5

    # Seed numpy
    np.random.seed(seed)

    # Get the data; the base class is the digit 0 and the remaining nine
    # digits serve as the novelty classes
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    tr_x_0 = np.random.permutation(tr_x[tr_y == 0])
    x_tr = tr_x_0[:ntrain]
    x_te = tr_x_0[ntrain:ntrain + ntest]
    outliers = [np.random.permutation(tr_x[tr_y == i])[:ntest]
                for i in range(1, 10)]

    # Metrics
    metrics = SPMetrics()

    # Get the metrics for the datasets
    u_x_tr = metrics.compute_uniqueness(x_tr)
    o_x_tr = metrics.compute_overlap(x_tr)
    c_x_tr = 1 - metrics.compute_distance(x_tr)
    u_x_te = metrics.compute_uniqueness(x_te)
    o_x_te = metrics.compute_overlap(x_te)
    c_x_te = 1 - metrics.compute_distance(x_te)
    u_y_te, o_y_te, c_y_te = [], [], []
    for outlier in outliers:
        u_y_te.append(metrics.compute_uniqueness(outlier))
        o_y_te.append(metrics.compute_overlap(outlier))
        c_y_te.append(1 - metrics.compute_distance(outlier))

    # Initialize the overall results; each row is a trial and each novelty
    # column corresponds to one of the nine novelty classes
    sp_x_results = np.zeros(ntrials)
    sp_y_results = np.zeros((ntrials, 9))
    svm_x_results = np.zeros(ntrials)
    svm_y_results = np.zeros((ntrials, 9))

    # Iterate across the trials
    for nt in range(ntrials):
        # Make a new seed
        seed2 = np.random.randint(1000000)
        config['seed'] = seed2

        # Create the SP
        sp = SPRegion(**config)

        # Fit the SP
        sp.fit(x_tr)

        # Get the SP's output
        sp_x_tr = sp.predict(x_tr)
        sp_x_te = sp.predict(x_te)
        sp_y_te = [sp.predict(outlier) for outlier in outliers]

        # Get the metrics for the SP's results
        u_sp_x_tr = metrics.compute_uniqueness(sp_x_tr)
        o_sp_x_tr = metrics.compute_overlap(sp_x_tr)
        c_sp_x_tr = 1 - metrics.compute_distance(sp_x_tr)
        u_sp_x_te = metrics.compute_uniqueness(sp_x_te)
        o_sp_x_te = metrics.compute_overlap(sp_x_te)
        c_sp_x_te = 1 - metrics.compute_distance(sp_x_te)
        u_sp_y_te, o_sp_y_te, c_sp_y_te = [], [], []
        for y in sp_y_te:
            u_sp_y_te.append(metrics.compute_uniqueness(y))
            o_sp_y_te.append(metrics.compute_overlap(y))
            c_sp_y_te.append(1 - metrics.compute_distance(y))

        # Log all of the metrics
        sp._log_stats('Input Base Class Train Uniqueness', u_x_tr)
        sp._log_stats('Input Base Class Train Overlap', o_x_tr)
        sp._log_stats('Input Base Class Train Correlation', c_x_tr)
        sp._log_stats('Input Base Class Test Uniqueness', u_x_te)
        sp._log_stats('Input Base Class Test Overlap', o_x_te)
        sp._log_stats('Input Base Class Test Correlation', c_x_te)
        sp._log_stats('SP Base Class Train Uniqueness', u_sp_x_tr)
        sp._log_stats('SP Base Class Train Overlap', o_sp_x_tr)
        sp._log_stats('SP Base Class Train Correlation', c_sp_x_tr)
        sp._log_stats('SP Base Class Test Uniqueness', u_sp_x_te)
        sp._log_stats('SP Base Class Test Overlap', o_sp_x_te)
        sp._log_stats('SP Base Class Test Correlation', c_sp_x_te)
        for i, (a, b, c, d, e, f) in enumerate(zip(u_y_te, o_y_te, c_y_te,
                u_sp_y_te, o_sp_y_te, c_sp_y_te), 1):
            sp._log_stats('Input Novelty Class {0} Uniqueness'.format(i), a)
            sp._log_stats('Input Novelty Class {0} Overlap'.format(i), b)
            sp._log_stats('Input Novelty Class {0} Correlation'.format(i), c)
            sp._log_stats('SP Novelty Class {0} Uniqueness'.format(i), d)
            sp._log_stats('SP Novelty Class {0} Overlap'.format(i), e)
            sp._log_stats('SP Novelty Class {0} Correlation'.format(i), f)

        # Get the average representation of the base class
        sp_base_result = np.mean(sp_x_tr, 0)
        sp_base_result[sp_base_result >= 0.5] = 1
        sp_base_result[sp_base_result < 1] = 0

        # Averaged results for each metric type
        u_sp_base_to_x_te = 0.
        o_sp_base_to_x_te = 0.
        c_sp_base_to_x_te = 0.
        u_sp, o_sp, c_sp = np.zeros(9), np.zeros(9), np.zeros(9)
        for i, x in enumerate(sp_x_te):
            xt = np.vstack((sp_base_result, x))
            u_sp_base_to_x_te += metrics.compute_uniqueness(xt)
            o_sp_base_to_x_te += metrics.compute_overlap(xt)
            c_sp_base_to_x_te += 1 - metrics.compute_distance(xt)
            for j, yi in enumerate(sp_y_te):
                yt = np.vstack((sp_base_result, yi[i]))
                u_sp[j] += metrics.compute_uniqueness(yt)
                o_sp[j] += metrics.compute_overlap(yt)
                c_sp[j] += 1 - metrics.compute_distance(yt)
        u_sp_base_to_x_te /= ntest
        o_sp_base_to_x_te /= ntest
        c_sp_base_to_x_te /= ntest
        u_sp /= ntest
        o_sp /= ntest
        c_sp /= ntest

        # Log the results
        sp._log_stats('Base Train to Base Test Uniqueness', u_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Overlap', o_sp_base_to_x_te)
        sp._log_stats('Base Train to Base Test Correlation',
                      c_sp_base_to_x_te)
        for i, j in enumerate(range(1, 10)):
            sp._log_stats('Base Train to Novelty {0} Uniqueness'.format(j),
                          u_sp[i])
            sp._log_stats('Base Train to Novelty {0} Overlap'.format(j),
                          o_sp[i])
            sp._log_stats('Base Train to Novelty {0} Correlation'.format(j),
                          c_sp[i])

        # Create an SVM
        clf = OneClassSVM(kernel='linear', nu=0.1, random_state=seed2)

        # Evaluate the SVM's performance
        clf.fit(x_tr)
        svm_x_te = len(np.where(clf.predict(x_te) == 1)[0]) / \
            float(ntest) * 100
        svm_y_te = np.array([len(np.where(clf.predict(outlier) == -1)[0]) /
                             float(ntest) * 100 for outlier in outliers])

        # Perform classification using overlap as the feature
        # -- The overlap must be above 50%
        clf_x_te = 0.
        clf_y_te = np.zeros(9)
        for i, x in enumerate(sp_x_te):
            xt = np.vstack((sp_base_result, x))
            xo = metrics.compute_overlap(xt)
            if xo >= clf_th:
                clf_x_te += 1
            for j, yi in enumerate(sp_y_te):
                yt = np.vstack((sp_base_result, yi[i]))
                yo = metrics.compute_overlap(yt)
                if yo < clf_th:
                    clf_y_te[j] += 1
        clf_x_te = (clf_x_te / ntest) * 100
        clf_y_te = (clf_y_te / ntest) * 100

        # Store the results as errors
        sp_x_results[nt] = 100 - clf_x_te
        sp_y_results[nt] = 100 - clf_y_te
        svm_x_results[nt] = 100 - svm_x_te
        svm_y_results[nt] = 100 - svm_y_te

        # Log the results
        sp._log_stats('SP % Correct Base Class', clf_x_te)
        sp._log_stats('SVM % Correct Base Class', svm_x_te)
        for i, j in enumerate(range(1, 10)):
            sp._log_stats('SP % Correct Novelty Class {0}'.format(j),
                          clf_y_te[i])
            sp._log_stats('SVM % Correct Novelty Class {0}'.format(j),
                          svm_y_te[i])
        sp._log_stats('SP % Mean Correct Novelty Class', np.mean(clf_y_te))
        sp._log_stats('SVM % Mean Correct Novelty Class', np.mean(svm_y_te))
        sp._log_stats('SP % Adjusted Score',
                      (np.mean(clf_y_te) * clf_x_te) / 100)
        sp._log_stats('SVM % Adjusted Score',
                      (np.mean(svm_y_te) * svm_x_te) / 100)

    return sp_x_results, sp_y_results, svm_x_results, svm_y_results

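# Usage sketch for the MNIST novelty experiment above. The configuration
# mirrors the ones used elsewhere in this file, with 784 inputs for the
# 28x28 MNIST images; the specific parameter values and log directory are
# assumptions for illustration.
def _demo_mnist_novelty():
    config = {
        'ninputs': 784,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': 123456789,
        'pct_active': None,
        'random_permanence': True,
        'pwindow': 0.5,
        'global_inhibition': True,
        'ncolumns': 784,
        'nactive': 78,
        'nsynapses': 100,
        'seg_th': 5,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'nepochs': 10,
        'log_dir': os.path.join(os.path.expanduser('~'), 'scratch',
                                'mnist_novelty')
    }
    sp_x, sp_y, svm_x, svm_y = base_experiment(config, ntrials=1)
    print('SP base-class error: {0:2.2f}%'.format(np.mean(sp_x)))
    print('SP mean novelty error: {0:2.2f}%'.format(np.mean(sp_y)))
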
def local_experiment(): """ Run a single experiment, locally. """ seed = 123456789 config = { 'ninputs': 100, 'trim': 1e-4, 'disable_boost': True, 'seed': seed, 'pct_active': None, 'random_permanence': True, 'pwindow': 0.5, 'global_inhibition': True, 'ncolumns': 200, 'nactive': 50, 'nsynapses': 100, 'seg_th': 5, 'syn_th': 0.5, 'pinc': 0.001, 'pdec': 0.001, 'nepochs': 10, 'log_dir': os.path.join(os.path.expanduser('~'), 'scratch', 'param_experiments', '1-1') } # Get the data nsamples, nbits, pct_active, pct_noise = 500, 100, 0.4, 0.15 ds = SPDataset(nsamples, nbits, pct_active, pct_noise, seed) data = ds.data # Metrics metrics = SPMetrics() # Get the metrics for the dataset uniqueness_data = metrics.compute_uniqueness(data) overlap_data = metrics.compute_overlap(data) correlation_data = 1 - metrics.compute_distance(data) # Create the SP sp = SPRegion(**config) # Fit the SP sp.fit(data) # Get the SP's output sp_output = sp.predict(data) # Get the metrics for the SP's results sp_uniqueness = metrics.compute_uniqueness(sp_output) sp_overlap = metrics.compute_overlap(sp_output) sp_correlation = 1 - metrics.compute_distance(sp_output) # Log all of the metrics sp._log_stats('Input Uniqueness', uniqueness_data) sp._log_stats('Input Overlap', overlap_data) sp._log_stats('Input Correlation', correlation_data) sp._log_stats('SP Uniqueness', sp_uniqueness) sp._log_stats('SP Overlap', sp_overlap) sp._log_stats('SP Correlation', sp_correlation) print 'Uniqueness:\t{0:2.4f}\t{1:2.4f}'.format(uniqueness_data, sp_uniqueness) print 'Overlap:\t{0:2.4f}\t{1:2.4f}'.format(overlap_data, sp_overlap) print 'Correlation:\t{0:2.4f}\t{1:2.4f}'.format(correlation_data, sp_correlation) # Get a new random input ds2 = SPDataset(nsamples, nbits, pct_active, pct_noise, 123) print '\n% Overlapping old class to new: \t{0:2.4f}%'.format( (float(np.dot(ds.input, ds2.input)) / nbits) * 100) # Test the SP on the new dataset sp_output2 = sp.predict(ds2.data) # Get average representation of first result original_result = np.mean(sp_output, 0) original_result[original_result >= 0.5] = 1 original_result[original_result < 1] = 0 # Get averaged results for each metric type sp_uniqueness2 = 0. sp_overlap2 = 0. sp_correlation2 = 0. for item in sp_output2: test = np.vstack((original_result, item)) sp_uniqueness2 = metrics.compute_uniqueness(test) sp_overlap2 = metrics.compute_overlap(test) sp_correlation2 = 1 - metrics.compute_distance(test) sp_uniqueness2 /= len(sp_output2) sp_overlap2 /= len(sp_output2) sp_correlation2 /= len(sp_output2) print sp_uniqueness2, sp_overlap2, sp_correlation2
def run_single_experiment(base_dir, ntrials=10, seed=123456789): """ Run the actual experiment. @param base_dir: The directory to containing the experiment to be run. @param ntrials: The number of trials to perform with different seeds. @param seed: The initial seed used to generate the other random seeds. """ # Generate the number of requested seeds seeds = generate_seeds(ntrials, seed) # Get the configuration with open(os.path.join(base_dir, 'config.json'), 'rb') as f: config = json.load(f) # Get the data and base metric data with open(os.path.join(base_dir, 'dataset.pkl'), 'rb') as f: data = cPickle.load(f) uniqueness_data, overlap_data, correlation_data = cPickle.load(f) # Metrics metrics = SPMetrics() # Execute each run for s in seeds: # Update the seed config['seed'] = s # Create the SP sp = SPRegion(**config) # Fit the SP sp.fit(data) # Get the SP's output sp_output = sp.predict(data) # Log all of the metrics sp._log_stats('Input Uniqueness', uniqueness_data) sp._log_stats('Input Overlap', overlap_data) sp._log_stats('Input Correlation', correlation_data) sp._log_stats('SP Uniqueness', metrics.compute_uniqueness(sp_output)) sp._log_stats('SP Overlap', metrics.compute_overlap(sp_output)) sp._log_stats('SP Correlation', 1 - metrics.compute_distance( sp_output))