Example #1
def make_2d_plot(p, noise=None, overlap=None):
	"""
	Make a 2D plot for the specified slice.
	
	@param p: The full path to the results file.
	
	@param noise: The degree of noise to use. If None, all values are used.
	
	@param overlap: The degree of overlap to use. If None, all values are used.
	"""
	
	# Get the data
	with open(p, 'rb') as f:
		sp_x, sp_y, svm_x, svm_y, param = cPickle.load(f)
	
	# Fix the messed up sort order
	ix = np.array(pd.DataFrame(param).sort_values([0, 1],
		ascending=[True, True]).index).astype('i')
	param = param[ix]
	sp_x, sp_y = sp_x[ix], sp_y[ix]
	svm_x, svm_y = svm_x[ix], svm_y[ix]
	
	# Refactor the data
	ix = []
	if noise is not None:
		id = 0
		id2 = 1
		val = noise
		func = float
		name = 'overlap'
		term = 100. / 40
	elif overlap is not None:
		id = 1
		id2 = 0
		val = overlap
		func = int
		name = 'noise'
		term = 100.
	else:
		raise ValueError('either noise or overlap must be specified')
	for i in xrange(len(param)):
		if func(param[i][id]) == val: ix.append(i)
	x = param[ix][:, id2] * term
	
	# Make the plots
	dir = os.path.dirname(p)
	plot_error((x, x), (np.median(sp_x[ix], 1), np.median(svm_x[ix], 1)),
		('SP', 'SVM'), (compute_err(sp_x[ix]), compute_err(svm_x[ix])),
		'% {0}'.format(name.capitalize()), '% Error', xlim=(-5, 105),
		ylim=(-5, 105), show=False,
		out_path=os.path.join(dir, 'train-{0}.png'.format(name)))
	plot_error((x, x), (np.median(sp_y[ix], 1), np.median(svm_y[ix], 1)),
		('SP', 'SVM'), (compute_err(sp_y[ix]), compute_err(svm_y[ix])),
		'% {0}'.format(name.capitalize()), '% Error', xlim=(-5, 105),
		ylim=(-5, 105), show=False,
		out_path=os.path.join(dir, 'test-{0}.png'.format(name)))
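A minimal invocation sketch for the function above. The results path is purely illustrative, and the helpers it relies on (plot_error, compute_err) are assumed to be importable from the surrounding module; judging from the float/int comparison above, noise is given as a fraction and overlap as a number of overlapping bits.

import os

results_file = os.path.join(os.path.expanduser('~'), 'scratch',
	'novelty_experiments', 'results_2d.pkl')  # illustrative path

# Slice at 35% noise (sweep over overlap), then at 20 overlapping bits
# (sweep over noise).
make_2d_plot(results_file, noise=0.35)
make_2d_plot(results_file, overlap=20)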
Example #3
def main(base_path, ntrials=4, seed=123456789):
    """
	Run the experiments.
	
	@param base_path: The full path to where the data should be stored.
	
	@param ntrials: The number of trials to use for the experiment.
	
	@param seed: The seed for the random number generator.
	"""

    # X-Axis data
    npoints = 11
    pct_noises = np.linspace(0, 1, npoints)
    x = (pct_noises * 100, pct_noises * 100)

    # Run the experiment
    results = Parallel(n_jobs=-1)(
        delayed(base_experiment)(os.path.join(base_path, 'run-{0}'.format(i)),
                                 seed2)
        for i, seed2 in enumerate(generate_seeds(ntrials, seed), 1))
    u_sp = np.zeros((len(results), npoints))
    u_ip = np.zeros((len(results), npoints))
    o_sp = np.zeros((len(results), npoints))
    o_ip = np.zeros((len(results), npoints))
    for i, (a, b, c, d) in enumerate(results):
        u_sp[i], u_ip[i], o_sp[i], o_ip[i] = a, b, c, d

    # Save the results
    with open(os.path.join(base_path, 'results.pkl'), 'wb') as f:
        cPickle.dump((u_sp, u_ip, o_sp, o_ip), f, cPickle.HIGHEST_PROTOCOL)

    # Make some plots
    e = (compute_err(u_sp, axis=0), compute_err(u_ip, axis=0))
    y = (np.median(u_sp, 0), np.median(u_ip, 0))
    plot_error(x,
               y, ('SP Output', 'Raw Data'),
               e,
               '% Noise',
               'Uniqueness [%]',
               xlim=False,
               ylim=(-5, 105),
               out_path=os.path.join(base_path, 'uniqueness.png'),
               show=False)
    e = (compute_err(o_sp, axis=0), compute_err(o_ip, axis=0))
    y = (np.median(o_sp, 0), np.median(o_ip, 0))
    plot_error(x,
               y, ('SP Output', 'Raw Data'),
               e,
               '% Noise',
               'Normalized Overlap [%]',
               xlim=False,
               ylim=(-5, 105),
               out_path=os.path.join(base_path, 'overlap.png'),
               show=False)
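Both base_experiment and generate_seeds are defined elsewhere in the project. A minimal stand-in for generate_seeds that matches how it is consumed above (one reproducible child seed per trial, derived from the master seed) might look like the following; the real helper may derive its seeds differently.

import numpy as np

def generate_seeds(ntrials, seed):
    """Yield one reproducible child seed per trial (illustrative stand-in)."""
    rng = np.random.RandomState(seed)
    for _ in range(ntrials):
        yield int(rng.randint(0, 2 ** 31 - 1))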
Example #4
def plot_single_run(bp):
    """
	Create an error plot for a single run.
	
	@param bp: The base path.
	"""
    def read(p):
        """
		Read in the data.
		
		@param p: The path to the file to read.
		
		@return: The results.
		"""

        with open(p, 'rb') as f:
            reader = csv.reader(f)
            data = []
            for row in reader:
                data.append(float(row[1]))
        return np.array(data) * 100

    def get_data(p):
        """
		Get all of the results.
		
		@param p: The directory to obtain the data in.
		
		@return: The results.
		"""

        permanence = []
        for d in os.listdir(p):
            npath = os.path.join(p, d)
            if os.path.isdir(npath):
                permanence.append(
                    read(os.path.join(npath, 'permanence_boost.csv')))
        return np.array(permanence)

    data = get_data(bp)
    plot_error(show=True,
               x_series=(np.arange(data.shape[1]), ),
               y_series=(np.median(data, 0), ),
               y_errs=(compute_err(data, axis=0), ),
               xlim=(0, 20),
               ylim=(0, 100),
               x_label='Iteration',
               y_label='% Columns Boosted')
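compute_err is another project helper that is not shown on this page. Its call sites (paired with np.median, unpacked as e[0] and e[1] when axis=None, and passed to plot_error as y_errs) suggest it returns lower and upper error-bar magnitudes; the quartile-based stand-in below only sketches that contract and is not the project's actual definition.

import numpy as np

def compute_err(x, axis=1):
    """Return (lower, upper) error-bar sizes about the median (illustrative)."""
    med = np.median(x, axis=axis)
    lower = med - np.percentile(x, 25, axis=axis)
    upper = np.percentile(x, 75, axis=axis) - med
    return np.array([lower, upper])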
Example #5
def plot_single_run(bp):
	"""
	Create an error plot for a single run.
	
	@param bp: The base path.
	"""
	
	def read(p):
		"""
		Read in the data.
		
		@param p: The path to the file to read.
		
		@return: The results.
		"""
		
		with open(p, 'rb') as f:
			reader = csv.reader(f)
			data = []
			for row in reader:
				data.append(float(row[1]))
		return np.array(data) * 100
	
	def get_data(p):
		"""
		Get all of the results.
		
		@param p: The directory to obtain the data in.
		
		@return: The results.
		"""
		
		permanence = []
		for d in os.listdir(p):
			npath = os.path.join(p, d)
			if os.path.isdir(npath):
				permanence.append(read(os.path.join(npath,
					'permanence_boost.csv')))
		return np.array(permanence)
	
	data = get_data(bp)
	plot_error(show=True,
		x_series=(np.arange(data.shape[1]),),
		y_series=(np.median(data, 0),),
		y_errs=(compute_err(data, axis=0), ),
		xlim=(0, 20), ylim=(0, 100),
		x_label='Iteration', y_label='% Columns Boosted'
		)
Example #6
def main(base_path, ntrials=4, seed=123456789):
	"""
	Run the experiments.
	
	@param base_path: The full path to where the data should be stored.
	
	@param ntrials: The number of trials to use for the experiment.
	
	@param seed: The seed for the random number generator.
	"""
	
	# X-Axis data
	npoints = 11
	pct_noises = np.linspace(0, 1, npoints)
	x = (pct_noises * 100, pct_noises * 100)
	
	# Run the experiment
	results = Parallel(n_jobs=-1)(delayed(base_experiment)(
		os.path.join(base_path, 'run-{0}'.format(i)), seed2) for i, seed2 in
		enumerate(generate_seeds(ntrials, seed), 1))
	u_sp = np.zeros((len(results), npoints))
	u_ip = np.zeros((len(results), npoints))
	o_sp = np.zeros((len(results), npoints))
	o_ip = np.zeros((len(results), npoints))
	for i, (a, b, c, d) in enumerate(results):
		u_sp[i], u_ip[i], o_sp[i], o_ip[i] = a, b, c, d
	
	# Save the results
	with open(os.path.join(base_path, 'results.pkl'), 'wb') as f:
		cPickle.dump((u_sp, u_ip, o_sp, o_ip), f, cPickle.HIGHEST_PROTOCOL)
	
	# Make some plots
	e = (compute_err(u_sp, axis=0), compute_err(u_ip, axis=0))
	y = (np.median(u_sp, 0), np.median(u_ip, 0))
	plot_error(x, y, ('SP Output', 'Raw Data'), e, '% Noise', 'Uniqueness [%]',
		xlim=False, ylim=(-5, 105), out_path=os.path.join(base_path,
		'uniqueness.png'), show=False)
	e = (compute_err(o_sp, axis=0), compute_err(o_ip, axis=0))
	y = (np.median(o_sp, 0), np.median(o_ip, 0))
	plot_error(x, y, ('SP Output', 'Raw Data'), e, '% Noise',
		'Normalized Overlap [%]', xlim=False, ylim=(-5, 105),
		out_path=os.path.join(base_path, 'overlap.png'), show=False)
Example #7
def main2(base_path):
	"""
	@param base_path: Full path to pickle file to work with.
	"""
	
	# Mapping of independent variables to indexes
	data_index = {
		'fit_time':0,
		'learn_fit_time':1,
		'pred_fit_time':2,
		'input_uniqueness':3,
		'input_overlap':4,
		'input_correlation':5,
		'sp_uniqueness':6,
		'sp_overlap':7,
		'sp_correlation':8
	}
	
	# Get the data
	with open(base_path, 'rb') as f:
		x, y = cPickle.load(f)
	x = sorted(set(x[-1])) # For now work with 1D
	
	# Pull out data for this plot
	y1 = (y[data_index['input_uniqueness']], y[data_index['sp_uniqueness']])
	y2 = (y[data_index['input_overlap']], y[data_index['sp_overlap']])
	y3 = (y[data_index['input_correlation']], y[data_index['sp_correlation']])
	
	# Refactor the data
	x_series = (x, x, x)
	med = lambda y: np.median(y, axis=1) * 100
	err = lambda y: compute_err(y, axis=1) * 100
	y1_series = map(med, y1)
	y1_errs = map(err, y1)
	y2_series = map(med, y2)
	y2_errs = map(err, y2)
	y3_series = map(med, y3)
	y3_errs = map(err, y3)
	
	# Make the main plot
	fig = plt.figure(figsize=(21, 20), facecolor='white')
	ax = fig.add_subplot(111)
	ax.spines['top'].set_color('none')
	ax.spines['bottom'].set_color('none')
	ax.spines['left'].set_color('none')
	ax.spines['right'].set_color('none')
	ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
		right='off')
	
	# Make subplots
	ax1 = fig.add_subplot(311)
	plot_error(show=False, legend=False, ax=ax1, title='Uniqueness',
		x_series=x_series, y_series=y1_series, y_errs=y1_errs, ylim=(-5, 105))
	ax2 = fig.add_subplot(312, sharex=ax1, sharey=ax1)
	plot_error(show=False, legend=False, ax=ax2, title='Overlap',
		x_series=x_series, y_series=y2_series, y_errs=y2_errs, ylim=(-5, 105))
	ax3 = fig.add_subplot(313, sharex=ax1, sharey=ax1)
	plot_error(show=False, legend=False, ax=ax3, title='Correlation',
		x_series=x_series, y_series=y3_series, y_errs=y3_errs, ylim=(-5, 105))
	plt.tight_layout(h_pad=2)
	plt.show()
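The (x, y) pickle consumed here is produced elsewhere in the project; the shapes noted below are inferred from the indexing above (nine metric rows, each holding one set of trial results per parameter value) and are assumptions rather than a documented format. A quick sanity check might look like:

import cPickle
import numpy as np

with open('results.pkl', 'rb') as f:  # illustrative path
	x, y = cPickle.load(f)
print(len(y))                  # expected: 9, one row per data_index entry
print(np.asarray(y[0]).shape)  # expected: (n_parameter_values, n_trials)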
Example #8
def main(ntrials=10, seed=123456798):
    """
	Vary the amount of noise and overlap and see how the system performs.
	
	@param ntrials: The number of trials to perform.
	
	@param seed: The seed for the random number generator.
	"""

    # Amount to vary each type by
    pct_noises = np.linspace(0, 1, 101)
    noverlap_bits = np.arange(0, 41)

    # Vary the noise
    results = Parallel(n_jobs=-1)(delayed(base_experiment)(
        noise, 0, 'noise-{0}'.format(i), ntrials, False, seed)
                                  for i, noise in enumerate(pct_noises, 1))
    noise_x, noise_y, svm_noise_x, svm_noise_y = [], [], [], []
    noise_x_err1, noise_x_err2, noise_y_err1, noise_y_err2 = [], [], [], []
    svm_noise_x_err1, svm_noise_x_err2 = [], []
    svm_noise_y_err1, svm_noise_y_err2 = [], []
    for sp_x, sp_y, svm_x, svm_y in results:
        noise_x.append(np.median(sp_x))
        noise_y.append(np.median(sp_y))
        svm_noise_x.append(np.median(svm_x))
        svm_noise_y.append(np.median(svm_y))
        e = compute_err(sp_x, axis=None)
        noise_x_err1.append(e[0])
        noise_x_err2.append(e[1])
        e = compute_err(sp_y, axis=None)
        noise_y_err1.append(e[0])
        noise_y_err2.append(e[1])
        e = compute_err(svm_x, axis=None)
        svm_noise_x_err1.append(e[0])
        svm_noise_x_err2.append(e[1])
        e = compute_err(svm_y, axis=None)
        svm_noise_y_err1.append(e[0])
        svm_noise_y_err2.append(e[1])
    noise_x_err = (noise_x_err1, noise_x_err2)
    noise_y_err = (noise_y_err1, noise_y_err2)
    svm_noise_x_err = (svm_noise_x_err1, svm_noise_x_err2)
    svm_noise_y_err = (svm_noise_y_err1, svm_noise_y_err2)

    # Vary the overlaps
    results = Parallel(n_jobs=-1)(
        delayed(base_experiment)(0.35, overlap, 'overlap-{0}'.format(i),
                                 ntrials, False, seed)
        for i, overlap in enumerate(noverlap_bits, 1))
    overlap_x, overlap_y, svm_overlap_x, svm_overlap_y = [], [], [], []
    overlap_x_err1, overlap_x_err2 = [], []
    overlap_y_err1, overlap_y_err2 = [], []
    svm_overlap_x_err1, svm_overlap_x_err2 = [], []
    svm_overlap_y_err1, svm_overlap_y_err2 = [], []
    for sp_x, sp_y, svm_x, svm_y in results:
        overlap_x.append(np.median(sp_x))
        overlap_y.append(np.median(sp_y))
        svm_overlap_x.append(np.median(svm_x))
        svm_overlap_y.append(np.median(svm_y))
        e = compute_err(sp_x, axis=None)
        overlap_x_err1.append(e[0])
        overlap_x_err2.append(e[1])
        e = compute_err(sp_y, axis=None)
        overlap_y_err1.append(e[0])
        overlap_y_err2.append(e[1])
        e = compute_err(svm_x, axis=None)
        svm_overlap_x_err1.append(e[0])
        svm_overlap_x_err2.append(e[1])
        e = compute_err(svm_y, axis=None)
        svm_overlap_y_err1.append(e[0])
        svm_overlap_y_err2.append(e[1])
    overlap_x_err = (overlap_x_err1, overlap_x_err2)
    overlap_y_err = (overlap_y_err1, overlap_y_err2)
    svm_overlap_x_err = (svm_overlap_x_err1, svm_overlap_x_err2)
    svm_overlap_y_err = (svm_overlap_y_err1, svm_overlap_y_err2)

    # Save the results
    p = os.path.join(os.path.expanduser('~'), 'scratch', 'novelty_experiments')
    with open(os.path.join(p, 'results.pkl'), 'wb') as f:
        cPickle.dump((noise_x, noise_y), f, cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((svm_noise_x, svm_noise_y), f, cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((noise_x_err, noise_y_err), f, cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((svm_noise_x_err, svm_noise_y_err), f,
                     cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((overlap_x, overlap_y), f, cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((svm_overlap_x, svm_overlap_y), f,
                     cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((overlap_x_err, overlap_y_err), f,
                     cPickle.HIGHEST_PROTOCOL)
        cPickle.dump((svm_overlap_x_err, svm_overlap_y_err), f,
                     cPickle.HIGHEST_PROTOCOL)

    # Make the plots
    plot_error((pct_noises * 100, pct_noises * 100), (noise_x, svm_noise_x),
               ('SP', 'SVM'), (noise_x_err, svm_noise_x_err),
               '% Noise',
               '% Error',
               'Noise: Base Class',
               out_path=os.path.join(p, 'noise_base.png'),
               xlim=(-5, 105),
               ylim=(-5, 105),
               show=False)
    plot_error((pct_noises * 100, pct_noises * 100), (noise_y, svm_noise_y),
               ('SP', 'SVM'), (noise_y_err, svm_noise_y_err),
               '% Noise',
               '% Error',
               'Noise: Novelty Class',
               out_path=os.path.join(p, 'noise_novelty.png'),
               xlim=(-5, 105),
               ylim=(-5, 105),
               show=False)
    noverlap_pct = noverlap_bits / 40. * 100
    plot_error((noverlap_pct, noverlap_pct), (overlap_x, svm_overlap_x),
               ('SP', 'SVM'), (overlap_x_err, svm_overlap_x_err),
               '% Overlapping Bits',
               '% Error',
               'Overlap: Base Class',
               out_path=os.path.join(p, 'overlap_base.png'),
               xlim=(-5, 105),
               ylim=(-5, 105),
               show=False)
    plot_error((noverlap_pct, noverlap_pct), (overlap_y, svm_overlap_y),
               ('SP', 'SVM'), (overlap_y_err, svm_overlap_y_err),
               '% Overlapping Bits',
               '% Error',
               'Overlap: Novelty Class',
               out_path=os.path.join(p, 'overlap_novelty.png'),
               xlim=(-5, 105),
               ylim=(-5, 105),
               show=False)
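The results are written beneath ~/scratch/novelty_experiments, which the function assumes already exists. A guard along these lines (illustrative, not part of the original script) avoids an IOError on a fresh machine:

import os

out_dir = os.path.join(os.path.expanduser('~'), 'scratch',
                       'novelty_experiments')
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
main()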
Example #9
def main(ntrials=10, seed=123456798):
	"""
	Vary the amount of noise and overlap and see how the system performs.
	
	@param ntrials: The number of trials to perform.
	
	@param seed: The seed for the random number generator.
	"""
	
	# Amount to vary each type by
	pct_noises = np.linspace(0, 1, 101)
	noverlap_bits = np.arange(0, 41)
	
	# Vary the noise
	results = Parallel(n_jobs=-1)(delayed(base_experiment)(noise, 0,
		'noise-{0}'.format(i), ntrials, False, seed) for i, noise in enumerate(
		pct_noises, 1))
	noise_x, noise_y, svm_noise_x, svm_noise_y = [], [], [], []
	noise_x_err1, noise_x_err2, noise_y_err1, noise_y_err2 = [], [], [], []
	svm_noise_x_err1, svm_noise_x_err2 = [], []
	svm_noise_y_err1, svm_noise_y_err2 = [], []
	for sp_x, sp_y, svm_x, svm_y in results:
		noise_x.append(np.median(sp_x))
		noise_y.append(np.median(sp_y))
		svm_noise_x.append(np.median(svm_x))
		svm_noise_y.append(np.median(svm_y))
		e = compute_err(sp_x, axis=None)
		noise_x_err1.append(e[0])
		noise_x_err2.append(e[1])
		e = compute_err(sp_y, axis=None)
		noise_y_err1.append(e[0])
		noise_y_err2.append(e[1])
		e = compute_err(svm_x, axis=None)
		svm_noise_x_err1.append(e[0])
		svm_noise_x_err2.append(e[1])
		e = compute_err(svm_y, axis=None)
		svm_noise_y_err1.append(e[0])
		svm_noise_y_err2.append(e[1])
	noise_x_err = (noise_x_err1, noise_x_err2)
	noise_y_err = (noise_y_err1, noise_y_err2)
	svm_noise_x_err = (svm_noise_x_err1, svm_noise_x_err2)
	svm_noise_y_err = (svm_noise_y_err1, svm_noise_y_err2)
	
	# Vary the overlaps
	results = Parallel(n_jobs=-1)(delayed(base_experiment)(0.35, overlap,
		'overlap-{0}'.format(i), ntrials, False, seed) for i, overlap in
		enumerate(noverlap_bits, 1))
	overlap_x, overlap_y, svm_overlap_x, svm_overlap_y = [], [], [], []
	overlap_x_err1, overlap_x_err2 = [], []
	overlap_y_err1, overlap_y_err2 = [], []
	svm_overlap_x_err1, svm_overlap_x_err2 = [], []
	svm_overlap_y_err1, svm_overlap_y_err2 = [], []
	for sp_x, sp_y, svm_x, svm_y in results:
		overlap_x.append(np.median(sp_x))
		overlap_y.append(np.median(sp_y))
		svm_overlap_x.append(np.median(svm_x))
		svm_overlap_y.append(np.median(svm_y))
		e = compute_err(sp_x, axis=None)
		overlap_x_err1.append(e[0])
		overlap_x_err2.append(e[1])
		e = compute_err(sp_y, axis=None)
		overlap_y_err1.append(e[0])
		overlap_y_err2.append(e[1])
		e = compute_err(svm_x, axis=None)
		svm_overlap_x_err1.append(e[0])
		svm_overlap_x_err2.append(e[1])
		e = compute_err(svm_y, axis=None)
		svm_overlap_y_err1.append(e[0])
		svm_overlap_y_err2.append(e[1])
	overlap_x_err = (overlap_x_err1, overlap_x_err2)
	overlap_y_err = (overlap_y_err1, overlap_y_err2)
	svm_overlap_x_err = (svm_overlap_x_err1, svm_overlap_x_err2)
	svm_overlap_y_err = (svm_overlap_y_err1, svm_overlap_y_err2)
	
	# Save the results
	p = os.path.join(os.path.expanduser('~'), 'scratch', 'novelty_experiments')
	with open(os.path.join(p, 'results.pkl'), 'wb') as f:
		cPickle.dump((noise_x, noise_y), f, cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((svm_noise_x, svm_noise_y), f, cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((noise_x_err, noise_y_err), f, cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((svm_noise_x_err, svm_noise_y_err), f,
			cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((overlap_x, overlap_y), f, cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((svm_overlap_x, svm_overlap_y), f,
			cPickle.HIGHEST_PROTOCOL)		
		cPickle.dump((overlap_x_err, overlap_y_err), f,
			cPickle.HIGHEST_PROTOCOL)
		cPickle.dump((svm_overlap_x_err, svm_overlap_y_err), f,
			cPickle.HIGHEST_PROTOCOL)
	
	# Make the plots
	plot_error((pct_noises * 100, pct_noises * 100), (noise_x, svm_noise_x),
		('SP', 'SVM'), (noise_x_err, svm_noise_x_err), '% Noise', '% Error',
		'Noise: Base Class', out_path=os.path.join(p, 'noise_base.png'),
		xlim=(-5, 105), ylim=(-5, 105), show=False)
	plot_error((pct_noises * 100, pct_noises * 100), (noise_y, svm_noise_y),
		('SP', 'SVM'), (noise_y_err, svm_noise_y_err), '% Noise', '% Error',
		'Noise: Novelty Class', out_path=os.path.join(p, 'noise_novelty.png'),
		xlim=(-5, 105), ylim=(-5, 105), show=False)
	noverlap_pct = noverlap_bits / 40. * 100
	plot_error((noverlap_pct, noverlap_pct), (overlap_x, svm_overlap_x),
		('SP', 'SVM'), (overlap_x_err, svm_overlap_x_err),
		'% Overlapping Bits', '% Error', 'Overlap: Base Class',
		out_path=os.path.join(p, 'overlap_base.png'), xlim=(-5, 105),
		ylim=(-5, 105), show=False)
	plot_error((noverlap_pct, noverlap_pct), (overlap_y, svm_overlap_y),
		('SP', 'SVM'), (overlap_y_err, svm_overlap_y_err),
		'% Overlapping Bits', '% Error', 'Overlap: Novelty Class',
		out_path=os.path.join(p, 'overlap_novelty.png'), xlim=(-5, 105),
		ylim=(-5, 105), show=False)
Example #10
def plot_single_run(bp1, bp2):
    """Create an error plot for a single run.

    @param bp1: The base path for global inhibition results.

    @param bp2: The base path for local inhibition results.
    """
    def read(p):
        """Read in the data.

        @param p: The path to the file to read.

        @return: The results.
        """
        with open(p, 'r') as f:
            reader = csv.reader(f)
            data = []
            for row in reader:
                data.append(float(row[1]))
        return np.array(data) * 100

    def get_data(p):
        """Get all of the results.

        @param p: The directory to obtain the data in.

        @return: The results.
        """
        permanence = []
        for d in os.listdir(p):
            npath = os.path.join(p, d)
            if os.path.isdir(npath):
                permanence.append(
                    read(os.path.join(npath, 'permanence_boost.csv')))
        return np.array(permanence)

    # Get the data
    data = [get_data(bp1)]
    data.append(get_data(bp2))

    # Build the series
    x_series = (np.arange(data[0].shape[1]), )

    # Make the main plot
    fig = plt.figure(figsize=(21, 20), facecolor='white')
    ax = fig.add_subplot(111)
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w',
                   top='off',
                   bottom='off',
                   left='off',
                   right='off')
    ax.set_xlabel('Iteration')
    ax.set_ylabel('% Columns Boosted')

    # Make subplots
    ax1 = fig.add_subplot(211)
    plot_error(show=False,
               legend=False,
               ax=ax1,
               title='Global Inhibition',
               x_series=x_series,
               y_series=(np.median(data[0], 0), ),
               y_errs=(compute_err(data[0], axis=0), ),
               xlim=(0, 200),
               ylim=(0, 100))
    ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
    plot_error(show=False,
               ax=ax2,
               title='Local Inhibition',
               legend=False,
               x_series=x_series,
               y_series=(np.median(data[1], 0), ),
               y_errs=(compute_err(data[1], axis=0), ),
               xlim=(0, 200),
               ylim=(0, 100))

    # Save it
    # plt.show()
    plt.subplots_adjust(bottom=0.15, hspace=0.3)
    plt.savefig('boost_permanence.png',
                format='png',
                facecolor=fig.get_facecolor(),
                edgecolor='none')
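An illustrative call comparing the two inhibition modes; the directory names are assumptions about where the global and local runs were stored, and each directory is expected to hold one subdirectory per run containing a permanence_boost.csv log.

import os

home = os.path.expanduser('~')
plot_single_run(os.path.join(home, 'scratch', 'boost_global'),
                os.path.join(home, 'scratch', 'boost_local'))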
Example #11
def plot_density_results(bp, bp2=None):
    """Average the results.

    @param bp: The base path.

    @param bp2: The second base path.
    """
    def average(p):
        """Compute the average activations for each density.

        @param p: The path to the file.

        @return: The average.
        """
        with open(p, 'r') as f:
            reader = csv.reader(f)
            data = []
            for row in reader:
                data.append(float(row[1]))
        return np.mean(data) * 100

    def get_data(p):
        """Get the data for a single run.

        @param p: The path.

        @return: A tuple containing the overlap and permanences.
        """
        overlap, permanence = [], []
        for d in os.listdir(p):
            npath = os.path.join(p, d)
            if os.path.isdir(npath):
                overlap.append(
                    average(os.path.join(npath, 'overlap_boost.csv')))
                permanence.append(
                    average(os.path.join(npath, 'permanence_boost.csv')))
        return np.array(overlap), np.array(permanence)

    def get_all_data(bp):
        """Get the data for all runs.

        @param bp: The base path.

        @return: A tuple containing the sparsity, overlap, and permanences.
        """
        overlap, permanence, sparsity = [], [], []
        for d in sorted([int(x) for x in os.listdir(bp)]):
            sparsity.append((1 - (d / 100.)) * 100)
            o, p = get_data(os.path.join(bp, str(d)))
            overlap.append(o)
            permanence.append(p)
        return np.array(sparsity[::-1]), np.array(overlap[::-1]), \
            np.array(permanence[::-1])

    def make_plot_params(sparsity, overlap, permanence, title=None):
        """Generate the parameters for the plot.

        @param sparsity: The sparsity array.

        @param overlap: The overlap array.

        @param permanence: The permanence array.

        @param title: The title for the plot.

        @return: A dictionary with the parameters.
        """
        return {
            'x_series': (sparsity, sparsity),
            'y_series': (np.median(overlap, 1), np.median(permanence, 1)),
            'series_names': ('Overlap Boosting', 'Permanence Boosting'),
            'y_errs': (compute_err(overlap), compute_err(permanence)),
            'xlim': (0, 100),
            'ylim': (0, 45),
            'title': title
        }

    data = get_all_data(bp)
    if bp2 is None:
        plot_error(**make_plot_params(*data))
    else:
        # Make main plot
        fig = plt.figure(figsize=(21, 20), facecolor='white')
        ax = fig.add_subplot(111)
        ax.spines['top'].set_color('none')
        ax.spines['bottom'].set_color('none')
        ax.spines['left'].set_color('none')
        ax.spines['right'].set_color('none')
        ax.tick_params(labelcolor='w',
                       top='off',
                       bottom='off',
                       left='off',
                       right='off')
        ax.set_xlabel('Sparsity [%]')
        ax.set_ylabel('% Columns Boosted')

        # Make subplots
        ax1 = fig.add_subplot(211)
        plot_error(show=False,
                   legend=False,
                   ax=ax1,
                   **make_plot_params(*data, title='Global Inhibition'))
        data2 = get_all_data(bp2)
        ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
        plot_error(show=False,
                   ax=ax2,
                   **make_plot_params(*data2, title='Local Inhibition'))

        # Save it
        # plt.show()
        plt.subplots_adjust(bottom=0.15, hspace=0.3)
        plt.savefig('boost_sparseness.png',
                    format='png',
                    facecolor=fig.get_facecolor(),
                    edgecolor='none')
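get_all_data expects one directory per tested density, named by its integer percentage, each holding one subdirectory per run with the two boost CSV logs. The layout and the paths below are inferred from the listdir/int handling above and are purely illustrative.

# Assumed on-disk layout:
#   <bp>/10/run-1/overlap_boost.csv
#   <bp>/10/run-1/permanence_boost.csv
#   <bp>/20/run-1/...
import os

home = os.path.expanduser('~')
plot_density_results(os.path.join(home, 'scratch', 'density_global'))
plot_density_results(os.path.join(home, 'scratch', 'density_global'),
                     os.path.join(home, 'scratch', 'density_local'))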
Example #12
def plot_single_run(bp1, bp2):
	"""
	Create an error plot for a single run.
	
	@param bp1: The base path for global inhibition results.
	
	@param bp2: The base path for local inhibition results.
	"""
	
	def read(p):
		"""
		Read in the data.
		
		@param p: The path to the file to read.
		
		@return: The results.
		"""
		
		with open(p, 'rb') as f:
			reader = csv.reader(f)
			data = []
			for row in reader:
				data.append(float(row[1]))
		return np.array(data) * 100
	
	def get_data(p):
		"""
		Get all of the results.
		
		@param p: The directory to obtain the data in.
		
		@return: The results.
		"""
		
		permanence = []
		for d in os.listdir(p):
			npath = os.path.join(p, d)
			if os.path.isdir(npath):
				permanence.append(read(os.path.join(npath,
					'permanence_boost.csv')))
		return np.array(permanence)
	
	# Get the data
	data = [get_data(bp1)]
	data.append(get_data(bp2))
	
	# Build the series
	x_series = (np.arange(data[0].shape[1]), )
	
	# Make the main plot
	fig = plt.figure(figsize=(21, 20), facecolor='white')
	ax = fig.add_subplot(111)
	ax.spines['top'].set_color('none')
	ax.spines['bottom'].set_color('none')
	ax.spines['left'].set_color('none')
	ax.spines['right'].set_color('none')
	ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
		right='off')
	ax.set_xlabel('Iteration')
	ax.set_ylabel('% Columns Boosted')
	
	# Make subplots
	ax1 = fig.add_subplot(211)
	plot_error(show=False, legend=False, ax=ax1, title='Global Inhibition',
		x_series=x_series, y_series=(np.median(data[0], 0), ),
		y_errs=(compute_err(data[0], axis=0),), xlim=(0, 200), ylim=(0, 100))
	ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
	plot_error(show=False, ax=ax2, title='Local Inhibition', legend=False,
		x_series=x_series, y_series=(np.median(data[1], 0), ),
		y_errs=(compute_err(data[1], axis=0),), xlim=(0, 200), ylim=(0, 100))
	
	# Save it
	plt.subplots_adjust(bottom=0.15, hspace=0.3)
	plt.savefig('boost_permanence.png', format='png',
		facecolor=fig.get_facecolor(), edgecolor='none')
Example #13
def plot_density_results(bp, bp2=None):
	"""
	Average the results.
	
	@param bp: The base path.
	
	@param bp2: The second base path.
	"""
	
	def average(p):
		"""
		Compute the average activations for each density.
		
		@param p: The path to the file.
		
		@return: The average.
		"""
		
		with open(p, 'rb') as f:
			reader = csv.reader(f)
			data = []
			for row in reader:
				data.append(float(row[1]))
		return np.mean(data) * 100
	
	def get_data(p):
		"""
		Get the data for a single run.
		
		@param p: The path.
		
		@return: A tuple containing the overlap and permanences.
		"""
		
		overlap, permanence = [], []
		for d in os.listdir(p):
			npath = os.path.join(p, d)
			if os.path.isdir(npath):
				overlap.append(average(os.path.join(npath,
					'overlap_boost.csv')))
				permanence.append(average(os.path.join(npath,
					'permanence_boost.csv')))
		return np.array(overlap), np.array(permanence)
	
	def get_all_data(bp):
		"""
		Get the data for all runs.
		
		@param bp: The base path.
		
		@return: A tuple containing the sparsity, overlap, and permanences.
		"""
		
		overlap, permanence, sparsity = [], [], []
		for d in sorted([int(x) for x in os.listdir(bp)]):
			sparsity.append((1 - (d / 100.)) * 100)
			o, p = get_data(os.path.join(bp, str(d)))
			overlap.append(o)
			permanence.append(p)
		return np.array(sparsity[::-1]), np.array(overlap[::-1]), \
			np.array(permanence[::-1])
	
	def make_plot_params(sparsity, overlap, permanence, title=None):
		"""
		Generate the parameters for the plot.
		
		@param sparsity: The sparsity array.
		
		@param overlap: The overlap array.
		
		@param permanence: The permanence array.
		
		@param title: The title for the plot.
		
		@return: A dictionary with the parameters.
		"""
		
		return {'x_series':(sparsity, sparsity),
			'y_series':(np.median(overlap, 1), np.median(permanence, 1)),
			'series_names':('Overlap Boosting', 'Permanence Boosting'),
			'y_errs':(compute_err(overlap), compute_err(permanence)),
			'xlim':(0, 100), 'ylim':(0, 45), 'title':title
			}
	
	data = get_all_data(bp)
	if bp2 is None:
		plot_error(**make_plot_params(*data))
	else:
		# Make main plot
		fig = plt.figure(figsize=(21, 20), facecolor='white')
		ax = fig.add_subplot(111)
		ax.spines['top'].set_color('none')
		ax.spines['bottom'].set_color('none')
		ax.spines['left'].set_color('none')
		ax.spines['right'].set_color('none')
		ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
			right='off')
		ax.set_xlabel('Sparsity [%]')
		ax.set_ylabel('% Columns Boosted')
		
		# Make subplots
		ax1 = fig.add_subplot(211)
		plot_error(show=False, legend=False, ax=ax1, **make_plot_params(*data,
			title='Global Inhibition'))
		data2 = get_all_data(bp2)
		ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
		plot_error(show=False, ax=ax2, **make_plot_params(*data2,
			title='Local Inhibition'))
		
		# Save it
		plt.subplots_adjust(bottom=0.15, hspace=0.3)
		plt.savefig('boost_sparseness.png', format='png',
			facecolor=fig.get_facecolor(), edgecolor='none')