Example #1
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('input', type=str)
    parser.add_argument('output', type=str, nargs='+')
    parser.add_argument('--filter', '-s', type=int, default=0)
    parser.add_argument(
        '--fps',
        '-f',
        type=float,
        default=100.,
        help='Up- or downsample data to match this sampling rate (100 fps).')
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args = parser.parse_args(argv[1:])

    # load data
    data = load_data(args.input)

    # preprocess data
    data = preprocess(data,
                      fps=args.fps,
                      filter=args.filter if args.filter > 0 else None,
                      verbosity=args.verbosity)

    for filepath in args.output:
        if filepath.lower().endswith('.mat'):
            # store in MATLAB format
            savemat(filepath, convert({'data': data}))
        else:
            with open(filepath, 'w') as handle:
                dump(data, handle, protocol=2)

    return 0
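These listings are command-line entry points from the c2s spike-inference package and omit their imports. A minimal sketch of the boilerplate they assume follows; the c2s names match the project's public API, but treat the exact import path of convert as an assumption:

import sys
from argparse import ArgumentParser
from pickle import dump        # non-.mat output branch
from scipy.io import savemat   # .mat output branch

from c2s import load_data, preprocess, predict, train
from c2s.utils import convert  # assumed helper that makes dicts savemat-friendly

if __name__ == '__main__':
    sys.exit(main(sys.argv))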
Example #2
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('input',             type=str)
	parser.add_argument('output',            type=str, nargs='+')
	parser.add_argument('--filter',    '-s', type=int,   default=0)
	parser.add_argument('--fps',       '-f', type=float, default=100.,
		help='Up- or downsample data to match this sampling rate (100 fps).' )
	parser.add_argument('--verbosity', '-v', type=int,   default=1)

	args = parser.parse_args(argv[1:])

	# load data
	data = load_data(args.input)

	# preprocess data
	data = preprocess(
		data,
		fps=args.fps,
		filter=args.filter if args.filter > 0 else None,
		verbosity=args.verbosity)

	for filepath in args.output:
		if filepath.lower().endswith('.mat'):
			# store in MATLAB format
			savemat(filepath, {'data': data})
		else:
			with open(filepath, 'w') as handle:
				dump(data, handle, protocol=2)

	return 0
Example #3
File: c2s-info.py  Project: cajal/c2s
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',            type=str)

	args = parser.parse_args(argv[1:])

	# load data
	data = load_data(args.dataset)

	def prints(left, right):
		print('{0:<30} {1}'.format(left, right))

	num_spikes = 0
	length = 0
	for entry in data:
		length += entry['calcium'].size / float(entry['fps']) # seconds
		if 'spike_times' in entry:
			num_spikes += entry['spike_times'].size
		elif 'spikes' in entry:
			num_spikes += entry['spikes'].sum()

	if 'cell_num' in data[0]:
		num_cells = len(unique([entry['cell_num'] for entry in data]))
	else:
		num_cells = len(data)

	prints('Number of cells:', '{0}'.format(num_cells))
	prints('Number of traces:', '{0}'.format(len(data)))
	prints('Total length:', '{0} minutes, {1} seconds'.format(int(length) // 60, int(length) % 60))
	prints('Total number of spikes:', num_spikes)
	prints('Average firing rate:', '{0:.2f} [spike/sec]'.format(num_spikes / length))
	prints('Average sampling rate:', '{0:.1f}'.format(mean([entry['fps'] for entry in data])))

	return 0
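The statistics above only require 'calcium', 'fps', and one of 'spikes'/'spike_times' per entry. A toy run of the length computation with synthetic data (numpy only; the values are made up):

from numpy import zeros

data = [{'calcium': zeros(1200), 'fps': 20.0},   # 60 seconds
        {'calcium': zeros(600),  'fps': 10.0}]   # 60 seconds
length = sum(entry['calcium'].size / float(entry['fps']) for entry in data)
print('{0:<30} {1}'.format('Total length:', '{0} minutes, {1} seconds'.format(
    int(length) // 60, int(length) % 60)))  # Total length: 2 minutes, 0 seconds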
Example #4
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)
    parser.add_argument('output', type=str, nargs='+')
    parser.add_argument('--model', '-m', type=str, default='')
    parser.add_argument(
        '--preprocess',
        '-p',
        type=int,
        default=0,
        help=
        'If you haven\'t already applied `preprocess` to the data, set to 1 (default: 0).'
    )
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args = parser.parse_args(argv[1:])

    experiment = Experiment()

    # load data
    data = load_data(args.dataset)

    if args.preprocess:
        # preprocess data
        data = preprocess(data, args.verbosity)

    if args.model:
        # load training results
        results = Experiment(args.model)['models']
    else:
        # use default model
        results = None

    # predict firing rates
    data = predict(data, results, verbosity=args.verbosity)

    # remove data except predictions
    for entry in data:
        if 'spikes' in entry:
            del entry['spikes']
        if 'spike_times' in entry:
            del entry['spike_times']
        del entry['calcium']

    for filepath in args.output:
        if filepath.lower().endswith('.mat'):
            # store in MATLAB format
            savemat(filepath, {'data': data})
        else:
            with open(filepath, 'w') as handle:
                dump(data, handle, protocol=2)

    return 0
Example #5
File: c2s-predict.py  Project: cajal/c2s
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',            type=str)
	parser.add_argument('output',             type=str, nargs='+')
	parser.add_argument('--model',      '-m', type=str, default='')
	parser.add_argument('--preprocess', '-p', type=int, default=0,
		help='If you haven\'t already applied `preprocess` to the data, set to 1 (default: 0).')
	parser.add_argument('--verbosity',  '-v', type=int, default=1)

	args = parser.parse_args(argv[1:])

	experiment = Experiment()

	# load data
	data = load_data(args.dataset)

	if args.preprocess:
		# preprocess data
		data = preprocess(data, args.verbosity)

	if args.model:
		# load training results
		results = Experiment(args.model)['models']
	else:
		# use default model
		results = None

	# predict firing rates
	data = predict(data, results, verbosity=args.verbosity)

	# remove data except predictions
	for entry in data:
		if 'spikes' in entry:
			del entry['spikes']
		if 'spike_times' in entry:
			del entry['spike_times']
		del entry['calcium']

	for filepath in args.output:
		if filepath.lower().endswith('.mat'):
			# store in MATLAB format
			savemat(filepath, {'data': data})
		else:
			with open(filepath, 'w') as handle:
				dump(data, handle, protocol=2)

	return 0
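Given the argparse setup above, the script would be invoked along these lines (all file names are placeholders):

# hypothetical call, e.g. from a test harness; argv[0] is the program name
main(['c2s-predict.py', 'data.preprocessed.pck', 'predictions.mat',
      '--model', 'model.xpck'])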
Example #6
File: c2s-info.py  Project: tapacob/c2s
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)

    args = parser.parse_args(argv[1:])

    # load data
    data = load_data(args.dataset)

    def prints(left, right):
        print('{0:<30} {1}'.format(left, right))

    num_spikes = 0
    length = 0
    for entry in data:
        length += entry['calcium'].size / float(entry['fps'])  # seconds
        if 'spike_times' in entry:
            num_spikes += entry['spike_times'].size
        elif 'spikes' in entry:
            num_spikes += entry['spikes'].sum()

    if 'cell_num' in data[0]:
        num_cells = len(unique([entry['cell_num'] for entry in data]))
    else:
        num_cells = len(data)

    prints('Number of cells:', '{0}'.format(num_cells))
    prints('Number of traces:', '{0}'.format(len(data)))
    prints(
        'Total length:',
        '{0} minutes, {1} seconds'.format(int(length) // 60,
                                          int(length) % 60))
    prints('Total number of spikes:', num_spikes)
    prints('Average firing rate:',
           '{0:.2f} [spike/sec]'.format(num_spikes / length))
    prints('Average sampling rate:',
           '{0:.1f}'.format(mean([entry['fps'] for entry in data])))

    return 0
Example #7
File: c2s-evaluate.py  Project: cajal/c2s
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',                 type=str)
	parser.add_argument('predictions',             type=str,   nargs='?')
	parser.add_argument('--downsampling',    '-s', type=int,   default=[1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50], nargs='+')
	parser.add_argument('--optimize',        '-z', type=int,   default=1,
		help='Whether or not to optimize point-wise nonlinearity when evaluating likelihood.')
	parser.add_argument('--regularization',  '-r', type=float, default=5e-8,
		help='Controls smoothness of optimized nonlinearity (default: 5e-8).')
	parser.add_argument('--method',          '-m', type=str,   default='corr', choices=['corr', 'auc', 'info'])
	parser.add_argument('--weighted-average','-w', type=int,   default=0,
		help='Whether or not to weight traces by their duration.')
	parser.add_argument('--output',          '-o', type=str,   default='')
	parser.add_argument('--verbosity',       '-v', type=int,   default=1)

	args, _ = parser.parse_known_args(argv[1:])

	experiment = Experiment()

	data = load_data(args.dataset)

	if not args.predictions:
		# use raw calcium signal for prediction
		calcium_min = min(hstack(entry['calcium'] for entry in data))
		for entry in data:
			entry['predictions'] = entry['calcium'] - calcium_min + 1e-5

	else:
		predictions = load_data(args.predictions)

		try:
			if len(predictions) != len(data):
				raise ValueError()

			for entry1, entry2 in zip(data, predictions):
				if entry1['calcium'].size != entry2['predictions'].size:
					raise ValueError()
				entry1['predictions'] = entry2['predictions']

		except ValueError:
			print('These predictions seem to be for a different dataset.')
			return 1

	fps = []
	loglik = []
	correlations = []
	auc = []
	entropy = []
	functions = []


	for ds in args.downsampling:
		if args.verbosity > 0:
			if args.weighted_average:
				if args.method.lower().startswith('c'):
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Correlation'))
				elif args.method.lower().startswith('a'):
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'AUC'))
				else:
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Information gain'))
			else:
				if args.method.lower().startswith('c'):
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Correlation'))
				elif args.method.lower().startswith('a'):
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'AUC'))
				else:
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Information gain'))

		fps.append([])
		for entry in data:
			fps[-1].append(entry['fps'] / ds)

		if args.method.lower().startswith('c'):
			# compute correlations
			R = evaluate(data, method=args.method,
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity)

			correlations.append(R)

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(R, data, ds)
				else:
					print_traces(R, fps)

		elif args.method.lower().startswith('a'):
			# compute correlations
			A = evaluate(data, method=args.method,
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity)

			auc.append(A)

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(A, data, ds)
				else:
					print_traces(A, fps)

		else:
			# compute log-likelihoods
			L, H, f = evaluate(data, method='loglik',
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity,
				return_all=True,
				regularize=args.regularization)

			loglik.append(L)
			entropy.append(H)
			functions.append((f.x, f.y))

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(H + L, data, ds)
				else:
					print_traces(H + L, fps)

	if args.output.lower().endswith('.mat'):

		if args.method.lower().startswith('c'):
			savemat(args.output, {'fps': asarray(fps), 'correlations': asarray(correlations)})
		elif args.method.lower().startswith('a'):
			savemat(args.output, {'fps': asarray(fps), 'auc': asarray(auc)})
		else:
			savemat(args.output, {
				'fps': asarray(fps),
				'loglik': asarray(loglik),
				'entropy': asarray(entropy),
				'info': asarray(loglik) + asarray(entropy)})

	elif args.output:
		if os.path.isdir(args.output):
			filepath = os.path.join(args.output, args.method + '.{0}.{1}.xpck')
		else:
			filepath = args.output

		experiment['args'] = args
		experiment['fps'] = asarray(fps)

		if args.method.lower().startswith('c'):
			experiment['correlations'] = asarray(correlations)
		elif args.method.lower().startswith('a'):
			experiment['auc'] = asarray(auc)
		else:
			experiment['loglik'] = asarray(loglik)
			experiment['entropy'] = asarray(entropy)
			experiment['info'] = asarray(loglik) + asarray(entropy)
			experiment['f'] = functions

		experiment.save(filepath, overwrite=True)

	return 0
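The fps list built at the top of the loop grows one row per downsampling factor; the bookkeeping in isolation:

data = [{'fps': 100.0}, {'fps': 60.0}]
downsampling = [1, 2, 4]
fps = [[entry['fps'] / ds for entry in data] for ds in downsampling]
print(fps)  # [[100.0, 60.0], [50.0, 30.0], [25.0, 15.0]]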
Example #8
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',                type=str,   nargs='+')
	parser.add_argument('output',                 type=str)
	parser.add_argument('--num_components', '-c', type=int,   default=3)
	parser.add_argument('--num_features',   '-f', type=int,   default=2)
	parser.add_argument('--num_models',     '-m', type=int,   default=4)
	parser.add_argument('--keep_all',       '-k', type=int,   default=1)
	parser.add_argument('--finetune',       '-n', type=int,   default=0)
	parser.add_argument('--num_valid',      '-s', type=int,   default=0)
	parser.add_argument('--var_explained',  '-e', type=float, default=95.)
	parser.add_argument('--window_length',  '-w', type=float, default=1000.)
	parser.add_argument('--regularize',     '-r', type=float, default=0.)
	parser.add_argument('--preprocess',     '-p', type=int,   default=0)
	parser.add_argument('--verbosity',      '-v', type=int,   default=1)

	args, _ = parser.parse_known_args(argv[1:])

	experiment = Experiment()

	# load data
	data = []
	for dataset in args.dataset:
		data = data + load_data(dataset)

	# preprocess data
	if args.preprocess:
		data = preprocess(data)

	# list of all cells
	if 'cell_num' in data[0]:
		# several trials/entries may belong to the same cell
		cells = unique([entry['cell_num'] for entry in data])
	else:
		# one cell corresponds to one trial/entry
		cells = range(len(data))
		for i in cells:
			data[i]['cell_num'] = i

	for i in cells:
		data_train = [entry for entry in data if entry['cell_num'] != i]
		data_test = [entry for entry in data if entry['cell_num'] == i]

		if args.verbosity > 0:
			print('Test cell: {0}'.format(i))

		# train on all cells but cell i
		results = train(
			data=data_train,
			num_valid=args.num_valid,
			num_models=args.num_models,
			var_explained=args.var_explained,
			window_length=args.window_length,
			keep_all=args.keep_all,
			finetune=args.finetune,
			model_parameters={
					'num_components': args.num_components,
					'num_features': args.num_features},
			training_parameters={
				'verbosity': 0},
			regularize=args.regularize,
			verbosity=1)

		if args.verbosity > 0:
			print('Predicting...')

		# predict responses of cell i
		predictions = predict(data_test, results, verbosity=0)

		for entry1, entry2 in zip(data_test, predictions):
			entry1['predictions'] = entry2['predictions']

	# remove data except predictions
	for entry in data:
		if 'spikes' in entry:
			del entry['spikes']
		if 'spike_times' in entry:
			del entry['spike_times']
		del entry['calcium']

	# save results
	if args.output.lower().endswith('.mat'):
		savemat(args.output, convert({'data': data}))

	elif args.output.lower().endswith('.xpck'):
		experiment['args'] = args
		experiment['data'] = data
		experiment.save(args.output)

	else:
		with open(args.output, 'w') as handle:
			dump(data, handle, protocol=2)

	return 0
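The split inside the for-loop is a standard leave-one-cell-out scheme: every trial of cell i is held out together. Stripped of the training code, it reduces to:

# toy data: two entries share cell 0, so they leave the training set together
data = [{'cell_num': 0}, {'cell_num': 0}, {'cell_num': 1}, {'cell_num': 2}]
for i in sorted({entry['cell_num'] for entry in data}):
    data_train = [entry for entry in data if entry['cell_num'] != i]
    data_test = [entry for entry in data if entry['cell_num'] == i]
    print(i, len(data_train), len(data_test))  # prints 0 2 2, then 1 3 1, then 2 3 1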
Example #9
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str, nargs='+')
    parser.add_argument('output', type=str)
    parser.add_argument('--num_components', '-c', type=int, default=3)
    parser.add_argument('--num_features', '-f', type=int, default=2)
    parser.add_argument('--num_models', '-m', type=int, default=4)
    parser.add_argument('--keep_all', '-k', type=int, default=1)
    parser.add_argument('--finetune', '-n', type=int, default=0)
    parser.add_argument('--num_valid', '-s', type=int, default=0)
    parser.add_argument('--var_explained', '-e', type=float, default=95.)
    parser.add_argument('--window_length', '-w', type=float, default=1000.)
    parser.add_argument('--regularize', '-r', type=float, default=0.)
    parser.add_argument('--preprocess', '-p', type=int, default=0)
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args, _ = parser.parse_known_args(argv[1:])

    experiment = Experiment()

    # load data
    data = []
    for dataset in args.dataset:
        data = data + load_data(dataset)

    # preprocess data
    if args.preprocess:
        data = preprocess(data)

    # list of all cells
    if 'cell_num' in data[0]:
        # several trials/entries may belong to the same cell
        cells = unique([entry['cell_num'] for entry in data])
    else:
        # one cell corresponds to one trial/entry
        cells = range(len(data))
        for i in cells:
            data[i]['cell_num'] = i

    for i in cells:
        data_train = [entry for entry in data if entry['cell_num'] != i]
        data_test = [entry for entry in data if entry['cell_num'] == i]

        if args.verbosity > 0:
            print('Test cell: {0}'.format(i))

        # train on all cells but cell i
        results = train(data=data_train,
                        num_valid=args.num_valid,
                        num_models=args.num_models,
                        var_explained=args.var_explained,
                        window_length=args.window_length,
                        keep_all=args.keep_all,
                        finetune=args.finetune,
                        model_parameters={
                            'num_components': args.num_components,
                            'num_features': args.num_features
                        },
                        training_parameters={'verbosity': 0},
                        regularize=args.regularize,
                        verbosity=1)

        if args.verbosity > 0:
            print('Predicting...')

        # predict responses of cell i
        predictions = predict(data_test, results, verbosity=0)

        for entry1, entry2 in zip(data_test, predictions):
            entry1['predictions'] = entry2['predictions']

    # remove data except predictions
    for entry in data:
        if 'spikes' in entry:
            del entry['spikes']
        if 'spike_times' in entry:
            del entry['spike_times']
        del entry['calcium']

    # save results
    if args.output.lower().endswith('.mat'):
        savemat(args.output, {'data': data})

    elif args.output.lower().endswith('.xpck'):
        experiment['args'] = args
        experiment['data'] = data
        experiment.save(args.output)

    else:
        with open(args.output, 'w') as handle:
            dump(data, handle, protocol=2)

    return 0
Example #10
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)
    parser.add_argument('--preprocess', '-p', type=int, default=0)
    parser.add_argument('--output', '-o', type=str, default='')
    parser.add_argument('--seconds', '-S', type=int, default=60)
    parser.add_argument('--offset', '-O', type=int, default=0)
    parser.add_argument('--width', '-W', type=int, default=10)
    parser.add_argument('--height', '-H', type=int, default=0)
    parser.add_argument('--cells', '-c', type=int, default=[], nargs='+')
    parser.add_argument('--dpi', '-D', type=int, default=100)
    parser.add_argument('--font', '-F', type=str, default='Arial')

    args = parser.parse_args(argv[1:])

    # load data
    data = load_data(args.dataset)
    cells = args.cells if args.cells else range(1, len(data) + 1)
    data = [data[c - 1] for c in cells]

    if args.preprocess:
        data = preprocess(data)

    plt.rcParams['font.family'] = args.font
    plt.rcParams['savefig.dpi'] = args.dpi

    plt.figure(figsize=(args.width,
                        args.height if args.height > 0 else len(data) * 1.5 +
                        .3))

    for k, entry in enumerate(data):
        offset = int(entry['fps'] * args.offset)
        length = int(entry['fps'] * args.seconds)
        calcium = entry['calcium'].ravel()[offset:offset + length]

        plt.subplot(len(data), 1, k + 1)
        plt.plot(args.offset + arange(calcium.size) / entry['fps'],
                 calcium,
                 color=(.1, .6, .4))

        if 'spike_times' in entry:
            spike_times = entry['spike_times'].ravel() / 1000.
            spike_times = spike_times[logical_and(
                spike_times > args.offset,
                spike_times < args.offset + args.seconds)]

            for st in spike_times:
                plt.plot([st, st], [-1, -.5], 'k', lw=1.5)

        plt.yticks([])
        plt.ylim([-2., 5.])
        plt.xlim([args.offset, args.offset + args.seconds])
        plt.ylabel('Cell {0}'.format(cells[k]))
        plt.grid()

        if k < len(data) - 1:
            plt.xticks(plt.xticks()[0], [])

    plt.xlabel('Time [seconds]')
    plt.tight_layout()

    if args.output:
        plt.savefig(args.output)
    else:
        plt.show()

    return 0
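The raster marks are drawn only for spikes inside the plotted window; the masking step in isolation (spike times in the data are in milliseconds, hence the division by 1000; the values here are made up):

from numpy import array, logical_and

spike_times = array([500., 12000., 30000., 61000.]) / 1000.  # ms -> seconds
offset, seconds = 10, 50
mask = logical_and(spike_times > offset, spike_times < offset + seconds)
print(spike_times[mask])  # [ 12.  30.]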
Example #11
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)
    parser.add_argument('predictions', type=str, nargs='?')
    parser.add_argument('--downsampling',
                        '-s',
                        type=int,
                        default=[1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50],
                        nargs='+')
    parser.add_argument(
        '--optimize',
        '-z',
        type=int,
        default=1,
        help=
        'Whether or not to optimize point-wise nonlinearity when evaluating likelihood.'
    )
    parser.add_argument(
        '--regularization',
        '-r',
        type=float,
        default=5e-8,
        help='Controls smoothness of optimized nonlinearity (default: 5e-8).')
    parser.add_argument('--method',
                        '-m',
                        type=str,
                        default='corr',
                        choices=['corr', 'auc', 'info'])
    parser.add_argument(
        '--weighted-average',
        '-w',
        type=int,
        default=0,
        help='Whether or not to weight traces by their duration.')
    parser.add_argument('--output', '-o', type=str, default='')
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args, _ = parser.parse_known_args(argv[1:])

    experiment = Experiment()

    data = load_data(args.dataset)

    if not args.predictions:
        # use raw calcium signal for prediction
        calcium_min = min(hstack(entry['calcium'] for entry in data))
        for entry in data:
            entry['predictions'] = entry['calcium'] - calcium_min + 1e-5

    else:
        predictions = load_data(args.predictions)

        try:
            if len(predictions) != len(data):
                raise ValueError()

            for entry1, entry2 in zip(data, predictions):
                if entry1['calcium'].size != entry2['predictions'].size:
                    raise ValueError()
                entry1['predictions'] = entry2['predictions']

        except ValueError:
            print('These predictions seem to be for a different dataset.')
            return 1

    fps = []
    loglik = []
    correlations = []
    auc = []
    entropy = []
    functions = []

    for ds in args.downsampling:
        if args.verbosity > 0:
            if args.weighted_average:
                if args.method.lower().startswith('c'):
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'Correlation'))
                elif args.method.lower().startswith('a'):
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'AUC'))
                else:
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'Information gain'))
            else:
                if args.method.lower().startswith('c'):
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ',
                                                     'Correlation'))
                elif args.method.lower().startswith('a'):
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'AUC'))
                else:
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ',
                                                     'Information gain'))

        fps.append([])
        for entry in data:
            fps[-1].append(entry['fps'] / ds)

        if args.method.lower().startswith('c'):
            # compute correlations
            R = evaluate(data,
                         method=args.method,
                         optimize=args.optimize,
                         downsampling=ds,
                         verbosity=args.verbosity)

            correlations.append(R)

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(R, data, ds)
                else:
                    print_traces(R, fps)

        elif args.method.lower().startswith('a'):
            # compute correlations
            A = evaluate(data,
                         method=args.method,
                         optimize=args.optimize,
                         downsampling=ds,
                         verbosity=args.verbosity)

            auc.append(A)

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(A, data, ds)
                else:
                    print_traces(A, fps)

        else:
            # compute log-likelihoods
            L, H, f = evaluate(data,
                               method='loglik',
                               optimize=args.optimize,
                               downsampling=ds,
                               verbosity=args.verbosity,
                               return_all=True,
                               regularize=args.regularization)

            loglik.append(L)
            entropy.append(H)
            functions.append((f.x, f.y))

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(H + L, data, ds)
                else:
                    print_traces(H + L, fps)

    if args.output.lower().endswith('.mat'):

        if args.method.lower().startswith('c'):
            savemat(
                args.output,
                convert({
                    'fps': asarray(fps),
                    'correlations': asarray(correlations)
                }))
        elif args.method.lower().startswith('a'):
            savemat(args.output,
                    convert({
                        'fps': asarray(fps),
                        'auc': asarray(auc)
                    }))
        else:
            savemat(
                args.output,
                convert({
                    'fps': asarray(fps),
                    'loglik': asarray(loglik),
                    'entropy': asarray(entropy),
                    'info': asarray(loglik) + asarray(entropy)
                }))

    elif args.output:
        if os.path.isdir(args.output):
            filepath = os.path.join(args.output, args.method + '.{0}.{1}.xpck')
        else:
            filepath = args.output

        experiment['args'] = args
        experiment['fps'] = asarray(fps)

        if args.method.lower().startswith('c'):
            experiment['correlations'] = asarray(correlations)
        elif args.method.lower().startswith('a'):
            experiment['auc'] = asarray(auc)
        else:
            experiment['loglik'] = asarray(loglik)
            experiment['entropy'] = asarray(entropy)
            experiment['info'] = asarray(loglik) + asarray(entropy)
            experiment['f'] = functions

        experiment.save(filepath, overwrite=True)

    return 0
Example #12
def main(argv):
    parser = ArgumentParser(argv[0],
                            description=__doc__,
                            formatter_class=lambda prog: HelpFormatter(
                                prog, max_help_position=10, width=120))
    parser.add_argument('dataset',
                        type=str,
                        nargs='+',
                        help='Dataset(s) used for training.')
    parser.add_argument(
        'output',
        type=str,
        help='Directory or file where trained models will be stored.')
    parser.add_argument(
        '--num_components',
        '-c',
        type=int,
        default=3,
        help='Number of components used in STM model (default: %(default)d).')
    parser.add_argument(
        '--num_features',
        '-f',
        type=int,
        default=2,
        help=
        'Number of quadratic features used in STM model (default: %(default)d).'
    )
    parser.add_argument(
        '--num_models',
        '-m',
        type=int,
        default=4,
        help=
        'Number of models trained (predictions will be averaged across models, default: %(default)d).'
    )
    parser.add_argument(
        '--keep_all',
        '-k',
        type=int,
        default=1,
        help=
        'If set to 0, only the best model of all trained models is kept (default: %(default)d).'
    )
    parser.add_argument(
        '--finetune',
        '-n',
        type=int,
        default=0,
        help=
        'If set to 1, enables another finetuning step which is performed after training (default: %(default)d).'
    )
    parser.add_argument(
        '--num_train',
        '-t',
        type=int,
        default=0,
        help='If specified, a (random) subset of cells is used for training.')
    parser.add_argument(
        '--num_valid',
        '-s',
        type=int,
        default=0,
        help=
        'If specified, a (random) subset of cells will be used for early stopping based on validation error.'
    )
    parser.add_argument(
        '--var_explained',
        '-e',
        type=float,
        default=95.,
        help=
        'Controls the degree of dimensionality reduction of fluorescence windows (default: %(default).0f).'
    )
    parser.add_argument(
        '--window_length',
        '-w',
        type=float,
        default=1000.,
        help=
        'Length of windows extracted from calcium signal for prediction (in milliseconds, default: %(default).0f).'
    )
    parser.add_argument(
        '--regularize',
        '-r',
        type=float,
        default=0.,
        help=
        'Amount of parameter regularization (filters are regularized for smoothness, default: %(default).1f).'
    )
    parser.add_argument(
        '--preprocess',
        '-p',
        type=int,
        default=0,
        help=
        'If the data is not already preprocessed, this can be used to do it.')
    parser.add_argument('--seed', '-S', type=int, default=-1)
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args, _ = parser.parse_known_args(argv[1:])

    # set RNG seed
    if args.seed > -1:
        numpy.random.seed(args.seed)
        cmt.utils.seed(args.seed)

    experiment = Experiment()

    if not args.dataset:
        print('You have to specify at least 1 dataset.')
        return 0

    data = []
    for filepath in args.dataset:
        data.extend(load_data(filepath))

    if args.preprocess:
        data = preprocess(data, args.verbosity)

    if 'cell_num' not in data[0]:
        # no cell number is given, assume traces correspond to cells
        for k, entry in enumerate(data):
            entry['cell_num'] = k

    # collect cell ids
    cell_ids = unique([entry['cell_num'] for entry in data])

    # pick cells for training
    if args.num_train > 0:
        training_cells = random_select(args.num_train, len(cell_ids))
    else:
        # use all cells for training
        training_cells = range(len(cell_ids))

    models = train(
        [entry for entry in data if entry['cell_num'] in training_cells],
        num_valid=args.num_valid,
        num_models=args.num_models,
        var_explained=args.var_explained,
        window_length=args.window_length,
        keep_all=args.keep_all,
        finetune=args.finetune,
        model_parameters={
            'num_components': args.num_components,
            'num_features': args.num_features
        },
        training_parameters={'verbosity': 1},
        regularize=args.regularize,
        verbosity=args.verbosity)

    experiment['args'] = args
    experiment['training_cells'] = training_cells
    experiment['models'] = models

    if os.path.isdir(args.output):
        experiment.save(os.path.join(args.output, 'model.xpck'))
    else:
        experiment.save(args.output)

    return 0
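The help strings rely on argparse's %(default)s-style substitution, which fills in the declared default when help is rendered; a self-contained demonstration:

from argparse import ArgumentParser

parser = ArgumentParser('demo')
parser.add_argument('--num_components', '-c', type=int, default=3,
                    help='Number of components used in STM model (default: %(default)d).')
parser.print_help()  # the printed help line ends with '(default: 3).'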
Example #13
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',            type=str)
	parser.add_argument('--preprocess', '-p', type=int, default=0)
	parser.add_argument('--output',     '-o', type=str, default='')
	parser.add_argument('--seconds',    '-S', type=int, default=60)
	parser.add_argument('--offset',     '-O', type=int, default=0)
	parser.add_argument('--width',      '-W', type=int, default=10)
	parser.add_argument('--height',     '-H', type=int, default=0)
	parser.add_argument('--cells',      '-c', type=int, default=[], nargs='+')
	parser.add_argument('--dpi',        '-D', type=int, default=100)
	parser.add_argument('--font',       '-F', type=str, default='Arial')

	args = parser.parse_args(argv[1:])

	# load data
	data = load_data(args.dataset)
	cells = args.cells if args.cells else range(1, len(data) + 1)
	data = [data[c - 1] for c in cells]

	if args.preprocess:
		data = preprocess(data)

	plt.rcParams['font.family'] = args.font
	plt.rcParams['savefig.dpi'] = args.dpi

	plt.figure(figsize=(
		args.width,
		args.height if args.height > 0 else len(data) * 1.5 + .3))

	for k, entry in enumerate(data):
		offset = int(entry['fps'] * args.offset)
		length = int(entry['fps'] * args.seconds)
		calcium = entry['calcium'].ravel()[offset:offset + length]

		plt.subplot(len(data), 1, k + 1)
		plt.plot(args.offset + arange(calcium.size) / entry['fps'], calcium,
			color=(.1, .6, .4))

		if 'spike_times' in entry:
			spike_times = entry['spike_times'].ravel() / 1000.
			spike_times = spike_times[logical_and(
				spike_times > args.offset,
				spike_times < args.offset + args.seconds)]

			for st in spike_times:
				plt.plot([st, st], [-1, -.5], 'k', lw=1.5)

		plt.yticks([])
		plt.ylim([-2., 5.])
		plt.xlim([args.offset, args.offset + args.seconds])
		plt.ylabel('Cell {0}'.format(cells[k]))
		plt.grid()

		if k < len(data) - 1:
			plt.xticks(plt.xticks()[0], [])

	plt.xlabel('Time [seconds]')
	plt.tight_layout()

	if args.output:
		plt.savefig(args.output)
	else:
		plt.show()

	return 0
Example #14
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__,
		formatter_class=lambda prog: HelpFormatter(prog, max_help_position=10, width=120))
	parser.add_argument('dataset',                type=str, nargs='+',
		help='Dataset(s) used for training.')
	parser.add_argument('output',                 type=str,
		help='Directory or file where trained models will be stored.')
	parser.add_argument('--num_components', '-c', type=int,   default=3,
		help='Number of components used in STM model (default: %(default)d).')
	parser.add_argument('--num_features',   '-f', type=int,   default=2,
		help='Number of quadratic features used in STM model (default: %(default)d).')
	parser.add_argument('--num_models',     '-m', type=int,   default=4,
		help='Number of models trained (predictions will be averaged across models, default: %(default)d).')
	parser.add_argument('--keep_all',       '-k', type=int,   default=1,
		help='If set to 0, only the best model of all trained models is kept (default: %(default)d).')
	parser.add_argument('--finetune',       '-n', type=int,   default=0,
		help='If set to 1, enables another finetuning step which is performed after training (default: %(default)d).')
	parser.add_argument('--num_train',      '-t', type=int,   default=0,
		help='If specified, a (random) subset of cells is used for training.')
	parser.add_argument('--num_valid',      '-s', type=int,   default=0,
		help='If specified, a (random) subset of cells will be used for early stopping based on validation error.')
	parser.add_argument('--var_explained',  '-e', type=float, default=95.,
		help='Controls the degree of dimensionality reduction of fluorescence windows (default: %(default).0f).')
	parser.add_argument('--window_length',  '-w', type=float, default=1000.,
		help='Length of windows extracted from calcium signal for prediction (in milliseconds, default: %(default).0f).')
	parser.add_argument('--regularize',     '-r', type=float, default=0.,
		help='Amount of parameter regularization (filters are regularized for smoothness, default: %(default).1f).')
	parser.add_argument('--preprocess',     '-p', type=int,   default=0,
		help='If the data is not already preprocessed, this can be used to do it.')
	parser.add_argument('--seed',           '-S', type=int,   default=-1)
	parser.add_argument('--verbosity',      '-v', type=int,   default=1)

	args, _ = parser.parse_known_args(argv[1:])

	# set RNG seed
	if args.seed > -1:
		numpy.random.seed(args.seed)
		cmt.utils.seed(args.seed)

	experiment = Experiment()

	if not args.dataset:
		print('You have to specify at least 1 dataset.')
		return 0

	data = []
	for filepath in args.dataset:
		data.extend(load_data(filepath))

	if args.preprocess:
		data = preprocess(data, args.verbosity)

	if 'cell_num' not in data[0]:
		# no cell number is given, assume traces correspond to cells
		for k, entry in enumerate(data):
			entry['cell_num'] = k

	# collect cell ids
	cell_ids = unique([entry['cell_num'] for entry in data])
	
	# pick cells for training
	if args.num_train > 0:
		training_cells = random_select(args.num_train, len(cell_ids))
	else:
		# use all cells for training
		training_cells = range(len(cell_ids))

	models = train([entry for entry in data if entry['cell_num'] in training_cells],
		num_valid=args.num_valid,
		num_models=args.num_models,
		var_explained=args.var_explained,
		window_length=args.window_length,
		keep_all=args.keep_all,
		finetune=args.finetune,
		model_parameters={
			'num_components': args.num_components,
			'num_features': args.num_features},
		training_parameters={
			'verbosity': 1},
		regularize=args.regularize,
		verbosity=args.verbosity)

	experiment['args'] = args
	experiment['training_cells'] = training_cells
	experiment['models'] = models

	if os.path.isdir(args.output):
		experiment.save(os.path.join(args.output, 'model.xpck'))
	else:
		experiment.save(args.output)

	return 0