Example 1
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('input', type=str)
    parser.add_argument('output', type=str, nargs='+')
    parser.add_argument('--filter', '-s', type=int, default=0)
    parser.add_argument(
        '--fps',
        '-f',
        type=float,
        default=100.,
        help='Up- or downsample data to match this sampling rate (default: 100 fps).')
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args = parser.parse_args(argv[1:])

    # load data
    data = load_data(args.input)

    # preprocess data
    data = preprocess(data,
                      fps=args.fps,
                      filter=args.filter if args.filter > 0 else None,
                      verbosity=args.verbosity)

    for filepath in args.output:
        if filepath.lower().endswith('.mat'):
            # store in MATLAB format
            savemat(filepath, convert({'data': data}))
        else:
            with open(filepath, 'wb') as handle:
                dump(data, handle, protocol=2)

    return 0
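None of these examples include their imports or an entry point. A minimal sketch of what Example 1 would need to run; treating the project helpers (`load_data`, `preprocess`, `convert`) as coming from a `c2s` module is an assumption based on the function names, and the later examples additionally use `train`, `predict`, `evaluate`, `Experiment` and the `print_*` helpers from the same project:

import sys

from argparse import ArgumentParser
from pickle import dump
from scipy.io import savemat

from c2s import load_data, preprocess, convert  # assumed module name

if __name__ == '__main__':
    # main() receives the full argument vector, program name included
    sys.exit(main(sys.argv))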
Example 2
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)
    parser.add_argument('output', type=str, nargs='+')
    parser.add_argument('--model', '-m', type=str, default='')
    parser.add_argument(
        '--preprocess',
        '-p',
        type=int,
        default=0,
        help='If you haven\'t already applied `preprocess` to the data, '
             'set to 1 (default: 0).')
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args = parser.parse_args(argv[1:])

    experiment = Experiment()

    # load data
    data = load_data(args.dataset)

    if args.preprocess:
        # preprocess data
        data = preprocess(data, args.verbosity)

    if args.model:
        # load training results
        results = Experiment(args.model)['models']
    else:
        # use default model
        results = None

    # predict firing rates
    data = predict(data, results, verbosity=args.verbosity)

    # remove data except predictions
    for entry in data:
        if 'spikes' in entry:
            del entry['spikes']
        if 'spike_times' in entry:
            del entry['spike_times']
        del entry['calcium']

    for filepath in args.output:
        if filepath.lower().endswith('.mat'):
            # store in MATLAB format
            savemat(filepath, convert({'data': data}))
        else:
            with open(filepath, 'wb') as handle:
                dump(data, handle, protocol=2)

    return 0
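Because `main` receives the complete argument vector with the program name first, these scripts can also be driven programmatically. A hypothetical call to Example 2; all file names are made up:

# made-up file names, for illustration only
main(['predict.py', 'data.pck', 'rates.mat', '--model', 'model.xpck'])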
Example 3
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('input',             type=str)
	parser.add_argument('output',            type=str, nargs='+')
	parser.add_argument('--filter',    '-s', type=int,   default=0)
	parser.add_argument('--fps',       '-f', type=float, default=100.,
		help='Up- or downsample data to match this sampling rate (default: 100 fps).')
	parser.add_argument('--seed',      '-S', type=int,  default=-1)
	parser.add_argument('--verbosity', '-v', type=int,   default=1)

	args = parser.parse_args(argv[1:])

	# set RNG seed
	if args.seed > -1:
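		# seed both numpy's global RNG and cmt's so runs are reproducible
		# (cmt is presumably the Conditional Modeling Toolkit backing the models)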
		numpy.random.seed(args.seed)
		cmt.utils.seed(args.seed)

	# load data
	data = load_data(args.input)

	# preprocess data
	data = preprocess(
		data,
		fps=args.fps,
		filter=args.filter if args.filter > 0 else None,
		verbosity=args.verbosity)

	for filepath in args.output:
		if filepath.lower().endswith('.mat'):
			# store in MATLAB format
			savemat(filepath, convert({'data': data}))
		else:
			with open(filepath, 'wb') as handle:
				dump(data, handle, protocol=2)

	return 0
Example 4
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',                type=str,   nargs='+')
	parser.add_argument('output',                 type=str)
	parser.add_argument('--num_components', '-c', type=int,   default=3)
	parser.add_argument('--num_features',   '-f', type=int,   default=2)
	parser.add_argument('--num_models',     '-m', type=int,   default=4)
	parser.add_argument('--keep_all',       '-k', type=int,   default=1)
	parser.add_argument('--finetune',       '-n', type=int,   default=0)
	parser.add_argument('--num_valid',      '-s', type=int,   default=0)
	parser.add_argument('--var_explained',  '-e', type=float, default=95.)
	parser.add_argument('--window_length',  '-w', type=float, default=1000.)
	parser.add_argument('--regularize',     '-r', type=float, default=0.)
	parser.add_argument('--preprocess',     '-p', type=int,   default=0)
	parser.add_argument('--verbosity',      '-v', type=int,   default=1)

	args, _ = parser.parse_known_args(argv[1:])

	experiment = Experiment()

	# load data
	data = []
	for dataset in args.dataset:
		data = data + load_data(dataset)

	# preprocess data
	if args.preprocess:
		data = preprocess(data)

	# list of all cells
	if 'cell_num' in data[0]:
		# several trials/entries may belong to the same cell
		cells = unique([entry['cell_num'] for entry in data])
	else:
		# one cell corresponds to one trial/entry
		cells = range(len(data))
		for i in cells:
			data[i]['cell_num'] = i

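	# leave-one-out cross-validation: each cell is held out once for testing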
	for i in cells:
		data_train = [entry for entry in data if entry['cell_num'] != i]
		data_test = [entry for entry in data if entry['cell_num'] == i]

		if args.verbosity > 0:
			print('Test cell: {0}'.format(i))

		# train on all cells but cell i
		results = train(
			data=data_train,
			num_valid=args.num_valid,
			num_models=args.num_models,
			var_explained=args.var_explained,
			window_length=args.window_length,
			keep_all=args.keep_all,
			finetune=args.finetune,
			model_parameters={
					'num_components': args.num_components,
					'num_features': args.num_features},
			training_parameters={
				'verbosity': 0},
			regularize=args.regularize,
			verbosity=1)

		if args.verbosity > 0:
			print('Predicting...')

		# predict responses of cell i
		predictions = predict(data_test, results, verbosity=0)

		for entry1, entry2 in zip(data_test, predictions):
			entry1['predictions'] = entry2['predictions']

	# remove data except predictions
	for entry in data:
		if 'spikes' in entry:
			del entry['spikes']
		if 'spike_times' in entry:
			del entry['spike_times']
		del entry['calcium']

	# save results
	if args.output.lower().endswith('.mat'):
		savemat(args.output, convert({'data': data}))

	elif args.output.lower().endswith('.xpck'):
		experiment['args'] = args
		experiment['data'] = data
		experiment.save(args.output)

	else:
		with open(args.output, 'wb') as handle:
			dump(data, handle, protocol=2)

	return 0
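Unlike Examples 1-3, Example 4 parses with `parse_known_args`, so unrecognized options are silently dropped instead of raising an error. A hypothetical illustration; the file names are made up:

# both calls succeed; parse_known_args ignores '--unknown-flag'
main(['leave_one_out.py', 'cells.pck', 'results.xpck'])
main(['leave_one_out.py', 'cells.pck', 'results.xpck', '--unknown-flag', '1'])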
Example 5
def main(argv):
	parser = ArgumentParser(argv[0], description=__doc__)
	parser.add_argument('dataset',                 type=str)
	parser.add_argument('predictions',             type=str,   nargs='?')
	parser.add_argument('--downsampling',    '-s', type=int,   default=[1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50], nargs='+')
	parser.add_argument('--optimize',        '-z', type=int,   default=1,
		help='Whether or not to optimize point-wise nonlinearity when evaluating likelihood.')
	parser.add_argument('--regularization',  '-r', type=float, default=5e-8,
		help='Controls smoothness of optimized nonlinearity (default: 5e-8).')
	parser.add_argument('--method',          '-m', type=str,   default='corr', choices=['corr', 'auc', 'info'])
	parser.add_argument('--weighted-average','-w', type=int,   default=0,
		help='Whether or not to weight traces by their duration.')
	parser.add_argument('--output',          '-o', type=str,   default='')
	parser.add_argument('--verbosity',       '-v', type=int,   default=1)

	args, _ = parser.parse_known_args(argv[1:])

	experiment = Experiment()

	data = load_data(args.dataset)

	if not args.predictions:
		# use raw calcium signal for prediction
		calcium_min = min(hstack([entry['calcium'] for entry in data]))
		for entry in data:
			entry['predictions'] = entry['calcium'] - calcium_min + 1e-5

	else:
		predictions = load_data(args.predictions)

		try:
			if len(predictions) != len(data):
				raise ValueError()

			for entry1, entry2 in zip(data, predictions):
				if entry1['calcium'].size != entry2['predictions'].size:
					raise ValueError()
				entry1['predictions'] = entry2['predictions']

		except ValueError:
			print('These predictions seem to be for a different dataset.')
			return 1

	fps = []
	loglik = []
	correlations = []
	auc = []
	entropy = []
	functions = []


	for ds in args.downsampling:
		if args.verbosity > 0:
			if args.weighted_average:
				if args.method.lower().startswith('c'):
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Correlation'))
				elif args.method.lower().startswith('a'):
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'AUC'))
				else:
					print('{0:>5} {1:>7} {2:>7} {3}'.format('Cell', '#Traces', 'FPS ', 'Information gain'))
			else:
				if args.method.lower().startswith('c'):
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Correlation'))
				elif args.method.lower().startswith('a'):
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'AUC'))
				else:
					print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'Information gain'))

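		# effective sampling rate of each trace at this downsampling factor,
		# e.g. a 100 fps recording at ds=4 is evaluated at 25 fps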
		fps.append([])
		for entry in data:
			fps[-1].append(entry['fps'] / ds)

		if args.method.lower().startswith('c'):
			# compute correlations
			R = evaluate(data, method=args.method,
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity)

			correlations.append(R)

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(R, data, ds)
				else:
					print_traces(R, fps)

		elif args.method.lower().startswith('a'):
			# compute AUC
			A = evaluate(data, method=args.method,
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity)

			auc.append(A)

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(A, data, ds)
				else:
					print_traces(A, fps)

		else:
			# compute log-likelihoods
			L, H, f = evaluate(data, method='loglik',
				optimize=args.optimize,
				downsampling=ds,
				verbosity=args.verbosity,
				return_all=True,
				regularize=args.regularization)

			loglik.append(L)
			entropy.append(H)
			functions.append((f.x, f.y))

			if args.verbosity > 0:
				if args.weighted_average:
					print_weighted_average(H + L, data, ds)
				else:
					print_traces(H + L, fps)

	if args.output.lower().endswith('.mat'):

		if args.method.lower().startswith('c'):
			savemat(args.output, convert({'fps': asarray(fps), 'correlations': asarray(correlations)}))
		elif args.method.lower().startswith('a'):
			savemat(args.output, convert({'fps': asarray(fps), 'auc': asarray(auc)}))
		else:
			savemat(args.output, convert({
				'fps': asarray(fps),
				'loglik': asarray(loglik),
				'entropy': asarray(entropy),
				'info': asarray(loglik) + asarray(entropy)}))

	elif args.output:
		if os.path.isdir(args.output):
			filepath = os.path.join(args.output, args.method + '.{0}.{1}.xpck')
		else:
			filepath = args.output

		experiment['args'] = args
		experiment['fps'] = asarray(fps)

		if args.method.lower().startswith('c'):
			experiment['correlations'] = asarray(correlations)
		elif args.method.lower().startswith('a'):
			experiment['auc'] = asarray(auc)
		else:
			experiment['loglik'] = asarray(loglik)
			experiment['entropy'] = asarray(entropy)
			experiment['info'] = asarray(loglik) + asarray(entropy)
			experiment['f'] = functions

		experiment.save(filepath, overwrite=True)

	return 0
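After the evaluation loop in Example 5, `fps` holds one list per downsampling factor, each with the effective rate of every trace, so `asarray(fps)` is a factors-by-traces array. A sketch, assuming two traces recorded at 100 fps:

# with args.downsampling == [1, 2] and two 100 fps traces:
# fps == [[100.0, 100.0], [50.0, 50.0]]
# asarray(fps).shape == (2, 2)  # (num_factors, num_traces)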
Example 6
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str)
    parser.add_argument('predictions', type=str, nargs='?')
    parser.add_argument('--downsampling',
                        '-s',
                        type=int,
                        default=[1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50],
                        nargs='+')
    parser.add_argument(
        '--optimize',
        '-z',
        type=int,
        default=1,
        help='Whether or not to optimize point-wise nonlinearity when '
             'evaluating likelihood.')
    parser.add_argument(
        '--regularization',
        '-r',
        type=float,
        default=5e-8,
        help='Controls smoothness of optimized nonlinearity (default: 5e-8).')
    parser.add_argument('--method',
                        '-m',
                        type=str,
                        default='corr',
                        choices=['corr', 'auc', 'info'])
    parser.add_argument(
        '--weighted-average',
        '-w',
        type=int,
        default=0,
        help='Whether or not to weight traces by their duration.')
    parser.add_argument('--output', '-o', type=str, default='')
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args, _ = parser.parse_known_args(argv[1:])

    experiment = Experiment()

    data = load_data(args.dataset)

    if not args.predictions:
        # use raw calcium signal for prediction
        calcium_min = min(hstack([entry['calcium'] for entry in data]))
        for entry in data:
            entry['predictions'] = entry['calcium'] - calcium_min + 1e-5

    else:
        predictions = load_data(args.predictions)

        try:
            if len(predictions) != len(data):
                raise ValueError()

            for entry1, entry2 in zip(data, predictions):
                if entry1['calcium'].size != entry2['predictions'].size:
                    raise ValueError()
                entry1['predictions'] = entry2['predictions']

        except ValueError:
            print('These predictions seem to be for a different dataset.')
            return 1

    fps = []
    loglik = []
    correlations = []
    auc = []
    entropy = []
    functions = []

    for ds in args.downsampling:
        if args.verbosity > 0:
            if args.weighted_average:
                if args.method.lower().startswith('c'):
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'Correlation'))
                elif args.method.lower().startswith('a'):
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'AUC'))
                else:
                    print('{0:>5} {1:>7} {2:>7} {3}'.format(
                        'Cell', '#Traces', 'FPS ', 'Information gain'))
            else:
                if args.method.lower().startswith('c'):
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ',
                                                     'Correlation'))
                elif args.method.lower().startswith('a'):
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ', 'AUC'))
                else:
                    print('{0:>5} {1:>7} {2}'.format('Trace', 'FPS ',
                                                     'Information gain'))

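        # effective sampling rate of each trace at this downsampling factor,
        # e.g. a 100 fps recording at ds=4 is evaluated at 25 fps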
        fps.append([])
        for entry in data:
            fps[-1].append(entry['fps'] / ds)

        if args.method.lower().startswith('c'):
            # compute correlations
            R = evaluate(data,
                         method=args.method,
                         optimize=args.optimize,
                         downsampling=ds,
                         verbosity=args.verbosity)

            correlations.append(R)

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(R, data, ds)
                else:
                    print_traces(R, fps)

        elif args.method.lower().startswith('a'):
            # compute AUC
            A = evaluate(data,
                         method=args.method,
                         optimize=args.optimize,
                         downsampling=ds,
                         verbosity=args.verbosity)

            auc.append(A)

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(A, data, ds)
                else:
                    print_traces(A, fps)

        else:
            # compute log-likelihoods
            L, H, f = evaluate(data,
                               method='loglik',
                               optimize=args.optimize,
                               downsampling=ds,
                               verbosity=args.verbosity,
                               return_all=True,
                               regularize=args.regularization)

            loglik.append(L)
            entropy.append(H)
            functions.append((f.x, f.y))

            if args.verbosity > 0:
                if args.weighted_average:
                    print_weighted_average(H + L, data, ds)
                else:
                    print_traces(H + L, fps)

    if args.output.lower().endswith('.mat'):

        if args.method.lower().startswith('c'):
            savemat(
                args.output,
                convert({
                    'fps': asarray(fps),
                    'correlations': asarray(correlations)
                }))
        elif args.method.lower().startswith('a'):
            savemat(args.output,
                    convert({
                        'fps': asarray(fps),
                        'auc': asarray(auc)
                    }))
        else:
            savemat(
                args.output,
                convert({
                    'fps': asarray(fps),
                    'loglik': asarray(loglik),
                    'entropy': asarray(entropy),
                    'info': asarray(loglik) + asarray(entropy)
                }))

    elif args.output:
        if os.path.isdir(args.output):
            filepath = os.path.join(args.output, args.method + '.{0}.{1}.xpck')
        else:
            filepath = args.output

        experiment['args'] = args
        experiment['fps'] = asarray(fps)

        if args.method.lower().startswith('c'):
            experiment['correlations'] = asarray(correlations)
        elif args.method.lower().startswith('a'):
            experiment['auc'] = asarray(auc)
        else:
            experiment['loglik'] = asarray(loglik)
            experiment['entropy'] = asarray(entropy)
            experiment['info'] = asarray(loglik) + asarray(entropy)
            experiment['f'] = functions

        experiment.save(filepath, overwrite=True)

    return 0
Example 7
def main(argv):
    parser = ArgumentParser(argv[0], description=__doc__)
    parser.add_argument('dataset', type=str, nargs='+')
    parser.add_argument('output', type=str)
    parser.add_argument('--num_components', '-c', type=int, default=3)
    parser.add_argument('--num_features', '-f', type=int, default=2)
    parser.add_argument('--num_models', '-m', type=int, default=4)
    parser.add_argument('--keep_all', '-k', type=int, default=1)
    parser.add_argument('--finetune', '-n', type=int, default=0)
    parser.add_argument('--num_valid', '-s', type=int, default=0)
    parser.add_argument('--var_explained', '-e', type=float, default=95.)
    parser.add_argument('--window_length', '-w', type=float, default=1000.)
    parser.add_argument('--regularize', '-r', type=float, default=0.)
    parser.add_argument('--preprocess', '-p', type=int, default=0)
    parser.add_argument('--verbosity', '-v', type=int, default=1)

    args, _ = parser.parse_known_args(argv[1:])

    experiment = Experiment()

    # load data
    data = []
    for dataset in args.dataset:
        data = data + load_data(dataset)

    # preprocess data
    if args.preprocess:
        data = preprocess(data)

    # list of all cells
    if 'cell_num' in data[0]:
        # several trials/entries may belong to the same cell
        cells = unique([entry['cell_num'] for entry in data])
    else:
        # one cell corresponds to one trial/entry
        cells = range(len(data))
        for i in cells:
            data[i]['cell_num'] = i

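    # leave-one-out cross-validation: each cell is held out once for testing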
    for i in cells:
        data_train = [entry for entry in data if entry['cell_num'] != i]
        data_test = [entry for entry in data if entry['cell_num'] == i]

        if args.verbosity > 0:
            print('Test cell: {0}'.format(i))

        # train on all cells but cell i
        results = train(data=data_train,
                        num_valid=args.num_valid,
                        num_models=args.num_models,
                        var_explained=args.var_explained,
                        window_length=args.window_length,
                        keep_all=args.keep_all,
                        finetune=args.finetune,
                        model_parameters={
                            'num_components': args.num_components,
                            'num_features': args.num_features
                        },
                        training_parameters={'verbosity': 0},
                        regularize=args.regularize,
                        verbosity=1)

        if args.verbosity > 0:
            print('Predicting...')

        # predict responses of cell i
        predictions = predict(data_test, results, verbosity=0)

        for entry1, entry2 in zip(data_test, predictions):
            entry1['predictions'] = entry2['predictions']

    # remove data except predictions
    for entry in data:
        if 'spikes' in entry:
            del entry['spikes']
        if 'spike_times' in entry:
            del entry['spike_times']
        del entry['calcium']

    # save results
    if args.output.lower().endswith('.mat'):
        savemat(args.output, convert({'data': data}))

    elif args.output.lower().endswith('.xpck'):
        experiment['args'] = args
        experiment['data'] = data
        experiment.save(args.output)

    else:
        with open(args.output, 'wb') as handle:
            dump(data, handle, protocol=2)

    return 0
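When the output path ends in neither `.mat` nor `.xpck`, these scripts pickle the data with protocol 2. A minimal sketch for reading such a file back; the file name is made up:

from pickle import load

with open('predictions.pck', 'rb') as handle:  # made-up file name
    data = load(handle)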