Ejemplo n.º 1
0
def get_category(event):
    """
    Category lookup.

    If 'category_id' exists in the URL path, look up that single
    category by id; otherwise return every category.

    :param event: dict() aws api gateway object
    :return: list() of many categories, or dict() for a single category
    """
    category_id = event['path'].get('category_id')
    if category_id:
        return connect('category').find_by_id(category_id)
    # list() materialises the cursor; clearer than a pass-through comprehension.
    return list(connect('category').find())
Ejemplo n.º 2
0
Archivo: feeds.py Proyecto: dmc2015/mpt
def posts(page=1, per_page=10):
    """Return one page of posts, newest first (sorted by 'published_parsed')."""
    offset = (page - 1) * per_page
    cursor = (mongo.connect()
              .posts
              .find({})
              .sort('published_parsed', -1)
              .limit(per_page))
    if offset:
        cursor = cursor.skip(offset)
    return list(cursor)
Ejemplo n.º 3
0
def upcoming_events():
    """Return all events starting at or after midnight today, soonest first."""
    midnight = datetime.datetime.combine(datetime.date.today(),
                                         datetime.time(0, 0, 0, 0))
    cursor = mongo.connect().events.find({"start": {"$gte": midnight}}).sort("start")
    return list(cursor)
Ejemplo n.º 4
0
def previous_events():
    """
    Return events that started before midnight today, most recent first.

    Bug fix: the sort direction was 0, which pymongo/MongoDB reject —
    a direction must be 1 (ascending) or -1 (descending). Past events
    are most useful newest-first, so sort descending.
    """
    today = datetime.datetime.combine(datetime.date.today(), datetime.time(0, 0, 0, 0))

    db = mongo.connect()
    c = db.events.find({"start": {"$lt": today}}).sort("start", -1)

    return list(c)
Ejemplo n.º 5
0
def post_category(event):
    """
    Insert a new category document into the 'category' collection.

    :param event: dict() aws api gateway object; 'payload' is the document
    :return: str() id of the inserted document
    """
    result = connect('category').insert_one(event['payload'])
    return result.inserted_id
Ejemplo n.º 6
0
def main():
    """Fetch a known molecule record by db_id and rebuild its molecule object."""
    # mongo.connect() returns a (client, database) pair in this project.
    client, db = mongo.connect()
    
    # Test molecule ID
    db_id = 40001873
    # Look up the stored record, then deserialise its 'mol' field back
    # into a molecule object.
    molecule = db.shifts.find_one({'db_id': db_id})
    molecule_obj = mongo.deserialise(molecule['mol'])
Ejemplo n.º 7
0
Archivo: users.py Proyecto: dmc2015/mpt
def add_user(username, password):
    """Create a user record with a bcrypt-hashed password, active by default."""
    password_hash = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    record = {
        'username': username,
        'password_hash': password_hash,
        'active': True,
    }
    mongo.connect().users.insert(record)
Ejemplo n.º 8
0
Archivo: users.py Proyecto: dmc2015/mpt
def get_user(username, password=None):
    """
    Fetch a User by name; returns None when no such user exists.

    When a password is supplied and validates, the returned user is
    marked as authenticated.
    """
    record = mongo.connect().users.find_one({'username': username})
    if not record:
        return None
    user = User(record['username'])
    user.active = record.get('active', False)
    if password and valid_password(username, password):
        user.authenticated = True
    return user
Ejemplo n.º 9
0
def timeline(coll, page=1, per_page=5):
    """Return one page of documents from `coll`, newest first (by 'id_str')."""
    cursor = mongo.connect()[coll].find({}).sort('id_str', -1).limit(per_page)

    offset = (page - 1) * per_page
    return list(cursor.skip(offset) if offset else cursor)
Ejemplo n.º 10
0
def get_page_config():
    """
    POST handler: return (and lazily create) the per-user config for a page.

    Requires the user to have completed the survey first; otherwise
    answers with "Need Survey". The first request for a (page, user)
    pair computes and stores gender/geo stats; later requests return
    the stored document.
    """
    if request.method == 'POST':
        "see if the page is in db already"

        uri = request.json['uri']
        user_id = request.json['user_id']
        survey_db = mongo.connect('survey')
        # All survey documents for this user; only the first is used.
        user_info = [s for s in survey_db.find({'user_id': user_id})]
        # No survey yet (or an incomplete one without 'gender') — the
        # client must run the survey before page config is available.
        if len(user_info)<=0 or u'gender' not in user_info[0]:
            print 'Returning Need Survey'
            return json.dumps({'response': "Need Survey"})

        user_info = user_info[0]

        user_gender = user_info['gender']
        user_condition = user_info['condition']
        user_info['condition'] = user_condition
        page_id = get_id_from_uri(uri)
        page_info = {'condition': user_condition}
        video_info = get_video_info(uri)


        page_info['video_info'] = video_info
        page_db = mongo.connect('page')

        #print 'debug: 120 ', uri, user_gender
        scale, male, female, error_code, comments, gender_subcondition = count_gender_on_page(uri, user_gender)
        #print 'debug: 122 ', scale, male, female, error_code, comments
        page_info['error_code'] = error_code
        page_info['response'] = "OK"
        page_info['comments'] = comments
        # Compound _id makes the page record unique per (page, user).
        query = {"_id": {'page_id': page_id, 'user_id': user_id}}

        # First visit: compute and persist; otherwise return stored doc.
        if page_db.find(query).count() == 0:
            page_info['same_gender_scale'] = scale
            page_info['gender'] = {'user_gender': user_gender, 'scale': scale, 'male_count': male, 'female_count':female, 'error_code': error_code, 'subcondition': gender_subcondition}
            page_info['geo'] = get_geo()
            page_info['_id'] = {'page_id': page_id, 'user_id': user_id}
            page_db.insert(page_info, manipulate=False)
        else:
            page_info = [p for p in page_db.find(query)][0]

        return json.dumps(page_info)
Ejemplo n.º 11
0
def main():
    """Run screen_2 against the database and write each result's
    g_safename to r.txt, one name per line."""
    client, db = mongo.connect()
    results = screen_2(db)

    results_file = 'r.txt'

    # Removed an unused counter (`i = 0`) and an unnecessary list() copy
    # of the results iterable.
    with open(results_file, 'w') as handle:
        for r in results:
            handle.write(r['g_safename'] + '\n')
Ejemplo n.º 12
0
 def func_wrapper(*args):
     """Auth guard for a lambda handler: resolve the caller's ACL and
     pass any auth failure to the wrapped `func` as `exception`."""
     from mongo import connect
     event = args[0]
     context = args[1]
     e = None
     try:
         acl = connect('acl').find_by_auth(auth_key(event))
         if not acl:
             e = RestException("Unauthorized", 403)
             # NOTE(review): is_authorized is only reached when no ACL was
             # found — presumably it raises RestException; confirm intent.
             is_authorized(acl, event, context)
     except RestException as e:
         return func(*args, exception=e)
     # NOTE(review): `e or None` is equivalent to just `e` here.
     return func(*args, exception=e or None)
Ejemplo n.º 13
0
def store_survey():
    """
    POST handler: persist a survey submission (JSON or form-encoded)
    with a randomly assigned condition, echoing the stored document
    back. Any other method gets an ERROR response.
    """
    if request.method != 'POST':
        return json.dumps({"response": "ERROR"})

    payload = request.json if request.json != None else request.form.to_dict()
    payload['condition'] = randomly_assign_condition()

    mongo.connect('survey').insert(payload, manipulate = False)

    payload['response'] = "OK"
    return jsonify(payload)
Ejemplo n.º 14
0
def pre_count(board):
    """
    Document frequency per token type across a board's segmented posts.

    :param board: board/collection name within the 'PTT' database
    :return: dict {token: number of posts containing it}
    """
    import itertools
    res = connect('PTT', board).find({}, {'_id': 0, 'content_seg': 1})
    # content_seg appears to be a list of (token, ...) pairs per post;
    # keep only the token text — TODO confirm the tuple layout.
    res = [i['content_seg'] for i in res]
    res = [[i[0] for i in j] for j in res]
    # Unique token types across the whole board.
    types = itertools.chain.from_iterable(res)
    types = list(set(types))
    dic = {}
    for t in types:
        dic[t] = 0
    # Count, for each type, how many posts contain it at least once.
    for doc in res:
        for t in types:
            print t
            if t in doc:
                dic[t] += 1
    return dic
Ejemplo n.º 15
0
def tfidf(text, board):
    """
    Score each distinct token of `text` by tf-idf against the board's
    corpus of segmented posts.

    :return: list of (token, score) pairs
    """
    docs = connect('PTT', board).find({}, {'_id': 0, 'content_seg': 1})
    docs = [[token[0] for token in doc['content_seg']] for doc in docs]

    # Document frequency: in how many posts does each token appear?
    terms = list(set(text))
    df = dict.fromkeys(terms, 0)
    for doc in docs:
        for term in terms:
            if term in doc:
                df[term] += 1

    scores = []
    for term in terms:
        tf = 1.0 * text.count(term) / len(text)
        idf = log(1.0 * len(docs) / (df[term] + 1))
        scores.append((term, tf * idf))
    return scores
Ejemplo n.º 16
0
def refresh():
    """
    Pull the iCalendar feed and upsert every event into db.events,
    keyed by the event UID.
    """
    resp = requests.get(ICALENDAR_URL)
    cal = Calendar.from_ical(resp.content)

    # Connect once, not once per event (the connect call was previously
    # inside the loop — loop-invariant work hoisted out).
    db = mongo.connect()

    for event in cal.subcomponents:

        uid = event["UID"]
        title = event["SUMMARY"]
        url = event["DESCRIPTION"]

        location = event["LOCATION"]

        start = parsedt(event["DTSTART"])
        end = parsedt(event["DTEND"])

        spec = {"uid": uid}
        doc = {"uid": uid, "title": title, "url": url, "location": location, "start": start, "end": end}

        db.events.update(spec, {"$set": doc}, upsert=True)
Ejemplo n.º 17
0
def main():
	"""CLI entry point for test-set management.

	Builds an argparse tree of subcommands (delete/insert/cp/nw/rebuild/
	mini/convert/molecule/tensor/print), resolves or creates the dataset
	directory, then dispatches on args.which.
	"""

	# mongo.connect() returns a (client, database) pair in this project.
	client, db = mongo.connect()
	
	# ---- argument parsing: one sub-parser per command, each tagging
	# args.which so the dispatch below can branch on it.
	parser = argparse.ArgumentParser(description='TEST SET MANAGEMENT')
	# NOTE(review): nargs='?' means args.dataset can be None, in which
	# case the .endswith('/') check below would raise AttributeError.
	parser.add_argument('dataset', help='the name of the test set', nargs='?')
	parser.add_argument('-d', '--delete', help='Removes the spectrum from the lists of spectra in the database')
	parser.set_defaults(which='main')

	subparser = parser.add_subparsers(help='TEST SET COMMANDS')

	delete = subparser.add_parser('delete', help="Removes the spectrum from the lists of spectra in the database")
	delete.set_defaults(which='del')

	insert = subparser.add_parser('insert', help="Inserts processed spectra into the database")
	insert.set_defaults(which='insert')

	cp = subparser.add_parser('cp', help='COPY')
	cp.add_argument('name', help='The new testset name')
	cp.add_argument('-f', '--full', action='store_true', help='Copy all files in dir')
	cp.add_argument('-s', '--success', action='store_true', help='Copy only successfully optimised molecules')
	cp.set_defaults(which='cp')

	nw = subparser.add_parser('nw', help='NEW')
	nw.set_defaults(which='nw')

	rb = subparser.add_parser('rebuild', help='REBUILD')
	rb.set_defaults(which='rebuild')

	minimise = subparser.add_parser('mini', help='MINIMISATION OPERATIONS')
	minimise.add_argument('-c', '--conformers', type=int, help='Set the number of conformers to try')
	minimise.add_argument('-r', '--rdkit', action='store_true', help='Minimise with rdkit mmff')
	minimise.set_defaults(which='minimise')

	convert = subparser.add_parser('convert', help='CONVERSION OPERATIONS')
	convert.add_argument('-a', '--arguments', nargs='+', default=[], help='Supply the conversion arguments')
	convert.add_argument('-s', '--savecpm', help='Location to save the com file')
	# convert.add_argument('convert', help='Convert to com file, default save as g_safename in same dir')
	convert.set_defaults(which='convert')

	molecules = subparser.add_parser('molecule', help='MOLECULE OPERATIONS')
	molecules.add_argument('-a', '--add', metavar='<FILE>', type=argparse.FileType('r', 0), help='Add molecules from file to the test set')
	molecules.add_argument('-r', '--remove', nargs='+', help='Remove named molecules, or all')
	molecules.add_argument('-n', '--num', action='store_true', help='How many molecules in the dataset')
	molecules.add_argument('-c', '--convert', action='store_true', help='Convert to com file')
	molecules.add_argument('--molfiles', default=False, help='Dump to molfiles, with no hydrgens (for Chemdraw)')
	molecules.set_defaults(which='molecules')

	tensors = subparser.add_parser('tensor', help='TENSOR OPERATIONS')
	tensors.add_argument('-p', '--process', action='store_true', help='Process the tensors from the dataset')
	tensors.add_argument('-o', '--output', help='Writes a tensor analysis to csv')
	tensors.set_defaults(which='tensors')

	printer = subparser.add_parser('print', help='PRINT OPERATIONS')
	printer.add_argument('pprint', metavar='<ATTRIBUTE>', help='Print a dataset attribute by name')
	printer.set_defaults(which='printing')

	args = parser.parse_args()

	# ---- resolve the dataset directory (create for 'nw', deserialise
	# or offer to build for an existing directory).
	if args.dataset.endswith('/'):
		print 'No trailing slash!'
		sys.exit(1)

	if not os.path.exists(args.dataset) and args.which != 'nw':
		print "dataset directory does not exist"
		sys.exit(1)

	elif args.which == 'nw':
		print 'Creating new dataset directory: ' + args.dataset

		if os.path.isdir(args.dataset):
			print 'Directory already exists!'
			sys.exit(1)

		else:
			dataset = testset.TestSet(args.dataset)
			print 'Serialising...'
			dataset.save()
			sys.exit(0)

	elif os.path.exists(args.dataset):
		try:
			dataset = testset.TestSet.deserialise(args.dataset)
		except IOError:
			print 'This dataset has a filestructure but no serialised set found'
			i = raw_input("Would you like to create one? y/n ")

			if i == 'y':
				dataset = testset.TestSet.build(db, args.dataset)
				print 'Dataset {0} created with {1} molecules'.format(dataset.dataset, len(dataset.molecules))
			else:
				return

	# ---- command dispatch on args.which.
	if args.which == 'del':
		mongo.delete_spectrum(db, dataset.dataset)

	elif args.which == 'printing':

		# 'attr.subattr' prints subattr for every item of a list attribute.
		if '.' in args.pprint:
			args.pprint, attr2 = args.pprint.split('.')

		if hasattr(dataset, args.pprint):
			attr = getattr(dataset, args.pprint)

			if isinstance(attr, list):
				for item in attr:
					# NOTE(review): attr2 is unbound when no '.' was given
					# and attr is a list — hasattr would raise NameError.
					if hasattr(item, attr2):
						print getattr(item, attr2)
					else:
						print item
			else:
				print attr

		else:
			print 'No attribute with name "{0}" found'.format(args.pprint)

	if args.which == 'molecules':

		if args.add:
			print 'Reading molecules from {0}'.format(args.add.name)
			dataset.import_molecules(db, args.add)
			dataset.save()

		elif args.remove:

			if args.remove == ['all']:
				i = raw_input("""Are you sure you wish to remove all molecules from this dataset? All directories will be removed - y/n: """)
				if i == 'y':
					dataset.remove_molecules(dataset.molecules)
					dataset.save()
				else:
					sys.exit(0)
			else:
				print 'Removing molecules: '+str(args.remove)
				dataset.remove_molecules(args.remove)

		elif args.num:
			print '{0} molecules in the dataset'.format(len(dataset.molecules))

		elif args.molfiles:

			cwd = os.getcwd()
			location = os.path.join(cwd, args.molfiles)

			for m in dataset.molecules:
				m.mol_to_molfile(dir=location, hydrogens=False)


	elif args.which == 'convert':

		if args.arguments:
			params = {'cpus':args.arguments[0], 'memory':args.arguments[1], 'command':args.arguments[2]}
		else:
			params = molecule.input_gaussian_params()

		dataset.prepare_com_files(**params)


	elif args.which == 'tensors':

		if args.process:
			dataset.process_logs()

		elif args.output:
			"""Write the tensors to file"""

			stream = tensor_analysis.TensorStream.build(db, args.dataset)
			stream.scale_stream()

			with open(args.output, 'w') as handle:
				handle.write('Output from dataset {} on {}\n'.format(args.dataset, str(dt.now())))
				handle.write('molecule, atomid, hybridisation, mulliken charge, exp, calc, scaled error\n')
				for line in stream:
					print line
					handle.write('{}\n'.format(','.join(map(str,line))))

				print 'Wrote output to {}'.format(args.output)


	elif args.which == 'cp':

		if os.path.exists(args.name):
			print 'This dir exists, please choose another name'
			sys.exit(1)

		else:
			print 'Duplicating {0} to {1}'.format(dataset.dataset, args.name)
			dataset = dataset.duplicate_set(args.name, args.full, args.success)

			dataset.save()


	elif args.which == 'minimise':

		if args.conformers:

			print 'Scanning conformer space with {0} starting points'.format(args.conformers)
			dataset.minimise_molecules(molecule.m_rdkit_conformers, confs=args.conformers)
			dataset.save()

		elif args.rdkit:

			print 'Minimising with RDKIT'
			dataset.minimise_molecules(molecule.m_rdkit)
			dataset.save()


	elif args.which == 'rebuild':

		print 'Rebuilding dataset'
		newset = dataset.build(db, dataset.dataset)
		newset.save()


	elif args.which == 'insert':

		print 'Inserting tensors'
		dataset.insert_tensors(db)
Ejemplo n.º 18
0
def main ():
	"""CLI entry point for molecule utilities.

	Subcommands: plot (tensor scatter for one dataset), convert
	(write a Gaussian .com file) and spectra (tabulate/compare
	experimental vs computed 13C spectra).
	"""

	# mongo.connect() returns a (client, database) pair in this project.
	client, db = mongo.connect()
	
	parser = argparse.ArgumentParser(description='UTILITIES')
	parser.add_argument('molecule', help='the name of the molecule/molecues', nargs='?')
	parser.set_defaults(which='main')

	subparser = parser.add_subparsers(help='TEST SET COMMANDS')

	plotter = subparser.add_parser('plot', help='PLOTTING OPERATIONS')
	plotter.add_argument('-d', '--dataset', required=True, help='Dataset to plot')
	plotter.add_argument('-s', '--savefig', help='Location to save the figure')
	plotter.set_defaults(which='plotting')

	converter = subparser.add_parser('convert', help='CONVERSION OPERATIONS')
	converter.add_argument('-s', '--save', help='Name the com file')
	converter.set_defaults(which='convert')

	spectra = subparser.add_parser('spectra', help='SPECTRA GRAB')
	spectra.add_argument('-s', '--savefig', help='Location to save the figure')
	spectra.add_argument('-d', '--dataset', help='dataset to examine, otherwise all', default=None)
	spectra.set_defaults(which='spectra')

	args = parser.parse_args()

	# Load the named molecule's record from the database.
	mol = molecule.Molecule(os.getcwd()).pull_record(db, {'g_safename':args.molecule})

	if args.which == 'plotting':

		if args.savefig:
			loc = args.savefig
		else:
			loc = None

		main_title = mol.g_safename + ' ' + args.dataset 

		shift_sets = tensor_analysis.molecule_tensor_analysis(mol, args.dataset)
		exp, calc, names = tensor_analysis.extract_data(shift_sets)

		fig = plotting.Figure(dimensions=(12,5), cols=1, main_title=main_title)
		plotting.plot_tensor_scatter(calc, exp, fig, loc, legend=False)

		fig.show_figure()

		# plotting.plot_tensors(tensors, title=args.dataset, loc=loc)


	elif args.which == 'convert':

		params = molecule.input_gaussian_params()
		molecule.convert_to_com(mol, **params)
		mol.cleanup()

		# Optionally rename the generated .com file to the requested name.
		if args.save:

			if not args.save.endswith('.com'):
				args.save += '.com'

			old = os.path.join(os.getcwd(), mol.g_safename, mol.g_safename+'.com')
			new = os.path.join(os.path.dirname(old), args.save)

			os.rename(old, new)


	elif args.which == 'spectra':

		shift_list = []

		# Label each spectrum (C)omputed by dataset or (E)xperimental by
		# assignment method, then tabulate shifts in columns of five.
		for x in mol.get_spectra(nuclei='13C'):

			if hasattr(x, 'dataset'):
				name = "(C) " + x.dataset
			else:
				name = "(E) " + str(x.assignment_method)

			shifts = sorted(x.shifts, key=lambda y: y[0])
			shifts.insert(0, name)

			shift_list.append(shifts)

		print '\n'

		for sl in sublist(shift_list, 5):
			for line in zip(*sl):
				# print shifts
				print ''.join(['{:<30}'.format(x) for x in line])
			print '\n'

		exp_spectra = mol.get_spectra(computed=False, nuclei='13C')

		if args.dataset:
			comp_spectra = mol.get_spectra(computed=True, dataset=args.dataset, assignment_method='Computed')
		else:	
			comp_spectra = mol.get_spectra(computed=True, assignment_method='Computed')

		# Regression statistics of every computed spectrum against every
		# experimental one.
		if comp_spectra:
			print '{:<30}{:<25}{:<20}{:<20}{:<20}'.format('Experimental', 'Computed', 'slope', 'intercept', 'R')

			for exp_spec in exp_spectra:
				for comp_spec in comp_spectra:

					stats = tensor_analysis.regres_stats(comp_spec, exp_spec)
					print '{:<30}{:<25}{:<20}{:<20}{:<20}'.format(exp_spec.assignment_method, comp_spec.dataset, round(stats[0],4),
						round(stats[1],4), round(stats[2],4))

					# NOTE(review): `saveto` is never defined — this branch
					# would raise NameError; presumably args.savefig was meant.
					if args.savefig:
						print 'Saving figure to {0}'.format(saveto)
						t = tensor_analysis.molecule_tensor_analysis(mol, comp_spec.dataset)
						plotting.plot_tensors(t, title=mol.g_safename, loc=saveto)
Ejemplo n.º 19
0
Archivo: users.py Proyecto: dmc2015/mpt
def valid_password(username, password):
    """
    Check `password` against the stored bcrypt hash for `username`.

    Returns True on match, False on mismatch, and None (falsy) when the
    user does not exist.
    """
    import hmac  # local import: constant-time digest comparison

    db = mongo.connect()
    user = db.users.find_one({'username': username})
    if user:
        current_hash = user['password_hash'].encode('utf-8')
        candidate = bcrypt.hashpw(password.encode('utf-8'), current_hash)
        # compare_digest avoids leaking match position via comparison timing,
        # unlike the previous plain `==` check.
        return hmac.compare_digest(candidate, current_hash)
Ejemplo n.º 20
0
Archivo: users.py Proyecto: dmc2015/mpt
def update_password(username, password):
    """Replace the stored password hash for `username` with a fresh bcrypt hash."""
    new_hash = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    mongo.connect().users.update(
        {'username': username},
        {'$set': {'password_hash': new_hash}},
    )
Ejemplo n.º 21
0
		points.append(scaled_point)

	return points


def csv_dump(handle, xs):

	'''Write each inner sequence of *xs* to *handle* as one comma-separated line.

	Bug fix: the loop previously joined the outer list ``xs`` on every
	iteration (the row variable ``x`` was never used), so every row was
	wrong and rows ran together with no newline separator.
	'''

	for x in xs:
		handle.write(','.join(str(field) for field in x) + '\n')


if __name__ == '__main__':

	client, db = mongo.connect()

	parser = argparse.ArgumentParser(description='TENSOR ANALYSIS UTILITIES')
	parser.add_argument('-l', '--list_ml', action='store_true', help='list molecules')
	# parser.add_argument('-o', '--output', action='store_true', help='print to terminal')
	parser.add_argument('-f', '--file', metavar='FILE', help='read from a list of newline separated molecule safenames')
	parser.add_argument('-n', '--name', help='pull a specific molecule safename from the db')
	parser.add_argument('-d', '--dataset', required=True, help='The name of the dataset to compare with exp shifts')
	parser.add_argument('-c', '--csv', action='store_true', help='Output a tensor comparison csv for each molecule')

	args = parser.parse_args()

	fl = path_incrementor(args.dataset)

	all_shifts_fl = open(fl, 'w')
Ejemplo n.º 22
0
def save_tweets(coll, tweets):
    """Upsert each tweet into collection `coll`, keyed by its 'id_str'."""
    collection = mongo.connect()[coll]
    for tweet in tweets:
        collection.update({'id_str': tweet['id_str']}, {'$set': tweet}, upsert=True)
Ejemplo n.º 23
0
#!/usr/bin/env python3

if __name__ == '__main__':
    import sys, os
    sys.path.insert(
        0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    from mongo import connect
    from users import UserCollection

    from pymongo.errors import DuplicateKeyError
    from common.exc import AuthError

    db = connect('fakesford_test')
    print('all collections: %s' % db.list_collection_names())

    uc = UserCollection(db)
    uc._collection.drop()

    uc.add('maxuta', '123', '123', 'pupil', age=27, subjects=['a', 'b'])
    uc.add('kolya', '111', '111', 'tutor', age=26, subjects=['math'])

    print('all: %s' % list(uc.iter_all()))

    try:
        uc.add('maxuta',
               '555',
               '555',
               'tutor',
               age=13,
               subjects=['aaaaa', 'bbbb'])
        try:
            video_info['title'] = entry.media.title.text
            video_info['published_on'] = entry.published.text
            video_info['description'] = entry.media.description.text
            video_info['category'] = entry.media.category[0].text
            video_info['tags'] = entry.media.keywords.text
            video_info['duration_seconds'] = entry.media.duration.seconds
            video_info['view_count'] = entry.statistics.view_count
            video_info['rating'] = entry.rating.average
        except:
            pass
    return video_info



# Backfill missing/stale YouTube metadata on every stored page document.
collection = mongo.connect("page")

for obj in collection.find():

    url = "https://www.youtube.com/watch?v=" + obj['_id']['page_id']
    try:
        video_info = get_video_info(url)
    except:
        # Best-effort: skip pages whose video lookup fails for any reason.
        continue

    # Only overwrite when there is no usable video_info yet.
    if 'video_info' not in obj or 'published_on' not in obj['video_info'] or obj['video_info']['published_on'] is None:
        obj['video_info'] = video_info

    collection.save(obj)

Ejemplo n.º 25
0
def store_record():
    """POST handler: insert the request's JSON body into the 'record' collection."""
    if request.method != 'POST':
        return json.dumps({'response': "ERROR"})
    mongo.connect('record').insert(request.json, manipulate=False)
    return json.dumps({'response': "OK"})
Ejemplo n.º 26
0
def main():
	"""CLI entry point for plotting tensor analyses of one or more datasets.

	Plot types (sub-commands): scatter, error, hybrid, charge, charge_3D.
	The figure is saved when --savefig is given, otherwise shown.
	"""
	
	# mongo.connect() returns a (client, database) pair in this project.
	client, db = mongo.connect()
	
	parser = argparse.ArgumentParser(description='PLOTTING')
	parser.add_argument('-d', '--dataset', nargs='+', required=True, default=[], help='The dataset(s) to draw from')
	parser.add_argument('-m', '--molecules', type = argparse.FileType('r'), default = [])
	parser.add_argument('-s', '--savefig', help='Location to save the figure')
	parser.add_argument('-t', '--title', required=True, help='The title of the figure - enclose in ""')
	parser.add_argument('-p', '--plot_title', action='store_true', help='Will try to create plot titles from name of dataset')
	parser.add_argument('-l', '--legend', action='store_true', help='Includes legends on plot')
	parser.add_argument('--size', nargs=2, default=[14,8], type=float, help='The size of the plot to draw')
	parser.add_argument('--cols', default=4, type=float, help='The number of columns to display')
	parser.set_defaults(which='main')

	subparser = parser.add_subparsers(help='Plot types')

	scatter = subparser.add_parser('scatter', help="Plot a scatter graph")
	scatter.add_argument('-e', '--errors', action='store_true', help='Includes error plots')
	scatter.set_defaults(which='scatter')

	error = subparser.add_parser('error', help="Plot an error graph")
	error.set_defaults(which='error')

	hybrid = subparser.add_parser('hybrid', help="Plot a hybrid error graph")
	hybrid.set_defaults(which='hybrid')

	charge = subparser.add_parser('charge', help="Plot a charge error graph")
	charge.set_defaults(which='charge')

	charge3D = subparser.add_parser('charge_3D', help="Plot a 3D charge error graph")
	charge3D.set_defaults(which='charge_3D')

	args = parser.parse_args()

	# Expand dataset name patterns against the datasets present in the db.
	dataset_names = glob_datasets(db.shifts.distinct('spectra.dataset'), args.dataset)
	figure = plotting.Figure(dimensions=args.size, main_title=args.title, cols=args.cols)

	# If a molecules file was supplied, read it into a list of names.
	# NOTE(review): `file` is the Python 2 builtin type here.
	if isinstance(args.molecules, file):
		lines = args.molecules.readlines()
		args.molecules = [x.rstrip() for x in lines]

	# One sub-plot (or figure, for the figure-replacing types) per dataset.
	for i,name in enumerate(dataset_names):

		if args.plot_title:
			# Derive a human-readable "functional // basis" axis title
			# from the dataset's "<functional>_<basis>" naming scheme.
			axis_title = name.split('/')[-1]
			functional, basis = axis_title.split('_')
			basis = '6-311+G(2d,p)'if basis == '631+G' else '6-31G(d,p)'
			axis_title = functional + ' // ' + basis
		else:
			axis_title = name
			#axis_title = ''

		if args.molecules:
			molecules_of_interest = args.molecules
		else:
			molecules_of_interest = []

		stream = tensor_analysis.TensorStream.build(db, name, molecules_of_interest)
		exp, calc = stream.exp, stream.calc
		scaled_tensors = stream.scale_stream()

		if args.which == 'scatter':
			plotting.plot_tensor_scatter(exp, calc, figure, plot_title=axis_title, legend=args.legend)
			if args.errors:
				plotting.plot_scaled_shift_errors(scaled_tensors, figure, plot_title=axis_title, legend=args.legend)

		if args.which == 'error':
			plotting.plot_scaled_shift_errors(scaled_tensors, figure, plot_title=axis_title, legend=args.legend)

		if args.which == 'hybrid':
			figure = plotting.plot_scaled_tensors_by_hybridisation(stream.hybridisations, scaled_tensors, plot_title=args.title)

		if args.which == 'charge':
			figure = plotting.plot_charge_chart(stream.hybridisations, stream.charges, scaled_tensors, plot_title=args.title)

		if args.which == 'charge_3D':
			print args.which
			figure = plotting.plot_3D_scatter(stream.hybridisations, stream.charges, scaled_tensors, plot_title=args.title)


	if args.savefig:
		figure.save_figure(args.savefig)
	else:
		figure.show_figure()
Ejemplo n.º 27
0
from flask import *
from functools import wraps
from mongo import connect
import logging
from logging.handlers import RotatingFileHandler

#from config import conf

# Fix: the duplicated `from functools import wraps` and
# `from mongo import connect` lines have been removed.

app = Flask(__name__)
app.secret_key = 'token'
app.config['ASK_VERIFY_REQUESTS'] = False
db = connect()

# Log warnings and above to a small rotating file.
handler = RotatingFileHandler('app.log', maxBytes=10000, backupCount=1)
handler.setFormatter(
    logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                      '[in %(pathname)s:%(lineno)d]'))
handler.setLevel(logging.WARNING)
app.logger.addHandler(handler)

# View modules register their routes on `app` when imported.
from .index import *
from .users import *
from .records import *
from .certifacte import *
from .setting import *
Ejemplo n.º 28
0
Archivo: web.py Proyecto: dmc2015/mpt
# Absolute path of the directory containing this module.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# Placeholder HTML for empty content blocks.
EMPTY_BLOCK = """<br><br>"""


app = Flask(__name__)
# NOTE(review): falls back to a hard-coded secret when FLASK_SECRETKEY is
# unset — fine for dev, should not reach production.
app.secret_key = os.environ.get('FLASK_SECRETKEY', '1234567890')

# Serve static assets directly from the WSGI layer via WhiteNoise.
app.wsgi_app = WhiteNoise(app.wsgi_app, root=os.path.join(PROJECT_ROOT, 'static'), prefix='static/')
app.wsgi_app.add_files(os.path.join(PROJECT_ROOT, 'www'))

#
# database
#

db = mongo.connect()


#
# Postmark config
#

POSTMARK_API_KEY = os.environ.get('POSTMARK_API_KEY')
POSTMARK_SENDER = os.environ.get('POSTMARK_SENDER')
POSTMARK_RECIPIENTS = os.environ.get('POSTMARK_RECIPIENTS')
GENERIC_RECIPIENT = os.environ.get('GENERIC_RECIPIENT')


#
# request lifecycle
#
Ejemplo n.º 29
0
from flask import Flask, request, render_template, redirect, url_for, send_from_directory, session, make_response
from werkzeug import secure_filename
import flask
import os
import json
import mongo
import requests
import urllib2, urllib
import base64
#import dbc

# File extensions accepted by the upload endpoint.
ALLOWED_EXTENSIONS = set(['txt', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
# Establish the module-level mongo connection at import time.
mongo.connect()
app.config['UPLOAD_FOLDER'] = "uploads"
app.secret_key = "ThomasWroteSOMETHINGCOOL"
def allowed_file(filename):
    """Return True when `filename` has an extension in ALLOWED_EXTENSIONS.

    Generalised to compare case-insensitively, so e.g. 'photo.PNG' is
    accepted alongside 'photo.png' (previously rejected).
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods = ["GET", "POST"])
def index():
    # Landing page; POST is accepted but handled identically to GET.
    return render_template("index.html")
@app.route('/butts')
def butts():
    """Serve static/butts.html.

    Fixed a file-handle leak: the file was opened without ever being
    closed; a context manager closes it deterministically.
    """
    with open("static/butts.html") as fh:
        return fh.read()

@app.route('/login', methods = ["GET", "POST"])
def login():
    """join an existing group"""
    if request.method == "POST":
        response = make_response(redirect("/check/"+request.form["code"]))
Ejemplo n.º 30
0
Archivo: users.py Proyecto: dmc2015/mpt
def get_users():
    """Build a User object for every document in the users collection."""
    records = mongo.connect().users.find()
    return [User(rec['username'], is_active=rec.get('active', False)) for rec in records]
Ejemplo n.º 31
0
Archivo: users.py Proyecto: dmc2015/mpt
def remove_user(username):
    """Delete the user document matching `username`."""
    mongo.connect().users.remove({'username': username})
Ejemplo n.º 32
0
def display():
    """
    POST handler: point the module-level `collection` at the database on
    the submitted server address, then render the results page.
    """
    SERVER_ADDR = request.form['serverid']
    global collection
    # The connection object returned here was assigned to an unused local;
    # keep the call for its side effect, drop the dead binding.
    mongo.connect(SERVER_ADDR)
    collection = mongo.importDB(SERVER_ADDR)
    return render_template("newpage.html")
Ejemplo n.º 33
0
Archivo: feeds.py Proyecto: dmc2015/mpt
def save_post(post):
    """Upsert `post` into db.posts, keyed by its 'id' field."""
    mongo.connect().posts.update({'id': post['id']}, {'$set': post}, upsert=True)
Ejemplo n.º 34
0
#-*-coding:utf-8-*-

# Tally how many tokens were posted on PTT per year, across all boards.
# (Python 2 script: xrange, print statement, iteritems, has_key.)

import mongo
from datetime import datetime

# Token totals keyed by year, zero-initialised for 2001..current year.
year_con = dict()

for i in xrange(2001, datetime.today().year+1):
    year_con[i] = 0


# Board id -> display name (the Chinese name when available, else the id).
BOARDS = mongo.connect('PTTmeta', 'info').find({}, {'_id':0, 'board':1, 'board_cht':1})
BOARDS = dict([(i['board'], i['board_cht']) if i.has_key('board_cht') else (i['board'], i['board']) for i in BOARDS])


# Accumulate each post's segmented-token count under its posting year.
for board in BOARDS:
    res = mongo.connect('PTT', board).find({}, {'_id':0, 'content_seg':1, 'post_time':1})
    for doc in res:
        year = doc['post_time'].year
        content_seg = doc['content_seg']
        toknum = len(content_seg)
        print board, year, toknum
        year_con[year] += toknum


# Re-key the totals by string year (e.g. for JSON/Mongo storage).
newdic = dict()

for k, v in year_con.iteritems():
    newdic[str(k)] = v

Ejemplo n.º 35
0
Archivo: users.py Proyecto: dmc2015/mpt
def deactivate_user(username):
    db = mongo.connect()
    db.users.update({'username': username}, {'$set': {'active': False}})
Ejemplo n.º 36
0
#-*-coding:utf8-*-
from __future__ import division
from CWB.CL import Corpus
import PyCQP_interface
from datetime import datetime
import os
import re
from mongo import connect

# Board id -> display name (the Chinese name when present, else the id).
# NOTE(review): `has_key` and cursor `.next()` are Python 2 / old-pymongo APIs.
BOARDREF = connect('PTTmeta', 'info').find({}, {
    '_id': 0,
    'board': 1,
    'board_cht': 1
})
BOARDREF = dict([(i['board'], i['board_cht']) if i.has_key('board_cht') else
                 (i['board'], i['board']) for i in BOARDREF])

# Per-year corpus token totals, stored in a single 'meta' document.
toknumByYear = connect('PTTmeta', 'meta').find({}, {
    '_id': 0,
    'toknumByYear': 1
})
toknumByYear = toknumByYear.next()['toknumByYear']


class Cqp(object):
    """CQP corpus query helper (class body may continue beyond this view)."""

    def __init__(self, window_size=6, corpus_name='PTT', time_order=-1):
        # window_size: size of the concordance window around a hit.
        # time_order: -1 presumably means newest-first — TODO confirm.
        self.window_size = window_size
        self.corpus_name = corpus_name
        self.time_order = time_order
        # Frequency counts accumulated per year by later queries.
        self.freq_by_year = dict()
Ejemplo n.º 37
0
 def __init__(self,name):
     # Open the 'lemonpay' database (no explicit host — presumably the
     # default connection; confirm mongo.connect's signature) and bind
     # the named collection for this instance.
     self.db = mongo.connect(None, 'lemonpay')
     self.collection = self.db[name]