Пример #1
0
def main(argv):
    """Load a weblib store file, print summary statistics, exercise a
    tag write/remove round trip, and optionally save to a second path.

    argv[1]: path of the store file to load (required).
    argv[2]: optional output path; when present the weblib is saved there.
    """
    if len(argv) < 2:
        # No input path given: print the module docstring as usage help.
        print __doc__
        sys.exit(-1)

    pathname = argv[1]

    store = getStore(False)
    store.load(pathname)
    wlib = getWeblib()

    # Summary line; the category description is escaped and truncated to
    # 300 bytes to keep the output readable.
    print "Loaded %s\ncategory_description:\n%s\n#tags %s\n#webpages %s" % (
        argv[1],
        wlib.category.getDescription().encode("raw_unicode_escape")[:300],
        len(wlib.tags),
        len(wlib.webpages),
    )

    # Round-trip a throwaway tag: the id printed before/after writeTag()
    # shows whether the store assigns ids on write.
    newTag = weblib.Tag(name="hello tag")
    print >> sys.stderr, "id", newTag.id
    newTag = store.writeTag(newTag)
    print >> sys.stderr, "id", newTag.id
    store.removeItem(newTag)

    # save
    if len(argv) > 2:
        store.save(wlib, argv[2])
Пример #2
0
def menu():
	"""Print the menu, read a numeric choice, and dispatch it.

	Re-prompts until the input parses as an integer; a number outside
	1-6 prints a warning and returns; 6 exits the program.

	Fixes over the original:
	- the try body is narrowed to the input parsing, so a ValueError
	  raised inside a commands.* handler is no longer swallowed and
	  misreported as "Please only enter numbers!";
	- the input is parsed once instead of calling int() in every branch;
	- the retry uses a loop instead of recursion (no recursion-depth
	  growth on repeated bad input).
	"""
	while True:
		menu_print()
		user_input = input('\033[47m' + 'Please choose: ')
		try:
			# Parse once; only this line can legitimately raise ValueError.
			choice = int(user_input)
		except ValueError:
			print('\n' + '\033[31m' + 'Please only enter numbers!\n')
			continue  # iterative retry replaces the original recursion

		if choice == 1:
			commands.lists(store.load())
		elif choice == 2:
			commands.add(store.load())
			commands.lists(store.load())
		elif choice == 3:
			commands.lists(store.load())
			commands.complete(store.load())
		elif choice == 4:
			commands.lists(store.load())
			commands.removes(store.load())
		elif choice == 5:
			commands.remove_completed(store.load())
		elif choice == 6:
			exit()
		else:
			print('\n' + '\033[31m' + 'Wrong input!\n')
		return
Пример #3
0
def saveCurPos (fileName, line, col):
	# Persist the cursor position for fileName in an LRU-style db of at
	# most CUR_POS_DB_LEN files.  Each entry maps a canonical path to
	# (line, col, age), where age 0 is the most recently saved.
	assert fileName != None

	if (line == 0) and (col == 0):
		# (0, 0) is treated as "nothing worth saving"; skip the write.
		pass
	else:
		db = store.load(storeName, {})

		key = os.path.realpath(fileName)
		oldVal = db.get(key)
		if oldVal == None:
			# New file: age every existing entry by one; the single
			# entry that reaches the maximum age falls out of the db.
			toDel = None
			for k, v in db.iteritems():
				l, c, last = v
				if last == CUR_POS_DB_LEN - 1:
					assert toDel == None
					toDel = k
				else:
					db[k] = (l, c, last + 1)
			if toDel != None:
				del db[toDel]
				if Trace: print toDel, 'removed from cursor position db'
		else:
			# Existing file: only entries younger than its old age need
			# aging; everything older keeps its relative order.
			oldLast = oldVal[2]
			for k, v in db.iteritems():
				l, c, last = v
				if last < oldLast:
					db[k] = (l, c, last + 1)

		# The saved/updated file always becomes the youngest entry.
		db[key] = (line, col, 0)

		ok = store.save(storeName, db)
		if ok:
			if Trace: print 'saved cursor position:', line, col
def main():
    """Drive the entry loop: load the store, run the program repeatedly
    while the user answers 'Y', then persist the data back to disk."""
    store.load()

    keep_going = True
    while keep_going:
        # Execute one pass of the program.
        run()
        print()

        # Prompt for another round; anything but 'Y'/'y' stops the loop.
        answer = input('More Entries (Y/N)? ')
        keep_going = answer.upper() == 'Y'

    # This will save the user data to a file.
    store.save()
Пример #5
0
    def load(self):
        '''
        t.load()

        Loads the results from file.

        Requires can_load() to be true.  Reads the stored result from
        self.filename() via store.load() into self._result and marks
        the task as finished.
        '''
        assert self.can_load()
        self._result = store.load(self.filename())
        # Only mark finished after the load succeeded.
        self.finished = True
Пример #6
0
def break_into_sentences(limit=5):
    """Split the sanitized source text into at most `limit` sentences,
    annotating each with its Watson keywords and an empty image list,
    then persist the updated content state."""
    content = load()

    sentences = sent_tokenize(content["source_sanitized_content"])[:limit]
    content["sentences"] = [
        {'text': sentence, 'keywords': watson_keywords(sentence), 'images': []}
        for sentence in sentences
    ]

    save(content)
Пример #7
0
def fetch_sentence_images():
    """For every sentence, build a search query from the search term plus
    the sentence's first keyword, record the query, and attach the fetched
    image URLs, then persist the updated content state."""
    content = load()

    term = content["search_term"]
    for sentence in content["sentences"]:
        query = term + ' ' + sentence['keywords'][0]
        # The entries are dicts, so mutating them in place updates `content`.
        sentence["google_search_query"] = query
        sentence["images"] = fetch_images_urls(query)

    save(content)
Пример #8
0
def dump(args):
    """Print the readme template, substituting the MYAPP placeholder with
    the first positional argument when one is supplied."""
    app_name = common.item(args, 0, None)

    text = store.load(["readme.md"])
    if app_name:
        text = text.replace("MYAPP", app_name)

    print(text)
Пример #9
0
    def init(self,
             YL,
             YR,
             RL,
             RR,
             load_calibration,
             NoiseSensor,
             Buzzer,
             USTrigger,
             USEcho,
             LA=-1,
             RA=-1):
        # Configure the robot's servos and optional peripherals.
        #
        # YL, YR, RL, RR: pins of the four base servos.
        # LA, RA: optional extra (arm) servo pins; -1 (default) means
        #   "not fitted", leaving the servo total at 4.
        # load_calibration: when truthy, apply per-servo trim values
        #   previously persisted under the 'Trims' store key.
        # NoiseSensor, Buzzer, USTrigger, USEcho: peripheral pins; a
        #   negative pin number disables the corresponding peripheral.
        self._servo_pins[0] = YL
        self._servo_pins[1] = YR
        self._servo_pins[2] = RL
        self._servo_pins[3] = RR
        self._servo_totals = 4
        if LA != -1:
            # Six-servo build: add the two arm servos.
            self._servo_pins[4] = LA
            self._servo_pins[5] = RA
            self._servo_totals = 6
        self.attachServos()
        self.setRestState(False)
        if load_calibration:
            # Stored trims appear to be byte-encoded: values above 128
            # are mapped back to negatives — TODO confirm encoding.
            trims = store.load('Trims', [0, 0, 0, 0, 0, 0])
            for i in range(0, self._servo_totals):
                servo_trim = trims[i]
                if servo_trim > 128:
                    servo_trim -= 256
                self._servo[i].SetTrim(servo_trim)
        # Default every servo position to 90 (center).
        for i in range(
                0, self._servo_totals
        ):  # -- this could be eliminated as we already initialize
            self._servo_position[
                i] = 90  # -- the array from __init__() above ...

        self.noiseSensor = NoiseSensor
        self.buzzer = Buzzer
        self.usTrigger = USTrigger
        self.usEcho = USEcho

        # Ultrasonic sensor needs both trigger and echo pins configured.
        if self.usTrigger >= 0 and self.usEcho >= 0:
            self.us = us(self.usTrigger, self.usEcho)

        if self.buzzer >= 0:
            self.buzzerPin = Pin(self.buzzer, Pin.OUT)
            self.buzzerPin.value(0)

        if self.noiseSensor >= 0:
            self.noiseSensorPin = ADC(Pin(NoiseSensor))
            self.noiseSensorPin.atten(
                ADC.ATTN_11DB)  # read the full voltage 0-3.6V
Пример #10
0
def fetch_wikipedia_article():
    """Download the raw Wikipedia article for the configured search term
    via Algorithmia's WikipediaParser and store it in the content state."""
    content = load()

    # Read the Algorithmia API key from the local credentials file.
    with open("credentials.json", "r") as creds_file:
        algorithmia_key = json.loads(creds_file.read())["algorithmia_key"]

    parser = Algorithmia.client(algorithmia_key).algo('web/WikipediaParser/0.1.2')
    content["source_original_content"] = parser.pipe(content['search_term']).result["content"]

    save(content)
Пример #11
0
def sanitize_content():
    """Clean the raw article text and store it as the sanitized content.

    Drops blank lines and markdown heading lines (those starting with
    '='), and strips parenthesized fragments (e.g. dates), including one
    level of nested parentheses.
    """
    content = load()

    article = content['source_original_content']

    # Removes blank lines and markdown
    article = "".join([i for i in article.splitlines(True) if not (len(i) < 2 or i.startswith('='))])

    # Removes dates in parentheses.  The original pattern was a JavaScript
    # regex literal ("/.../gm") pasted verbatim, so Python matched the
    # slashes and flags as literal characters and the substitution never
    # fired; this is the equivalent Python pattern.
    article = re.sub(r"\((?:\([^()]*\)|[^()])*\)", '', article)

    content["source_sanitized_content"] = article

    save(content)
Пример #12
0
def loadCurPos (fileName):
	# Return the (line, col) last saved for fileName, or (0, 0) when the
	# position db is missing or has no entry for this file.
	assert fileName != None

	db = store.load(storeName, None)
	if db != None:
		# Entries are keyed by canonical path; values are (line, col, age).
		r = db.get( os.path.realpath(fileName) )
		if r != None:
			line, col, last = r
			if Trace: print 'loaded cursor position:', line, col
			return line, col
		else:
			if Trace: print 'can not restore cursor position: not saved'
			return 0, 0
	else:
		return 0, 0
Пример #13
0
def add_network_saved(network):
    """Insert or update a saved wifi network.

    `network` must be a dict with at least 'ssid' and 'pass' keys; any
    previously-saved entry with the same ssid is replaced.

    Raises:
        ValueError: when 'ssid' or 'pass' is missing.
    """
    # Idiomatic "not in" checks, with messages so callers can tell which
    # required key was missing (the original raised a bare ValueError()).
    if 'ssid' not in network:
        raise ValueError("network is missing required key 'ssid'")

    if 'pass' not in network:
        raise ValueError("network is missing required key 'pass'")

    network_saved_list = store.load('networks', [])

    # Remove a previous entry if it was there...
    network_saved_list = [
        x for x in network_saved_list if x['ssid'] != network['ssid']]

    network_saved_list.append(network)

    store.save('networks', network_saved_list)
def process_main(help_function, function_name: str, dataset_name, *args):
	"""
	Run a query, caching its JSON result in the store directory.

	@param help_function: callable implementing the actual query logic
	@param function_name: name of the query function (cache-key prefix)
	@param dataset_name: dataset to process (validated via assert_dataset)
	@param args: extra parameters forwarded to help_function
	@return: the JSON result, either cached or freshly computed
	"""
	# Validate/normalize the dataset name before building the cache key.
	dataset_name = assert_dataset(function_name, dataset_name)

	# Cache key: function name, dataset, then all extra args joined by '_'.
	extra = "_".join(str(i) for i in args)
	json_file = "{}_{}_{}".format(function_name, dataset_name, extra)

	# Serve from the on-disk cache when this exact query ran before.
	if store.exist(json_file):
		return store.load(json_file)

	# Cache miss: compute the result (args is forwarded as one tuple,
	# matching the original call convention), store it, and return it.
	json_result = help_function(dataset_name, args)
	store.store(json_file, json_result)
	return json_result
Пример #15
0
def main():
    """Entry point: fetch the stored users and display them."""
    print_users(store.load())
Пример #16
0
	index = dict(index)

	print l, len(index)

	json.dump(index, file(os.path.join('test', 'filelist.json'), 'w'))



	root_index = store.dump(index, os.path.join('test', 'index'), index_depth=1)

##	##!!! this is not used in anything yet...
##	json.dump(root_index, file(os.path.join('test', 'index', 'file_index.json'), 'w'))

	store.pack(os.path.join('test', 'index'), keep_files=False)

	d = store.load(os.path.join('test', 'index'))


	print len(d)

	k = d.keys()[0]

	i = store.Index(os.path.join('test', 'index'))

	print len(i)

##	print i[k]

	ic = store.IndexWithCache(os.path.join('test', 'index'))

	print ic[k]
Пример #17
0
def computer(function,**kwargs):

	"""
	Compute function figures out how to run a calculation over a simulation.

	Builds one job per simulation (resolving slices, groups, and any
	upstream post-processed data files), then runs `function` for each job
	whose result is not already on disk, saving a .dat result plus a .spec
	file describing the calculation.
	NOTE(review): this function contains several pdb/ipdb breakpoints
	(flagged inline) that will halt any unattended run.
	"""

	work = kwargs['workspace']
	calc = kwargs['calc']
	
	#---perform a calculation over all collections
	if 'collections' in calc: 
		cols = tuple([calc['collections']]) if type(calc['collections'])==str else calc['collections']
		sns = unique(flatten([work.vars['collections'][i] for i in cols]))
	else: sns = work.sns()
	
	#---get slices (required)
	slice_name = calc['slice_name']
	group = calc['group'] if 'group' in calc else None
	
	#---pass data to the function according to upstream data type
	incoming_type = calc['uptype']
	jobs,data = [],dict([(sn,{}) for sn in sns])
	combined_slices = []
	for sn in sns:
		new_job = {'sn':sn,'slice_name':slice_name,'group':group}
		if incoming_type == 'simulation':
			#---prepare combinations in a dictionary
			if slice_name not in work.slice(sn):
				raise Exception(
					'\n[ERROR] the slices yaml file is missing a slice named "%s" for simulation "%s"'%
					(slice_name,sn))
			try: mfp = work.slice(sn)[slice_name][group]['missing_frame_percent']
			except: 
				print "[WARNING] no missing frame percentage here"
				mfp = 0.0
			#---skip simulations whose slice is missing too many frames
			if mfp>work.missing_frame_tolerance:
				status('upstream slice failure: %s,%s,%s missing_frame_percent=%.1f'%(
					sn,slice_name,group,mfp),tag='warning')
				continue
			#---defaulting to 'all' group if group is None
			new_job['grofile'] = work.postdir+\
				work.slice(sn)[slice_name][group if group else 'all']['gro']
			#---! xtc must become a flag. recommend 'xtc' becomes work.cursor[1]
			#---defaulting to 'all' group if group is None
			new_job['trajfile'] = work.postdir+work.slice(sn)[slice_name][group if group else 'all']['xtc']
		if 'specs' not in calc: calc['specs'] = ''
		if 'upstream' in calc['specs']:
			#---if no loop on upstream you can use a list
			if type(calc['specs']['upstream'])==list: 
				upstream_ask = dict([(key,None) for key in calc['specs']['upstream']])
			elif type(calc['specs']['upstream'])==str: 
				upstream_ask = {calc['specs']['upstream']:None}
			else: upstream_ask = calc['specs']['upstream']
			for key,val in upstream_ask.items():
				upspecs = deepcopy(work.calc[key])
				#---identify the list of particular options along with the stubs
				options,stubs = work.interpret_specs(upspecs,return_stubs=True)
				#---identify paths and values over which we "whittle" the total list of specs
				whittles = [(i,j) for i,j in catalog(val)]
				#---if no loop on upstream pickles we interpret none and send blank specs
				if val in ['None','none',None]: 
					specs = [options[ss] for r,v in whittles for ss,s in enumerate(stubs)]
				else:
					#---select the correct option by matching all catalogued routes from the incoming
					#---...key to the original calculation
					specs = [options[ss] for r,v in whittles for ss,s in enumerate(stubs) 
						if delve(s['specs'],*r)==v]
				if len(specs)!=1 and 'loop' not in upspecs['slice_name']: 
					#---NOTE(review): breakpoint left in; halts unattended runs
					import pdb;pdb.set_trace()
					#---NOTE(review): `select` is undefined here, so this raise
					#---...would itself fail with a NameError if ever reached
					raise Exception('[ERROR] redundant upstream selection %s'%str(select))
				#---if there are multiple slices
				#---! note that we expect that if slice_names is a list it will be ordered here too
				for slicenum,spec in enumerate(specs):
					#---if the upstream calculation has a group then use it in the filename
					if not group:
						if 'group' in work.calc[key]: upgroup = work.calc[key]['group']
						else: upgroup = None
					else: upgroup = group
					if not upgroup: 
						sl = work.slice(sn)[spec['slice_name']]
						fn_base = re.findall('^v[0-9]+\.[0-9]+-[0-9]+-[0-9]+',
							work.slice(sn)[upspecs['slice_name']]['all']['filekey']
							)[0]+'.%s'%key
					else: 
						sl = work.slice(sn)[spec['slice_name']][upgroup]
						fn_base = '%s.%s'%(sl['filekey'],key)
					#---! moved the following block left recently
					fn = work.select_postdata(fn_base,spec)
					if not fn: 
						print '[ERROR] missing %s'%fn
						#---NOTE(review): breakpoint left in; halts unattended runs
						import pdb;pdb.set_trace()
					outkey = key if len(specs)==1 else '%s%d'%(key,slicenum)
					#---before each calculation the master loop loads the filename stored here
					data[sn][outkey] = os.path.basename(fn)[:-4]+'dat'
			new_job['upstream'] = data[sn].keys()
		jobs.append(new_job)
	
	#---master loop
	for outgoing in jobs:
		sn,slice_name,group = outgoing['sn'],outgoing['slice_name'],outgoing['group']
		
		#---if we combine slices for this calculation we use the whole time span in the base filename
		if type(slice_name)==list:
			#---! simple method for making the combination file key
			start = min([work.slice(sn)[s]['all' if not group else group]['start'] for s in slice_name])
			end = max([work.slice(sn)[s]['all' if not group else group]['end'] for s in slice_name])
			#---NOTE(review): `s` here is the variable leaked from the list
			#---...comprehensions above (Python 2), i.e. the LAST slice name;
			#---...skip is therefore taken from that slice only
			skip = work.slice(sn)[s]['all' if not group else group]['skip']
			#---! this filekey construction means the user will have to anticipate the names of combos
			fn_base = '%s.%d-%d-%d.%s'%(work.prefixer(sn),start,end,skip,function.__name__)
		else:
			#---we index all calculations automatically in case we loop over specs later
			index,fn_key = -1,''
			if not group:
				fn_base = re.findall('^v[0-9]+\.[0-9]+-[0-9]+-[0-9]+',
					work.slice(sn)[slice_name][
					'all' if not group else group]['filekey'])[0]+'.%s'%function.__name__
			else:
				try: fn_base = work.slice(sn)[slice_name][
					'all' if not group else group]['filekey']+'.%s'%function.__name__
				except:
					print "no group and cannot get base filename"
					#---NOTE(review): breakpoint left in; halts unattended runs
					import pdb;pdb.set_trace()
		#---next free index: one past the highest existing .nN.dat for this base name
		prev = glob.glob(work.postdir+fn_base+'*.dat')
		if prev == []: index = 0
		else: index = max(map(lambda x:int(re.findall('^.+\/%s\.n([0-9]+)\.dat'%fn_base,x)[0]),prev))+1
		fn_key = '.n%d'%index
		fn = fn_base+fn_key+'.dat'
		#---safety check for file errors to prevent overwriting however this should be handled by indices
		if os.path.isfile(work.postdir+fn): raise Exception('[ERROR] %s exists'%(work.postdir+fn))
		
		#---check for specs file with the exact same specifications
		exists = True if index != -1 and work.select_postdata(fn_base,calc) != None else False
		if not exists:
			#---NOTE(review): unconditional breakpoint — fires before EVERY new
			#---...computation; remove for batch runs
			import ipdb;ipdb.set_trace()
			status("%s %s"%(function.__name__,str(outgoing)),tag='compute')
			outgoing['workspace'] = work
			outgoing['calc'] = calc
			if 'upstream' in outgoing:
				sn = outgoing['sn']
				outgoing['upstream'] = dict([(k,
					load(data[sn][k],work.postdir)) for k in outgoing['upstream']])
			result,attrs = function(**outgoing)
			"""
			spec files are carefully constructed
			they prevent redundant calculations
			they allow us to loop over many parameters while saving files with a single index
			the calculation dictionary in the specs file contains meta-parameters for looping
			we are careful not to save meta parameters to the spec file
			we only save parameters which are relevant to the calculation itself
			the calculation dictionary in the spec file must therefore separate these parameters
			in a sub-dictionary called 'specs'
			we prefer attrs to be small and specific
			since attrs is also used to uniquely specify the data
			all big data should be stored as a result via numpy
			"""
			#---if any calculation specifications are not in attributes we warn the user here
			if 'specs' in calc: unaccounted = [i for i in calc['specs'] if i not in attrs]
			else: unaccounted = []
			if 'upstream' in unaccounted and 'upstream' not in attrs: 
				status('automatically appending upstream data',tag='status')
				unaccounted.remove('upstream')
				attrs['upstream'] = calc['specs']['upstream']
			if any(unaccounted):
				print computer_error_attrs_passthrough+'\n\n'
				status('some calculation specs were not saved: %s'%
					str(unaccounted),tag='STATUS')
				#---NOTE(review): breakpoint left in; halts unattended runs
				import pdb;pdb.set_trace()
			store(result,fn,work.postdir,attrs=attrs)
			with open(work.postdir+fn_base+fn_key+'.spec','w') as fp: fp.write(json.dumps(attrs)+'\n')
	#---no modifications to work so no save
	return
Пример #18
0
def _get_shares_from_store(limit=settings.rss_limit):
    # Fetch up to `limit` shares from the backing store.
    # NOTE: the default is bound to settings.rss_limit at import time,
    # so later changes to the setting do not affect the default value.
    return store.load(limit)
Пример #19
0
    index = dict(index)

    print l, len(index)

    json.dump(index, file(os.path.join('test', 'filelist.json'), 'w'))

    root_index = store.dump(index,
                            os.path.join('test', 'index'),
                            index_depth=1)

    ##	##!!! this is not used in anything yet...
    ##	json.dump(root_index, file(os.path.join('test', 'index', 'file_index.json'), 'w'))

    store.pack(os.path.join('test', 'index'), keep_files=False)

    d = store.load(os.path.join('test', 'index'))

    print len(d)

    k = d.keys()[0]

    i = store.Index(os.path.join('test', 'index'))

    print len(i)

    ##	print i[k]

    ic = store.IndexWithCache(os.path.join('test', 'index'))

    print ic[k]
Пример #20
0
def get_shares(n=0, since=None):
    # Return shares from the store; `n` limits the count (0 presumably
    # means "no limit" — confirm against store.load) and `since`
    # presumably filters by time — TODO confirm semantics in the store.
    return store.load(n, since)
Пример #21
0
 def loadMeshData(self, event=None):
     # Load mesh data from self.inputimage and hand it to the canvas mesh.
     # `event` is accepted (and ignored) so this can be bound directly
     # as a GUI event handler.
     self.canvasFrame.mesh.load(load(self.inputimage))
Пример #22
0
def get_network_saved_list():
    """Return the list of saved wifi networks ([] when none are stored)."""
    saved_networks = store.load('networks', [])
    return saved_networks
Пример #23
0
	dialog.set_transient_for(parent)
	if old != None:
		r = dialog.set_font_name(old)
		if Trace: print 'set font name:', r
	resp = dialog.run()
	if resp == gtk.RESPONSE_OK:
		new = dialog.get_font_name()
		if Trace: print 'selected font:', new
	else:
		new = old
	dialog.destroy()
	return new

# Application settings, persisted under the 'settings' store key.
# loadSettings falls back to defaultSettings when nothing was saved yet.
defaultSettings = { 'font': None }
_settingsStoreName = 'settings'
loadSettings = lambda: store.load(_settingsStoreName, defaultSettings)
saveSettings = lambda settings: store.save(_settingsStoreName, settings)

def translateBuilder (builder):
	for obj in builder.get_objects():
		# делаем так, потому что при попытке получения свойств по крейней мере у SeparatorMenuItem он исчезает (где-то ошибка в gtk или в pygtk)
		if type(obj) in (gtk.Window, gtk.MenuItem, gtk.Label, gtk.Button, gtk.CheckButton):
			if type(obj) is gtk.Window:
				label = obj.get_title().decode('utf-8')
			else:
				label = obj.get_property('label').decode('utf-8')
			# print label
			if label.startswith('#'):
				label = tr(label.encode('ascii'))
				if type(obj) is gtk.Window:
					obj.set_title(label.encode('utf-8'))
Пример #24
0
import commands
import store

# Load the persisted todo items once at startup.
items = store.load('todos.txt')

# Menu text shown to the user on every pass of the input loop below.
menu = [
 '1. What you have to do this week',
 '2. Add new task',
 '3. Task already in progress',
 '4. How much time do you have?',
 '5. How lazy were you?',
 '6. Complete a task',
 '7. What you have done',
 '8. Remove a task',
 '9. Show all the tasks',
 '10. Switch a task to "in progress" state',
 '11. Exit menu '
]

while True:
    for i in menu:
        print(i)
    try:
        print('----' * 10)
        choice = int(input("Choose a task: "))
        print('----' * 10)
    except ValueError:
        print('You have to enter the number of your choice')
    else:
        if choice == 1:
            commands.print_todos(items)