def runTask(nloop, nsec):
    """Run one DataInterface polling iteration, then sleep.

    Args:
        nloop: loop index supplied by the scheduler (not used in the body).
        nsec: seconds to sleep after the task finishes.
    """
    tools.log("DataInterfaceTask Start--",'warn')
    access_token = openapi.getToken()
    # request id is the current epoch second, as a string
    requestId = str(int(time.time()))
    task.task(access_token,requestId)
    tools.log("DataInterfaceTask End--",'warn')
    time.sleep(nsec)
def op(name):
    """Flask view: dispatch an operation posted from a project form.

    Falls back to save() when no 'op' field was posted.  When no
    '_push_id' accompanies the opcode, the opcode is flashed before the
    task call -- NOTE(review): the task is still invoked with
    push_id=None in that case; confirm this is the intended flow.
    """
    opcode = request.form.get('op')
    push_id = request.form.get('_push_id')
    if not opcode:
        return save(name)
    if not push_id:
        flash(opcode)
    task(op=opcode, project_id=name, push_id=push_id)
    return redirect('/projects/%s/'%name)
def save(self):
    """Serialize a sample list of tasks to JSON and print it.

    NOTE: the serialized string is currently only printed, not written
    to `path` -- the write calls are commented out below, preserved
    from the original experiments.
    """
    path = "data.json"
    with open(path, "w") as outfile:
        target = [task("a1"), task("b2"), task("c3")]
        # BUG FIX: the serializer hook must be passed as `default=`.
        # The original passed the lambda positionally, which binds it to
        # json.dumps' `skipkeys` parameter (a TypeError on Python 3, and
        # silently wrong on Python 2).
        s = json.dumps(target, default=lambda o: o.__dict__, indent=4)
        print(s)
        #outfile.write(s)
        #s = json.dumps(self.__dict__, default=lambda o: o.__dict__, indent=4)
        #outfile.write(s)
        #target = self.tasks[0]
        #print(target.__dict__)
        #s = json.dumps(target.__dict__, default=lambda o: o.__dict__, indent=4)
        #outfile.write(s)
    print("Successfully saved to " + path)
def add_tasks(self, tareas): """ Añade todos los procesos de un directorio y crea un proceso idle""" ## if not proc_dir.endswith(".tsk"): ## for dir, subdir, files in os.walk(proc_dir): ## if dir == proc_dir: ## tareas = [d for d in files if d.endswith(".tsk")] ## else: ## # Se ha pasado una única tarea ## tareas = [proc_dir] if not tareas: error = "'%s' no contiene ficheros con tareas." % tareas raise NoTaskOrIdleDir(error) for t in tareas: self.add_single_task(t) #tmp = task(os.path.join(proc_dir, t)) #print "Intentando crear tarea desde", os.path.join(proc_dir, t) #self.tasks.append(tmp); # Se inserta el proceso en la lista global. if not self.cpu.idle_task: print "Creando proceso IDLE de la CPU." self.cpu.idle_task = task() self.cpu.init_idle_task(self.cpu.idle_task) self.cpu.rq.idle = self.cpu.idle_task print "Tareas de '%s' añadidas." % tareas
def eval_fitness_A(net, print_=False):
    """Evaluate fitness of the genome that was used to generate `net`.

    Runs a rule-5 task for 20 steps; the model earns one point per step
    in which the arg-max of its output matches the task rule, then
    receives a reward signal [0,1,1,0] (penalty [0,1,0,1] otherwise).

    Arguments:
        net: the feed-forward neural network generated from genome.
        print_: when True, log per-step outputs and the o/x history.
    Returns:
        The fitness score -- higher means a better-fit organism.
        Maximum possible score is 20 (one point per step).
        NOTE(review): the original docstring said 16.0, which does not
        match the 20-step loop below -- confirm the intended horizon.
    """
    t = task.task(5, 2)
    history = ''
    mymodel = hpneat.SeparatedModulatoryModel(net, hpneat_config)
    mymodel.fitness = 0
    for step in range(20):
        t.step = step
        output = mymodel.activate([1, 0, 0, 0])
        if (print_):
            print('step', step, 'output:', output)
        if (t.rule == output.index(max(output))):
            mymodel.fitness += 1
            mymodel.activate([0, 1, 1, 0])   # reward signal
            history += 'o'
        else:
            mymodel.activate([0, 1, 0, 1])   # penalty signal
            history += 'x'
    if (print_):
        print(history)
    return mymodel.fitness
def addGeneratorTaskSeed(self, taskSeed):
    """Expand a recurring task seed into concrete future task instances.

    Starting at the seed's startDate and stepping by its interval,
    generates one task per subtask per occurrence up to `planAheadTime`
    days into the future, hard-capped at 200 occurrences.  Assignees
    rotate with the occurrence index; a lone subtask keeps its own name
    instead of the "seedName:subtaskName" composite.
    """
    global planAheadTime
    assigneeCount = len(taskSeed["assignees"])
    seedStartDate = dateutil.parser.parse(taskSeed["startDate"])
    seedInterval = datetime.timedelta(seconds=taskSeed["interval"])
    i = 0
    # i counts occurrences; stop once the occurrence date passes the
    # plan-ahead horizon.
    while seedStartDate + datetime.timedelta(seconds=(seedInterval.total_seconds() * i)) < datetime.datetime.now() + datetime.timedelta(days=planAheadTime):
        j = 0
        for subtask in taskSeed["subtasks"]:
            startDate = str(seedStartDate + datetime.timedelta(seconds=(seedInterval.total_seconds() * i)))
            taskname = taskSeed["taskName"] + ":" + subtask["taskName"]
            if len(taskSeed["subtasks"]) == 1:
                taskname = subtask["taskName"]
            assignee = "everyone"
            if assigneeCount > 0:
                # rotate assignees backwards as occurrences advance
                assignee = taskSeed["assignees"][(j + (assigneeCount - i - 1)) % assigneeCount]
            taskToBeBuilt = task(taskname, assignee, startDate, subtask["startMessage"])
            if (taskToBeBuilt.getSecondsToGo() > 0):
                # only keep tasks whose start time is still in the future
                self.tasks.append(taskToBeBuilt)
            j += 1
        i += 1
        if i > 200:
            # safety valve against tiny intervals / long horizons
            return
def addStaticTaskSeed(self, taskSeed):
    """Create one task per instance of a static task seed, keeping only
    those whose start time has not yet passed."""
    name = taskSeed["taskName"]
    message = taskSeed["startMessage"]
    for entry in taskSeed["instances"]:
        candidate = task(name, entry["assignee"], entry["startDate"], message)
        if candidate.getSecondsToGo() > 0:
            self.tasks.append(candidate)
def add_new_task(self):
    """Prompt for a monster name, create a slayer task for it, append it
    to the task list and make it the selected task."""
    print("Enter task monster:")
    monster = input()
    new_task = task(monster)
    self.selected_task = new_task
    self.tasks.append(new_task)
    print(monster + " slayer task added and selected.")
def toTask(self, item):
    """Parse one serialized task line of the form
    "<index>. <name>, <priority>, <YYYY-MM-DD>" and append the task."""
    fields = item.split('. ')[1].split(', ')
    parts = [int(piece) for piece in fields[2].split('-')]
    deadline = dt(parts[0], parts[1], parts[2])
    parsed = task(fields[0], int(fields[1]), deadline)
    self.tasks.append(parsed)
def addTask(self, type, functionName, scriptContent, ownerTask):
    """Append a new task to the fixed-size buffer.

    Returns a human-readable status string; when the buffer is full the
    task is dropped.
    """
    logging.debug("Adding task to the buffer")
    if self.countItems >= self.size:
        return "Buffer full, your task will not be executed"
    new_entry = task(type, functionName, scriptContent, ownerTask)
    self.buffer[self.countItems] = new_entry
    self.countItems += 1
    return "Task added to the buffer"
def test_more_zeros(self):
    """Trailing zeros must not change the result: expected values equal
    the number of '1' digits in each input string."""
    cases = [
        ("100", 1),
        ("1000", 1),
        ("1000000000000000000000", 1),
        ("1100", 2),
        ("110000000000000000", 2),
        ("111111111111111111111111100", 25),
        ("11111111111111111111111110000", 25),
        ("1111111111111111111111111000000000000000000000000000", 25),
    ]
    for value, expected in cases:
        self.assertEqual(task(value), expected)
def test_task():
    """Exercise task.task rule cycling for several constructor arities.

    Each case advances a task 10 steps and checks the concatenated rule
    sequence against the expected cycle.  (Deduplicates the original
    four copy-pasted blocks into one helper.)
    """
    def run_case(t, expected):
        # advance 10 steps, recording the rule value at each step
        observed = ""
        for _ in range(10):
            observed += str(t.rule)
            print(t.rule)
            print(t.is_bonus)
            t.step += 1
        print(observed)
        assert observed == expected

    print("")
    run_case(task.task(), "0101010101")
    run_case(task.task(3), "0001110001")
    run_case(task.task(2, 3), "0011220011")
    run_case(task.task(3, 3), "0001112220")
def autoScale(self, numberOfRequests, requestId):
    """Dispatch per-request work to a thread pool, scaled by account type.

    - paid accounts: run the task across all used resources;
    - free accounts: only runs when a single resource is in use
      (auto scaling is not available);
    - partially paid accounts: scale the remainder only once the first
      task reports at least 60% completion.
    """
    # Thread pool for executing the resources.  The with-block also
    # shuts the executor down (the original leaked it).
    with ThreadPoolExecutor(max_workers=self.resourcesUsed) as executorService:
        if self.customerUserStatus == "paid_account":
            paidTaskToDo = task.task(self.resourcesUsed)
            # BUG FIX: pass the callable and its argument to submit();
            # the original invoked the method eagerly and submitted its
            # return value, so nothing actually ran on the pool.
            executorService.submit(paidTaskToDo.taskPerRequest, self.customerUserStatus)
        elif self.customerUserStatus == "free_account":
            if self.resourcesUsed > 1:
                pass
            else:
                # auto scaling is not possible for the free accounts
                unpaidTask = task.task(self.resourcesUsed)
                executorService.submit(unpaidTask.taskPerRequest, self.customerUserStatus)
        elif self.customerUserStatus == "partially_paid_account":
            # auto scaling should be done only after 60% completion
            partialTaskToDO = task.task(1)
            lb = loadBalancer.loadBalancer().requestList
            if partialTaskToDO.taskCompleted(60):
                finishPartialTask = task.task(self.resourcesUsed - 1)
                executorService.submit(finishPartialTask.taskPerRequest, self.customerUserStatus)
def worker(self, sleep_for):
    """Celery task body: run the work either as a killable subprocess or
    inline.

    Args:
        sleep_for: seconds the downstream task should sleep.
    """
    # global p
    # Toggle: when True the work runs in a separate OS process whose
    # handle is registered with `ph` so a revoke handler can kill it.
    is_sub = True
    # is_sub = False
    if is_sub:
        # NOTE(review): interpreter and script paths are hard-coded to
        # one machine -- consider making them configurable.
        _cmd_str = f'/home/rijumone/Kitchen/python/.venv/bin/python /home/rijumone/Kitchen/python/celery/task.py -tid {self.request.id} -sf {sleep_for}'
        # logger.debug(_cmd_str)
        # check_output(_cmd_str.split(' '), )
        p = Popen(_cmd_str.split(' '), )
        ph.set_proc(process=p)
        # setattr(ProcessHolder, 'on_task_revoked', on_task_revoked)
        # signals.task_revoked.connect(ProcessHolder.on_task_revoked, dispatch_uid='on_task_revoked')
        logger.critical(p)
        p.wait()
    else:
        # inline fallback: call the task function directly
        task(
            task_id=self.request.id,
            sleep_for=sleep_for,
        )
def __init__(self, myParent):
    """Build the header and row frames and create the top-level task.

    Args:
        myParent: parent Tk container the frames are gridded into.
    """
    self.dColumnWidths = create_column_widths_dictionary()
    lHours = [200, 400, 600, 800]  # hour columns shown in the header
    headerFrame = tk.Frame(myParent, bg='grey')
    headerFrame.grid(row=0, column=0)
    self.rowFrame = tk.Frame(myParent, bg='tan')
    self.rowFrame.grid(row=1, column=0)
    create_header(headerFrame, self.dColumnWidths, lHours)
    # Root of the task tree; owns 100% of the effort by definition.
    self.oTopLevelTask = task.task()
    self.oTopLevelTask.update_hours(lHours)
    self.oTopLevelTask.percent = 100
    self.oTopLevelTask.name = 'TopLevelTask'
    self.paintRowFrame()
def addTask(self, *args):
    """Add a task either interactively (no args) or directly.

    With no arguments, prompts for details via __taskDetails__ and
    recurses with the collected (name, priority, due-date) triple.
    With exactly (str, int, datetime) arguments, creates and stores the
    task, then re-sorts and re-indexes the list.  Any other argument
    shape is silently ignored -- NOTE(review): consider raising instead.
    """
    if len(args) == 0:
        print("Uzduoties duomenys")   # Lithuanian UI text: "Task details"
        newTask = self.__taskDetails__()
        self.addTask(newTask[0], newTask[1], newTask[2])
    elif (len(args) == 3 and isinstance(args[0], str) and isinstance(args[1], int) and isinstance(args[2], dt)):
        print("Irasoma: ")   # Lithuanian UI text: "Saving:"
        print(" " + args[0] + ", " + str(args[1]) + ", " + str(args[2]))
        newTask = task(args[0], args[1], args[2])
        self.tasks.append(newTask)
        self.__sort__()
        self.__rewriteIndexes__()
def initTestTasks(self):
    """Populate self.taskList with numTasks identically-parameterized
    test tasks, each with a unique "<self.ID>-<counter>" id string.

    NOTE(review): the author's note below mentions a dependenceList
    parameter that this call does not pass -- confirm against
    task.__init__.
    """
    # def __init__(self,difficulty,initialValue,dependenceList,nullZone,nominalDays):
    for _ in range(self.numTasks):
        self.taskCounter += 1
        task_id = '%s-%s' % (self.ID, self.taskCounter)
        self.taskList.append(task(5.0, 0, .01, 90, task_id))
def on_message(self, mosq, obj, msg): topic = msg.topic payload = msg.payload print topic + ': ' + payload topic_list = topic.split('/') projID = topic_list[0] dev = topic_list[1] lines = payload.split('\r\n') while '' in lines: lines.remove('') orderList = lines[0].split(' ') if len(orderList) < 4: print 'Invalid command: ' + orderList return serNumber = orderList[1] cmd = orderList[2] argc = orderList[3] delay = None tempTask = task.task() tempTask.setProjID(projID) tempTask.setSerNumber(serNumber) tempTask.setDevice(dev) tempTask.setOperation(cmd) if cmd != 'filec2d' and int(argc) != (len(lines) - 1): tempTask.setValidFlag(False) if cmd == 'filec2d' and len(lines) != 4: tempTask.setValidFlag(False) lines[0] = int(argc) tempTask.setArgs(lines) if not (cmd in macro.cmd_list.keys()): print 'cmd "' + cmd + '" not exist' return delay = random.randint(macro.cmd_delay[macro.cmd_list[cmd]]-macro.deviation, macro.cmd_delay[macro.cmd_list[cmd]]+macro.deviation) tempTask.setDelay(delay) self.tlLock.acquire() self.tasklist.append(tempTask) self.tlLock.release()
def get_task_lists(self, folder):
    """Build tasklist objects from a folder tree (mirrors ./lists).

    Layout: <folder>/<list>/<list>.txt names the task subfolders in
    order; each task folder may contribute <task>.txt (text), an image
    (.jpg/.png) and a sound (.ac3/.aac/.mp3).
    """
    tasklists = []
    for tl in os.listdir(folder):
        newtasklist = tasklist.tasklist(tl, [])
        tasklists.append(newtasklist)
        with open(folder + tl + os.sep + tl + ".txt", "r") as orderfile:
            taskfolders = orderfile.read().splitlines()
        for t in taskfolders:
            p = folder + tl + os.sep + t
            if not os.path.isdir(p):
                continue
            # build task from folder
            newtask = task.task(t, 'text', 'image', 'sound')
            newtasklist.tasks.append(newtask)
            textfile = p + os.sep + t + ".txt"
            if os.path.isfile(textfile):
                with open(textfile, "r") as myfile:
                    newtask.text = myfile.read()
            for e in (".jpg", ".png"):
                imagefile = p + os.sep + t + e
                if os.path.isfile(imagefile):
                    newtask.image = imagefile
            # BUG FIX: the original list contained "mp3" without the
            # dot, so "<task>.mp3" files were never picked up.
            for e in (".ac3", ".aac", ".mp3"):
                soundfile = p + os.sep + t + e
                if os.path.isfile(soundfile):
                    newtask.sound = soundfile
    return tasklists
def readTaskLists(FileAddress):
    """Read task sets from a text database file.

    File layout:
        line 1: "<set-count> <tasks-per-set>"
        then per set: one task-ID line (skipped), followed by
        tasks-per-set lines of "<period> <utilization>".

    Periods are multiplied by 1000 and utilizations floored to three
    decimals so execution times come out as integers -- integers are
    much easier to work with than floats here.

    Returns:
        (TaskSetsHolder, TASKS_SET_NUMBERS, TASKS_NUMBER_IN_A_SET)
    """
    # with-statement fixes the original's leaked file handle
    with open(FileAddress, 'r') as tasksListDB:
        # configuration data, converted from string to integer
        TASKS_SET_NUMBERS, TASKS_NUMBER_IN_A_SET = tasksListDB.readline().split()
        TASKS_SET_NUMBERS = int(TASKS_SET_NUMBERS)
        TASKS_NUMBER_IN_A_SET = int(TASKS_NUMBER_IN_A_SET)
        # two-dimensional list that keeps all task lists
        TaskSetsHolder = []
        for taskSetID in range(0, TASKS_SET_NUMBERS):
            # the task ID line is not needed here; read it only to skip it
            taskID = tasksListDB.readline()
            # holds data about one task list
            taskListObj = []
            totalU = 0
            for i in range(0, TASKS_NUMBER_IN_A_SET):
                jobInfo = tasksListDB.readline().split()
                p = int(jobInfo[0]) * 1000
                u = floor(float(jobInfo[1]) * 1000) / 1000
                e = round(p * u)
                # test-mode variant (disabled for real task sets):
                #p = int(jobInfo[0])
                #u = float(jobInfo[1])
                #e = round(p * u)
                newCreatedTask = task(p, e)
                newCreatedTask.setID(i + 1)  # IDs start from one
                taskListObj.append(newCreatedTask)
            TaskSetsHolder.append(taskListObj)
    return TaskSetsHolder, TASKS_SET_NUMBERS, TASKS_NUMBER_IN_A_SET
def __init__(self, num_tasks):
    """Create num_tasks random tasks and register them in both the
    sorted and the insertion-order task lists.

    Each task gets a 1-based id, a memory requirement and an execution
    time, both sampled uniformly from [1, 10].
    """
    self.set_numTasks(num_tasks)
    for i in range(0, num_tasks):
        temp_id = i + 1  # assign an ID to the task
        # memory required, sampled from U(1, 10)
        mem = random.uniform(1, 10)
        # BUG FIX: the local was named `exec`, a keyword in Python 2
        # (SyntaxError) and a shadowed builtin in Python 3.
        exec_time = random.uniform(1, 10)  # execution time, U(1, 10)
        t = task(temp_id, mem, exec_time)
        self.task_list.append(t)          # sorted task list
        self.task_list_random.append(t)   # randomly ordered task list
    # sort in decreasing order of required execution time
    self.task_list.sort(key=lambda x: x.reqExecTime, reverse=True)
def toJson(self,idUserHistory):
    """Find a user history by id and serialize it, plus its tasks, to a dict.

    (Original Spanish docstring: 'Permite encontrar una historia de
    usuario por su id'.)

    Args:
        idUserHistory: integer id; must be >= CONST_MIN_ID.
    Returns:
        dict with the user-history fields and a 'tareas' list, or an
        empty dict when the id is invalid or nothing was found.
    """
    checkTypeId = type(idUserHistory) == int
    foundUserHistory = None
    jsonUserHistory = {}
    oTask = task.task()
    if checkTypeId:
        checkLenIdUserHistory = idUserHistory >= CONST_MIN_ID
        if checkLenIdUserHistory:
            foundUserHistory = clsUserHistory.query.filter_by(UH_idUserHistory = idUserHistory).all()
            if foundUserHistory:
                # dates are exported as POSIX timestamps
                jsonUserHistory = {
                    "id": foundUserHistory[0].UH_idUserHistory,
                    "code": foundUserHistory[0].UH_codeUserHistory,
                    "superHistory": foundUserHistory[0].UH_idSuperHistory,
                    "actionType": foundUserHistory[0].UH_accionType,
                    "idAction": foundUserHistory[0].UH_idAccion,
                    "escala": foundUserHistory[0].UH_scale,
                    "idSprint": foundUserHistory[0].UH_idSprint,
                    "resumen": foundUserHistory[0].UH_resume,
                    "iniciado": foundUserHistory[0].UH_iniciado,
                    "completado": foundUserHistory[0].UH_completed,
                    "fechaInicio": time.mktime(foundUserHistory[0].UH_fechaInicio.timetuple()),
                    "fechaFin": time.mktime(foundUserHistory[0].UH_fechaFin.timetuple())
                }
                # serialize every associated task through task.toJson
                tareas = oTask.taskAsociatedToUserHistory(idUserHistory)
                jsonUserHistory['tareas'] = [oTask.toJson(tarea.HW_idTask) for tarea in tareas]
    return jsonUserHistory
# NOTE(review): fragment -- the head of this dict literal (presumably
# `task_dict = {`) lies outside this chunk.
    "topology": {
        "cpu": 1,
        "disk": 1
    },
    "cpu_num": 2,
    "memory": 512000000,
    "disk_os": 30,
    "disk_data": 100,
    "install_method": 1,
    "vm_file": "/home/iso/centos6.7-for-xingyu.iso",
    "father_id": 1,
    "uri": "http://download.microsoft.com/download/B/8/9/B898E46E-CBAE-4045-A8E2-2D33DD36F3C4/vs2015.pro_chs.iso",
    "save_path": "/home/",
    "md5": "111",
}

# Make the parent directory importable so sibling packages resolve.
import os, sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)

from vm import vm
from host import vmmhost
import task

# Smoke test: build a vm and push one task through the task manager.
a = vm("cds")
tm = task.task_manager()
ctask = task.task(task_dict)
tm.task_action(ctask)
# NOTE(review): fragment (Python 2) -- the statements below are the tail
# of a scheduling method of a class (apparently JobsShop) whose `def`
# and `class` lines lie outside this chunk.
    max_work = 1
    while True:
        # repeatedly pick the job with the most remaining work and run
        # its next unfinished task until no work is left
        job, max_work = self.get_job_with_most_work()
        if max_work == 0:
            break
        task = self.get_task_not_done(job)
        self.update_current_time_machine(job, task)
        task.status = "done"
        job.last_done_task = task

    def update_current_time_machine(self, job, task):
        # Advance the clock of the task's machine; when the job's
        # previous task would still be running, add its time as a wait.
        if job.last_done_task == None or task.machine.current_time >= job.last_done_task.time:
            task.machine.current_time += task.time
        else:
            task.machine.current_time += (task.time + job.last_done_task.time)

    def print_time_machines(self):
        # Report each machine's number and accumulated time (pt-BR text).
        for machine in self.machines:
            print "Maquina: %s \/ Tempo Total: %s" %(machine.numero, machine.current_time)

# three-machine job-shop demo
if __name__ == '__main__':
    machine1 = Machine(1)
    machine2 = Machine(2)
    machine3 = Machine(3)
    job1 = Job([task(3,machine1), task(5,machine3)])
    job2 = Job([task(2,machine2), task(3,machine1), task(1,machine3)])
    job3 = Job([task(1,machine2), task(2,machine3)])
    jobs_shop = JobsShop([job1,job2,job3],[machine1,machine2,machine3])
    jobs_shop.run()
    jobs_shop.print_time_machines()
def inputTask(self, taskName, taskType, taskObjId=0):
    """Wrap the arguments in a task object and enqueue it on self.q."""
    new_task = task.task(taskName, taskType, taskObjId)
    self.q.put(new_task)
def createTask(self, taskname, engineer, startDate, effortInDays, dependentTasks):
    """Create a task and append it to the current project.

    Returns None (creating nothing) when no project is open.
    """
    if self.project is None:
        return None
    # BUG FIX: the original referenced undefined names `EffortinDays`
    # and `dependentTask` (typos for the parameters above), raising
    # NameError on every call that reached this line.
    T = task(taskname, engineer, startDate, effortInDays, dependentTasks)
    self.project.tasks.append(T)
from task import task # difficult, initial value, dependence list, nullzone, nominal days to complete testTask = task(5.0,0.0,[],0.0,30.0) cv = 0 days = 0 # def getColleagueModifier(self,IDlist,errorList,influenceList,scaleFactor): IDlist = [1,2,3] errorList = [.5 , .1 , .1] influenceList = [0,1,1] scaleFactor = .25 while (cv<1): days = days + 1 cv = testTask.doWork(5.0,.9,.25) cm = testTask.getColleagueModifier(IDlist,errorList,influenceList,scaleFactor) print 'colleague modifier is %1.6f' % cm print 'task took user ' + str(days) + ' days'
"""
"""
import numpy as np

from task import task

# SET GLOBAL PARAMETERS
global_parameters = {"power": 3, "size": (10,)}

SEEDS = (1, 12, 123, 1234)

# DO WORK: one task evaluation per seed
results = [task(global_parameters, {"seed": seed}) for seed in SEEDS]

# AGGREGATE INTERMEDIATE STATISTICS
print(sum(results))
def eval_fitness_B(net, print_=False):
    """Evaluate a genome's network on tasks with rules 4, 5 and 6 and
    return the mean fitness across the three runs.

    Each run drives an independent model for 20 steps; a step scores a
    point when the arg-max output matches the task rule, after which a
    reward signal [0,1,1,0] (else penalty [0,1,0,1]) is fed back.
    (Deduplicates the original's three copy-pasted per-task blocks.)
    """
    periods = (4, 5, 6)
    tasks = [task.task(p, 2) for p in periods]
    models = [hpneat.SeparatedModulatoryModel(net, hpneat_config) for _ in periods]
    histories = ["" for _ in periods]
    for m in models:
        m.fitness = 0
    for step in range(20):
        for k, (t, m) in enumerate(zip(tasks, models)):
            t.step = step
            output = m.activate([1, 0, 0, 0])
            if print_:
                print('step', step, 'output%d:' % periods[k], output)
            if t.rule == output.index(max(output)):
                m.fitness += 1
                m.activate([0, 1, 1, 0])   # reward signal
                # (the original carried a commented-out exact-timing
                # bonus here: +1 when t.is_bonus was True)
                histories[k] += 'o'
            else:
                m.activate([0, 1, 0, 1])   # penalty signal
                histories[k] += 'x'
    if print_:
        for h in histories:
            print(h)
    return sum(m.fitness for m in models) / 3
from task import task

# Entry point: run the task with an empty input string.
task("")
def add_single_task(self, task_path):
    """Create a task from `task_path`, register it, then fork it.

    update_state() runs first as preparation for do_fork.
    """
    new_task = task(task_path)
    self.tasks.append(new_task)
    new_task.update_state()  # preparation for do_fork
    self.do_fork(new_task)
# NOTE(review): fragment (Python 2: xrange, csv in 'rb' mode) -- the
# enclosing `if dataset == ...:` branch, its `for k in xrange(T):` loop
# and the `with open(...)` for the data file all start outside this
# chunk; indentation below is reconstructed.
            for row in reader:
                data_all[k].append(row[0].split(' '))
            data_all[k].pop(0)  # remove first one (header row)
            N = len(data_all[k])
            for i in xrange(N):
                del data_all[k][i][-1]  # drop the trailing empty field
                data_all[k][i] = [float(data_all[k][i][j]) for j in xrange(d)]
            with open(path_data + 'labelc' + str(k + 1), 'rb') as f:
                reader = csv.reader(f)
                for row in reader:
                    label_all[k].append(float(row[0][:-1]))
                label_all[k].pop(0)
            # one task object per data partition k
            tasks.append(task(path_results, dataset, data_all[k], label_all[k], conn, k,
                              Lambda, ITER, p_ite, step_task, d, p_train, wait_time[k]))
            tasks[-1].model_init()
elif dataset == 'real':
    for k in xrange(T):
        with open(path_data + 'data' + str(k + 1), 'rb') as f:
            reader = csv.reader(f)
            for row in reader:
                data_all[k].append(row[0].split(' '))
            data_all[k].pop(0)  # remove first one (header row)
            N = len(data_all[k])
            for i in xrange(N):
                del data_all[k][i][-1]
                data_all[k][i] = [float(data_all[k][i][j]) for j in xrange(d)]
def addTask(self, name):
    """Append a task under the next free index and bump the counter."""
    new_task = task(name, self.taskIndex)
    self.taskList.append(new_task)
    self.taskIndex += 1
from task import task
from tkinter import Tk, Canvas

if __name__ == '__main__':
    # hand a 1000x1000 white drawing surface to the task entry point
    root = Tk()
    canvas = Canvas(root, width=1000, height=1000, bg="white", cursor="pencil")
    task(canvas, root)
    canvas.pack()
    root.mainloop()
# NOTE(review): fragment -- the head of this dict literal (presumably
# `task_dict = {`) lies outside this chunk.
    "type": 1,
    "vm_name": "xingyu",
    "formula": "",
    "topology" : { "cpu" : 1, "disk" : 1},
    "cpu_num":2,
    "memory":512000000,
    "disk_os":30,
    "disk_data":100,
    "install_method": 1,
    "vm_file": "/home/iso/centos6.7-for-xingyu.iso",
    "father_id": 1,
    "uri":"http://download.microsoft.com/download/B/8/9/B898E46E-CBAE-4045-A8E2-2D33DD36F3C4/vs2015.pro_chs.iso",
    "save_path":"/home/",
    "md5": "111",
}

# Make the parent directory importable so sibling packages resolve.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)

from vm import vm
from host import vmmhost
import task

a = vm("cds")
#a.write_vm_file("/etc/sysconfig/network-scripts/ifcfg-bond0", "aaaaa")
# Smoke test: push one task through the task manager's set-ip action.
tm = task.task_manager()
ctask = task.task(task_dict)
tm.task_set_ip(ctask)
def DEBUG_steel_dragon(self):
    """Debug helper: inject and select a 'steel dragon' task, dump the
    object state, and persist it."""
    debug_task = task("steel dragon")
    self.tasks.append(debug_task)
    self.selected_task = debug_task
    print(str(self.__dict__))
    self.save()
def taskproc(job):
    """Wrap `job` in a task bound to the module-wide logger and run it."""
    shared_logger = atlogger.g_logger
    task.task(job, shared_logger).work()
def get_tasks():
    """Build the sorted list of open tasks from a Thunderbird/Lightning
    local calendar SQLite database (Python 2 code).

    Steps:
      1. read calendar id<->name mappings from the Thunderbird prefs file;
      2. parse CFG_FILE for the sort order ("[field]+"/"[field]-" lines
         and "[category]"/"[calendar]" value-priority blocks);
      3. assemble one SQL query whose CASE columns encode the configured
         ordering and run it against local.sqlite;
      4. drop tasks whose alarm is still in the future and repeating
         tasks (flag bit 0b10000).
    """
    # getting dictionaries: calendar id -> name, parsed from prefs lines
    # shaped like user_pref("calendar.registry.<id>.name", "<name>")
    cal_code={k.split('"')[1].split('.')[2]:k.split('"')[3] for k in codecs.open(FILE_SETTINGS,'r',encoding='utf8') if (k[:28]=='user_pref("calendar.registry' and k.split('"')[1].split('.')[3]=='name')}
    cal_name=dict()
    for k in cal_code:
        # inverse mapping: calendar name -> id
        cal_name[cal_code[k]]=k
    # parsing config file
    config_order=[]
    for line in open(CFG_FILE,'r'):
        line=line.rstrip()
        if line[0]=='[':
            if line[-1]=='+':
                config_order.append('+'+line[1:-2])
                continue
            if line[-1]=='-':
                config_order.append('-'+line[1:-2])
                continue
            else:
                # block header ("category"/"calendar"); values follow
                config_order.append([line[1:-1],])
        else:
            config_order[len(config_order)-1].append(line)
    # creating querry: one synthetic CASE column per block, ranking rows
    # by the position of their value inside the block
    # DEBUGGING
    # print cal_name
    inSelect=''
    for k in config_order:
        if type(k)==type([]):
            if k[0]=='category':
                tmp=''
                for x in range(1,len(k)):
                    tmp+="WHEN prop2.value='"+k[x]+"' THEN "+str(x)+'\n\t\t\t'
                inSelect+=""" CASE """+tmp+"""ELSE """+str(len(k))+""" END as category_value,"""
            if k[0]=='calendar':
                tmp=''
                for x in range(1,len(k)):
                    # config names are cp1251-encoded; keys of cal_name
                    # are unicode, hence the decode/encode round-trip
                    tmp+="WHEN cal_todos.cal_id='"+cal_name[k[x].decode('cp1251')].encode('cp1251')+"' THEN "+str(x)+'\n\t\t\t'
                inSelect+=""" CASE """+tmp+"""ELSE """+str(len(k))+""" END as calendar_value,"""
    # it can be now just category or calendar name
    # deleting , in the end
    inSelect=inSelect[:-1]
    #preparing order by clause
    tmp=''
    inOrder=''
    for k in config_order:
        if type(k)==type(''):
            tmp=tmp+k[1:]+(' ASC' if k[0]=='+' else ' DESC')+','
            # tmp=tmp+k[1:]+('' if k[0]=='+' else ' DESC')+','
        if type(k)==type([]):
            tmp=tmp+k[0]+'_value,'
    # deleting , in the end
    inOrder=tmp[:-1]
    #our querry
    SELECT="""select cal_todos.title, cal_todos.todo_due, CASE WHEN cal_todos.priority IS NULL THEN 5 ELSE cal_todos.priority END as priority, cal_todos.ical_status, prop1.value as percent, prop2.value as cat, cal_alarms.icalString, cal_todos.flags, cal_todos.todo_entry, cal_todos.cal_id, """+inSelect+""" from cal_todos left outer join cal_properties as prop1 on cal_todos.id=prop1.item_id and 
prop1.key='PERCENT-COMPLETE' left outer join cal_properties as prop2 on cal_todos.id=prop2.item_id and prop2.key='CATEGORIES' left outer join cal_alarms ON cal_alarms.item_id=cal_todos.id where (cal_todos.ical_status<>'COMPLETED' or cal_todos.ical_status is Null) group by cal_todos.title order by """+inOrder+';'
    # DEBUGGING
    # file=open('tmp1','w')
    # print (SELECT+'\n')
    # exit()
    #making request
    # NOTE(review): machine-specific absolute profile path -- consider
    # making it configurable.
    conn=sqlite3.connect(r'C:\Users\Ishayahu\AppData\Roaming\Thunderbird\Profiles\r8urnv5e.default\calendar-data\local.sqlite')
    c=conn.cursor()
    c.execute(SELECT)
    # row example:
    # ('Name of the task', 1333184400000000, 1, 'NEEDS-ACTION', 1, 'Мотя', '0a3c9b8b-f784-46c2-8c7a-ebb05e7c4987')
    # def __init__(self,title,until,priority,status,percent,cat,calendar):
    tasks=[]
    #now we make list with our tasks
    # print "*"*60
    for k in c:
        # tasks.append(task.task(k[0],k[1],k[2],k[3],k[4],k[5],k[6],k[7]))
        tasks.append(task.task(*k))
        # print str(k[:1]).decode('cp1251')
    # deleting futures tasks
    now=datetime.datetime.now()
    # as I understand flag 0b10000 - specify that this is repeated task.
    # Repeated tasks I don't need because they have their alarm
    # print_tasks(tasks)
    # print '*'*60
    # tasks_end=[]
    for t in tasks[:]:
        # print t.title.encode('cp1251'),t.flags,t.flags&16
        if t.alarm>now:
            # print '\tremoved by alarm'
            tasks.remove(t)
        if t.flags & 16 :
            # NOTE(review): when both conditions hold, t was already
            # removed above, so this second remove() raises ValueError
            # (or strips a duplicate title) -- this likely wants to be
            # `elif`.  Flagged only, left as-is.
            # print '\tremoved by flags'
            tasks.remove(t)
        # else:
        #     tasks_end.append(t)
    # print '*'*20
    # print_tasks(tasks)
    return tasks