def __init__(self):
    """Configure the 'Account View' task: one entry per account, each
    enriched with billing and shipping address records keyed by username.
    """
    TaskManager.__init__(self)
    # Columns written to the success report / the error report.
    self.ReportInfo = [
        'username', 'password', 'cookies', 'proxy', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    # Error codes that abort a task instead of being retried.
    self.FatalError = ['VerifyEmail', 'BadPassword']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 1
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.AuthProxy = True
    self.ProxyTimeout = 60
    self.SubMaxRetry = 2
    self.TaskName = 'Account View'
    self.TaskInfos = AccountTable
    for item in self.TaskInfos:
        # Direct key assignment instead of single-key update() calls;
        # the original's trailing `pass` was dead code and is removed.
        # NOTE(review): assumes FinanceFrame/AddressFrame are indexed by
        # username — raises KeyError for accounts with no matching row.
        item['cookies'] = ''
        item['billingaddress'] = FinanceFrame.loc[item['username']].to_dict()
        item['shippingaddress'] = AddressFrame.loc[item['username']].to_dict()
def test_redo() -> None:
    """Redo restores the task that the preceding undo removed."""
    mgr = TaskManager([])
    mgr.add(Task("sop"))
    mgr.undo()
    # Undo leaves the manager empty...
    assert not mgr.tasks()
    # ...and redo brings the task back.
    mgr.redo()
    assert mgr.tasks() == [Task("sop")]
def __init__(self):
    """Configure the automatic order-placement task: each order row is
    merged with its account record plus billing/shipping addresses.
    """
    TaskManager.__init__(self)
    self.ReportInfo = [
        'username', 'password', 'cookies', 'proxy', 'Timestamp',
        'ordernumber', 'asins'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    # Order-specific failures are fatal — never retried.
    self.FatalError = [
        'VerifyEmail', 'BadPassword', 'FixAddress', 'AddressNotMatch',
        'GiftCardUsed', 'BadGiftCard'
    ]
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 1
    self.MaxRetry = 0  # orders are deliberately never auto-retried
    self.ProxyEnable = True
    self.AuthProxy = True
    self.ProxyTimeout = 60
    self.SubMaxRetry = 2
    self.TaskName = 'PlaceOrder_auto'
    self.TaskInfos = OrderTaskTable
    for item in self.TaskInfos:
        # Direct assignments replace single-key update() calls; the dead
        # trailing `pass` from the original is removed.
        item['cookies'] = ''
        item.update(AccountFrame.loc[item['username']].to_dict())
        item['billingaddress'] = FinanceFrame.loc[item['username']].to_dict()
        item['shippingaddress'] = AddressFrame.loc[item['username']].to_dict()
def main():
    """Build the configured tasks and run them through a TaskManager."""
    args = parser()
    # Configure and get the tasks.
    task_list = taskReturner(args.task, config=args.config)()
    # Run the manager.
    runner = TaskManager(args.check_output)
    runner.tasks = task_list
    runner.runTasks()
def __init__(self):
    """Settings for the account-verification task."""
    TaskManager.__init__(self)
    # Report columns for success / error records.
    self.ReportInfo = ['username', 'password', 'cookies', 'errorcode']
    self.ReportErrorInfo = ['retrynumber']
    # Account states that permanently fail verification.
    self.FatalError = ['BadPassword', 'BadEmail']
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'VerifyAccount'
    self.ThreadNumber = 3
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.WalkaroundEn = True
    self.TaskInfos = parseinfo.AccountTable
def __init__(self,
             node,
             config_desc,
             keys_auth,
             client,
             use_ipv6=False,
             use_docker_machine_manager=True):
    """Wire up the task server: header keeper, task manager, task
    computer and the TCP network layer, then initialise the
    PendingConnectionsServer base with that network.

    :param node: descriptor of this node (address/port/key fields)
    :param config_desc: client configuration (node name, prices, timeouts)
    :param keys_auth: key/signature helper used for session auth
    :param client: owning client object (datadir, environments, trust)
    :param use_ipv6: forwarded to the TCP network layer
    :param use_docker_machine_manager: forwarded to the TaskComputer
    """
    self.client = client
    self.keys_auth = keys_auth
    self.config_desc = config_desc
    self.node = node
    # Keeps track of task headers announced on the network.
    self.task_keeper = TaskHeaderKeeper(client.environments_manager,
                                        min_price=config_desc.min_price)
    self.task_manager = TaskManager(
        config_desc.node_name,
        self.node,
        self.keys_auth,
        root_path=TaskServer.__get_task_manager_root(client.datadir),
        use_distributed_resources=config_desc.
        use_distributed_resource_management,
        tasks_dir=os.path.join(client.datadir, 'tasks'))
    self.task_computer = TaskComputer(
        config_desc.node_name,
        task_server=self,
        use_docker_machine_manager=use_docker_machine_manager)
    self.task_connections_helper = TaskConnectionsHelper()
    self.task_connections_helper.task_server = self
    # Active sessions keyed by subtask id; incoming sessions held weakly.
    self.task_sessions = {}
    self.task_sessions_incoming = WeakList()
    # Trust score bounds used when adjusting peer trust.
    self.max_trust = 1.0
    self.min_trust = 0.0
    self.last_messages = []
    self.last_message_time_threshold = config_desc.task_session_timeout
    # Results / failures queued for delivery to task owners.
    self.results_to_send = {}
    self.failures_to_send = {}
    self.use_ipv6 = use_ipv6
    self.forwarded_session_request_timeout = config_desc.waiting_for_task_session_timeout
    self.forwarded_session_requests = {}
    # Pending response callbacks keyed by connection id.
    self.response_list = {}
    self.deny_set = get_deny_set(datadir=client.datadir)
    network = TCPNetwork(
        ProtocolFactory(MidAndFilesProtocol, self,
                        SessionFactory(TaskSession)), use_ipv6)
    PendingConnectionsServer.__init__(self, config_desc, network)
def __init__(self):
    """Settings for the account-modification task."""
    TaskManager.__init__(self)
    # Values reported back for every processed account.
    self.ReportInfo = [
        'newemail', 'newpassword', 'newcookies', 'Timestamp', 'username'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    # Non-retryable account states.
    self.FatalError = ['BadPassword', 'BadEmail', 'VerifyEmail']
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'ModifyAcount'  # historic spelling kept: runtime identifier
    self.ThreadNumber = 1
    self.MaxRetry = 3
    self.ProxyEnable = False
    self.TaskInfos = parseinfo.AccountTable
def __init__(self):
    """Settings for the keyword/ASIN view task (high retry budget)."""
    TaskManager.__init__(self)
    self.ReportInfo = ['asin', 'keyword', 'retrynumber', 'Timestamp']
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['']
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'CreatAcount'  # name reproduced byte-for-byte
    self.ThreadNumber = 1
    self.MaxRetry = 2000
    self.ProxyEnable = False
    self.speed = 12
    self.TaskLogEn = False
    self.TaskRetryEn = False
    self.TaskInfos = parseinfo.ViewTaskTable
def openApplication(debugMode=False): """ Opens the main Qt application """ # retrieve a path to QML sources pwd = os.path.dirname(__file__) qmldir = os.path.join(pwd, "qml") qmlfile = os.path.join(qmldir, "main.qml") # enable material style os.environ["QT_QUICK_CONTROLS_STYLE"] = "Material" # catch warning & errors logger = Logger() QtCore.qInstallMessageHandler(logger.messageHandler) # new Qt application app = QGuiApplication(sys.argv) engine = QQmlApplicationEngine() # override the standard QML engine when debug mode is enabled if debugMode: print("DEBUG mode") engine = qmlinstantengine.QmlInstantEngine() engine.addFilesFromDirectory(qmldir, recursive=True) # add a custom module import path moduledir = os.path.join(qmldir, "modules") engine.addImportPath(moduledir) # expose custom properties to the QML side taskManager = TaskManager("main task manager") engine.rootContext().setContextProperty("_taskManager", taskManager) engine.rootContext().setContextProperty("_logger", logger) engine.rootContext().setContextProperty("_debug", debugMode) # load our main QML file & start the application engine.load(qmlfile) engine.quit.connect(app.quit) sys.exit(app.exec_())
def __init__(self):
    """Settings for the proxy-authenticated account-view task."""
    TaskManager.__init__(self)
    # Success / error report columns.
    self.ReportInfo = [
        'username', 'password', 'cookies', 'proxy', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    # Account states that abort processing.
    self.FatalError = ['VerifyEmail', 'BadPassword']
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'Account View'
    self.ThreadNumber = 1
    self.MaxRetry = 3
    self.SubMaxRetry = 2
    self.ProxyEnable = True
    self.AuthProxy = True
    self.ProxyTimeout = 60
    self.TaskInfos = AccountTable
def main():
    """Entry point: assemble the run configuration, build the tasks and
    hand them to a TaskManager.

    Reads the CHDIR environment variable for all paths; writes a merged
    config snapshot into the results directory before running.
    """
    args = parser()
    tasks = []
    taskDir = ""
    ##Read general config and update with given config
    if args.config or args.abs_config:
        confDir = "{}/ChargedAnalysis/Workflow/config/".format(
            os.environ["CHDIR"])
        # Base config; the task-specific config overrides its keys.
        config = yaml.load(open("{}/general.yaml".format(confDir), "r"),
                           Loader=yaml.Loader)
        if args.config:
            config.update(
                yaml.load(open("{}/{}.yaml".format(confDir, args.config),
                               "r"),
                          Loader=yaml.Loader))
        else:
            config.update(
                yaml.load(open(args.abs_config, "r"), Loader=yaml.Loader))
        # Results directory is keyed by the first component of config["dir"].
        taskDir = "{}/Results/{}/{}".format(os.environ["CHDIR"], args.task,
                                            config["dir"].split("/")[0])
        os.makedirs(taskDir, exist_ok=True)
        # Snapshot the merged config next to the results for reproducibility.
        with open("{}/config.yaml".format(taskDir), "w") as conf:
            yaml.dump(config, conf, default_flow_style=False, indent=4)
        config["dir"] = "{}/Results/{}/{}".format(os.environ["CHDIR"],
                                                  args.task, config["dir"])
        config["era"] = args.era
        ##Configure and get the tasks
        tasks = taskReturner(args.task, config)()
    elif args.run_again:
        # Re-run an existing flow; taskDir is that flow's parent directory.
        taskDir = "/".join(args.run_again.split("/")[:-1])
    ##Run the manager
    manager = TaskManager(tasks=tasks,
                          existingFlow=args.run_again,
                          dir=taskDir,
                          longCondor=args.long_condor,
                          globalMode=args.global_mode,
                          nCores=args.n_cores)
    manager.run(args.dry_run)
def __init__(self):
    """Account-creation task: generates AcountNumber placeholder entries."""
    TaskManager.__init__(self)
    self.ReportInfo = ['username', 'password', 'cookies', 'Timestamp']
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 4
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.speed = 20
    self.LoadImage = False
    self.TaskName = 'CreatAcount'  # historic spelling kept: runtime identifier
    self.AcountNumber = 12
    # One placeholder task per slot; the 'AounctID' key spelling is kept
    # byte-for-byte because downstream consumers read it (loop replaced
    # by an equivalent comprehension).
    self.TaskInfos = [{'AounctID': str(i)} for i in range(self.AcountNumber)]
def __init__(self):
    """Settings for the order-preparation task; each order row is merged
    with the matching account and address records."""
    TaskManager.__init__(self)
    self.ReportInfo = ['Timestamp', 'username', 'asins']
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    # Address/payment problems are fatal — no retry.
    self.FatalError = [
        'VerifyEmail', 'FixAddress', 'AddressNotMatch', 'GiftCardUsed',
        'BadGiftCard'
    ]
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'PrepareOrder'
    self.ThreadNumber = 5
    self.MaxRetry = 5
    self.ProxyEnable = True
    self.TaskInfos = parseinfo.OrderTaskTable
    for task in self.TaskInfos:
        user = task['username']
        task.update(parseinfo.AccountFrame.loc[user].to_dict())
        task.update(parseinfo.AddressFrame.loc[user].to_dict())
def __init__(self):
    """Shipping-address task over all accounts; candidate addresses are
    taken from parseinfo.AddressFrame."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'username', 'password', 'cookies', 'fullname', 'address',
        'postalcode', 'city', 'state', 'phonenumber'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['BadPassword', 'BadEmail', 'InsufficientAddress']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 5
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.TaskName = 'FillShippingAddress'
    self.TaskInfos = parseinfo.AccountTable
    # One candidate address dict per frame row (replaces the explicit
    # append loop, which also ended in a dead `pass`).
    self.AddressList = [
        row.to_dict() for _, row in parseinfo.AddressFrame.iterrows()
    ]
def __init__(self):
    """Credit-card task over all accounts; candidate cards are taken
    from parseinfo.FinanceFrame."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'username', 'password', 'cookies', 'nameoncard', 'ccnumber',
        'ccmonth', 'ccyear', 'checkaccount'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['BadPassword', 'BadEmail', 'InsufficientCard']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 5
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.TaskName = 'FillCreditCard'
    self.TaskInfos = parseinfo.AccountTable
    # One candidate card dict per frame row (replaces the explicit
    # append loop, which also ended in a dead `pass`).
    self.CreditCardList = [
        row.to_dict() for _, row in parseinfo.FinanceFrame.iterrows()
    ]
def __init__(self):
    """Settings for the search-test task built from ProductFrame rows."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'asin', 'department', 'buyboxprice', 'orderprice', 'keyword',
        'brand', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['']
    self.CodeLogEn(False)
    self.TaskSync = False
    self.TaskName = 'TestSearch'
    self.ThreadNumber = 5
    self.MaxRetry = 3
    self.ProxyEnable = False
    self.TaskInfos = []
    # NOTE: mutates the shared frame in place, exactly as before.
    parseinfo.ProductFrame.reset_index(inplace=True)
    for row_idx in range(parseinfo.ProductFrame.shape[0]):
        row = parseinfo.ProductFrame.loc[row_idx]
        row.dropna(inplace=True)
        self.TaskInfos.append(row.to_dict())
def __init__(self):
    """Submit-reviewer task built from ReviewerFrame rows."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'username', 'password', 'cookies', 'asin', 'reviewstar',
        'reviewertitle', 'reviewercontent', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['BadPassword', 'BadEmail']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 5
    self.MaxRetry = 5
    self.LoadImage = False
    self.ProxyEnable = True
    self.ProxyTimeout = 60
    self.TaskName = 'SubmitReviewer'
    self.submitedUser = []
    # One task per reviewer row (replaces the append loop with a dead
    # trailing `pass`).
    self.TaskInfos = [row.to_dict() for _, row in ReviewerFrame.iterrows()]
def __init__(self):
    """Submit-reviewer task: each reviewer row is merged over its account
    record (credentials from AccountFrame, reviewer fields override)."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'username', 'password', 'customername', 'cookies', 'proxy',
        'asin', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['BadPassword', 'BadEmail']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 1
    self.MaxRetry = 2
    self.LoadImage = False
    self.ProxyEnable = True
    self.AuthProxy = True
    self.ProxyTimeout = 60
    self.TaskName = 'SubmitReviewer'
    self.submitedUser = []
    self.TaskInfos = []
    # Dead trailing `pass` removed.
    for _, review in ReviewerFrame.iterrows():
        info = AccountFrame.loc[review['username']].to_dict()
        info.update(review.to_dict())
        self.TaskInfos.append(info)
def __init__(self):
    """View-task configuration: fan each product out into `sections`
    sub-tasks, each assigned a randomly chosen keyword of the product."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'Timestamp', 'department', 'asin', 'keyword', 'lowprice',
        'highprice', 'buyboxprice', 'country', 'taskid'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['OverDay']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 2
    self.MaxRetry = 3
    self.AuthProxy = False
    self.ProxyEnable = True
    self.ProxyTimeout = 60
    self.day_date = datetime.now().day
    self.TaskName = 'ViewTask'
    self.TaskSummury = ProductTable
    # Keyword columns a product row may carry — hoisted: the original
    # rebuilt this constant list for every single sub-task.
    keyword_columns = ['Keywords%d' % i for i in range(1, 11)]
    self.TaskInfos = []
    for product in ProductTable:
        for section in range(int(product['sections'])):
            info = {'taskid': section}
            info.update(product)
            # Non-empty keywords present on this product row.
            candidates = [
                product[k] for k in keyword_columns if product.get(k)
            ]
            # Shuffle-then-take-first kept; like the original this raises
            # IndexError when a product has no keywords at all.
            random.shuffle(candidates)
            info['keyword'] = candidates[0]
            self.TaskInfos.append(info)
    random.shuffle(self.TaskInfos)
def __init__(self):
    """Submit-review task: order rows merged with account records, plus
    a pool of review texts taken from ReviewerFrame."""
    TaskManager.__init__(self)
    self.ReportInfo = [
        'reviewerid', 'asin', 'reviewstar', 'reviewertitle',
        'reviewercontent', 'reviewerusername', 'Timestamp'
    ]
    self.ReportErrorInfo = ['errorcode', 'retrynumber']
    self.FatalError = ['BadPassword', 'BadEmail', 'VerifyEmail']
    self.TaskSync = False
    self.CodeLogEn(False)
    self.ThreadNumber = 5
    self.MaxRetry = 3
    self.ProxyEnable = True
    self.TaskName = 'SubmitReview'
    self.LoadImage = False
    self.TaskLogEn = False
    self.TaskRetryEn = False
    self.TaskInfos = parseinfo.OrderTaskTable
    for item in self.TaskInfos:
        item.update(parseinfo.AccountFrame.loc[item['username']].to_dict())
    # One review dict per frame row (replaces the append loop with a dead
    # trailing `pass`).
    self.ReviewList = [
        row.to_dict() for _, row in parseinfo.ReviewerFrame.iterrows()
    ]
def __init__(self):
    """Build the plugin: a TaskManager persisted to tasks.json plus the
    GTK add-task dialog and its calendar popup loaded from a Glade file.
    """
    api.API.__init__(self)
    # Tasks are persisted in the user's data directory.
    self.task_manager = TaskManager(
        os.path.join(self.context.get_user_path(), 'tasks.json')
    )
    builder = gtk.Builder()
    builder.add_from_file(
        os.path.join(self.context.working_directory, 'add-dialog.glade')
    )
    # Main add-task dialog; closing hides it rather than destroying it.
    self.dialog = builder.get_object('dialog')
    self.dialog.connect('delete_event', self.dialog.hide)
    # Input widgets for the new task's fields.
    self.title = builder.get_object('title')
    self.description = builder.get_object('description').get_buffer()
    self.priority = builder.get_object('priority')
    self.deadline = builder.get_object('deadline')
    self.tags = builder.get_object('tags')
    # Calendar popup used to pick the deadline; also hide-on-close.
    self.calendar = builder.get_object('calendar')
    self.calendar_win = builder.get_object('calendar_win')
    self.calendar_win.connect('delete_event',
                              lambda *args: self.calendar_win.hide())
    builder.get_object('calendar_btn').connect('clicked',
                                               self.show_calendar)
import db
import requests
import json
from datetime import datetime
from taskmanager import TaskManager

taskmanager = TaskManager()


def register_new_appeal_json(json):
    """Register an appeal described by a JSON-like dict with keys
    "subject_id" and "text".

    NOTE(review): the parameter shadows the `json` module; kept for
    interface compatibility with existing callers.
    """
    subject_id = json["subject_id"]
    text_of_appeal = json["text"]
    start_date = datetime.now()
    _register_new_appeal(subject_id=subject_id,
                         text=text_of_appeal,
                         date=start_date)


def get_appeal_information_json(appeal_id):
    """Return a dict describing the appeal with the given id.

    Fix: the original built the dict and silently discarded it —
    the function always returned None.
    """
    appeal = db.get_appeal_by_id(appeal_id)
    d = {"appeal_id": appeal[0]}
    return d


def _register_new_appeal(subject_id, text, date):
    """Persist a new appeal and kick off its initial analysis; returns
    the persisted task (possibly None)."""
    new_task = db.add_new_appeal(subject_id, text, date)
    taskmanager.start_initial_analyze(text, new_task)
    if new_task is not None:  # identity comparison per PEP 8 (was != None)
        print(new_task)
    return new_task
def main():
    """CLI entry point: parse the sub-command and dispatch to TaskManager."""
    # top-level parser
    parser = argparse.ArgumentParser(
        epilog='Multi-word values'
        ' must be encased in quotes, like "that".')
    # Default action lets us detect "no sub-command given" and print help
    # instead of crashing with AttributeError (argparse sub-parsers are
    # optional by default on Python 3).
    parser.set_defaults(action=None)
    subparsers = parser.add_subparsers()
    # subparser for the "add" command
    create_parser = subparsers.add_parser('add', help='create new task')
    create_parser.set_defaults(action='add')
    create_parser.add_argument(
        '--name',
        type=valid_name,
        required=True,
        help='name of the task (required, max. 20 characters)',
        metavar='')
    create_parser.add_argument(
        '--deadline',
        type=valid_date,
        help='task\'s deadline in ISO format (yyyy-mm-dd)',
        metavar='')
    create_parser.add_argument('--description',
                               type=str,
                               help='description of the task',
                               metavar='')
    # subparser for the "update" command
    update_parser = subparsers.add_parser('update', help='update task')
    update_parser.set_defaults(action='update')
    update_parser.add_argument('TASK_HASH', help='Task identifier')
    update_parser.add_argument('--name',
                               type=str,
                               help='name of the task',
                               metavar='')
    update_parser.add_argument(
        '--deadline',
        type=valid_date,
        help='task\'s deadline in ISO format (yyyy-mm-dd)',
        metavar='')
    update_parser.add_argument('--description',
                               type=str,
                               help='description of the task',
                               metavar='')
    # subparser for the "delete" command
    delete_parser = subparsers.add_parser('delete', help='delete task')
    delete_parser.set_defaults(action='delete')
    delete_parser.add_argument('TASK_HASH', type=str, help='Task identifier')
    # subparser for the "list" command
    list_parser = subparsers.add_parser('list', help='list tasks')
    list_parser.set_defaults(action='list')
    list_group = list_parser.add_mutually_exclusive_group(required=True)
    list_group.add_argument('--all', action='store_true')
    list_group.add_argument('--today', action='store_true')

    args = parser.parse_args()
    if args.action is None:
        # Bare invocation: show usage rather than raising AttributeError.
        parser.print_help()
        return
    task_manager = TaskManager()
    if args.action == "add":
        task_manager.add_task(args.name, args.deadline, args.description)
    elif args.action == "update":
        task_manager.update_task(args.TASK_HASH, args.name, args.deadline,
                                 args.description)
    elif args.action == "delete":
        task_manager.delete_task(args.TASK_HASH)
    elif args.action == "list":
        task_manager.list_tasks('all' if args.all else 'today')
from taskmanager import TaskManager from pluginmanager import PluginManager if len(sys.argv) == 3: port = int(sys.argv[1]) plugin_path = sys.argv[2] else: raise Exception("Usage: %s path_to_plugin_dir"%sys.argv[0]) def debug( message ): print message tm = TaskManager() pm = PluginManager() pm.load_plugins(plugin_path) class PluginTCPHandler(SocketServer.StreamRequestHandler): def handle(self): processor_dict = {"start": self.command_start, "status": self.command_status} data = self.rfile.readline() debug(">"+data) try: command = json.loads(data) command_name = command.get("command", "") command_callable = processor_dict.get(command_name, self.command_not_found)
class TaskServer(PendingConnectionsServer): def __init__(self, node, config_desc, keys_auth, client, use_ipv6=False, use_docker_machine_manager=True): self.client = client self.keys_auth = keys_auth self.config_desc = config_desc self.node = node self.task_keeper = TaskHeaderKeeper(client.environments_manager, min_price=config_desc.min_price) self.task_manager = TaskManager( config_desc.node_name, self.node, self.keys_auth, root_path=TaskServer.__get_task_manager_root(client.datadir), use_distributed_resources=config_desc. use_distributed_resource_management, tasks_dir=os.path.join(client.datadir, 'tasks')) self.task_computer = TaskComputer( config_desc.node_name, task_server=self, use_docker_machine_manager=use_docker_machine_manager) self.task_connections_helper = TaskConnectionsHelper() self.task_connections_helper.task_server = self self.task_sessions = {} self.task_sessions_incoming = WeakList() self.max_trust = 1.0 self.min_trust = 0.0 self.last_messages = [] self.last_message_time_threshold = config_desc.task_session_timeout self.results_to_send = {} self.failures_to_send = {} self.use_ipv6 = use_ipv6 self.forwarded_session_request_timeout = config_desc.waiting_for_task_session_timeout self.forwarded_session_requests = {} self.response_list = {} self.deny_set = get_deny_set(datadir=client.datadir) network = TCPNetwork( ProtocolFactory(MidAndFilesProtocol, self, SessionFactory(TaskSession)), use_ipv6) PendingConnectionsServer.__init__(self, config_desc, network) def key_changed(self): """React to the fact that key id has been changed. 
Inform task manager about new key """ self.task_manager.key_id = self.keys_auth.get_key_id() def sync_network(self): self._sync_pending() self.__send_waiting_results() self.task_computer.run() self.task_connections_helper.sync() self._sync_forwarded_session_requests() self.__remove_old_tasks() # self.__remove_old_sessions() self._remove_old_listenings() if tmp_cycler.next() == 0: logger.debug('TASK SERVER TASKS DUMP: %r', self.task_manager.tasks) logger.debug('TASK SERVER TASKS STATES: %r', self.task_manager.tasks_states) def get_environment_by_id(self, env_id): return self.task_keeper.environments_manager.get_environment_by_id( env_id) # This method chooses random task from the network to compute on our machine def request_task(self): theader = self.task_keeper.get_task() if theader is None: return None try: env = self.get_environment_by_id(theader.environment) if env is not None: performance = env.get_performance(self.config_desc) else: performance = 0.0 if self.should_accept_requestor(theader.task_owner_key_id): self.task_manager.add_comp_task_request( theader, self.config_desc.min_price) args = { 'node_name': self.config_desc.node_name, 'key_id': theader.task_owner_key_id, 'task_id': theader.task_id, 'estimated_performance': performance, 'price': self.config_desc.min_price, 'max_resource_size': self.config_desc.max_resource_size, 'max_memory_size': self.config_desc.max_memory_size, 'num_cores': self.config_desc.num_cores } self._add_pending_request(TASK_CONN_TYPES['task_request'], theader.task_owner, theader.task_owner_port, theader.task_owner_key_id, args) return theader.task_id except Exception as err: logger.warning("Cannot send request for task: {}".format(err)) self.task_keeper.remove_task_header(theader.task_id) def request_resource(self, subtask_id, resource_header, address, port, key_id, task_owner): if subtask_id in self.task_sessions: session = self.task_sessions[subtask_id] session.request_resource(subtask_id, resource_header) else: logger.error( 
"Cannot map subtask_id {} to session".format(subtask_id)) return subtask_id def pull_resources(self, task_id, resources, client_options=None): self.client.pull_resources(task_id, resources, client_options=client_options) def send_results(self, subtask_id, task_id, result, computing_time, owner_address, owner_port, owner_key_id, owner, node_name): if 'data' not in result or 'result_type' not in result: raise AttributeError("Wrong result format") Trust.REQUESTED.increase(owner_key_id) if subtask_id not in self.results_to_send: value = self.task_manager.comp_task_keeper.get_value( task_id, computing_time) if self.client.transaction_system: self.client.transaction_system.add_to_waiting_payments( task_id, owner_key_id, value) delay_time = 0.0 last_sending_trial = 0 self.results_to_send[subtask_id] = WaitingTaskResult( task_id, subtask_id, result['data'], result['result_type'], computing_time, last_sending_trial, delay_time, owner_address, owner_port, owner_key_id, owner) else: raise RuntimeError("Incorrect subtask_id: {}".format(subtask_id)) return True def send_task_failed(self, subtask_id, task_id, err_msg, owner_address, owner_port, owner_key_id, owner, node_name): Trust.REQUESTED.decrease(owner_key_id) if subtask_id not in self.failures_to_send: self.failures_to_send[subtask_id] = WaitingTaskFailure( task_id, subtask_id, err_msg, owner_address, owner_port, owner_key_id, owner) def new_connection(self, session): self.task_sessions_incoming.append(session) def get_tasks_headers(self): ths = self.task_keeper.get_all_tasks( ) + self.task_manager.get_tasks_headers() return [th.to_dict() for th in ths] def add_task_header(self, th_dict_repr): try: if not self.verify_header_sig(th_dict_repr): raise Exception("Invalid signature") task_id = th_dict_repr["task_id"] key_id = th_dict_repr["task_owner_key_id"] task_ids = self.task_manager.tasks.keys() new_sig = True if task_id in self.task_keeper.task_headers: header = self.task_keeper.task_headers[task_id] new_sig = 
th_dict_repr["signature"] != header.signature if task_id not in task_ids and key_id != self.node.key and new_sig: self.task_keeper.add_task_header(th_dict_repr) return True except Exception as err: logger.warning("Wrong task header received {}".format(err)) return False def verify_header_sig(self, th_dict_repr): _bin = TaskHeader.dict_to_binary(th_dict_repr) _sig = th_dict_repr["signature"] _key = th_dict_repr["task_owner_key_id"] return self.verify_sig(_sig, _bin, _key) def remove_task_header(self, task_id): self.task_keeper.remove_task_header(task_id) def add_task_session(self, subtask_id, session): self.task_sessions[subtask_id] = session def remove_task_session(self, task_session): self.remove_pending_conn(task_session.conn_id) self.remove_responses(task_session.conn_id) for tsk in self.task_sessions.keys(): if self.task_sessions[tsk] == task_session: del self.task_sessions[tsk] def set_last_message(self, type_, t, msg, address, port): if len(self.last_messages) >= 5: self.last_messages = self.last_messages[-4:] self.last_messages.append([type_, t, address, port, msg]) def get_last_messages(self): return self.last_messages def get_waiting_task_result(self, subtask_id): return self.results_to_send.get(subtask_id, None) def get_node_name(self): return self.config_desc.node_name def get_key_id(self): return self.keys_auth.get_key_id() def encrypt(self, message, public_key): if public_key == 0: return message return self.keys_auth.encrypt(message, public_key) def decrypt(self, message): return self.keys_auth.decrypt(message) def sign(self, data): return self.keys_auth.sign(data) def verify_sig(self, sig, data, public_key): return self.keys_auth.verify(sig, data, public_key) def get_resource_addr(self): return self.client.node.prv_addr def get_resource_port(self): return self.client.resource_port def get_subtask_ttl(self, task_id): return self.task_manager.comp_task_keeper.get_subtask_ttl(task_id) def add_resource_peer(self, node_name, addr, port, key_id, 
node_info): self.client.add_resource_peer(node_name, addr, port, key_id, node_info) def task_result_sent(self, subtask_id): return self.results_to_send.pop(subtask_id, None) def retry_sending_task_result(self, subtask_id): wtr = self.results_to_send.get(subtask_id, None) if wtr: wtr.already_sending = False def change_config(self, config_desc, run_benchmarks=False): PendingConnectionsServer.change_config(self, config_desc) self.config_desc = config_desc self.last_message_time_threshold = config_desc.task_session_timeout self.task_manager.change_config( self.__get_task_manager_root(self.client.datadir), config_desc.use_distributed_resource_management) self.task_computer.change_config(config_desc, run_benchmarks=run_benchmarks) self.task_keeper.change_config(config_desc) def change_timeouts(self, task_id, full_task_timeout, subtask_timeout): self.task_manager.change_timeouts(task_id, full_task_timeout, subtask_timeout) def get_task_computer_root(self): return os.path.join(self.client.datadir, "ComputerRes") def subtask_rejected(self, subtask_id): logger.debug("Subtask {} result rejected".format(subtask_id)) self.task_result_sent(subtask_id) task_id = self.task_manager.comp_task_keeper.get_task_id_for_subtask( subtask_id) if task_id is not None: self.decrease_trust_payment(task_id) # self.remove_task_header(task_id) # TODO Inform transaction system and task manager about failed payment else: logger.warning("Not my subtask rejected {}".format(subtask_id)) def reward_for_subtask_paid(self, subtask_id): logger.info("Receive payment for subtask {}".format(subtask_id)) task_id = self.task_manager.comp_task_keeper.get_task_id_for_subtask( subtask_id) if task_id is None: logger.warning( "Received payment for unknown subtask {}".format(subtask_id)) return node_id = self.task_manager.comp_task_keeper.get_node_for_task_id( task_id) if node_id is None: logger.warning( "Unknown node try to make a payment for task {}".format( task_id)) return Trust.PAYMENT.increase(node_id, 
self.max_trust) def subtask_accepted(self, subtask_id, reward): logger.debug("Subtask {} result accepted".format(subtask_id)) self.task_result_sent(subtask_id) def subtask_failure(self, subtask_id, err): logger.info("Computation for task {} failed: {}.".format( subtask_id, err)) node_id = self.task_manager.get_node_id_for_subtask(subtask_id) Trust.COMPUTED.decrease(node_id) self.task_manager.task_computation_failure(subtask_id, err) def accept_result(self, subtask_id, account_info): mod = min( max(self.task_manager.get_trust_mod(subtask_id), self.min_trust), self.max_trust) Trust.COMPUTED.increase(account_info.key_id, mod) task_id = self.task_manager.get_task_id(subtask_id) value = self.task_manager.get_value(subtask_id) if not value: logger.info(u"Invaluable subtask: %r value: %r", subtask_id, value) return if not self.client.transaction_system: logger.info( u"Transaction system not ready. Ignoring payment for subtask: %r", subtask_id) return if not account_info.eth_account.address: logger.warning(u"Unknown payment address of %r (%r). 
        Subtask: %r", account_info.node_name, account_info.addr, subtask_id)
            return
        # Register the payment for the accepted result.
        payment = self.client.transaction_system.add_payment_info(
            task_id, subtask_id, value, account_info)
        logger.debug(u'Result accepted for subtask: %s Created payment: %r',
                     subtask_id, payment)

    def increase_trust_payment(self, task_id):
        """Raise payment trust for the node that requested *task_id*."""
        node_id = self.task_manager.comp_task_keeper.get_node_for_task_id(
            task_id)
        Trust.PAYMENT.increase(node_id, self.max_trust)

    def decrease_trust_payment(self, task_id):
        """Lower payment trust for the node that requested *task_id*."""
        node_id = self.task_manager.comp_task_keeper.get_node_for_task_id(
            task_id)
        Trust.PAYMENT.decrease(node_id, self.max_trust)

    def pay_for_task(self, task_id, payments):
        """Pay all providers of *task_id*; no-op without a transaction system.

        *payments* maps an ethereum account to a descriptor carrying the
        amount owed (``desc.value``).
        """
        if not self.client.transaction_system:
            return
        all_payments = {
            eth_account: desc.value
            for eth_account, desc in payments.items()
        }
        try:
            self.client.transaction_system.pay_for_task(task_id, all_payments)
        except Exception as err:
            # FIXME: Decide what to do when payment failed
            logger.error("Can't pay for task: {}".format(err))

    def reject_result(self, subtask_id, account_info):
        """Penalize the provider of a wrongly computed subtask.

        The penalty is the task's trust modifier clamped to
        [min_trust, max_trust].
        """
        mod = min(
            max(self.task_manager.get_trust_mod(subtask_id), self.min_trust),
            self.max_trust)
        Trust.WRONG_COMPUTED.decrease(account_info.key_id, mod)

    def unpack_delta(self, dest_dir, delta, task_id):
        """Delegate resource-delta unpacking to the client's resource server."""
        self.client.resource_server.unpack_delta(dest_dir, delta, task_id)

    def get_computing_trust(self, node_id):
        """Return the client's computing-trust score for *node_id*."""
        return self.client.get_computing_trust(node_id)

    def start_task_session(self, node_info, super_node_info, conn_id):
        """Queue a pending 'start_session' connection to *node_info*."""
        args = {
            'key_id': node_info.key,
            'node_info': node_info,
            'super_node_info': super_node_info,
            'ans_conn_id': conn_id
        }
        self._add_pending_request(TASK_CONN_TYPES['start_session'], node_info,
                                  node_info.prv_port, node_info.key, args)

    def respond_to(self, key_id, session, conn_id):
        """Drain and invoke every queued response callback for *conn_id*.

        If nothing is waiting for this connection the session is dropped.
        """
        self.remove_pending_conn(conn_id)
        responses = self.response_list.get(conn_id, None)
        if responses:
            while responses:
                res = responses.popleft()
                res(session)
        else:
            session.dropped()

    def respond_to_middleman(self, key_id, session, conn_id, dest_key_id):
        """Like respond_to(), but for a session relayed through a middleman."""
        if conn_id in self.response_list:
            self.respond_to(dest_key_id, session, conn_id)
        else:
            logger.warning("No response for {}".format(dest_key_id))
            session.dropped()

    def be_a_middleman(self, key_id, open_session, conn_id, asking_node,
                       dest_node, ask_conn_id):
        """Act as a relay between *asking_node* and *dest_node*."""
        key_id = asking_node.key
        response = lambda session: self.__asking_node_for_middleman_connection_established(
            session, conn_id, key_id, open_session, asking_node, dest_node,
            ask_conn_id)
        # NOTE(review): membership is tested on key_id but the deque is keyed
        # by conn_id — looks inconsistent; confirm intended key.
        if key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(key_id, self.node, conn_id)
        open_session.is_middleman = True

    def wait_for_nat_traverse(self, port, session):
        """Close *session* and start listening on *port* for a NAT traverse."""
        session.close_now()
        args = {
            'super_node': session.extra_data['super_node'],
            'asking_node': session.extra_data['asking_node'],
            'dest_node': session.extra_data['dest_node'],
            'ask_conn_id': session.extra_data['ans_conn_id']
        }
        self._add_pending_listening(TaskListenTypes.StartSession, port, args)

    def organize_nat_punch(self, addr, port, client_key_id, asking_node,
                           dest_node, ans_conn_id):
        """Tell the asking node about the NAT hole opened at addr:port."""
        self.client.inform_about_task_nat_hole(asking_node.key, client_key_id,
                                               addr, port, ans_conn_id)

    def traverse_nat(self, key_id, addr, port, conn_id, super_key_id):
        """Attempt a direct TCP connection through the punched NAT hole."""
        connect_info = TCPConnectInfo(
            [SocketAddress(addr, port)],
            self.__connection_for_traverse_nat_established,
            self.__connection_for_traverse_nat_failure)
        self.network.connect(connect_info,
                             client_key_id=key_id,
                             conn_id=conn_id,
                             super_key_id=super_key_id)

    def traverse_nat_failure(self, conn_id):
        """Run the stored failure callback for a failed NAT traversal."""
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.failure(conn_id, *pc.args)

    def get_socket_addresses(self, node_info, port, key_id):
        """Return candidate addresses for *node_info*, best guess first.

        Empty list means a reverse connection is suggested instead.
        """
        if self.client.get_suggested_conn_reverse(key_id):
            return []
        socket_addresses = PendingConnectionsServer.get_socket_addresses(
            self, node_info, port, key_id)
        addr = self.client.get_suggested_addr(key_id)
        if addr:
            # Try the client's suggested address before the generic ones.
            socket_addresses = [SocketAddress(addr, port)] + socket_addresses
        return socket_addresses

    def quit(self):
        """Shut down the task computer."""
        self.task_computer.quit()

    def receive_subtask_computation_time(self, subtask_id, computation_time):
        """Record how long the computation of *subtask_id* took."""
        self.task_manager.set_computation_time(subtask_id, computation_time)

    def remove_responses(self, conn_id):
        """Drop any queued response callbacks for *conn_id*."""
        self.response_list.pop(conn_id, None)

    def final_conn_failure(self, conn_id):
        """Clean responses, then run the base-class final-failure handling."""
        self.remove_responses(conn_id)
        super(TaskServer, self).final_conn_failure(conn_id)

    # TODO: extend to multiple sessions
    def add_forwarded_session_request(self, key_id, conn_id):
        """Remember that *key_id* should get a forwarded session for conn_id."""
        if self.task_computer.waiting_for_task:
            self.task_computer.wait(ttl=self.forwarded_session_request_timeout)
        self.forwarded_session_requests[key_id] = dict(conn_id=conn_id,
                                                       time=time.time())

    def remove_forwarded_session_request(self, key_id):
        """Forget the forwarded-session request for *key_id*; return it."""
        return self.forwarded_session_requests.pop(key_id, None)

    def should_accept_provider(self, node_id):
        """True if *node_id* is not denied and its computing trust suffices."""
        if node_id in self.deny_set:
            return False
        trust = self.get_computing_trust(node_id)
        logger.debug("Computing trust level: {}".format(trust))
        return trust >= self.config_desc.computing_trust

    def should_accept_requestor(self, node_id):
        """True if *node_id* is not denied and its requesting trust suffices."""
        if node_id in self.deny_set:
            return False
        trust = self.client.get_requesting_trust(node_id)
        logger.debug("Requesting trust level: {}".format(trust))
        return trust >= self.config_desc.requesting_trust

    def _sync_forwarded_session_requests(self):
        """Expire forwarded-session requests older than the timeout."""
        now = time.time()
        for key_id, data in self.forwarded_session_requests.items():
            if data:
                if now - data['time'] >= self.forwarded_session_request_timeout:
                    logger.debug('connection timeout: %s', data)
                    self.final_conn_failure(data['conn_id'])
                    self.remove_forwarded_session_request(key_id)
            else:
                # Empty entry: just drop it.
                self.forwarded_session_requests.pop(key_id)

    def _get_factory(self):
        """Build the protocol factory bound to this server."""
        return self.factory(self)

    def _listening_established(self, port, **kwargs):
        """Record the port we ended up listening on and publish it."""
        logger.debug('_listening_established(%r)', port)
        self.cur_port = port
        logger.info(" Port {} opened - listening".format(self.cur_port))
        self.node.prv_port = self.cur_port
        self.task_manager.listen_address = self.node.prv_addr
        self.task_manager.listen_port = self.cur_port
        self.task_manager.node = self.node

    def _listening_failure(self, **kwargs):
        """Log that no port in the configured range could be opened."""
        logger.error("Listening on ports {} to {} failure".format(
            self.config_desc.start_port, self.config_desc.end_port))
        # FIXME: some graceful terminations should take place here
        # sys.exit(0)

    def _listening_for_start_session_established(self, port, listen_id,
                                                 super_node, asking_node,
                                                 dest_node, ask_conn_id):
        """Mark an open start-session listening as active on *port*."""
        logger.debug("_listening_for_start_session_established()")
        logger.debug("Listening on port {}".format(port))
        listening = self.open_listenings.get(listen_id)
        if listening:
            # NOTE(review): writes to self.listening, not the local
            # `listening` just fetched — looks like a bug; confirm.
            self.listening.time = time.time()
            self.listening.listening_port = port
        else:
            logger.warning(
                "Listening {} not in open listenings list".format(listen_id))

    def _listening_for_start_session_failure(self, listen_id, super_node,
                                             asking_node, dest_node,
                                             ask_conn_id):
        """Fall back to NAT-punch failure handling when listening fails."""
        if listen_id in self.open_listenings:
            # NOTE(review): deletes the literal key 'listen_id', not the
            # variable listen_id — almost certainly a bug; confirm.
            del self.open_listenings['listen_id']
        self.__connection_for_nat_punch_failure(listen_id, super_node,
                                                asking_node, dest_node,
                                                ask_conn_id)

    #############################
    #   CONNECTION REACTIONS    #
    #############################

    def __connection_for_task_request_established(self, session, conn_id,
                                                  node_name, key_id, task_id,
                                                  estimated_performance, price,
                                                  max_resource_size,
                                                  max_memory_size, num_cores):
        """Send hello + task request once the session to the owner is up."""
        self.remove_forwarded_session_request(key_id)
        session.task_id = task_id
        session.key_id = key_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        self.task_sessions[task_id] = session
        session.send_hello()
        session.request_task(node_name, task_id, estimated_performance, price,
                             max_resource_size, max_memory_size, num_cores)

    def __connection_for_task_request_failure(self, conn_id, node_name, key_id,
                                              task_id, estimated_performance,
                                              price, max_resource_size,
                                              max_memory_size, num_cores,
                                              *args):
        """Queue a retry via a reverse connection after a direct one failed."""
        response = lambda session: self.__connection_for_task_request_established(
            session, conn_id, node_name, key_id, task_id,
            estimated_performance, price, max_resource_size, max_memory_size,
            num_cores)
        # NOTE(review): membership tested on key_id, deque keyed by conn_id
        # — same mismatch pattern as be_a_middleman; confirm intent.
        if key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(key_id, self.node, conn_id)
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.status = PenConnStatus.WaitingAlt
            pc.time = time.time()

    def __connection_for_task_result_established(self, session, conn_id,
                                                 waiting_task_result):
        """Report a computed task result over the freshly opened session."""
        self.remove_forwarded_session_request(waiting_task_result.owner_key_id)
        session.key_id = waiting_task_result.owner_key_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        self.task_sessions[waiting_task_result.subtask_id] = session
        session.send_hello()
        payment_addr = (self.client.transaction_system.get_payment_address()
                        if self.client.transaction_system else None)
        session.send_report_computed_task(waiting_task_result,
                                          self.node.prv_addr, self.cur_port,
                                          payment_addr, self.node)

    def __connection_for_task_result_failure(self, conn_id,
                                             waiting_task_result):
        """Queue a retry for delivering a task result."""
        def response(session):
            self.__connection_for_task_result_established(
                session, conn_id, waiting_task_result)

        if waiting_task_result.owner_key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(
            waiting_task_result.owner_key_id, self.node, conn_id)
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.status = PenConnStatus.WaitingAlt
            pc.time = time.time()

    def __connection_for_task_failure_established(self, session, conn_id,
                                                  key_id, subtask_id, err_msg):
        """Report a subtask failure over the freshly opened session."""
        self.remove_forwarded_session_request(key_id)
        session.key_id = key_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        self.task_sessions[subtask_id] = session
        session.send_hello()
        session.send_task_failure(subtask_id, err_msg)

    def __connection_for_task_failure_failure(self, conn_id, key_id,
                                              subtask_id, err_msg):
        """Queue a retry for delivering a subtask-failure message."""
        response = lambda session: self.__connection_for_task_failure_established(
            session, conn_id, key_id, subtask_id, err_msg)
        if key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(key_id, self.node, conn_id)
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.status = PenConnStatus.WaitingAlt
            pc.time = time.time()

    def __connection_for_resource_request_established(self, session, conn_id,
                                                      key_id, subtask_id,
                                                      resource_header):
        """Request task resources over the freshly opened session."""
        session.key_id = key_id
        session.task_id = subtask_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        self.task_sessions[subtask_id] = session
        session.send_hello()
        session.request_resource(subtask_id, resource_header)

    def __connection_for_resource_request_failure(self, conn_id, key_id,
                                                  subtask_id, resource_header):
        """Queue a retry for a resource request."""
        response = lambda session: self.__connection_for_resource_request_established(
            session, conn_id, key_id, subtask_id, resource_header)
        if key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(key_id, self.node, conn_id)
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.status = PenConnStatus.WaitingAlt
            pc.time = time.time()

    def __connection_for_result_rejected_established(self, session, conn_id,
                                                     key_id, subtask_id):
        """Inform the provider that its result was rejected."""
        self.remove_forwarded_session_request(key_id)
        session.key_id = key_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        session.send_hello()
        session.send_result_rejected(subtask_id)

    def __connection_for_result_rejected_failure(self, conn_id, key_id,
                                                 subtask_id):
        """Queue a retry for the result-rejected notification."""
        response = lambda session: self.__connection_for_result_rejected_established(
            session, conn_id, key_id, subtask_id)
        if key_id in self.response_list:
            self.response_list[conn_id].append(response)
        else:
            self.response_list[conn_id] = deque([response])
        self.client.want_to_start_task_session(key_id, self.node, conn_id)
        pc = self.pending_connections.get(conn_id)
        if pc:
            pc.status = PenConnStatus.WaitingAlt
            pc.time = time.time()

    def __connection_for_start_session_established(self, session, conn_id,
                                                   key_id, node_info,
                                                   super_node_info,
                                                   ans_conn_id):
        """Answer a start-session request over the new session."""
        self.remove_forwarded_session_request(key_id)
        session.key_id = key_id
        session.conn_id = conn_id
        self._mark_connected(conn_id, session.address, session.port)
        session.send_hello()
        session.send_start_session_response(ans_conn_id)

    def __connection_for_start_session_failure(self, conn_id, key_id,
                                               node_info, super_node_info,
                                               ans_conn_id):
        """Give up on the requested session (NAT traversal path disabled)."""
        logger.info(
            "Failed to start requested task session for node {}".format(
                key_id))
        self.final_conn_failure(conn_id)
        # self.__initiate_nat_traversal(key_id, node_info, super_node_info, ans_conn_id)

    def __initiate_nat_traversal(self, key_id, node_info, super_node_info,
                                 ans_conn_id):
        """Try NAT punch if our NAT type supports it, else ask a middleman."""
        if super_node_info is None:
            logger.info("Permanently can't connect to node {}".format(key_id))
            return
        if self.node.nat_type in TaskServer.supported_nat_types:
            args = {
                'super_node': super_node_info,
                'asking_node': node_info,
                'dest_node': self.node,
                'ans_conn_id': ans_conn_id
            }
            self._add_pending_request(TASK_CONN_TYPES['nat_punch'],
                                      super_node_info,
                                      super_node_info.prv_port,
                                      super_node_info.key, args)
        else:
            args = {
                'key_id': super_node_info.key,
                'asking_node_info': node_info,
                'self_node_info': self.node,
                'ans_conn_id': ans_conn_id
            }
            self._add_pending_request(TASK_CONN_TYPES['middleman'],
                                      super_node_info,
                                      super_node_info.prv_port,
                                      super_node_info.key, args)

    def __connection_for_nat_punch_established(self, session, conn_id,
                                               super_node, asking_node,
                                               dest_node, ans_conn_id):
        """Ask the super node to punch a NAT hole between the two peers."""
        session.key_id = super_node.key
        session.conn_id = conn_id
        session.extra_data = {
            'super_node': super_node,
            'asking_node': asking_node,
            'dest_node': dest_node,
            'ans_conn_id': ans_conn_id
        }
        session.send_hello()
        session.send_nat_punch(asking_node, dest_node, ans_conn_id)

    def __connection_for_nat_punch_failure(self, conn_id, super_node,
                                           asking_node, dest_node,
                                           ans_conn_id):
        """Fall back to a middleman connection when NAT punch fails."""
        self.final_conn_failure(conn_id)
        args = {
            'key_id': super_node.key,
            'asking_node_info': asking_node,
            'self_node_info': dest_node,
            'ans_conn_id': ans_conn_id
        }
        self._add_pending_request(TASK_CONN_TYPES['middleman'], super_node,
                                  super_node.prv_port, super_node.key, args)

    def __connection_for_traverse_nat_established(self, session, client_key_id,
                                                  conn_id, super_key_id):
        """Hand the traversed session to whoever queued responses for it."""
        self.respond_to(client_key_id, session, conn_id)  # FIXME

    def __connection_for_traverse_nat_failure(self, client_key_id, conn_id,
                                              super_key_id):
        """Notify the super node that NAT traversal failed."""
        logger.error("Connection for traverse nat failure")
        self.client.inform_about_nat_traverse_failure(super_key_id,
                                                      client_key_id, conn_id)

    def __connection_for_middleman_established(self, session, conn_id, key_id,
                                               asking_node_info,
                                               self_node_info, ans_conn_id):
        """Ask the super node to become a middleman for us."""
        session.key_id = key_id
        session.conn_id = conn_id
        session.send_hello()
        session.send_middleman(asking_node_info, self_node_info, ans_conn_id)

    def __connection_for_middleman_failure(self, conn_id, key_id,
                                           asking_node_info, self_node_info,
                                           ans_conn_id):
        """Last resort failed: the node is unreachable."""
        self.final_conn_failure(conn_id)
        logger.info("Permanently can't connect to node {}".format(key_id))
        return

    def __asking_node_for_middleman_connection_established(
            self, session, conn_id, key_id, open_session, asking_node,
            dest_node, ans_conn_id):
        """Join the middleman connection and pair the two sessions."""
        session.key_id = key_id
        session.conn_id = conn_id
        session.send_hello()
        session.send_join_middleman_conn(key_id, ans_conn_id, dest_node.key)
        session.open_session = open_session
        open_session.open_session = session

    def __connection_for_task_request_final_failure(
            self, conn_id, node_name, key_id, task_id, estimated_performance,
            price, max_resource_size, max_memory_size, num_cores, *args):
        """All attempts to reach the task owner failed: drop the task."""
        logger.warning("Cannot connect to task {} owner".format(task_id))
        logger.warning("Removing task {} from task list".format(task_id))
        self.task_computer.task_request_rejected(task_id, "Connection failed")
        self.task_keeper.request_failure(task_id)
        self.task_manager.comp_task_keeper.request_failure(task_id)
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)

    def __connection_for_resource_request_final_failure(
            self, conn_id, key_id, subtask_id, resource_header):
        """All attempts to fetch resources failed: drop the task header."""
        logger.warning("Cannot connect to task {} owner".format(subtask_id))
        logger.warning("Removing task {} from task list".format(subtask_id))
        self.task_computer.resource_request_rejected(subtask_id,
                                                     "Connection failed")
        self.remove_task_header(subtask_id)
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)

    def __connection_for_result_rejected_final_failure(self, conn_id, key_id,
                                                       subtask_id):
        """Give up on delivering the result-rejected notification."""
        logger.warning(
            "Cannot connect to deliver information about rejected result for task {}"
            .format(subtask_id))
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)

    def __connection_for_task_result_final_failure(self, conn_id, key_id,
                                                   waiting_task_result):
        """Give up for now; schedule the result to be re-sent after a delay."""
        logger.warning("Cannot connect to task {} owner".format(
            waiting_task_result.subtask_id))
        # NOTE(review): camelCase attributes here, but __send_waiting_results
        # reads snake_case (already_sending / last_sending_trial / delay_time)
        # — these writes look like they never take effect; confirm.
        waiting_task_result.lastSendingTrial = time.time()
        waiting_task_result.delayTime = self.config_desc.max_results_sending_delay
        waiting_task_result.alreadySending = False
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)

    def __connection_for_task_failure_final_failure(self, conn_id, key_id,
                                                    subtask_id, err_msg):
        """Give up on delivering the subtask-failure message."""
        logger.warning("Cannot connect to task {} owner".format(subtask_id))
        self.task_computer.session_timeout()
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)

    def __connection_for_start_session_final_failure(self, conn_id, key_id,
                                                     node_info,
                                                     super_node_info,
                                                     ans_conn_id):
        """Give up on the requested session; clean both connection ids."""
        logger.warning("Impossible to start session with {}".format(node_info))
        self.task_computer.session_timeout()
        self.remove_pending_conn(conn_id)
        self.remove_responses(conn_id)
        self.remove_pending_conn(ans_conn_id)
        self.remove_responses(ans_conn_id)

    def __connection_for_middleman_final_failure(self, *args, **kwargs):
        pass

    def __connection_for_nat_punch_final_failure(self, *args, **kwargs):
        pass

    def noop(self, *args, **kwargs):
        """Do-nothing handler used as a final-failure placeholder."""
        logger.debug('Noop(%r, %r)', args, kwargs)

    # SYNC METHODS
    #############################

    def __remove_old_tasks(self):
        """Drop timed-out tasks and penalize the nodes that timed out."""
        self.task_keeper.remove_old_tasks()
        nodes_with_timeouts = self.task_manager.check_timeouts()
        for node_id in nodes_with_timeouts:
            Trust.COMPUTED.decrease(node_id)

    def __remove_old_sessions(self):
        """Drop sessions idle longer than last_message_time_threshold."""
        cur_time = time.time()
        sessions_to_remove = []
        for subtask_id, session in self.task_sessions.iteritems():
            if cur_time - session.last_message_time > self.last_message_time_threshold:
                sessions_to_remove.append(subtask_id)
        for subtask_id in sessions_to_remove:
            if self.task_sessions[subtask_id].task_computer is not None:
                self.task_sessions[subtask_id].task_computer.session_timeout()
            self.task_sessions[subtask_id].dropped()

    def __send_waiting_results(self):
        """Flush queued results and failure reports to their task owners."""
        for subtask_id in self.results_to_send.keys():
            wtr = self.results_to_send[subtask_id]
            now = time.time()
            if not wtr.already_sending:
                if now - wtr.last_sending_trial > wtr.delay_time:
                    wtr.already_sending = True
                    wtr.last_sending_trial = now
                    session = self.task_sessions.get(subtask_id, None)
                    if session:
                        # Reuse the live session instead of reconnecting.
                        self.__connection_for_task_result_established(
                            session, session.conn_id, wtr)
                    else:
                        args = {'waiting_task_result': wtr}
                        self._add_pending_request(
                            TASK_CONN_TYPES['task_result'], wtr.owner,
                            wtr.owner_port, wtr.owner_key_id, args)

        for subtask_id in self.failures_to_send.keys():
            wtf = self.failures_to_send[subtask_id]
            session = self.task_sessions.get(subtask_id, None)
            if session:
                self.__connection_for_task_failure_established(
                    session, session.conn_id, wtf.owner_key_id, subtask_id,
                    wtf.err_msg)
            else:
                args = {
                    'key_id': wtf.owner_key_id,
                    'subtask_id': wtf.subtask_id,
                    'err_msg': wtf.err_msg
                }
                self._add_pending_request(TASK_CONN_TYPES['task_failure'],
                                          wtf.owner, wtf.owner_port,
                                          wtf.owner_key_id, args)
        self.failures_to_send.clear()

    # CONFIGURATION METHODS
    #############################

    @staticmethod
    def __get_task_manager_root(datadir):
        """Return the task manager's resource root under *datadir*."""
        return os.path.join(datadir, "res")

    def _set_conn_established(self):
        """Register per-type handlers run when a connection is established."""
        self.conn_established_for_type.update({
            TASK_CONN_TYPES['task_request']:
            self.__connection_for_task_request_established,
            #TASK_CONN_TYPES['resource_request']: self.__connection_for_resource_request_established,
            #TASK_CONN_TYPES['result_rejected']: self.__connection_for_result_rejected_established,
            TASK_CONN_TYPES['task_result']:
            self.__connection_for_task_result_established,
            TASK_CONN_TYPES['task_failure']:
            self.__connection_for_task_failure_established,
            TASK_CONN_TYPES['start_session']:
            self.__connection_for_start_session_established,
            TASK_CONN_TYPES['middleman']:
            self.__connection_for_middleman_established,
            TASK_CONN_TYPES['nat_punch']:
            self.__connection_for_nat_punch_established,
        })

    def _set_conn_failure(self):
        """Register per-type handlers run when one connection attempt fails."""
        self.conn_failure_for_type.update({
            TASK_CONN_TYPES['task_request']:
            self.__connection_for_task_request_failure,
            #TASK_CONN_TYPES['resource_request']: self.__connection_for_resource_request_failure,
            #TASK_CONN_TYPES['result_rejected']: self.__connection_for_result_rejected_failure,
            TASK_CONN_TYPES['task_result']:
            self.__connection_for_task_result_failure,
            TASK_CONN_TYPES['task_failure']:
            self.__connection_for_task_failure_failure,
            TASK_CONN_TYPES['start_session']:
            self.__connection_for_start_session_failure,
            TASK_CONN_TYPES['middleman']:
            self.__connection_for_middleman_failure,
            TASK_CONN_TYPES['nat_punch']:
            self.__connection_for_nat_punch_failure,
        })

    def _set_conn_final_failure(self):
        """Register per-type handlers run when all attempts are exhausted."""
        self.conn_final_failure_for_type.update({
            TASK_CONN_TYPES['task_request']:
            self.__connection_for_task_request_final_failure,
            #TASK_CONN_TYPES['resource_request']: self.__connection_for_resource_request_final_failure,
            #TASK_CONN_TYPES['result_rejected']: self.__connection_for_result_rejected_final_failure,
            TASK_CONN_TYPES['task_result']:
            self.__connection_for_task_result_final_failure,
            TASK_CONN_TYPES['task_failure']:
            self.__connection_for_task_failure_final_failure,
            TASK_CONN_TYPES['start_session']:
            self.__connection_for_start_session_final_failure,
            TASK_CONN_TYPES['middleman']: self.noop,
            TASK_CONN_TYPES['nat_punch']: self.noop,
        })

    def _set_listen_established(self):
        """Register the handler for a successful start-session listen."""
        self.listen_established_for_type.update({
            TaskListenTypes.StartSession:
            self._listening_for_start_session_established
        })

    def _set_listen_failure(self):
        """Register the handler for a failed start-session listen."""
        self.listen_failure_for_type.update({
            TaskListenTypes.StartSession:
            self._listening_for_start_session_failure
        })
class Tasks(api.API):
    """GTK front-end for the task database.

    Builds the add/edit dialog and the calendar popup from the Glade file
    and exposes add/edit/list/status operations through the api decorators.
    """

    def __init__(self):
        api.API.__init__(self)
        self.task_manager = TaskManager(
            os.path.join(self.context.get_user_path(), 'tasks.json')
        )
        builder = gtk.Builder()
        builder.add_from_file(
            os.path.join(self.context.working_directory, 'add-dialog.glade')
        )
        self.dialog = builder.get_object('dialog')
        # BUG FIX: 'delete_event' handlers receive (widget, event); the bound
        # `hide` took no arguments, and returning None let GTK destroy the
        # window so it could never be run() again. Hide and return True.
        self.dialog.connect('delete_event', self._hide_on_delete)
        self.title = builder.get_object('title')
        self.description = builder.get_object('description').get_buffer()
        self.priority = builder.get_object('priority')
        self.deadline = builder.get_object('deadline')
        self.tags = builder.get_object('tags')
        self.calendar = builder.get_object('calendar')
        self.calendar_win = builder.get_object('calendar_win')
        self.calendar_win.connect('delete_event', self._hide_on_delete)
        builder.get_object('calendar_btn').connect('clicked',
                                                   self.show_calendar)

    @staticmethod
    def _hide_on_delete(widget, *args):
        '''Hide the window on delete_event and stop it being destroyed.'''
        widget.hide()
        return True

    @api.expose
    @api.in_main_thread
    def add_task(self):
        '''Add a task to the database. The data is provided by the dialog.'''
        self.reset_dialog()
        if self.dialog.run() == 1:
            data = self.get_data()
            self.task_manager.add_task(
                data['title'], data['description'], data['tags'],
                data['priority'], data['deadline']
            )
        self.dialog.hide()

    @api.expose
    @api.in_main_thread
    def edit_task(self, id):
        '''Edit a task with the given id.'''
        task = self.task_manager.get_task(int(id))
        self.set_dialog_entries(task)
        if self.dialog.run() == 1:
            data = self.get_data()
            self.task_manager.edit_task(
                int(id), data['title'], data['description'], data['tags'],
                data['priority'], data['deadline']
            )
        self.dialog.hide()

    @api.expose
    def set_task_status(self, id, status):
        '''Set the tasks status.'''
        self.task_manager.set_task_status(int(id), int(status))

    @api.expose
    def list_tasks(self):
        '''Get all tasks which are not marked as done.'''
        tasks = self.task_manager.list_tasks()
        return [t.to_json() for t in sorted(
            filter(lambda t: t.status != Status.DONE, tasks),
            key=lambda t: t.deadline
        )]

    def get_data(self):
        '''Retrieve the data from the dialog'''
        return {
            'title': self.title.get_text(),
            'description': self.description.get_text(
                self.description.get_start_iter(),
                self.description.get_end_iter(),
                False
            ),
            'tags': self.tags.get_text(),
            'priority': self.priority.get_active(),
            # Parse the dd.mm.yyyy entry into a unix timestamp.
            'deadline': int(time.mktime(time.strptime(
                self.deadline.get_text(), '%d.%m.%Y'
            ))
            )
        }

    def set_dialog_entries(self, task):
        '''When editing a task, set the entries to edit them'''
        self.title.set_text(task.title)
        self.description.set_text(task.description)
        self.priority.set_active(task.priority)
        timestamp = task.deadline
        # (year, month, day); gtk.Calendar months are 0-based.
        today = time.localtime(timestamp)[:3]
        self.calendar.select_month(today[1] - 1, today[0])
        self.calendar.select_day(today[2])
        self.deadline.set_text('{0}.{1}.{2}'.format(today[2], today[1],
                                                    today[0]))

    def reset_dialog(self):
        '''When adding a task, clear the dialog'''
        self.title.set_text('')
        self.description.set_text('')
        self.priority.set_active(0)
        self.deadline.set_text('')
        today = time.localtime()
        self.calendar.select_month(today[1] - 1, today[0])
        self.calendar.select_day(today[2])
        self.tags.set_text('')

    def show_calendar(self, widget, *args):
        '''Show the calendarwindow'''
        if self.calendar_win.run() == 1:
            date = self.calendar.get_date()
            # get_date() months are 0-based; display as dd.mm.yyyy.
            self.deadline.set_text('{0}.{1}.{2}'.format(date[2], date[1] + 1,
                                                        date[0]))
        self.calendar_win.hide()
def test_remove_snooze() -> None:
    """A snoozed task loses its snooze date after remove_snooze."""
    snooze_day = date(5, 8, 1)
    manager = TaskManager([Task("j54", snooze=snooze_day)])
    manager.remove_snooze(Task("j54", snooze=snooze_day))
    assert manager.tasks() == [Task("j54")]
def test_set_importance() -> None:
    """set_importance flags the matching task as important."""
    manager = TaskManager([Task("fio")])
    manager.set_importance(Task("fio"), Importance.Important)
    expected = Task("fio", importance=Importance.Important)
    assert manager.tasks() == [expected]
def test_is_redoable_after_undo() -> None:
    """redo only becomes available once something has been undone."""
    manager = TaskManager([])
    manager.add(Task())
    # Nothing undone yet, so nothing to redo.
    assert not manager.is_redoable()
    manager.undo()
    # The undone add can now be redone.
    assert manager.is_redoable()
def main():
    """Interactive loop for the Herupa task helper.

    Loads the task list from file, lets the user enter (E), finish (F),
    list completed (L) tasks, or quit (Q), then shows completed tasks and
    closes the file.
    """
    # Creates taskmanager instance and opens file for output.
    tm = TaskManager()

    # Start program.
    # Imports task list from file & displays it.
    print("Hi! I'm Herupa, your task helper.")
    tm.displayTasks()
    print()

    # If we have no tasks, create a list.
    if not tm.hasTasks():
        tm.makeTaskList()
        tm.displayTasks()

    # While we have tasks, let the user enter a task, finish a task, or
    # view completed tasks.
    while tm.hasTasks():
        print()
        print("E [task] to enter new task.")
        print("F [task] to finish a task.")
        print("L to see a list of completed tasks.")
        print("Q to quit.")
        response = input()
        # Guard against empty input: response[0] would raise IndexError.
        if not response:
            print("Not a valid command.")
            continue
        # Normalize the command letter; the original compared "e" twice so
        # an uppercase "E" fell through to "Not a valid command" (bug fix).
        command = response[0].lower()
        if command == "f":
            cur_task = response[2:]
            tm.finishTask(cur_task)
            tm.displayTasks()
        elif command == "e":
            cur_task = response[2:]
            tm.newTask(cur_task)
            tm.displayTasks()
        elif command == "q":
            break
        elif command == "l":
            tm.displayCompletedTasks()
        else:
            print("Not a valid command.")
    print()
    tm.displayCompletedTasks()
    tm.closeFile()
class MainPresenter:
    """Mediates between the view and a TaskManager backed by a serializer.

    Every mutating operation is forwarded to the task manager, then the
    new state is persisted and pushed back to the view.
    """

    def __init__(self,
                 view: _View,
                 serializer_type: Type[_Serializer] = JsonSerializer) -> None:
        self._view = view
        self._serializer_type = serializer_type
        # Populated lazily by load_from_file().
        self._serializer: Optional[_Serializer] = None
        self._task_manager: Optional[TaskManager] = None

    def load_from_file(self, path: Path) -> None:
        """Read tasks from *path*, retitle the window, refresh the view."""
        self._serializer = self._serializer_type(path)
        self._task_manager = TaskManager(self._serializer.load())
        self._view.setWindowTitle(path.name)
        self.request_update()

    def request_update(self) -> None:
        """Show the current tasks, or hide the lists if nothing is loaded."""
        if self._task_manager is None:
            self._view.hide_lists()
            return
        self._view.update_tasks(self._task_manager.tasks())

    def _commit(self) -> None:
        """Persist the current state and refresh every view element."""
        assert self._task_manager is not None
        assert self._serializer is not None
        current = self._task_manager.tasks()
        self._serializer.save(current)
        self._view.update_tasks(current)
        self._view.set_undoable(self._task_manager.is_undoable())
        self._view.set_redoable(self._task_manager.is_redoable())

    def add_task(self, task: Task) -> None:
        """Create *task*."""
        assert self._task_manager is not None
        self._task_manager.add(task)
        self._commit()

    def complete_task(self, task: Task, completed: bool = True) -> None:
        """Mark *task* completed (or not, when *completed* is False)."""
        assert self._task_manager is not None
        self._task_manager.set_complete(task, completed)
        self._commit()

    def delete_task(self, task: Task) -> None:
        """Remove *task*."""
        assert self._task_manager is not None
        self._task_manager.delete(task)
        self._commit()

    def rename_task(self, task: Task, name: str) -> None:
        """Rename *task* to *name*."""
        assert self._task_manager is not None
        self._task_manager.rename(task, name)
        self._commit()

    def set_task_due(self, task: Task, due: Optional[date]) -> None:
        """Set or clear the due date of *task*."""
        assert self._task_manager is not None
        self._task_manager.schedule_task(task, due)
        self._commit()

    def set_task_snooze(self, task: Task, snooze: Optional[date]) -> None:
        """Set or clear the snooze date of *task*."""
        assert self._task_manager is not None
        self._task_manager.snooze(task, snooze)
        self._commit()

    def set_importance(self, task: Task, importance: Importance) -> None:
        """Change the importance of *task*."""
        assert self._task_manager is not None
        self._task_manager.set_importance(task, importance)
        self._commit()

    def undo(self) -> None:
        """Revert the most recent change."""
        assert self._task_manager is not None
        self._task_manager.undo()
        self._commit()

    def redo(self) -> None:
        """Re-apply the most recently undone change."""
        assert self._task_manager is not None
        self._task_manager.redo()
        self._commit()
def load_from_file(self, path: Path) -> None:
    """Rebuild the task manager from the file at *path* and refresh the view."""
    self._serializer = self._serializer_type(path)
    loaded_tasks = self._serializer.load()
    self._task_manager = TaskManager(loaded_tasks)
    # Reflect the newly opened file in the window title, then repaint.
    self._view.setWindowTitle(path.name)
    self.request_update()
def synchronizer(tables, clear):
    """Resynchronize ClickHouse consumers for *tables* from ArangoDB.

    Stops producer and consumers, recreates the Kafka topics, reloads the
    existing collection data, then restarts everything.

    :param tables: iterable of table/collection names to resync.
    :param clear: if True, flush the whole redis cache db instead of only
        the per-table consumer keys.
    :return: True on success, False on the first failure.
    """
    config, logging = get_basic_utilities().get_utils((CONFIG, LOGGER))
    redis_config = config['redis']
    redis_helper = get_singleton_redis_client(redis_config['host'],
                                              redis_config['port'],
                                              redis_config['db'])
    try:
        generate_config_file()
    except Exception as e:
        # FileNotFoundError is already an Exception subclass, so a single
        # broad catch replaces the redundant (FileNotFoundError, Exception).
        logging.error(f'unable to generate pm2 config file: {e}',
                      exc_info=True)
        return False
    pm2_config_path = get_config_path()
    producer_process = PM2('arango-producer', pm2_config_path)
    consumer_process = PM2('clickhouse-consumer', pm2_config_path)
    task_manager = TaskManager(redis_helper)

    # clear redis cache db if specified
    if clear:
        redis_helper.client.flushdb()
        logging.info('redis cache cleared')
    else:
        # delete consumer specific keys
        for table in tables:
            for key in redis_helper.client.keys(f'{table}*'):
                redis_helper.client.delete(key)

    # stop the producer process
    if not producer_process.stop():
        logging.error('unable to stop producer')
        return False

    # stop the consumer process for each table
    for table in tables:
        consumer_active = task_manager.ping(table)
        if consumer_active:
            result = task_manager.stop_task(table)
            if result == Status.INACTIVE.name:
                logging.info(f'stopped the consumer {table}')
            else:
                logging.error(f'unable to stop consumer {table}')
                return False
        else:
            logging.info(f'consumer {table} not active')

    # delete topics
    all_deleted = delete_topics(tables)
    if not all_deleted:
        logging.error('unable to delete all kafka topics')
        return False

    # create topic
    for table in tables:
        created = create_topic(table)
        if not created:
            # BUG FIX: message previously said "delete" for a create failure.
            logging.error(f'unable to create topic: {table}')
            return False

    # start producer process
    if not producer_process.start():
        logging.error('unable to start producer')
        return False

    # sync existing collection data, then start the consumer per table
    for table in tables:
        is_data_loaded = load_collection_data(collection=table,
                                              store_tick=True,
                                              batch_size=100000)
        if is_data_loaded:
            logging.info('existing data loaded to clickhouse')
        else:
            logging.error(f'failed to load {table} data')
            return False
        # start the consumer
        if task_manager.ping(table):
            result = task_manager.start_task(table)
            if result == Status.ACTIVE.name:
                logging.info(f'{table} consumer process started')
            else:
                logging.error('unable to start consumer, restarting using pm2')
                if consumer_process.restart():
                    logging.info('pm2 consumer restarted')
                else:
                    logging.error('unable to restart pm2 consumers')
                    return False
    return True