Beispiel #1
0
def main():
    """Parse the command-line flags and run either the GUI or the CLI handler."""
    parser = argparse.ArgumentParser()
    # All five options are plain string flags.
    for flag in ("-u", "-d", "-t", "-g", "-f"):
        parser.add_argument(flag)
    args = vars(parser.parse_args())

    user_id = args["u"]   # user id
    doc_id = args["d"]    # document id
    task_id = args["t"]   # task id
    display = args["g"]   # "yes" -> launch the GUI
    file = args["f"]      # data file name

    data = TaskManager.load_file(file)
    if display == "yes":
        # GUI mode: hand the loaded data to the Tk main loop.
        gui = GUI(data)
        gui.main.mainloop()
    else:
        # CLI mode: no GUI object (None), cmd flag set to True.
        TaskManager.task_handler(doc_id, user_id, task_id, data, None, True)
    return
Beispiel #2
0
    def scan(self):
        '''
        Scan the event queues and pair events with available vehicles.

        Idle-and-full vehicles are first sent to drop their load; then the
        head event of each non-empty queue is matched to an available
        vehicle.  Events with no available vehicle stay queued for the
        next scan.
        '''
        # reload work, reload when get device signal
        # for vehicle in self._vehicles.getIdleAndEmptyVehicles():
        #     TaskManager.createReloadTask(vehicle)

        for vehicle in self._vehicles.getIdleAndFullVehicles():
            TaskManager.createDropTask(vehicle)

        # normal work
        for queue in self._queues:
            # no event pending in this queue
            if len(queue) == 0:
                continue

            # check event nearby vehicle TODO

            # Peek at the head event without removing it yet.
            evt = queue[0]
            vehicle = self._vehicles.getAvailableVehicleByEvent(evt)

            # No available vehicle: leave the event queued.
            # (Fixed: identity test `is None` instead of `None == vehicle`.)
            if vehicle is None:
                continue

            queue.popleft()
            TaskManager.createNormalTask(evt, vehicle)
Beispiel #3
0
def new_task(args):
    """Create a task from parsed CLI arguments, persist it, and print it."""
    name = ' '.join(args.name)
    if not name:
        print(INVALID_NAME_MSG)
        return

    # Default to a floating (undated) task unless a date string was given.
    date = None
    if args.date:
        try:
            date = interpret_date(args.date)
        except ValueError as e:
            print(e)
            return

    estimated_hours = float(args.est_hours)

    manager = TaskManager(fileio.load())
    task_id = manager.new_task(name, estimated_hours, date)
    fileio.save(manager.task_dict)

    print_table(manager, task_id)
Beispiel #4
0
 def start_task_manager(self, task):
     """Read the GUI text fields, build a TaskManager, and run the chosen task."""
     # Text inputs are ordered: file name, user id, document id.
     file, user, doc = (entry.get() for entry in self.text_input[:3])
     TaskManager(file, doc, user).run(task)
Beispiel #5
0
    def run(self):
        """Poll the Glacier job until it completes, then queue the download.

        Sleeps SLEEP_TIME seconds between describe_job polls.  Once the job
        reports Completed, enqueues either an inventory-download or an
        archive-download task depending on self.job_output.
        """
        client = self.get_boto_client()

        # Initial status check before entering the polling loop.
        job_output = client.describe_job(vaultName=self.vault,
                                         jobId=self.job_id)
        completed = job_output["Completed"]

        while not completed:
            # Record when the next poll will happen (presumably shown in the
            # task UI via update_task — confirm against the caller).
            self.next_check = (datetime.now() +
                               timedelta(seconds=self.SLEEP_TIME)).time()
            self.update_task()

            time.sleep(self.SLEEP_TIME)

            job_output = client.describe_job(vaultName=self.vault,
                                             jobId=self.job_id)
            completed = job_output["Completed"]

        # Job finished: hand off to the appropriate download task.
        if self.job_output == JobOutput.INVENTORY:
            TaskManager.add_task(
                DownloadInventoryTask(self.region, self.vault, self.job))
        else:
            TaskManager.add_task(
                DownloadArchiveTask(self.region, self.vault, self.job,
                                    self.output_file))
 def __init__(self, task_loader, conf):
     """Wire up the worker and task managers, then load the initial tasks."""
     self.conf = conf
     # Lock presumably guards shared scheduler state — confirm with usage.
     self.lock = threading.Lock()
     self.running_tasks = {}
     self.workerManager = WorkerManager(conf)
     self.taskManager = TaskManager(task_loader)
     self.load_tasks()
Beispiel #7
0
    def run(self):
        """Kick off a Glacier inventory-retrieval job and queue a wait task."""
        glacier = self.get_boto_client()
        logging.info("Initiating job to retrieve inventory")
        job = glacier.initiate_job(
            vaultName=self.vault,
            jobParameters={"Type": "inventory-retrieval"},
        )
        logging.info(f"Initiated job to retrieve inventory. Job id = {job['jobId']}")

        wait_task = WaitForJobTask(self.region, self.vault, job, JobOutput.INVENTORY, "")
        TaskManager.add_task(wait_task)
Beispiel #8
0
 def btn_click_search(self):
     """Validate the selected task/document/user IDs and dispatch the task.

     Tasks 3a/3b/4 need no IDs; 5a needs a document, 5b a user, and all
     other 5x tasks need both a valid document and a user who read it.
     """
     tasks = {"2a": "Viewers by Country",
              "2b": "Viewers by Continent",
              "3a": "All Browsers Simple",
              "3b": "All Browsers Sorted",
              "4":  "Top 10 Readers",
              "5a": "Users Who Read Document",
              "5b": "Documents That User Read",
              "5c": "Also Likes",
              "5d": "Also Likes Sorted by Reader Profile",
              "5e": "Also Likes Sorted By Readership"}
     # Hoist the Tk variable reads: each .get() call re-queries the widget.
     task_id = self.tid.get()
     doc_id = self.did.get()
     user_id = self.uid.get()
     # check if input the task textbox is valid
     if task_id not in tasks:
         showerror("Input Error", "Please Enter a Valid Task")
         return
     # check if it requires a document id and user id inputs
     if task_id not in ["3a", "3b", "4"]:
         if task_id == "5a" and doc_id not in self.docs:
             showerror("Input Error", "Please Enter a Valid Document ID")
             return
         if task_id == "5b" and user_id not in self.users:
             showerror("Input Error", "Please Enter a Valid User ID")
             return
         if task_id not in ["5a", "5b"]:
             if doc_id not in self.docs:
                 showerror("Input Error", "Please Enter a Valid Document ID")
                 return
             if user_id not in TaskManager.get_all_users_by_doc(doc_id, self.data):
                 showerror("Input Error", "Please Enter a Valid User ID")
                 return
     self.test.set(tasks[task_id])
     TaskManager.task_handler(doc_id, user_id, task_id, self.data, self, False)
class PsNowPlugin(Plugin):
    """GOG Galaxy plugin exposing a PlayStation library via a backend client."""

    def __init__(self, reader, writer, token):
        super().__init__(
            Platform.PlayStation,  # choose platform from available list
            "0.1",  # version
            reader,
            writer,
            token)
        # Games cache is shared between the backend client and task manager.
        self.games_cache = GamesCache(self)
        self.backend_client = BackendClient(self.games_cache)
        self.task_manager = TaskManager(self, self.games_cache)

    # required
    async def authenticate(self, stored_credentials=None):
        """Return a bare Authentication — no real credential check is done."""
        return Authentication()

    # required
    async def get_owned_games(self):
        """Fetch the owned-games list from the backend client."""
        return await self.backend_client.get_games()

    def tick(self):
        # Forward the platform's periodic tick to the task manager.
        self.task_manager.tick()

    async def shutdown(self):
        """Close the backend client's connections on plugin shutdown."""
        await self.backend_client.close()
 def __init__(self, reader, writer, token):
     """Initialise the plugin and its shared games cache / clients."""
     super().__init__(
         Platform.PlayStation,  # choose platform from available list
         "0.1",  # version
         reader,
         writer,
         token)
     # One cache instance is shared by the backend client and task manager.
     cache = GamesCache(self)
     self.games_cache = cache
     self.backend_client = BackendClient(cache)
     self.task_manager = TaskManager(self, cache)
    def on_click(self):
        """Prompt for a vault name and queue a creation task for the selected region."""
        logging.info("Create vault clicked!")
        region: Region = widgets_map['region_tree'].get_selected_region()

        name, accepted = QInputDialog.getText(self.button, "New vault name",
                                              "Name for the new vault:")
        # Only proceed when the dialog was accepted with a non-empty name.
        if accepted and name:
            TaskManager.add_task(CreateVaultTask(region, name))
Beispiel #12
0
    def on_click(self):
        """Ask the user for a file and queue an upload task for it."""
        logging.info("Upload clicked!")
        table = widgets_map['files_table']
        region: Region = table.displayed_region
        vault: str = table.displayed_vault

        # getOpenFileName returns (path, filter); only the path matters here.
        path = QFileDialog.getOpenFileName(self.button, "Select file to upload")[0]
        if path:
            TaskManager.add_task(UploadFileTask(region, vault, path))
def main():
    """Entry point: run the GUI or the task manager depending on user input."""
    ui = UserInput()
    if ui.gui_flag:
        # GUI mode.
        Gui(ui.get_file(), ui.get_docid(), ui.get_userid()).run()
    else:
        # Headless mode: run the requested task directly.
        TaskManager(ui.get_file(), ui.get_docid(), ui.get_userid()).run(ui.task)
Beispiel #14
0
    def on_click(self):
        """Prompt for a save location and queue the archive download."""
        logging.info("Download clicked!")
        table = widgets_map['files_table']
        region: Region = table.displayed_region
        vault: str = table.displayed_vault
        archive: Archive = table.get_active_archive()

        # Suggest the archive description as the default file name.
        target = QFileDialog.getSaveFileName(self.button,
                                             "Output file location",
                                             archive.description)[0]
        if not target:
            return

        TaskManager.add_task(GetArchiveTask(region, vault, archive, target))
Beispiel #15
0
 def get_one_page(self, keyword, page):
     """Fetch one result page for *keyword*, recording begin/end/failed status."""
     manager = TaskManager()
     manager.task_begin(keyword, page)
     page_data = Fetch.get_one_page_by_keyword(keyword, page)
     # An empty/falsy fetch result counts as a failed task.
     outcome = manager.task_end if page_data else manager.task_failed
     outcome(keyword, page)
Beispiel #16
0
def launch_aux_processes():
    """Reset pooled connections, then spawn the stats and task servers."""
    # Close any open server connections before forking and...
    app.data_engine._reset_pool()
    app.cache_engine._reset_pool()
    # ...spawn the remaining services.
    cfg = app.config
    StatsManager.run_server(cfg['STATS_SERVER'],
                            cfg['STATS_SERVER_PORT'],
                            cfg['DEBUG'])
    TaskManager.run_server(cfg['TASK_SERVER'],
                           cfg['TASK_SERVER_PORT'],
                           cfg['DEBUG'])
def main(process_num):
    """Run this node's slice of the global task table through a Processor.

    Loads the task table, selects the rows assigned to this host, loads the
    user-supplied launcher/executor/collector scripts, and processes them.

    :param process_num: number of worker processes for the Processor
    """
    host_name = socket.gethostname()

    # Fall back to a default path next to the data directory when no
    # explicit task-table path is configured.
    if not config.task_table_path:
        task_table_path = os.path.join(config.data_dir, 'task_table.pkl')
    else:
        task_table_path = config.task_table_path

    global_task_table = TaskManager.load_task_table(task_table_path)
    # Fixed: `.ix` was deprecated and removed in pandas 1.0; boolean-mask
    # selection via `.loc` is the supported equivalent.
    local_task_table = global_task_table.loc[global_task_table.node_name == host_name]

    # Load the three user-provided functor scripts from the script directory.
    launcher_script = SourceFileLoader('launcher_script',
                                       os.path.join(config.script_dir, 'launcher_script.py')).load_module()
    executor_script = SourceFileLoader('executor_script',
                                       os.path.join(config.script_dir, 'executor_script.py')).load_module()
    collector_script = SourceFileLoader('collector_script',
                                        os.path.join(config.script_dir, 'collector_script.py')).load_module()

    launch_func = launcher_script.QLaunchFunctor(*config.launch_arguments)
    execute_func = executor_script.ExecuteFunctor(*config.execute_arguments)
    collect_func = collector_script.CollectFunctor(*config.collect_arguments)

    processor = Processor(local_task_table, launch_func, execute_func, collect_func, process_num,
                          config.input_queue_length)
    processor.process()
Beispiel #18
0
    def run(self):
        """Initiate a bulk archive-retrieval job and queue a wait task for it."""
        glacier = self.get_boto_client()
        logging.info("Initiating job to retrieve archive")
        # Bulk tier: cheapest, slowest retrieval option.
        params = {
            "Type": "archive-retrieval",
            "ArchiveId": self.archive.id,
            "Tier": "Bulk",
        }
        job = glacier.initiate_job(vaultName=self.vault, jobParameters=params)
        logging.info(
            f"Initiated job to retrieve archive. Job id = {job['jobId']}")

        TaskManager.add_task(
            WaitForJobTask(self.region, self.vault, job, JobOutput.ARCHIVE,
                           self.output_file))
Beispiel #19
0
    def initialize(self, window: QMainWindow):
        """Find the region tree view, wire its signals, and populate regions."""
        tree = cast(QTreeView, window.findChild(QTreeView, 'treeView'))
        self.view = tree
        tree.clicked.connect(self.on_clicked)
        tree.customContextMenuRequested.connect(self.open_menu)

        self.model = QStandardItemModel()
        root: QStandardItem = self.model.invisibleRootItem()
        tree.setModel(self.model)

        for region in REGIONS:
            root.appendRow(QStandardItem(region.name))
            # Only query AWS when credentials are configured.
            if keys.Keys.has_keys():
                TaskManager.add_task(ListVaultsTask(region))
 def __init__(self, task_loader, conf):
     """Set up the worker/task managers from *conf* and load the initial tasks.

     :param task_loader: source of task definitions for the TaskManager
     :param conf: configuration object shared with the WorkerManager
     """
     # Lock presumably guards shared scheduler state — confirm with usage.
     self.lock = threading.Lock()
     self.workerManager = WorkerManager(conf)
     self.taskManager = TaskManager(task_loader)
     # Bookkeeping of currently-running tasks.
     self.running_tasks = {}
     self.conf = conf
     self.load_tasks()
Beispiel #21
0
 def __init__(self, data):
     """Build the main Tk window: task/document/user selectors and buttons.

     :param data: parsed dataset handed to TaskManager queries

     Fixed: `.pack()` returns None, so chaining it on the constructor left
     every widget attribute (self.menu, self.task_title, ...) set to None,
     and the labels/buttons were parented to the default root instead of
     the menu frame.  Widgets are now constructed first, then packed.
     """
     self.main = Tk()
     self.main.configure(background="white")
     self.data = data
     self.docs = TaskManager.get_all_documents(self.data)
     self.users = [""]
     self.tasks = ["2a", "2b", "3a", "3b", "4", "5a", "5b", "5c", "5d", "5e"]
     self.menu = Frame(self.main)
     self.menu.pack(side=TOP)
     self.test = StringVar()
     self.task_title = Label(self.menu, textvariable=self.test, bg="white", font="-weight bold -size 26")
     self.task_title.pack(side=TOP)
     self.task = Label(self.menu, text="Task ID", bg="white")
     self.task.pack(side=TOP)
     self.tid = StringVar()
     self.tid.trace("w", callback=self.find_users_by_task)
     self.task_options = ttk.Combobox(self.main, textvariable=self.tid)
     self.task_options['values'] = self.tasks
     self.task_options.pack(side=TOP)
     self.doc_title = Label(self.menu, text="Document ID", bg="white")
     self.doc_title.pack(side=TOP)
     self.did = StringVar()
     self.did.trace("w", callback=self.find_users_by_doc)
     self.doc_options = ttk.Combobox(self.main, textvariable=self.did, width=50)
     self.doc_options['values'] = self.docs
     self.doc_options.pack(side=TOP)
     self.user = Label(self.menu, text="User ID", bg="white")
     self.user.pack(side=TOP)
     self.uid = StringVar()
     self.user_options = ttk.Combobox(self.main, textvariable=self.uid)
     self.user_options['values'] = self.users
     self.user_options.pack(side=TOP)
     # Populated later, once a search/plot is performed.
     self.canvas = None
     self.toolbar = None
     self.listbox = None
     self.btn_search = Button(self.menu, text="Search", command=self.btn_click_search)
     self.btn_search.pack(side=TOP)
     self.btn_file = Button(self.menu, text="Open File", command=self.btn_click_file)
     self.btn_file.pack(side=LEFT)
Beispiel #22
0
    def __init__(self):
        """Assemble the application: config, ZoneMinder client, display,
        UI components, input handling, controller, and task manager.
        """
        def get_input_handlers(controller, app_config):
            # Pygame input always works; the Raspberry Pi handler is optional
            # and skipped when its module is not importable.
            handlers = [PygameInputHandler(controller, app_config)]
            try:
                from pi_input_handler import PiInputHandler

                handlers.append(PiInputHandler(controller, app_config))
            except ImportError:
                print('Unable to import raspberrypi input handler')
            return handlers

        def get_zoneminder_client(app_config):
            # Build the ZoneMinder API client from the configured connection
            # settings (host, port, paths, credentials).
            zm_client = ZoneMinderClient(app_config.config[SERVER_HOST],
                                         app_config.config[SERVER_PORT],
                                         app_config.config[ZM_WEB_PATH],
                                         app_config.config[USER_NAME],
                                         app_config.config[PASSWORD],
                                         app_config.config[ZMS_WEB_PATH])
            return zm_client

        config = AppConfig()
        event_bus = EventBus()
        client = get_zoneminder_client(config)
        self.app_state = AppState()
        self.app_context = AppContext(config, client, event_bus)

        # Display must be initialised before components query its size.
        self.display = PygameDisplay(config)
        self.display.init()
        self.app_context.display_size = self.display.get_display_size()

        # UI components all share the same app context.
        zm_stream_component = MonitorStreamComponent(self.app_context)
        group_selector_component = GroupSelectorComponent(self.app_context)
        monitor_selector_component = MonitorSelectorComponent(self.app_context)
        shutdown_prompt_component = ShutdownPromptSelector(self.app_context)
        menu_selector = MenuSelector(self.app_context)
        startup_component = StartUpComponent(self.app_context)
        self.component_manager = AppComponentManager(
            self.display, event_bus, startup_component, zm_stream_component, [
                group_selector_component, monitor_selector_component,
                shutdown_prompt_component, menu_selector
            ])
        self.input_manager = InputManager(get_input_handlers(
            event_bus, config))
        self.app_controller = AppController(self.app_context,
                                            self.input_manager, self.app_state)
        self.task_manager = TaskManager(event_bus)
Beispiel #23
0
def complete_task(args):
    """Mark the given task ids as complete, persisting after each one."""
    manager = TaskManager(fileio.load())
    ids = args.id
    if not ids:
        print(INVALID_ID_MSG)
        return

    completed = dict()
    for task_id in ids:
        try:
            # Look up first so an unknown id is reported before completing.
            completed[task_id] = manager.task_dict[task_id]
            manager.complete(task_id)
            fileio.save(manager.task_dict)
        except KeyError:
            print(INVALID_ID_MSG)
    print_table(manager)
    print()
    print_completed_tasks(completed)
def parse_using_file(filepath, n):
    """Download data for every ticker symbol listed in *filepath*.

    Creates a TaskManager with *n* download threads, enqueues one task set
    per symbol read from the file, then blocks until all tasks finish.

    :param filepath: path to a file with one ticker symbol per line
    :param n: number of download threads
    :return: None
    """
    tm = TaskManager(n)
    with open(filepath) as symbols:
        for line in symbols:
            # Normalise the symbol before queueing its download tasks.
            create_tasks(symbol=line.strip().upper(), tm=tm)

    tm.finish()
Beispiel #25
0
def modify_task(args):
    """Update the name/date/hours of an existing task and persist the change."""
    manager = TaskManager(fileio.load())
    task_id = args.id
    try:
        old: task.Task = manager.task_dict[task_id]
        # Fall back to the existing values for anything not supplied.
        new_name = args.name or old.name
        if args.date:
            new_date = interpret_date(args.date)
        elif args.floating or isinstance(old, task.FloatingTask):
            # Explicitly floating (or already floating): no due date.
            new_date = None
        else:
            new_date = old.due_date
        new_hours = args.est_hours or old.hours_remaining

        manager.new_task(new_name, new_hours, new_date, task_id)
        fileio.save(manager.task_dict)
        print_table(manager, task_id)
    except KeyError:
        print(INVALID_ID_MSG)
    except ValueError as e:
        print(e)
Beispiel #26
0
class DiscordBot:
    """Discord bot that forwards chat messages to a TaskManager for replies."""

    def __init__(self, params=None):
        """Configure the bot and immediately start it (blocking).

        :param params: optional dict with "server_name" and "discord_token".
            Fixed: the default was a mutable ``{}`` shared across calls; it
            is now ``None`` and replaced with a fresh dict per call.
        """
        self.server_name = None
        self.discord_token = None
        self.client = None
        self.taskman = None

        self.set_params_from_dict(params=params if params is not None else {})
        self.set_other_params()

        self.run_bot()

    def set_params_from_dict(self, params):
        """Read the server name and token from *params* (empty-string defaults)."""
        self.server_name = params.get("server_name", "")
        self.discord_token = params.get("discord_token", "")

    def set_other_params(self):
        """Instantiate the Discord client and the task manager."""
        self.client = discord.Client()
        self.taskman = TaskManager()

    # ================================================================

    def run_bot(self):
        """Register the event handlers and start the blocking client loop."""
        self.on_ready = self.client.event(self.on_ready)
        self.on_member_join = self.client.event(self.on_member_join)
        self.on_message = self.client.event(self.on_message)

        self.client.run(self.discord_token)

    async def on_ready(self):
        """Log the connection details once the client is ready."""
        print(f'{self.client.user} est descendu des cieux pour vous servir!')
        guild = discord.utils.get(self.client.guilds, name=self.server_name)
        print(f'{self.client.user} est connecté dans le serveur:\n'
              f'{guild.name}(id: {guild.id})')

        members = '\n - '.join([member.name for member in guild.members])
        print(f'Sont dans le nuage:\n - {members}')

    async def on_member_join(self, member):
        """Placeholder: the welcome-DM behaviour is currently disabled."""
        pass
        #await member.create_dm()
        #await member.dm_channel.send(
        #    f'Hi {member.name}, welcome to my Discord server!'
        #)

    async def on_message(self, message):
        """Route each incoming message to the task manager; reply if it matches."""
        print(f"{message.author.name} : {message.content}")
        # avoid infinite loop where the bot talks to itself.
        if message.author == self.client.user:
            return

        response = self.taskman.trigger_task(message.content.lower())
        if response:
            await message.channel.send(response)
Beispiel #27
0
def main():
    """Build the CLI parser, enqueue one test task, and dump the queue."""
    args = make_parser().parse_args()
    manager = TaskManager()
    manager.add_task_enum("test", "test_type", "regr", "RF", args.param, 10,
                          100, 10)
    manager.dump_queue("q")
Beispiel #28
0
    def __init__(self, world, image, direction, location, nest):
        """Create a 1x1 ant sprite at *location*, facing *direction*, owned by *nest*."""
        super(Ant, self).__init__(world, location, (1, 1), image)
        self.world = world
        self.image = image
        self.direction = direction
        self.location = location
        self.nest = nest
        # Initial state: carrying no food, full health, no scent readings yet.
        self.food = 0
        self.health = 1
        self.food_scent_strength = 0
        self.home_scent_strength = 0
        self.task_manager = TaskManager()
Beispiel #29
0
def clock(args):
    """Deduct hours from a task's remaining estimate and persist the result."""
    manager = TaskManager(fileio.load())
    # TODO Complete if less than/eq 0?
    try:
        entry: task.Task = manager.task_dict[args.id]
        entry.hours_remaining -= args.to_deduct

        fileio.save(manager.task_dict)
        print_table(manager, args.id)
    except KeyError:
        print(INVALID_ID_MSG)
Beispiel #30
0
    def on_connect_button_clicked(self):
        """Toggle the connection when the connect button is pressed.

        Not yet connected: read ip/port/user/password from the inputs, log
        in, and on success start the transfer-progress timer and create a
        task manager.  Already connected: stop the timer, log out, and tear
        the task manager down.

        Fixed: ``== False`` comparison replaced with the idiomatic ``not``.
        """
        if not self.has_connected:  # no connection established yet
            # Connect to the server with the entered ip/port/user/password.
            ip = self.ipInput.text()
            port = self.portInput.text()
            user = self.userInput.text()
            password = self.passwordInput.text()
            if not self.client.open(ip, port):  # connection failed
                QMessageBox.information(self.centralwidget, "连接错误","ip或者port错误",QMessageBox.Yes)
                return
            self.client.user(user)
            reply = self.client.password(password)
            if reply.startswith("5"):  # login failed (5xx reply)
                QMessageBox.information(self.centralwidget, "连接错误", "user或者password错误", QMessageBox.Yes)
            else:  # login succeeded
                self.update_folder()  # refresh the file-browser pane
                self.connectButton.setText("disconnect")
                self.has_connected = True

                self.task_manager = TaskManager(ip, port, user, password)  # create the transfer-task factory
                self.timer.start(self.interval)  # start refreshing transfer progress

                self.menubar.setEnabled(True)
                self.statusbar.setEnabled(True)
        else:  # already connected
            self.timer.stop()  # stop refreshing transfer progress
            # Disconnect the client.
            self.client.bye()
            self.client.close()
            self.connectButton.setText("connect")
            self.has_connected = False
            del self.task_manager
            self.task_manager = None

            self.menubar.setEnabled(False)
            self.statusbar.setEnabled(False)
Beispiel #31
0
    def eval(self):
        """Evaluate the task model on the Tratz dataset via the TaskManager.

        NOTE(review): the per-model load/eval loop is commented out, the
        `models` list is otherwise unused, and the computed `score` /
        `model_number` are discarded — the method always returns 0.
        Confirm whether this stripped-down state is intentional.
        """
        tm = TaskManager()
        # models = [
            # "/data/nlp/corpora/multi_word_embedding/data/models/unsupervised/mwe_f_ft_2_200_complete_rs1_wd00005",
            # "/data/nlp/corpora/multi_word_embedding/data/models/unsupervised/mwe_f_ft_2_200_complete_rs2_wd00005",
            # "/data/nlp/corpora/multi_word_embedding/data/models/unsupervised/mwe_f_ft_2_200_complete_rs3_wd00005",
            # "/data/nlp/corpora/multi_word_embedding/data/models/unsupervised/mwe_f_ft_2_200_complete_rs4_wd00005",
            # "/data/nlp/corpora/multi_word_embedding/data/models/unsupervised/mwe_f_ft_2_200_complete_rs5_wd00005",
        # ]
        models = [
            "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_normal_complete_rs1",
            "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_normal_complete_rs2",
            "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_normal_complete_rs3",
            "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_normal_complete_rs4",
            "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_normal_complete_rs5",

            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise_frt_complete_rs1",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_frt_rs2",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_frt_rs3",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_frt_rs4",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_frt_rs5",

            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise_complete_rs1",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_rs2",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_rs3",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_rs4",
            # "/data/nlp/corpora/multi_word_embedding/data/models/supervised/mwe_f_ft_2_200_sentencewise1_complete_rs5",
        ]
        # for i, model in enumerate(models):
            # init_random(i+1)
            # print(model)
        # self.task_model.mwe_f.load_state_dict(torch.load(f"{model}.pt", map_location=self.device))
        # self.task_model.eval()
        score, model_number = tm.evaluateOnTratzBestModel(self.params, self.task_model.mwe_f, self.sg_embeddings, self.embedding_device, self.device)
        # print(f"Score: {score} - {model_number}\n")
        return 0
Beispiel #32
0
async def main(port, log_storage="/home/tbjc1magic/log"):
    """Start the gRPC DataCollector server alongside the task-manager loop."""
    server = aio.server()
    task_manager = TaskManager()
    # Run the blocking task-manager loop in a worker thread.
    manager_future = asyncio.to_thread(task_manager.run)
    data_collector_service_pb2_grpc.add_DataCollectorServicer_to_server(
        DataCollectorServicer(log_storage, task_manager), server)
    # Enable reflection for the collector service and the reflection service.
    reflection.enable_server_reflection(
        (
            data_collector_service_pb2.DESCRIPTOR.
            services_by_name["DataCollector"].full_name,
            reflection.SERVICE_NAME,
        ),
        server,
    )
    server.add_insecure_port(f"[::]:{port}")
    await server.start()
    await asyncio.gather(server.wait_for_termination(), manager_future)
Beispiel #33
0
    def __init__(self, conf_file):
        """Master coordinator: load configuration, then start the task,
        slave, and message-connection managers.

        :param conf_file: path to the INI configuration file
        """
        self.config = ConfigParser.ConfigParser(allow_no_value=True)
        # Settings below are declared up front so the full attribute set is
        # visible in one place; presumably populated by configure(conf_file)
        # — confirm against that method.
        self.clean_time_gap = None
        self.wait_time_for_slave = None
        self.master_queue_name = None
        self.task_queue_name = None
        self.task_queue_size_limit = None
        self.task_file_name = None
        self.task_counter_file = None
        self.ssh_key = None
        self.s3_bucket = None
        self.s3_folder = None
        self.slave_num_every_packup = None
        self.slave_max_sec_each_task = None
        self.slave_python_version = None
        self.master_ip = None
        self.slaves_ip = None
        self.slave_awake_frequency = None
        self.configure(conf_file)

        self.last_wake_time = None

        # Periodic-maintenance plumbing; started elsewhere.
        self.repeated_timer = None
        self.is_started = False
        self.pop_forever_handler = None

        logging.info('Starting task manager...')
        self.task_manager = TaskManager(self.task_file_name, self.task_counter_file)
        logging.info('Starting slave manager...')
        self.slave_manager = SlaveManager(master_ip=self.master_ip,
                                          slaves_ip=self.slaves_ip,
                                          ssh_key=self.ssh_key,
                                          s3_bucket=self.s3_bucket,
                                          s3_folder=self.s3_folder,
                                          slave_num_every_packup=self.slave_num_every_packup,
                                          slave_max_sec_each_task=self.slave_max_sec_each_task,
                                          slave_python_version=self.slave_python_version,
                                          slave_awake_frequency=self.slave_awake_frequency,
                                          slave_buffer_size=1)
        logging.info('Starting connection manager...')
        self.message_connection = ConnectionManager(queue_name=self.master_queue_name,
                                                    durable=False,
                                                    callback=self.msg_callback,
                                                    no_ack=True)
Beispiel #34
0
def run(user_ids, tweet_ids, curr_datetime, root_dir):
    """Collect Twitter data for the given users and tweets into a
    timestamped folder.

    Data is collected periodically and stored in folders named after the
    collection timestamp; the task manager diffs against previously stored
    data and saves only the delta (follower changes, new timeline tweets).

    Parameters:
        - user_ids: users (username/user_id) to collect data for.
        - tweet_ids: tweets to collect data for.
        - curr_datetime (str): timestamp used to name the output directory.
        - root_dir (str): root directory holding all timestamp folders.
    """
    print(" --- Collecting twitter data for {} tweets and {} users ---".format(
        len(tweet_ids), len(user_ids)))

    apis = create_api_objects()

    base_folder_path = root_dir + '/'

    # Load the list of users to ignore, if present.
    user_ignore_list = set()
    ignore_path = base_folder_path + 'user_ignore_list.txt'
    if os.path.exists(ignore_path):
        with open(ignore_path) as f:
            user_ignore_list = {line.strip() for line in f}

    # Ensure the per-timestamp twitter output folder exists.
    twitter_folder_path = base_folder_path + curr_datetime + '/' + 'twitter/'
    if not os.path.exists(twitter_folder_path):
        os.makedirs(twitter_folder_path)

    task_manager = TaskManager(base_folder_path, twitter_folder_path)

    process_tweets(tweet_ids, user_ignore_list, task_manager, apis)
    process_users(user_ids, user_ignore_list, task_manager, apis)
class ProviderManager(object):
    """Owns the shade client for a single cloud provider, plus an optional
    rate-limiting TaskManager thread and caches of flavors, images, and
    networks looked up from that provider."""

    log = logging.getLogger("nodepool.ProviderManager")

    @staticmethod
    def reconfigure(old_config, new_config, use_taskmanager=True):
        """Carry still-valid managers from old_config over to new_config.

        Managers whose provider definition is unchanged are reused; providers
        that are new or changed get a fresh, started manager, and replaced
        managers are stopped after the new set is in place.
        """
        stop_managers = []
        for p in new_config.providers.values():
            oldmanager = None
            if old_config:
                oldmanager = old_config.provider_managers.get(p.name)
            if oldmanager and p != oldmanager.provider:
                # Provider definition changed: retire the old manager.
                stop_managers.append(oldmanager)
                oldmanager = None
            if oldmanager:
                new_config.provider_managers[p.name] = oldmanager
            else:
                ProviderManager.log.debug("Creating new ProviderManager object"
                                          " for %s" % p.name)
                new_config.provider_managers[p.name] = \
                    get_provider_manager(p, use_taskmanager)
                new_config.provider_managers[p.name].start()

        for stop_manager in stop_managers:
            stop_manager.stop()

    @staticmethod
    def stopProviders(config):
        """Stop and join every provider manager in *config*."""
        for m in config.provider_managers.values():
            m.stop()
            m.join()

    def __init__(self, provider, use_taskmanager):
        self.provider = provider
        self._images = {}      # image name/id -> shade image record
        self._networks = {}    # network name -> shade network record
        self.__flavors = {}    # lazily filled via the _flavors property
        self._use_taskmanager = use_taskmanager
        self._taskmanager = None

    def start(self):
        """Start the rate-limiting task manager (if enabled) and build the
        shade client."""
        if self._use_taskmanager:
            self._taskmanager = TaskManager(None, self.provider.name,
                                            self.provider.rate)
            self._taskmanager.start()
        self.resetClient()

    def stop(self):
        """Signal the task manager thread (if any) to stop."""
        if self._taskmanager:
            self._taskmanager.stop()

    def join(self):
        """Wait for the task manager thread (if any) to exit."""
        if self._taskmanager:
            self._taskmanager.join()

    @property
    def _flavors(self):
        # Fetched on first use and cached for the manager's lifetime.
        if not self.__flavors:
            self.__flavors = self._getFlavors()
        return self.__flavors

    def _getClient(self):
        """Build a shade client, routed through the task manager when rate
        limiting is enabled."""
        if self._use_taskmanager:
            manager = self._taskmanager
        else:
            manager = None
        return shade.OpenStackCloud(
            cloud_config=self.provider.cloud_config,
            manager=manager,
            **self.provider.cloud_config.config)

    def resetClient(self):
        """(Re)create the shade client and hand it to the task manager."""
        self._client = self._getClient()
        if self._use_taskmanager:
            self._taskmanager.setClient(self._client)

    def _getFlavors(self):
        """Return the provider's flavors sorted by ascending RAM."""
        flavors = self.listFlavors()
        # Bug fix: list.sort() with a cmp function is Python 2-only and
        # raises TypeError on Python 3; key= yields the same ordering.
        flavors.sort(key=lambda f: f['ram'])
        return flavors

    def findFlavor(self, min_ram, name_filter=None):
        """Return the smallest flavor with at least *min_ram* MB of RAM,
        optionally restricted to flavors whose name contains *name_filter*.
        Raises if no flavor qualifies."""
        # Note: this will throw an error if the provider is offline
        # but all the callers are in threads (they call in via CreateServer) so
        # the mainloop won't be affected.
        for f in self._flavors:
            if (f['ram'] >= min_ram
                    and (not name_filter or name_filter in f['name'])):
                return f
        raise Exception("Unable to find flavor with min ram: %s" % min_ram)

    def findImage(self, name):
        """Return the image record for *name*, caching the lookup."""
        if name in self._images:
            return self._images[name]

        with shade_inner_exceptions():
            image = self._client.get_image(name)
        self._images[name] = image
        return image

    def findNetwork(self, name):
        """Return the network record for *name*, caching the lookup."""
        if name in self._networks:
            return self._networks[name]

        with shade_inner_exceptions():
            network = self._client.get_network(name)
        self._networks[name] = network
        return network

    def deleteImage(self, name):
        """Delete image *name* from the provider and drop it from the cache."""
        if name in self._images:
            del self._images[name]

        with shade_inner_exceptions():
            return self._client.delete_image(name)

    def addKeypair(self, name):
        """Generate a 2048-bit RSA keypair, register its public half with the
        provider under *name*, and return the private key object."""
        key = paramiko.RSAKey.generate(2048)
        public_key = key.get_name() + ' ' + key.get_base64()
        with shade_inner_exceptions():
            self._client.create_keypair(name=name, public_key=public_key)
        return key

    def listKeypairs(self):
        """Return all keypairs registered with the provider."""
        with shade_inner_exceptions():
            keypairs = self._client.list_keypairs()
        return keypairs

    def deleteKeypair(self, name):
        """Delete the provider keypair called *name*."""
        with shade_inner_exceptions():
            return self._client.delete_keypair(name=name)

    def createServer(self, name, min_ram, image_id=None, image_name=None,
                     az=None, key_name=None, name_filter=None,
                     config_drive=None, nodepool_node_id=None,
                     nodepool_image_name=None,
                     nodepool_snapshot_image_id=None):
        """Launch a server (without waiting) sized by *min_ram*, from either
        *image_name* (resolved via the image cache) or a raw *image_id*,
        attaching the provider's configured networks and nodepool metadata."""
        if image_name:
            image = self.findImage(image_name)
        else:
            image = {'id': image_id}
        flavor = self.findFlavor(min_ram, name_filter)
        create_args = dict(name=name,
                           image=image,
                           flavor=flavor,
                           config_drive=config_drive)
        if key_name:
            create_args['key_name'] = key_name
        if az:
            create_args['availability_zone'] = az
        nics = []
        for network in self.provider.networks:
            if network.id:
                nics.append({'net-id': network.id})
            elif network.name:
                net_id = self.findNetwork(network.name)['id']
                nics.append({'net-id': net_id})
            else:
                raise Exception("Invalid 'networks' configuration.")
        if nics:
            create_args['nics'] = nics
        # Put provider.name and image_name in as groups so that ansible
        # inventory can auto-create groups for us based on each of those
        # qualities
        # Also list each of those values directly so that non-ansible
        # consumption programs don't need to play a game of knowing that
        # groups[0] is the image name or anything silly like that.
        nodepool_meta = dict(provider_name=self.provider.name)
        groups_meta = [self.provider.name]
        if nodepool_node_id:
            nodepool_meta['node_id'] = nodepool_node_id
        if nodepool_snapshot_image_id:
            nodepool_meta['snapshot_image_id'] = nodepool_snapshot_image_id
        if nodepool_image_name:
            nodepool_meta['image_name'] = nodepool_image_name
            groups_meta.append(nodepool_image_name)
        create_args['meta'] = dict(
            groups=json.dumps(groups_meta),
            nodepool=json.dumps(nodepool_meta)
        )

        with shade_inner_exceptions():
            return self._client.create_server(wait=False, **create_args)

    def getServer(self, server_id):
        """Return the server record for *server_id* (None if not found)."""
        with shade_inner_exceptions():
            return self._client.get_server(server_id)

    def waitForServer(self, server, timeout=3600):
        """Block until *server* becomes active (or *timeout* elapses)."""
        with shade_inner_exceptions():
            return self._client.wait_for_server(
                server=server, auto_ip=False, reuse=False,
                timeout=timeout)

    def waitForServerDeletion(self, server_id, timeout=600):
        """Poll until the server disappears; raises ServerDeleteException on
        timeout via iterate_timeout."""
        for count in iterate_timeout(
                timeout, exceptions.ServerDeleteException,
                "server %s deletion" % server_id):
            if not self.getServer(server_id):
                return

    def waitForImage(self, image_id, timeout=3600):
        """Poll until image *image_id* reaches a terminal ('active'/'error')
        status and return its record; transient listing errors are retried."""
        last_status = None
        for count in iterate_timeout(
                timeout, exceptions.ImageCreateException, "image creation"):
            try:
                image = self.getImage(image_id)
            except NotFound:
                continue
            except ManagerStoppedException:
                raise
            except Exception:
                self.log.exception('Unable to list images while waiting for '
                                   '%s will retry' % (image_id))
                continue

            # shade returns None when not found
            if not image:
                continue

            status = image['status']
            if (last_status != status):
                self.log.debug(
                    'Status of image in {provider} {id}: {status}'.format(
                        provider=self.provider.name,
                        id=image_id,
                        status=status))
                if status == 'ERROR' and 'fault' in image:
                    # Format key renamed from the misspelled 'resason';
                    # logged output is unchanged.
                    self.log.debug(
                        'ERROR in {provider} on {id}: {reason}'.format(
                            provider=self.provider.name,
                            id=image_id,
                            reason=image['fault']['message']))
            last_status = status
            # Glance client returns lower case statuses - but let's be sure
            if status.lower() in ['active', 'error']:
                return image

    def createImage(self, server, image_name, meta):
        """Snapshot *server* into a new image called *image_name*."""
        with shade_inner_exceptions():
            return self._client.create_image_snapshot(
                image_name, server, **meta)

    def getImage(self, image_id):
        """Return the image record for *image_id*."""
        with shade_inner_exceptions():
            return self._client.get_image(image_id)

    def uploadImage(self, image_name, filename, image_type=None, meta=None,
                    md5=None, sha256=None):
        """Upload a local image file to glance and return the new image id.

        *meta* entries become custom glance properties; *image_type*, if
        given, sets the disk_format property.
        """
        # configure glance and upload image.  Note the meta flags
        # are provided as custom glance properties
        # NOTE: we have wait=True set here. This is not how we normally
        # do things in nodepool, preferring to poll ourselves thankyouverymuch.
        # However - two things to note:
        #  - PUT has no aysnc mechanism, so we have to handle it anyway
        #  - v2 w/task waiting is very strange and complex - but we have to
        #              block for our v1 clouds anyway, so we might as well
        #              have the interface be the same and treat faking-out
        #              a shade-level fake-async interface later
        if not meta:
            meta = {}
        if image_type:
            meta['disk_format'] = image_type
        with shade_inner_exceptions():
            image = self._client.create_image(
                name=image_name,
                filename=filename,
                is_public=False,
                wait=True,
                md5=md5,
                sha256=sha256,
                **meta)
        return image.id

    def listImages(self):
        """Return all images visible to the provider."""
        with shade_inner_exceptions():
            return self._client.list_images()

    def listFlavors(self):
        """Return all flavors (without extra specs) from the provider."""
        with shade_inner_exceptions():
            return self._client.list_flavors(get_extra=False)

    def listServers(self):
        # shade list_servers carries the nodepool server list caching logic
        with shade_inner_exceptions():
            return self._client.list_servers()

    def deleteServer(self, server_id):
        """Delete the server and any floating IPs attached to it."""
        with shade_inner_exceptions():
            return self._client.delete_server(server_id, delete_ips=True)

    def cleanupServer(self, server_id):
        """Delete a server plus its per-server keypair (if it isn't the
        provider's shared keypair). Raises NotFound if the server is gone."""
        server = self.getServer(server_id)
        if not server:
            raise NotFound()

        key_name = server.get('key_name')
        if key_name and key_name != self.provider.keypair:
            with shade_inner_exceptions():
                self._client.delete_keypair(name=server['key_name'])

        self.log.debug('Deleting server %s' % server_id)
        self.deleteServer(server_id)

    def cleanupLeakedFloaters(self):
        """Release floating IPs that are no longer attached to any server."""
        with shade_inner_exceptions():
            self._client.delete_unattached_floating_ips()
Beispiel #36
0
#           non-completed task.
#       Add a remove_task method that removes only one task by id
#
#       Upon calling complete() on a task, set _value of that task object to the number of occurrences of the
#           string "CCN" (case in-sensitive) that appears in the task's name.
#
#       Fix the Task object id, so that it is unique for each new task.
#       Fix other bugs.

# Note: - You cannot edit/change the TaskManager class directly. Think of it as a 3rd party library
#       - You can create new objects, etc

from task_manager import  Task, TaskManager

if __name__ == "__main__":
    # Exercise driver: import three tasks, complete them, then import one
    # more, complete again, and finally clear completed tasks.
    manager = TaskManager()
    for payload in (
        '!!nZ@xr>492CCN;SDRC2#6CcN_$5UcCNq]*m44AhW`',
        'g}~x?C*n9K|LccN_YEL@<=44jkc.dB-v{!#;7*[[',
        'ekCcN,h9=!B46)j6acCN;`n68M+2ZR2CCn^:CUw',
    ):
        manager.import_task(Task(payload))

    manager.complete_tasks()

    manager.import_task(Task('>.`8tCcn{xsS3sa!G@{cCn(w},U+s)**sACc]NAn#'))
    manager.complete_tasks()

    manager.remove_tasks()
Beispiel #37
0
# Whether today counts as a work day; per-date overrides are loaded below.
WORK_TODAY = True
# Maps day-offset-from-today -> available hours, read from the optional
# 'user_specified_days' file.
USER_SPEC_DAYS = {}

# Each line of 'user_specified_days' is "YYYY-MM-DD;hours"; only dates from
# today onward are kept, keyed by their offset in days from today.
try:
	with open('user_specified_days', 'r') as f:
		user_specified_days_raw = f.read()
	for line in user_specified_days_raw.splitlines():
		date, hours = line.split(";")
		date_dt = datetime.date(*map(int, date.split('-')))
		offset = (date_dt - datetime.date.today()).days
		if offset >= 0:
			USER_SPEC_DAYS[offset] = int(hours)
except IOError:
	print "[*] User specified days not provided."

mgr = TaskManager()

# Pending tasks, one per line of the 'todo' file.
with open('todo', 'r') as f:
	tasks_input = f.read().splitlines()

def index_to_date(i):
	"""Return the calendar date that is *i* days after today."""
	today = datetime.date.today()
	return today + datetime.timedelta(days=i)

def week_index_to_day(i):
	if i == 0:
		return "Monday"
	elif i == 1:
		return "Tuesday"
	elif i == 2:
		return "Wednesday"
	elif i == 3:
Beispiel #38
0
def task_handler(task_id):
    """Pool worker body: fetch the task record by id and print it (Python 2).

    The commented-out lines show the intended real work (fetching task.url);
    currently the handler only reports what it was given.
    """
    print 'task_id:', task_id
    task = Task.get(task_id)
    if task:
        #print task.id
        print task
    else :
        print 'None'
    #res = urllib2.urlopen(task.url)
    #print 'request_result:', len(res.read())


if __name__ == '__main__':
    # Process pool presumably meant to run task_handler for due tasks;
    # it is created but not used below — TODO confirm intended wiring.
    worker_count=4
    pool = Pool(processes=worker_count)
    task_manager = TaskManager('once_task')


    time_format = "%Y-%m-%d %H:%M:%S"
    # NOTE(review): this run_at is overwritten before use below — looks like
    # a leftover from testing random delays.
    run_at = datetime.now() + timedelta(seconds=random.randint(5, 10))
    url = 'http://qq.com'

    def create_task(run_at, url):
        # Persist the task, then enqueue its id keyed by epoch seconds.
        id = Task.create(url, run_at.strftime(time_format))
        task_manager.push(id, time.mktime(run_at.timetuple()))

    # Enqueue three identical tasks due immediately.
    run_at = datetime.now()
    create_task(run_at, url)
    create_task(run_at, url)
    create_task(run_at, url)
Beispiel #39
0
class Master:
    """Coordinates a fleet of download slaves through message queues.

    The master feeds tasks from a TaskManager into a shared task queue,
    listens on its own control queue for slave status messages, restarts
    slaves whose heartbeats go stale, and tears everything down on STOP.
    """

    def msg_callback(self, ch, method, properties, body):
        """Dispatch one control message of the form '<COMMAND> <json>' to
        the matching handler; unknown commands are logged and dropped."""
        callback_set = {'SUCCESS': self.success,
                        'FAIL': self.fail,
                        'AWAKE': self.update_slave_response_time,
                        'STOP': self.stop,
                        'ADD_SLAVE': self.add_slave,
                        'KILL_SLAVE': self.kill_slave,
                        'RESTART_SLAVE': self.restart_slave,
                        'STAT': self.stat,
                        'START': self.start,
                        'RECONFIGURE': self.configure,
                        'REFRESH': self.refresh
                        }
        try:
            command = body[:body.find(' ')]
            info = body[body.find(' ')+1:]
            if command in callback_set:
                callback_set[command](ujson.loads(info))
            else:
                logging.debug(" [x] Unknown command %r" % (str(body),))
        except KeyError as e:
            # The pika/queue layer occasionally surfaces this KeyError;
            # treat it as benign and log everything else.
            if str(e) == "'Queue.DeclareOk'":
                logging.debug("Queue.DelcareOk at %r" % (str(body),))
            else:
                logging.error("Unknown KeyError at %r:" % (str(body),))
        except RuntimeError as e:
            if 'recursion' in str(e):
                logging.error('MAXIMUM RECURSION ERROR')

    def __init__(self, conf_file):
        """Read configuration, then bring up the task, slave, and message
        connection managers."""
        self.config = ConfigParser.ConfigParser(allow_no_value=True)
        self.clean_time_gap = None
        self.wait_time_for_slave = None
        self.master_queue_name = None
        self.task_queue_name = None
        self.task_queue_size_limit = None
        self.task_file_name = None
        self.task_counter_file = None
        self.ssh_key = None
        self.s3_bucket = None
        self.s3_folder = None
        self.slave_num_every_packup = None
        self.slave_max_sec_each_task = None
        self.slave_python_version = None
        self.master_ip = None
        self.slaves_ip = None
        self.slave_awake_frequency = None
        self.configure(conf_file)

        self.last_wake_time = None

        self.repeated_timer = None
        self.is_started = False
        self.pop_forever_handler = None

        logging.info('Starting task manager...')
        self.task_manager = TaskManager(self.task_file_name, self.task_counter_file)
        logging.info('Starting slave manager...')
        self.slave_manager = SlaveManager(master_ip=self.master_ip,
                                          slaves_ip=self.slaves_ip,
                                          ssh_key=self.ssh_key,
                                          s3_bucket=self.s3_bucket,
                                          s3_folder=self.s3_folder,
                                          slave_num_every_packup=self.slave_num_every_packup,
                                          slave_max_sec_each_task=self.slave_max_sec_each_task,
                                          slave_python_version=self.slave_python_version,
                                          slave_awake_frequency=self.slave_awake_frequency,
                                          slave_buffer_size=1)
        logging.info('Starting connection manager...')
        self.message_connection = ConnectionManager(queue_name=self.master_queue_name,
                                                    durable=False,
                                                    callback=self.msg_callback,
                                                    no_ack=True)

    def run(self):
        """Block on the control queue until interrupted or the download
        completes, then stop this master."""
        logging.info(' [*] Waiting for messages. To exit press CTRL+C')
        try:
            self.message_connection.start_accepting_message()
        except KeyboardInterrupt:
            logging.info('Stopping master...')
            # Bug fix: previously called the undefined global 'master';
            # 'self' is the running master instance.
            self.stop(None)
        except EOFError:
            logging.info('Download finishes. Shutting down master.')
            self.stop(None)
        # except Exception as e:
        #     logging.info(str(e))
        #     logging.info('Stopping master...')

    # TODO: write all configuration in one file
    def configure(self, conf_file):
        """Load all settings from the [main] section of *conf_file*."""
        self.config.read(conf_file)
        self.clean_time_gap = self.config.getint('main', 'clean_time_gap')
        self.wait_time_for_slave = self.config.getint('main', 'wait_time_for_slave')
        self.slave_awake_frequency = self.config.get('main', 'slave_awake_frequency')
        self.master_ip = self.config.get('main', 'master_private_ip')
        self.slaves_ip = self.config.get('main', 'slaves_private_ip')
        self.master_queue_name = self.config.get('main', 'master_queue_name')
        self.task_queue_name = self.config.get('main', 'task_queue_name')
        self.task_file_name = self.config.get('main', 'task_file')
        self.task_queue_size_limit = int(self.config.get('main', 'task_queue_size_limit'))
        self.task_counter_file = self.config.get('main', 'task_counter_file')
        self.ssh_key = self.config.get('main', 'ssh_key')
        self.s3_bucket = self.config.get('main', 's3_bucket')
        self.s3_folder = self.config.get('main', 's3_folder')
        self.slave_num_every_packup = self.config.get('main', 'slave_num_every_packup')
        self.slave_max_sec_each_task = self.config.get('main', 'slave_max_sec_each_task')
        self.slave_python_version = self.config.get('main', 'slave_python_version')

    def add_slave(self, slave_info):
        """Register and launch a new slave unless it already exists."""
        if self.slave_manager.exist_slave(slave_info):
            logging.info('Slave ' + slave_info['host'] + ' already exists.')
            return
        logging.info('master: add slave' + str(slave_info))
        new_slave_info = self.slave_manager.add_slave(slave_info)
        self.slave_manager.run_slave(new_slave_info)
        # TODO:

    def kill_slave(self, slave_info):
        """Terminate a known slave; silently ignore unknown ones."""
        if not self.slave_manager.exist_slave(slave_info):
            return
        logging.info('kill slave ' + str(slave_info))
        self.slave_manager.kill_slave(slave_info)

    def restart_slave(self, slave_info):
        """Kill and re-add a slave (used when its heartbeat goes stale)."""
        logging.info(slave_info['host'])
        logging.info('restart_slave' + str(slave_info))
        self.kill_slave(slave_info)
        self.add_slave(slave_info)

    def start(self, info):
        """Begin feeding tasks in a background thread and start the periodic
        refresh timer."""
        logging.info('Master Starts')
        self.last_wake_time = datetime.datetime.utcnow()
        self.is_started = True

        self.pop_forever_handler = threading.Thread(target=self.start_popping_tasks)
        self.pop_forever_handler.start()

        self.repeated_timer = RepeatedTimer(self.clean_time_gap, self.notice_refresh, None)

    def pop_forever(self):
        """Legacy alias for start_popping_tasks()."""
        self.start_popping_tasks()

    def get_task_queue_size(self):
        pass

    # TODO: There is a bottle neck here
    def start_popping_tasks(self):
        """Publish tasks into the task queue, keeping it below the configured
        size limit, until stopped or the task source is exhausted."""
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        eof_reached = False
        while self.is_started and not eof_reached:
            current_task_queue_size = task_connection.get_task_queue_size()
            while self.is_started and current_task_queue_size < self.task_queue_size_limit:
                task = self.task_manager.pop_task()
                if task is None:
                    # TODO: Don't use Error. Just break and handle the case later in this function
                    logging.info('EOF Reached')
                    eof_reached = True
                    break
                message = 'WORK ' + ujson.dumps(task)
                task_connection.publish(message)
                current_task_queue_size += 1

        task_connection.stop()

    def fail(self, slave_task_info):
        """Requeue a failed task and record the slave's response time."""
        self.task_manager.add_task(slave_task_info['task'])
        self.slave_manager.update_last_response(slave_task_info)

    def success(self, slave_task_info):
        """Record the slave's response time for a completed task."""
        self.slave_manager.update_last_response(slave_task_info)

    def update_slave_response_time(self, slave_task_info):
        """Record a heartbeat (AWAKE) from a slave."""
        self.slave_manager.update_last_response(slave_task_info)

    def stop(self, info):
        """Shut everything down: stop feeding, notify slaves, and stop the
        timer, slave manager, task manager, and message connection."""
        self.is_started = False
        self.notice_slaves_stop()
        if self.pop_forever_handler is not None:
            self.pop_forever_handler.join()
        if self.repeated_timer is not None:
            self.repeated_timer.stop()
        self.slave_manager.stop()
        self.task_manager.stop()
        self.message_connection.stop()

    def notice_slaves_stop(self):
        """Publish one STOP message per known slave on the task queue."""
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        for _ in list(self.slave_manager.slave_dict.keys()):
            task_connection.publish('STOP {}')
        # task_connection.broadcast_task('STOP {}')
        task_connection.stop()

    def refresh(self, info):
        """Log progress and, if the clean interval has elapsed, restart any
        slaves that have stopped responding."""
        cur_progress, total_task = self.task_manager.get_progress()
        logging.info('downloading {}/{} files'.format(cur_progress, total_task))
        if not self.is_started:
            return

        # if time interval met, check failed slave
        if self.last_wake_time is None:
            self.last_wake_time = datetime.datetime.utcnow()

        if self.last_wake_time + datetime.timedelta(
                seconds=self.clean_time_gap) > datetime.datetime.utcnow():
            return
        failed_slaves = self.slave_manager.get_failed_slaves(self.wait_time_for_slave)
        if len(failed_slaves) != 0:
            logging.info('Finding failed slaves... ' + str(failed_slaves))
        for slave in failed_slaves:
            self.restart_slave(slave)
        self.last_wake_time = datetime.datetime.utcnow()

    def notice_refresh(self, info):
        """Timer callback: ask ourselves (via the control queue) to refresh."""
        try:
            self.message_connection.publish('REFRESH {}')
        except IndexError:
            logging.critical('INDEX_ERROR')

    def stat(self, info):
        """Log a summary of the slave fleet; with a non-empty payload also
        log each slave's queue and time since last response."""
        logging.info('=====================================')
        # Bug fix: logging.info takes printf-style args; the old calls passed
        # extra positional args without matching %s placeholders, which fails
        # at format time.
        logging.info('Num of slave: %s', self.slave_manager.get_num_slaves())
        logging.info('=====================================')
        if len(info) > 0:
            for slave in self.slave_manager.slave_list:
                if slave['last_response'] is None:
                    delta = 'new slave'
                else:
                    delta = datetime.datetime.utcnow() - slave['last_response']
                logging.info('%s | %s | %s', slave['host'], slave['queue'], delta)
            logging.info('====================================')
 # NOTE(review): orphaned duplicate of ProviderManager.start pasted at the
 # wrong indentation — it is not a valid member of the surrounding classes
 # and should probably be deleted; kept byte-identical here.
 def start(self):
     if self._use_taskmanager:
         self._taskmanager = TaskManager(None, self.provider.name,
                                         self.provider.rate)
         self._taskmanager.start()
     self.resetClient()
class Master(object):
    """Crawl-cluster master (Python 2): pairs a WorkerManager (worker nodes)
    with a TaskManager (pending tasks) and dispatches tasks over RPC."""

    def __init__(self, task_loader, conf):
        self.lock = threading.Lock()
        self.workerManager = WorkerManager(conf)
        self.taskManager = TaskManager(task_loader)
        self.running_tasks = {}
        self.conf = conf
        self.load_tasks()

    def get_status(self):
        """Return a summary dict of worker counts and task statistics."""
        return {
            'total_workers': self.workerManager.get_workers(),
            'tasks': self.taskManager.get_tasks_stats(),
            'idle_workers': self.workerManager.get_idle_workers()
        }


    def clean_death_workers(self):
        '''Periodically check worker heartbeats and purge dead workers,
        failing any tasks they were running so they can be rescheduled.'''
        workers,tasks = self.workerManager.clean_death_workers()
        logging.info("death workers:%s; relatedTasks:%s", workers, tasks)
        for task in tasks:
            self.taskManager.fail_task(task.uuid, TaskStatus.notReturned)
        return workers

    def register_worker(self, worker):
        '''Register a worker node; returns "OK" or "Invalid".'''
        logging.info("%s come in", worker)
        status = "OK"
        if worker is not None:
            self.workerManager.add_worker(worker)
        else:
            status = "Invalid"
        # logging.info(self.workerManager.get_workers())
        return status
        
    def remove_worker(self, worker):
        """Deregister a worker, failing its in-flight tasks for rescheduling.

        Returns "OK", "Invalid" (no worker given), or "NOT EXISTS".
        """
        status = "OK"
        if worker is None:
            status = "Invalid"
            return status
        identifier = worker.get_uuid()
        w, tasks = self.workerManager.remove_worker(identifier)
        for task in tasks:
            self.taskManager.fail_task(task.get_uuid(), TaskStatus.notReturned)
        if w is None:
            status = "NOT EXISTS"
        return status

    def task_complete(self, worker, taskResult):
        '''A worker finished a task: record the result, return the worker
        to the pool, and mark the task finished or failed.'''
        self.workerManager.finish_task(worker, taskResult)
        self.workerManager.update_worker(worker)
        if taskResult.is_success():
            self.taskManager.finish_task(taskResult.get_task_uuid())
        else:
            self.taskManager.fail_task(taskResult.get_task_uuid(), TaskStatus.failedToExecute)
        return True

    def heartbeat(self, worker):
        '''Heartbeat received: refresh the worker node's bookkeeping.'''
        self.workerManager.update_worker(worker)
        return True

    def lookup_spider(self, spider):
        pass

    def load_tasks(self):
        # Delegate initial task loading to the TaskManager.
        self.taskManager.load_tasks()

    def schedule_next(self):
        """Assign the next pending task to the next available worker via RPC;
        on RPC failure the worker is removed (which requeues the task)."""
        logging.info('tasks: %s',self.taskManager.get_tasks_stats())
        task = self.taskManager.next_task()
        worker = self.workerManager.next_worker()
        self.workerManager.assign_task(worker, task)
        try:
            proxy = common.RPCServerProxy.get_proxy(worker)
            proxy.assign_task(task)
        except Exception,e:
            traceback.print_exc()
            self.remove_worker(worker)
Beispiel #42
0
        app.config['CACHE_DATABASE_POOL_SIZE']
    )
    app.cache_engine = cache_engine

    # Create database management engine
    data_engine = DataManager(
        cache_engine,
        logger,
        app.config['MGMT_DATABASE_CONNECTION'],
        app.config['MGMT_DATABASE_POOL_SIZE']
    )
    app.data_engine = data_engine

    # Create background task processing client
    task_engine = TaskManager(
        data_engine,
        logger
    )
    task_engine.init_housekeeping_tasks()
    app.task_engine = task_engine

    # Create a user permissions engine
    permissions_engine = PermissionsManager(
        data_engine,
        cache_engine,
        task_engine,
        app.config,
        logger
    )
    app.permissions_engine = permissions_engine

    # Create the main imaging engine
            out_file.write("host_switch_sample={host_switch_sample}\treplace={replace}\tmemory_type={memory_type}\tmemory_times={memory_times}\tfreq={freq}\tinject_or_tag_packet={inject_or_tag_packet}\tavg_fn={avg_fn}\tstdv_fn={stdv_fn}\tavg_fp={avg_fp}\tstdv_fp={stdv_fp}\tavg_accuracy={avg_accuracy}\tstdv_accuracy={stdv_accuracy}\tavg_sample_map_size={avg_sample_map_size}\traw_host_sample_switch_hold_accuracy={raw_host_sample_switch_hold_accuracy}\tavg_real_target_flow_num={avg_real_target_flow_num}\tavg_fn_num={avg_fn_num}\tavg_condition_pkt_num={avg_condition_pkt_num}\n" \
            .format(host_switch_sample=host_switch_sample, \
                replace=replace, memory_type=memory_type, memory_times=memory_times, freq=freq, \
                inject_or_tag_packet=inject_or_tag_packet, \
                avg_fn = one_setting_result.avg_fn, \
                stdv_fn = one_setting_result.stdv_fn, \
                avg_fp = 0, \
                stdv_fp = 0, \
                avg_accuracy = one_setting_result.avg_accuracy, \
                stdv_accuracy = one_setting_result.stdv_accuracy, \
                avg_sample_map_size = one_setting_result.avg_sample_map_size, \
                raw_host_sample_switch_hold_accuracy = one_setting_result.raw_host_sample_switch_hold_accuracy, \
                avg_real_target_flow_num = one_setting_result.avg_targetflow_num,\
                avg_fn_num = one_setting_result.avg_fn_num,\
                avg_condition_pkt_num = 0
            )) 

if __name__ == '__main__':
    # Require exactly one argument: the directory of raw results to analyze.
    if len(sys.argv) != 2:
        print("usage: python3 result_analysis.py result_path")
        exit(0)

    results_dir = sys.argv[1]
    print(results_dir)

    # Load the task definitions the results refer to.
    manager = TaskManager()
    manager.readTasksFromFile("../public_lib/query3_task_info.txt")

    analyzer = results_analyzer_c()
    analyzer.get_all_setting_result(results_dir, manager)
from launcher_collector_scheme import Processor
import time
import pickle
import executor_script
import multiprocessing as mp
import concurrent.futures
from data_parser import DataParser
from task_manager import TaskManager
import config
import socket
from importlib.machinery import SourceFileLoader
import os.path


dp = DataParser(config.data_dir, config.initial_time, config.time_step, config.max_chunk_size)
# Full cluster task table; rows belong to nodes via the node_name column.
global_task_table = TaskManager.load_task_table(config.task_table_path)
# Bug fix: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# boolean-mask selection is the exact equivalent for this usage.
local_task_table = global_task_table[global_task_table.node_name == socket.gethostname()]
# Load the site-specific launcher module from the configured script dir.
launcher_script = SourceFileLoader('launcher_script',
                                   os.path.join(config.script_dir, 'launcher_script.py')).load_module()
launch_func = launcher_script.QLaunchFunctor(*config.launch_arguments)









Beispiel #45
0
def main():
    """Automate the Windows Task Manager through Sikwidgets.

    Opens the app, exercises the Applications/Processes tabs, clicks
    every process row owned by the 'system' user, and finally clicks
    the winlogon.exe row if one is found.
    """
    # The default directory Sikwidgets looks into for images of widgets
    # can be changed.  This is useful if the same application has
    # different looks across OSs.
    settings.IMAGES_PATH = "images-winxp-default"

    # settings.debug() prints more status messages and highlights
    # windows, widgets, and mouse actions.
    # Note: the program will run significantly slower.
    #settings.debug()

    # This setting changes how much an image is compacted for processing
    # as well as slightly changing the similarity threshold for finding
    # image matches.
    # settings.accuracy_low()
    # settings.accuracy_med()
    settings.accuracy_high()

    # Pretty self-explanatory.
    # settings.mouse_speed_slow()
    # settings.mouse_speed_med()
    settings.mouse_speed_fast()

    tm = TaskManager()
    # Explicitly open the app by running the open_cmd.
    tm.open()

    # Wait 5 seconds before scanning the focused window to see if it
    # matches the description of any of the defined Windows in the app.
    # If it does, the appropriate window is instantiated and returned.
    # Otherwise, this will be None.
    tasks = tm.focused_window(5)
    if not tasks:
        # Parenthesized single-argument print produces identical output
        # under Python 2 and 3; the original py2-only print statement
        # was a SyntaxError on Python 3.
        print("Task Manager window not found")
        return

    # Have some fun with the window :)
    tasks.applications_tab.click()
    tasks.processes_tab.click()
    #tasks.processes_table.column['user_name'].click()
    #tasks.processes_table.column['cpu'].click()
    #tasks.processes_table.column['memory'].click()
    #tasks.processes_table.column['image_name'].click()

    # Since there is no java.exe folder under the processes_table folder,
    # looking for 'java.exe' will result in the text of each row being
    # read (using Sikuli's OCR) and compared against what we specified.
    #cell = tasks.processes_table.column['image_name'].first_cell_matching('java.exe')
    # If we found a 'java.exe' cell, click it!
    #if cell:
    #    cell.click()

    # Click every process row owned by the 'system' user.
    rows = tasks.processes_table.rows_where(user_name='system')
    for row in rows:
        row.scroll_to()
        row.click()

    row = tasks.processes_table.first_row_where(image_name='winlogon.exe',
                                                user_name='system')
    if row:
        print("Found the row")
        row.click()
Beispiel #46
0
def main():
    """Start the node's task manager and poll the task DB for new probes.

    Loads the local node configuration, seeds a few hard-coded probes,
    then loops forever re-reading the task storage every 5 seconds and
    scheduling any Ping/Trace probes assigned to this node.  Stops
    cleanly on Ctrl-C.
    """
    logging.basicConfig(filename='log.main',
                        level=logging.DEBUG,
                        filemode='w',
                        format='%(asctime)s %(levelname)s %(message)s')

    logging.info('Loading local configration...')
    config = Configuration()
    node_details = config.load_node_main()

    logging.info('Loading task_manager...')
    task_manager = TaskManager(node_details)

    logging.info('Setting up task storage db...')
    task_storage = TaskStorage()

    task_manager.start()

    try:
        # Seed a few fixed probes before entering the polling loop.
        task_manager.add(PingProbe('149.210.184.36',
                                   recurrence_time=1,
                                   run_on_nodes=['trucks']))
        task_manager.add(TraceProbe('8.8.8.8',
                                    recurrence_time=3))
        task_manager.add(PingProbe('10.0.0.1',
                                   run_on_nodes=['miles']))

        # Map the stored task type name to its probe class; unknown
        # types are silently skipped, exactly as the original if-chain did.
        probe_types = {'PingProbe': PingProbe, 'TraceProbe': TraceProbe}

        while True:
            # Here we can send probes to the task_manager
            # e.g. task_manager.add(IcmpProbe('127.0.0.1'))
            for task in task_storage.get_tasks():
                if node_details['name'] not in task['run_on_nodes']:
                    continue
                probe_cls = probe_types.get(task['type'])
                if probe_cls is None:
                    continue
                task_manager.add(probe_cls(task['dest_addr'],
                                           recurrence_time=task['recurrence_time'],
                                           recurrence_count=task['recurrence_count'],
                                           run_on_nodes=task['run_on_nodes']))
            time.sleep(5)
    except KeyboardInterrupt:
        task_manager.stop()
        print("\nThanks for joining!\n")