Example #1
def logger(pipe, log_file, node_list, manager, startTime, interval, nodes, services, close_pipe):
    f = open("{0}_results.csv".format(log_file), 'w', newline='')
    f2 = open("{0}_observations.csv".format(log_file), 'w', newline='')
    close_flag = False
    logWriter = csv.writer(f, dialect='excel')
    logWriter2 = csv.writer(f2, dialect='excel')
    logWriter.writerow(["SQL CPU", "Web Worker CPU", "SQL Memory", "Web Worker Memory", "# SQL Containers", "# Web Worker Containers", "Delta Requests", "# Requests", "Iteration", "Minutes", "Seconds"])
    logWriter2.writerow(["SQL CPU", "Web Worker CPU", "SQL Memory", "Web Worker Memory", "Minutes", "Seconds"])
    #services = {}
    #nodes = {}
    #getNodeIDs(node_list, nodes)
    #getServices(services, manager)
    sql_cpu_avg = 0
    web_worker_cpu_avg = 0
    sql_mem_avg = 0
    web_worker_mem_avg = 0
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    for service_name, service in services.items():
        get_tasks(service, manager)
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages, web_worker_mem_usages, nodes)
    diff_time = time.time() - startTime
    logWriter2.writerow([sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg, diff_time//60, diff_time%60])
    while not close_flag:
        while not close_pipe.poll():
            time.sleep(interval)
            sql_cpu_usages = []
            sql_mem_usages = []
            web_worker_cpu_usages = []
            web_worker_mem_usages = []
            if pipe.poll():
                pipe_tuple = pipe.recv()
                if pipe_tuple == "close":
                    print("Logger shutting down")
                    close_flag = True
                    f.close()
                    f2.close()
                else:
                    if pipe_tuple[11]:
                        # The controller flags that the services were rescaled,
                        # so refresh the task lists before logging.
                        for service_name, service in services.items():
                            get_tasks(service, manager)
                    logWriter.writerow(pipe_tuple[:11])
            sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages, web_worker_mem_usages, nodes)
            diff_time = time.time() - startTime
            logWriter2.writerow([sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg, diff_time//60, diff_time%60])
            
        while not close_flag:
            if pipe.poll():
                pipe_tuple = pipe.recv()
                if pipe_tuple == "close":
                    print("Logger shutting down")
                    close_flag = True
                    f.close()
                    f2.close()
                else:                
                    logWriter.writerow(pipe_tuple[:11])
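The logger above expects either the string "close" or a 12-element row on its pipe; the 12th element flags that the services were just rescaled, which makes the logger refresh its task lists before writing the row. Example #17 below spawns it exactly this way; a condensed sketch of that wiring, where the log file name and interval are illustrative and node_list, manager, nodes and services come from the surrounding module:

import multiprocessing
import time

output_pipe, log_pipe = multiprocessing.Pipe()
close_pipe, log_close_pipe = multiprocessing.Pipe()
start_time = time.time()

log_process = multiprocessing.Process(
    target=logger,
    args=(log_pipe, "run1", node_list, manager, start_time,
          0.25, nodes, services, log_close_pipe))
log_process.start()

# One result row: 11 metric fields plus the "services were rescaled" flag.
output_pipe.send([0.0, 0.0, 0.0, 0.0, 1, 2, 0, 0, 0, 0.0, 0.0, True])

# Shut the logger down: "close" on both pipes, then wait for it to exit.
output_pipe.send("close")
close_pipe.send("close")
log_process.join()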
Example #2
def web_manage_project(request):

    result = {'user': None}
    #if True:
    try:

        user, token = check_auth(request)

        project_id = request.GET['project_id']

        result['user'] = user

        result['project'] = get_project(user.id, project_id)
        
        result['tasks'] = get_tasks(project_id, completed=False)

        result['tickets'] = get_tickets(project_id)

        result['lists'] = [] #get_lists(project_id)

        #result['projects'] = get_projects(user);

    except:
        pass

    return result
Example #3
def web_task(request):

    result = {'user': None}
    #if True:
    try:

        user, token = check_auth(request)
        result['user'] = user

        task_id = request.GET['task_id']
        #project_id = request.GET['project_id']

        task = get_task(task_id)
        result['task'] = task

        result['comments'] = get_task_comments(task['id'])

        result['tasks'] = get_tasks(task['project_id'], completed=False)

        result['project'] = get_project(user.id, task['project_id'])


    except:
        pass

    return result
Example #4
def web_tasks(request):

    result = {'user': None}
    #if True:
    try:

        user, token = check_auth(request)
        result['user'] = user

        project_id = int(request.GET['project_id'])

        completed = False
        try:
            completed = int(request.GET['completed'])
        except:
            pass

        result['completed'] = completed

        result['tasks'] = get_tasks(project_id, completed)

        result['project'] = get_project(user.id, project_id)

    except:
        pass

    return result 
Example #5
def web_projectsettings(request):

    result = {'user': None}
    #if True:
    try:

        user, token = check_auth(request)
        result['user'] = user

        project_id = request.GET['project_id']
            
        result['project'] = get_project(user.id, project_id)

        result['organization_users'] = get_organization_users(user.organization_id)
        
        result['assigned_users'] = get_users_assigned_to_project(user.id, project_id)

        result['tasks'] = get_tasks(project_id)

        result['tickets'] = get_tickets(project_id)


    except:
        pass

    return result
Example #6
    def _prot_or_task(self):

        if self.protVtask.isChecked():
            self.running_protocol = True
            self.protocol_combo.clear()
            self.available_tasks = [
                i for i in os.listdir(protocol_dir) if '.prot' in i
            ]
            self.protocol_combo.addItems(['Select Protocol'] +
                                         self.available_tasks)

        else:
            self.running_protocol = False
            self.protocol_combo.clear()
            self.available_tasks = get_tasks(self.GUI.GUI_filepath)
            self.protocol_combo.addItems(['Select Task'] +
                                         self.available_tasks)
Example #7
def web_new_task(request):

    result = {'user': None}
    #if True:
    try:

        user, token = check_auth(request)
        result['user'] = user

        project_id = request.GET['project_id']

        result['project'] = get_project(user.id, project_id)

        result['assigned_users'] = get_users_assigned_to_project(user.id, project_id)

        result['tasks'] = get_tasks(project_id)

    except:
        pass

    return result
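Examples #2 through #5 and #7 all repeat the same prologue: authenticate the request, read project_id from the query string, look up the project, and swallow any failure with a bare except. A hedged sketch of a helper that factors that prologue out, reusing the check_auth and get_project calls these views already make; the helper name and its return shape are illustrative, not part of the original code:

def project_context(request):
    # Shared prologue for the project views above.
    result = {'user': None, 'project': None}
    try:
        user, token = check_auth(request)
        project_id = request.GET['project_id']
        result['user'] = user
        result['project'] = get_project(user.id, project_id)
        return result, user, project_id
    except Exception:
        # Mirror the original views' behaviour of failing silently.
        return result, None, None

web_new_task, for example, would then only add its assigned_users and tasks lookups on top of the returned dict.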
Example #8
    def __init__(self, setup_id, GUI, parent=None):
        super(direct_pyboard_dialog, self).__init__(parent)
        self.setGeometry(10, 30, 500, 200)  # Left, top, width, height.
        self.setup_id = setup_id
        self.selected_task = 'None'
        self.GUI = GUI
        layoutH = QtGui.QHBoxLayout(self)

        #
        self.PYC = self.GUI.controllers[self.setup_id].PYC
        if not self.GUI.controllers[self.setup_id].data_consumers:
            self.GUI.controllers[self.setup_id].data_consumers = [self]
        else:
            self.GUI.controllers[self.setup_id].data_consumers.append(self)
        self.reject = self._done

        #self.setGeometry(10, 30, 400, 200) # Left, top, width, height.
        self.task_combo = QtGui.QComboBox()
        self.task_combo.addItems(['None'] + get_tasks(self.GUI.GUI_filepath))

        self.start_stop_button = QtGui.QPushButton('Start')
        self.start_stop_button.clicked.connect(self.start_stop)

        #self.onClose_chechbox = QtGui.Qte
        self.onClose_chechbox = QtGui.QCheckBox(
            "Stop task when closing dialog?")
        self.onClose_chechbox.setChecked(True)

        layout2 = QtGui.QVBoxLayout(self)
        layout2.addWidget(self.task_combo)
        layout2.addWidget(self.onClose_chechbox)
        layout2.addWidget(self.start_stop_button)

        self.log_textbox = QtGui.QTextEdit()
        self.log_textbox.setFont(QtGui.QFont('Courier', 9))
        self.log_textbox.setReadOnly(True)

        layoutH.addLayout(layout2)
        layoutH.addWidget(self.log_textbox)
Example #9
    def fromPHIDs(phids):
        tasks = []
        raw = utils.get_tasks(phids)
        for r in raw:
            tasks.append(Task(r))
        return tasks
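The same method can be collapsed into a comprehension; a minimal equivalent sketch, with a @staticmethod decorator added on the assumption that this is an alternative constructor on the Task class (the decorator is not in the original snippet):

    @staticmethod
    def fromPHIDs(phids):
        # Wrap each raw record returned by utils.get_tasks in a Task.
        return [Task(r) for r in utils.get_tasks(phids)]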
Example #10
def solver(data):
    # initialise solver
    solver = pywrapcp.Solver("allocations")

    tasks = utils.get_tasks(data['scheduledTasks'])
    workers = utils.get_workers(data['workers'])

    cost_matrix = data['costMatrix']
    solver_option = data['solverOption']
    time_limit = data['timeLimit']
    extra_constraints = data['constraints'] if 'constraints' in data else {}

    print('solver_option', solver_option)

    num_tasks = len(tasks)
    num_workers = len(workers)

    # declare decision variables and a reference matrix
    assignment_costs = []
    assignments = []
    assignments_ref = []
    for worker in workers:
        worker_assignments = []
        worker_assignments_ref = []
        worker_assignment_costs = []
        for task in tasks:
            worker_assignments.append(
                solver.IntVar(0, 1, f'worker: {worker.id}, task: {task.id}'))
            worker_assignments_ref.append(Worker_task(worker, task))
            worker_assignment_costs.append(cost_matrix[str(
                worker.id)][task.id])
        assignments.append(worker_assignments)
        assignments_ref.append(worker_assignments_ref)
        assignment_costs.append(worker_assignment_costs)

    constraints = Constraints(
        tasks,
        workers,
        assignment_costs,
        assignments,
        assignments_ref,
    )

    # objective

    # Only add objective if optimisation requested
    if solver_option != 'noOptimisation':
        total_cost = solver.IntVar(0, 3000, "total_cost")

        solver.Add(total_cost == solver.Sum([
            assignment_costs[i][j] * assignments[i][j]
            for i in range(num_workers) for j in range(num_tasks)
        ]))

        objective = solver.Minimize(total_cost, 5)

    # constraints

    # each task is assigned its given qty
    constraints.add_task_qty_constraint(solver)

    # a worker cannot work on two tasks that are on at the same time
    constraints.add_same_worker_same_task_time(solver)

    # a worker can at most be assigned to the same orderTask date once (i.e. cannot take up multiple qty)
    # maybe add any cannot work constraints
    # maybe add any must work constraints
    must_map = extra_constraints[
        'mustWork'] if 'mustWork' in extra_constraints else None
    cannot_map = extra_constraints[
        'cannotWork'] if 'cannotWork' in extra_constraints else None
    constraints.must_cannot_work(solver, must_map, cannot_map)

    # add must combined must work
    if 'combinedMustWork' in extra_constraints:
        constraints.combined_must_work_all(
            solver, extra_constraints['combinedMustWork'])

    # add at least has to work constraint
    if 'atLeastWork' in extra_constraints:
        constraints.add_at_least_work_task(solver,
                                           extra_constraints['atLeastWork'])

    # add total time fatigue constraints
    if 'timeFatigueTotal' in extra_constraints:
        constraints.add_time_fatigue_total(
            solver, extra_constraints['timeFatigueTotal'])

    # add total overall time fatigue constraints
    if 'overallTimeFatigueTotal' in extra_constraints:
        constraints.add_overall_total_fatigue_time(
            solver, extra_constraints['overallTimeFatigueTotal'])

    # add consecutive fatigue constraints
    if 'overallTimeFatigueConsecutive' in extra_constraints:
        constraints.add_overall_consecutive_total_fatigue_time(
            solver, extra_constraints['overallTimeFatigueConsecutive'])

    # add unavailable time constraints
    if 'unavailable' in extra_constraints:
        constraints.add_unavailability(solver,
                                       extra_constraints['unavailable'])

    # add buddy constraints
    if 'buddy' in extra_constraints:
        constraints.add_buddy(solver, extra_constraints['buddy'])

    # add nemesis constraints
    if 'nemesis' in extra_constraints:
        constraints.add_nemesis(solver, extra_constraints['nemesis'])

    # workers must be assigned to at least n tasks (this could change later per worker)
    # [solver.Add(solver.Sum(assignments[i][j] for j in range(num_tasks)) >= 3) for i in range(num_workers)]

    # Create the decision builder.

    # Want to sort the decision variables by least cost to the solution

    if solver_option != 'noOptimisation':
        assignment_ref_copy = copy.deepcopy(assignments_ref)
        assignment_ref_copy_flat = [
            assignment_ref_copy[i][j] for i in range(num_workers)
            for j in range(num_tasks)
        ]
        # Sort by least cost
        assignment_ref_copy_flat.sort(key=lambda wrk_tsk: cost_matrix[str(
            wrk_tsk.worker.id)][wrk_tsk.task.id])
        # map to assignment vars
        assignments_flat = [
            assignments[ref.worker.index][ref.task.index]
            for ref in assignment_ref_copy_flat
        ]
    else:
        assignments_flat = [
            assignments[i][j] for i in range(num_workers)
            for j in range(num_tasks)
        ]

    db = solver.Phase(assignments_flat, solver.CHOOSE_FIRST_UNBOUND,
                      solver.ASSIGN_MAX_VALUE)

    # Create solution collector depending on solver option requested
    if (solver_option == 'optimise'
            and time_limit is not None) or solver_option == 'optimal':
        collector = solver.BestValueSolutionCollector(
            False)  # False finds minimum as best solution
    else:
        collector = solver.FirstSolutionCollector()

    # Add decision vars to collector
    collector.Add(assignments_flat)

    monitor = pywrapcp.SearchMonitor(solver)

    monitor.RestartSearch()

    # Set time limit if given
    if solver_option == 'optimise' and time_limit is not None:
        print('time_limit', time_limit)
        solver_time_limit = solver.TimeLimit(time_limit * 60 * 1000)

    # Solve appropriately
    if solver_option == 'optimal':
        collector.AddObjective(total_cost)
        status = solver.Solve(db, [objective, collector, monitor])
    elif solver_option == 'optimise' and time_limit is not None:
        collector.AddObjective(total_cost)
        status = solver.Solve(
            db, [objective, collector, solver_time_limit, monitor])
    else:
        status = solver.Solve(db, [collector])

    print("Time:", solver.WallTime(), "ms")
    print('status', status)

    # If solution found, collect all assignments
    if status:
        solution_by_task = {}
        solution_by_worker = {}
        for i in range(num_workers):
            for j in range(num_tasks):
                if collector.Value(0, assignments[i][j]) == 1:
                    worker_task = assignments_ref[i][j]
                    # Group solution by worker and task

                    if worker_task.task.id in solution_by_task:
                        solution_by_task[worker_task.task.id] = [
                            *solution_by_task[worker_task.task.id],
                            worker_task.worker.id
                        ]
                    else:
                        solution_by_task[worker_task.task.id] = [
                            worker_task.worker.id
                        ]

                    if worker_task.worker.id in solution_by_worker:
                        solution_by_worker[worker_task.worker.id] = [
                            *solution_by_worker[worker_task.worker.id],
                            worker_task.task.id
                        ]
                    else:
                        solution_by_worker[worker_task.worker.id] = [
                            worker_task.task.id
                        ]

        if solver_option == 'optimal' or (solver_option == 'optimise'
                                          and time_limit is not None):
            objective_value = collector.ObjectiveValue(0)
        else:
            objective_value = get_non_optimised_cost(cost_matrix,
                                                     solution_by_task)

        return {
            "status": status,
            "solutionByTask": solution_by_task,
            "solutionByWorker": solution_by_worker,
            "objectiveValue": objective_value
        }

    return {
        "status": status,
        "solutionByTask": None,
        "solutionByWorker": None,
        "objectiveValue": None
    }
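For reference, a hedged sketch of the input shape the solver expects, with keys taken from the code above and values that are purely illustrative (the real scheduledTasks and workers entries are whatever utils.get_tasks and utils.get_workers accept, which is not shown here):

data = {
    'scheduledTasks': [],        # raw task records for utils.get_tasks
    'workers': [],               # raw worker records for utils.get_workers
    'costMatrix': {},            # cost_matrix[str(worker.id)][task.id] -> cost
    'solverOption': 'optimise',  # 'optimal', 'optimise' or 'noOptimisation'
    'timeLimit': 5,              # minutes; converted to ms for solver.TimeLimit
    'constraints': {},           # optional: mustWork, cannotWork, buddy, nemesis, ...
}
result = solver(data)            # returns status plus solutionByTask / solutionByWorker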
Example #11
def get_all_tasks(message):
    user_id = message.chat.id
    all_tasks = utils.get_tasks(user_id)
    if all_tasks:
        return group_tasks(user_id, all_tasks)
    bot.send_message(user_id, 'Task list is empty')
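The handler above is presumably wired into a Telegram bot; a minimal sketch of how it might be registered, assuming the bot object it already uses is a pyTelegramBotAPI telebot.TeleBot instance and that /tasks is the intended command (both are assumptions, not taken from the snippet):

# Hypothetical registration; `bot` is the telebot.TeleBot instance the handler already uses.
bot.register_message_handler(get_all_tasks, commands=['tasks'])
bot.infinity_polling()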
Example #12
    def __init__(self, GUI, parent=None):
        super(new_experiment_dialog, self).__init__(parent)

        self.GUI = GUI
        self.setGeometry(100, 30, 1300, 600)  # Left, top, width, height.

        self.df_setup_tmp = pd.DataFrame(columns=[
            'COM', 'COM_AC', 'Setup_ID', 'in_use', 'connected', 'User',
            'Experiment', 'Protocol', 'Mouse_training', 'Door', 'n_mice'
        ])

        self.df_mouse_tmp = pd.DataFrame(columns=[
            'Mouse_ID', 'RFID', 'Sex', 'Age', 'Experiment', 'Protocol',
            'Stage', 'Task', 'User', 'Start_date', 'Current_weight',
            'Start_weight', 'is_training', 'is_assigned', 'training_log',
            'Setup_ID', 'in_system'
        ])

        #print(self.GUI.setup_df)
        self.used_setups = []
        self.used_mice = []
        self.global_task = True
        self.running_protocol = False

        self.left_column = QtGui.QVBoxLayout()

        #### Data related to experiment
        self.exp_name_groupbox = QtGui.QGroupBox('Status')

        self.expLabel = QtGui.QLabel()
        self.expLabel.setText("Experiment Name:")

        self.expName = QtGui.QLineEdit()

        self.protVtask = QtGui.QCheckBox("Run Protocol")
        self.protVtask.setChecked(False)
        self.protVtask.stateChanged.connect(self._prot_or_task)

        self.shared_protocol = QtGui.QCheckBox("Share Protocol")
        self.shared_protocol.setChecked(True)
        self.shared_protocol.stateChanged.connect(self._enable_prot_sel)

        self.protocol_combo = QtGui.QComboBox()
        self.available_tasks = get_tasks(self.GUI.GUI_filepath)
        self.protocol_combo.addItems(['Select Task'] + self.available_tasks)

        # First stage of specifying the experiment: set the name and (potentially) the protocol.
        self.exp_GOButton = QtGui.QPushButton()
        self.exp_GOButton.setText("Set")
        self.exp_GOButton.clicked.connect(self.set_name)

        self.name_layout = QtGui.QHBoxLayout()
        self.name_layout2 = QtGui.QHBoxLayout()
        self.name_layout.addWidget(self.expLabel)
        self.name_layout.addWidget(self.expName)
        self.name_layout.addWidget(self.shared_protocol)
        self.name_layout.addWidget(self.protVtask)

        self.name_layout2.addWidget(self.protocol_combo)
        self.name_layout2.addWidget(self.exp_GOButton)

        self.nameVlayout = QtGui.QVBoxLayout()
        self.nameVlayout.addLayout(self.name_layout)
        self.nameVlayout.addLayout(self.name_layout2)
        self.exp_name_groupbox.setLayout(self.nameVlayout)

        ## Column to add setups to the experiment
        self.setup_groupbox = QtGui.QGroupBox('Setups')

        self.setups_column = QtGui.QVBoxLayout()

        #populate this column

        #Controls for adding a setup to an experiment
        self.CAT = QtGui.QGroupBox(
            'Add Setup')  #cage_setup_table(GUI=self.GUI,exp_dialog=self)
        self.cat_layout = QtGui.QHBoxLayout()

        self.setup_combo = QtGui.QComboBox()
        self.available_setups = [
            rw['Setup_ID'] for kk, rw in self.GUI.setup_df.iterrows()
            if rw['connected'] and not rw['in_use']
        ]
        self.setup_combo.addItems(['Select Setup'] + self.available_setups)
        self.setup_combo.currentTextChanged.connect(self.on_scb_changed)

        self.add_button = QtGui.QPushButton()
        self.add_button.setText("Add Setup")
        self.add_button.setEnabled(False)
        self.prot_label = Qt.QtWidgets.QLabel(
            "Protocol:          ")  #protocol label
        self.exp_label = Qt.QtWidgets.QLabel(
            "Experiment:         ")  #experiment label

        self.cat_layout.addWidget(self.setup_combo)
        self.cat_layout.addWidget(self.exp_label)
        self.cat_layout.addWidget(self.prot_label)

        self.cat_layout.addWidget(self.add_button)

        self.add_button.clicked.connect(self.add_cage)

        self.CAT.setLayout(self.cat_layout)

        #####################################################
        #############     Overview of setups     ############
        #####################################################
        self.CLT = cage_list_table(GUI=self.GUI, tab=self)
        self.setups_column.addWidget(self.CAT, 1)
        self.setups_column.addWidget(self.CLT, 10)
        self.setup_groupbox.setLayout(self.setups_column)
        self.setup_groupbox.setEnabled(False)
        #self.CLT

        self.left_column.addWidget(self.exp_name_groupbox)
        self.left_column.addWidget(self.setup_groupbox)

        ###############################################################
        ###############################################################

        ####################################################
        #############      Mouse Adder Box      ############
        ####################################################

        self.MAT = QtGui.QGroupBox("Add Mouse")

        self.mat_layout = QtGui.QVBoxLayout()
        self.matL1 = QtGui.QHBoxLayout()
        self.matL2 = QtGui.QHBoxLayout()

        self.mouse_name_label = QtGui.QLabel("Mouse_ID:")
        self.mouse_name = QtGui.QLineEdit("")

        self.RFID_label = QtGui.QLabel("RFID:")
        self.RFID = QtGui.QLineEdit("")

        self.sex_label = QtGui.QLabel("Sex:")
        self.sex = QtGui.QComboBox()
        self.sex.addItems([
            'F',
            'M',
        ])

        self.age_label = QtGui.QLabel("Age (weeks):")
        self.age = QtGui.QLineEdit("")

        self.weight_label = QtGui.QLabel("Weight (g):")
        self.weight = QtGui.QLineEdit("")

        self.add_mouse_button = QtGui.QPushButton("Add Mouse")
        self.add_mouse_button.clicked.connect(self.add_mouse)

        self.mouse_prot = QtGui.QComboBox()
        self.mouse_prot.addItems(['Select Task'] + self.available_tasks)

        ###############################################################
        ###############################################################

        ####################################################
        ###########      Set Variables Table      ##########
        ####################################################
        self.filter_categories = ['Setup', 'Mouse']
        self.vars_filter_checkbox = QtGui.QCheckBox("Filter mice")

        self.vars_combo_type = QtGui.QComboBox()
        self.vars_combo_type.addItems(['Filter by'] + self.filter_categories)

        self.vars_combo_ID = QtGui.QComboBox()
        self.vars_combo_ID.addItems(['Filter by'] + self.filter_categories)

        #self.vars_hlayout1 = QtGui.QHBoxLayout(self)
        #self.vars_hlayout1.addWidget(self.vars_filter_checkbox)
        #self.vars_hlayout1.addWidget(self.vars_combo_type)
        #self.vars_hlayout1.addWidget(self.vars_combo_ID)
        #self.task_combo.currentIndexChanged.connect(self.picked_task)

        self.mouse_var_table = variables_table(GUI=self.GUI)

        ###############################################################
        ###############################################################

        self.matL1.addWidget(self.mouse_name_label)
        self.matL1.addWidget(self.mouse_name)

        self.matL1.addWidget(self.RFID_label)
        self.matL1.addWidget(self.RFID)

        self.matL1.addWidget(self.sex_label)
        self.matL1.addWidget(self.sex)

        self.matL2.addWidget(self.mouse_prot)
        self.matL2.addWidget(self.age_label)
        self.matL2.addWidget(self.age)

        self.matL2.addWidget(self.weight_label)
        self.matL2.addWidget(self.weight)
        self.matL2.addWidget(self.add_mouse_button)

        self.mat_layout.addLayout(self.matL1)
        self.mat_layout.addLayout(self.matL2)
        self.MAT.setLayout(self.mat_layout)
        #self.MAT.setEnabled(False)

        self.MICE = QtGui.QGroupBox('Mouse Overview')
        self.mice_column = QtGui.QVBoxLayout()
        self.MLT = mouse_list_table(GUI=self.GUI, tab=self)

        self.mice_column.addWidget(self.MAT)
        self.mice_column.addWidget(self.MLT)
        #self.mice_column.addLayout(self.vars_hlayout1)
        self.mice_column.addWidget(self.mouse_var_table)
        self.MICE.setLayout(self.mice_column)

        ####################################################
        #############      Run Experiments      ############
        ####################################################

        self.runGroup = QtGui.QGroupBox("Run")
        self.run_layout = QtGui.QHBoxLayout()
        self.run_button = QtGui.QPushButton('Run Protocol')
        self.run_button.clicked.connect(self.run_experiment)
        self.run_layout.addWidget(self.run_button)
        self.runGroup.setLayout(self.run_layout)

        self.right_column = QtGui.QVBoxLayout()
        self.right_column.addWidget(self.MICE)
        self.right_column.addWidget(self.runGroup)

        self.all_columns = QtGui.QHBoxLayout()
        self.all_columns.addLayout(self.left_column)
        self.all_columns.addLayout(self.right_column)

        self.vLayout = QtGui.QVBoxLayout(self)
        self.vLayout.addLayout(self.name_layout)
        self.vLayout.addLayout(self.all_columns)

        self.MICE.setEnabled(False)
Example #13
    def __init__(self, parent=None):

        super(QtGui.QWidget, self).__init__(parent)

        self.GUI = self.parent()

        self.prot_dict = {}
        self.stage_dict = {}
        self.task_variables = []

        #add stage to protocol
        self.ATP = QtGui.QGroupBox("Add stage to protocol")

        self.protocol_namer = QtGui.QLineEdit("")
        self.protocol_namer_button = QtGui.QPushButton("Set protocol name")
        self.protocol_namer_button.clicked.connect(self.set_protocol_name)

        self.clear_button = QtGui.QPushButton("Clear")
        self.clear_button.clicked.connect(self.clear_all)

        self.save_button = QtGui.QPushButton("Save Protocol")
        self.save_button.clicked.connect(self.save)

        self.add_stage_button = QtGui.QPushButton("Add task stage")
        self.add_stage_button.clicked.connect(self.add_stage)

        self.task_combo = QtGui.QComboBox()

        self.task_set_button = QtGui.QPushButton("Set")
        self.task_set_button.clicked.connect(self.set_stage_task)
        self.task_set_button.setEnabled(False)

        self.available_tasks = get_tasks(self.GUI.GUI_filepath)
        self.task_combo.addItems(['Select Task'] + self.available_tasks)
        self.task_combo.currentIndexChanged.connect(self.picked_task)

        #track value stuff
        self.trackV_label = QtGui.QLabel("Track value")
        self.trackV_combo = QtGui.QComboBox()
        self.trackV_add = QtGui.QPushButton("Add")
        self.trackV_add.clicked.connect(self.trackV_change)

        self.trackV_combo.setEnabled(False)

        #threshold value stuff
        self.threshV_label = QtGui.QLabel("Threshold")
        self.threshV_combo = QtGui.QComboBox()
        self.threshV_add = QtGui.QPushButton("Add")
        self.threshV_add.clicked.connect(self.threshV_change)

        self.threshV_combo.setEnabled(False)
        self.threshV_value = QtGui.QLineEdit()

        ## Default value stuff
        self.defaultV_label = QtGui.QLabel("Default value")
        self.defaultV_combo = QtGui.QComboBox()
        self.defaultV_add = QtGui.QPushButton("Add")
        self.defaultV_value = QtGui.QLineEdit()

        self.defaultV_combo.setEnabled(False)

        self.Hlayout1 = QtGui.QHBoxLayout()
        self.Hlayout2 = QtGui.QHBoxLayout()
        self.Hlayout3 = QtGui.QHBoxLayout()
        self.Hlayout4 = QtGui.QHBoxLayout()

        self.Hlayout1.addWidget(self.protocol_namer)
        self.Hlayout1.addWidget(self.protocol_namer_button)
        self.Hlayout1.addWidget(self.task_combo)
        self.Hlayout1.addWidget(self.task_set_button)

        self.Hlayout2.addWidget(self.threshV_label)
        self.Hlayout2.addWidget(self.threshV_combo)
        self.Hlayout2.addWidget(self.threshV_value)
        self.Hlayout2.addWidget(self.threshV_add)

        self.Hlayout3.addWidget(self.defaultV_label)
        self.Hlayout3.addWidget(self.defaultV_combo)
        self.Hlayout3.addWidget(self.defaultV_value)
        self.Hlayout3.addWidget(self.defaultV_add)

        self.Hlayout4.addWidget(self.trackV_label)
        self.Hlayout4.addWidget(self.trackV_combo)
        self.Hlayout4.addWidget(self.trackV_add)
        #self.Hlayout4.addWidget(self.add_stage_button)

        self.Vlayout_add = QtGui.QVBoxLayout()
        self.Vlayout_add.addLayout(self.Hlayout1)
        self.Vlayout_add.addLayout(self.Hlayout2)
        self.Vlayout_add.addLayout(self.Hlayout3)
        self.Vlayout_add.addLayout(self.Hlayout4)
        self.Vlayout_add.addWidget(self.add_stage_button)

        self.ATP.setLayout(self.Vlayout_add)

        self.protocol_table = protocol_table(GUI=self.GUI, tab=self)

        self.dummy_overview = QtGui.QGroupBox('Current Stage Overview')
        self.dummy_layout = QtGui.QVBoxLayout()
        self.protocol_table_dummy = protocol_table(GUI=self.GUI,
                                                   tab=self,
                                                   nRows=1)
        self.dummy_layout.addWidget(self.protocol_table_dummy)
        self.dummy_overview.setLayout(self.dummy_layout)

        self.Vlayout = QtGui.QVBoxLayout(self)

        self.Vlayout.addWidget(self.ATP, 4)
        self.Vlayout.addWidget(self.dummy_overview, 1)
        self.Vlayout.addWidget(self.protocol_table, 10)

        self.save_clear_layout = QtGui.QHBoxLayout()
        self.save_clear_layout.addWidget(self.clear_button)
        self.save_clear_layout.addWidget(self.save_button)
        self.Vlayout.addLayout(self.save_clear_layout)
Example #14
def main(arguments):
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--gpu_id", help="gpu id to use", type=int, default=0)
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--use_pytorch", help="1 to use PyTorch", type=int, default=0)
    parser.add_argument("--out_dir", help="Dir to write preds to", type=str, default='')
    parser.add_argument("--log_file", help="File to log to", type=str, default='')
    parser.add_argument("--load_data", help="0 to read data from scratch", type=int, default=1)

    # Model options
    parser.add_argument("--batch_size", help="Batch size to use", type=int, default=16)
    parser.add_argument("--model_dir", help="path to model folder")
    parser.add_argument("--prefix1", help="prefix to model 1", default='nli_large_bothskip_parse')
    parser.add_argument("--prefix2", help="prefix to model 2", default='nli_large_bothskip')
    parser.add_argument("--word_vec_file", help="path to pretrained vectors")
    parser.add_argument("--strategy", help="Approach to create sentence embedding last/max/best",
                        choices=["best", "max", "last"], default="best")

    # Task options
    parser.add_argument("--tasks", help="Tasks to evaluate on, as a comma separated list", type=str)
    parser.add_argument("--max_seq_len", help="Max sequence length", type=int, default=40)


    # Classifier options
    parser.add_argument("--cls_batch_size", help="Batch size to use for the classifier", type=int,
                        default=16)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    log_file = os.path.join(args.out_dir, "results.log")
    fileHandler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(fileHandler)
    logging.info(args)
    torch.cuda.set_device(args.gpu_id)

    # Set up SentEval
    params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': args.use_pytorch, 'kfold': 10,
            'max_seq_len': args.max_seq_len, 'batch_size': args.batch_size, 'load_data': args.load_data,
            'seed': args.seed}
    params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': args.cls_batch_size,
            'tenacity': 5, 'epoch_size': 4, 'cudaEfficient': True}

    # Load model
    # import GenSen package
    sys.path.insert(0, args.model_dir)
    from gensen import GenSen, GenSenSingle

    ckpt_dir = os.path.join(args.model_dir, "data", "models")
    gensen_1 = GenSenSingle(model_folder=ckpt_dir, filename_prefix=args.prefix1,
                            pretrained_emb=args.word_vec_file, cuda=bool(args.gpu_id >= 0))
    gensen_2 = GenSenSingle(model_folder=ckpt_dir, filename_prefix=args.prefix2,
                            pretrained_emb=args.word_vec_file, cuda=bool(args.gpu_id >= 0))
    gensen = GenSen(gensen_1, gensen_2)
    global STRATEGY
    STRATEGY = args.strategy
    params_senteval['gensen'] = gensen

    # Do SentEval stuff
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    tasks = get_tasks(args.tasks)
    results = se.eval(tasks)
    write_results(results, args.out_dir)
    logging.info(results)
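The get_tasks helper is not shown in these SentEval scripts, but the --tasks help string says the value is a comma-separated list. A hedged guess at what it might look like; the default task names are illustrative SentEval task identifiers, not taken from the original code (Examples #15 and #16 call the same helper):

def get_tasks(task_str):
    # Split a comma-separated --tasks argument into a list of SentEval task names.
    if not task_str:
        return ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'SICKEntailment']  # illustrative default
    return [t.strip() for t in task_str.split(',') if t.strip()]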
Example #15
def main(arguments):
    parser = argparse.ArgumentParser(description='DisSent SentEval Evaluation',
                    formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--gpu_id", type=int, default=0, help="GPU ID, we map all model's gpu to this id")
    parser.add_argument("--use_pytorch", help="1 to use PyTorch", type=int, default=1)
    parser.add_argument("--log_file", help="File to log to", type=str)
    parser.add_argument("--load_data", help="0 to read data from scratch", type=int, default=1)
    parser.add_argument("--out_dir", help="Dir to write preds to", type=str, default='')

    # Task options
    parser.add_argument("--tasks", help="Tasks to evaluate on, as a comma separated list", type=str)
    parser.add_argument("--max_seq_len", help="Max sequence length", type=int, default=40)

    # Model options
    parser.add_argument("--word_vec_file", type=str)
    parser.add_argument("--model_dir", type=str, help="Directory containing model snapshots")
    parser.add_argument("--outputmodelname", type=str, default='dis-model')
    parser.add_argument("--search_start_epoch", type=int, default=-1, help="Search from [start, end] epochs ")
    parser.add_argument("--search_end_epoch", type=int, default=-1, help="Search from [start, end] epochs")
    parser.add_argument("--batch_size", help="Batch size to use", type=int, default=64)

    # Classifier options
    parser.add_argument("--cls_batch_size", help="Batch size to use", type=int, default=64)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    log_file = os.path.join(args.out_dir, "results.log")
    file_handler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(file_handler)
    logging.info(args)

    # define senteval params
    params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': args.use_pytorch, 'kfold': 10,
                       'max_seq_len': args.max_seq_len, 'batch_size': args.batch_size,
                       'load_data': args.load_data, 'seed': args.seed}
    params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': args.cls_batch_size,
                                     'tenacity': 5, 'epoch_size': 4, 'cudaEfficient': args.gpu_id > 0}

    # set gpu device
    torch.cuda.set_device(args.gpu_id)
    # We map cuda to the current cuda device, this only works when we set args.gpu_id = 0
    map_locations = {}
    for d in range(4):
        if d != args.gpu_id:
            map_locations['cuda:{}'.format(d)] = "cuda:{}".format(args.gpu_id)

    tasks = get_tasks(args.tasks)

    # collect number of epochs trained in directory
    model_files = filter(lambda s: args.outputmodelname + '-' in s and 'encoder' not in s,
                         os.listdir(args.model_dir))
    epoch_numbers = map(lambda s: s.split(args.outputmodelname + '-')[1].replace('.pickle', ''), model_files)
    # ['8', '7', '9', '3', '11', '2', '1', '5', '4', '6']
    # this is discontinuous :)
    #epoch_numbers = map(lambda i: int(i), epoch_numbers)
    epoch_numbers = map(int, epoch_numbers)
    epoch_numbers = sorted(epoch_numbers)  # now sorted

    # original setting
    if args.search_start_epoch == -1 or args.search_end_epoch == -1:
        # Load model
        MODEL_PATH = pjoin(args.model_dir, args.outputmodelname + ".pickle.encoder")

        params_senteval['infersent'] = torch.load(MODEL_PATH, map_location=map_locations)
        params_senteval['infersent'].set_glove_path(args.word_vec_file)

        se = senteval.engine.SE(params_senteval, batcher, prepare)
        results = se.eval(tasks)
        write_results(results, args.out_dir)
        logging.info(results)
    else:
        # search through all epochs
        # Materialise the range filter so it can be checked with len() and iterated below.
        filtered_epoch_numbers = [i for i in epoch_numbers
                                  if args.search_start_epoch <= i <= args.search_end_epoch]
        assert len(filtered_epoch_numbers) >= 1, \
                "the epoch search criteria [{}, {}] returns null, available epochs are: {}".format(
                args.search_start_epoch, args.search_end_epoch, epoch_numbers)

        for epoch in filtered_epoch_numbers:
            logging.info("******* Epoch {} Evaluation *******".format(epoch))
            model_name = args.outputmodelname + '-{}.pickle'.format(epoch)
            model_path = pjoin(args.model_dir, model_name)

            dissent = torch.load(model_path, map_location=map_locations)
            if args.gpu_id > -1:
                dissent = dissent.cuda()
            params_senteval['infersent'] = dissent.encoder  # this might be good enough
            params_senteval['infersent'].set_glove_path(args.word_vec_file)

            se = senteval.SentEval(params_senteval, batcher, prepare)
            results = se.eval(tasks)
            write_results(results, args.out_dir)
            logging.info(results)
Example #16
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--cuda", help="CUDA id to use", type=int, default=0)
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--use_pytorch",
                        help="1 to use PyTorch",
                        type=int,
                        default=1)
    parser.add_argument("--out_dir",
                        help="Dir to write preds to",
                        type=str,
                        default='.')

    # Task options
    parser.add_argument("--tasks",
                        help="Tasks to evaluate on, as a comma separated list",
                        type=str)
    parser.add_argument("--max_seq_len",
                        help="Max sequence length",
                        type=int,
                        default=40)
    parser.add_argument("--load_data",
                        help="0 to read data from scratch",
                        type=int,
                        default=1)

    # Model options
    parser.add_argument("--word_vec_file",
                        help="File to load vectors from",
                        type=str)
    parser.add_argument("--batch_size",
                        help="Batch size to use",
                        type=int,
                        default=16)

    # Classifier options
    parser.add_argument("--cls_batch_size",
                        help="Batch size to use for classifier",
                        type=int,
                        default=16)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s',
                        level=logging.DEBUG)
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    log_file = os.path.join(args.out_dir, "results.log")
    fileHandler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(fileHandler)
    logging.info(args)

    global PATH_TO_VEC
    PATH_TO_VEC = args.word_vec_file

    # SentEval params
    params_senteval = {
        'task_path': PATH_TO_DATA,
        'usepytorch': args.use_pytorch,
        'kfold': 10,
        'max_seq_len': args.max_seq_len,
        'batch_size': args.batch_size,
        'load_data': args.load_data,
        'seed': args.seed
    }
    params_senteval['classifier'] = {
        'nhid': 0,
        'optim': 'adam',
        'batch_size': args.cls_batch_size,
        'tenacity': 5,
        'epoch_size': 4,
        'cudaEfficient': args.cuda > 0
    }

    se = senteval.engine.SE(params_senteval, batcher, prepare)
    tasks = get_tasks(args.tasks)
    results = se.eval(tasks)
    write_results(results, args.out_dir)
    logging.info(results)
Example #17
def controller(input_pipe, number_of_processes, node_list, req_list, manager,
               polling_interval, polls_per_update, log_file, nodes, services):
    close_flag = False
    # Node list
    #node_list = ["192.168.56.102:4000", "192.168.56.103:4000", "192.168.56.101:4000"]
    #manager = "192.168.56.102:4000"
    #services = {}

    # upper and lower cpu usage thresholds where scaling should happen on
    cpu_upper_threshold = 50.0
    cpu_lower_threshold = 20.0
    # create list of processes and pipes
    process_list = []
    spike_list = []
    # pipes that main thread will read from and load threads will write to
    par_pipes = []
    spike_par_pipes = []
    # pipes that main thread will write to and load threads will read from
    child_pipes = []
    spike_child_pipes = []
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    sql_cpu_avg = 0
    sql_mem_avg = 0
    web_worker_mem_avg = 0
    web_worker_cpu_avg = 0
    num_web_workers = 2
    num_sql = 1
    num_requests = 0
    # Storage variables
    prev_sql_cpu_avg = 0
    prev_sql_mem_avg = 0
    prev_web_worker_mem_avg = 0
    prev_web_worker_cpu_avg = 0
    prev_num_web_workers = 0
    prev_num_sql = 0
    prev_num_requests = 0
    spike_size = 3
    # CREATE SPECIFIED NUMBER OF PROCESSES
    for i in range(0, number_of_processes):
        # Create new pipe
        par_pipe, child_pipe = multiprocessing.Pipe()

        par_pipes.append(par_pipe)
        child_pipes.append(child_pipe)

        temp_process = multiprocessing.Process(target=load_process,
                                               args=(req_list, child_pipes[i]))
        process_list.append(temp_process)

    for i in range(0, spike_size * 2):
        par_pipe, child_pipe = multiprocessing.Pipe()

        spike_par_pipes.append(par_pipe)
        spike_child_pipes.append(child_pipe)
        temp_process = multiprocessing.Process(target=load_process,
                                               args=(req_list,
                                                     spike_child_pipes[i]))
        spike_list.append(temp_process)

    # get services, nodes and tasks

    #Always start with 2 web worker and 1 sql
    scale(services["web-worker"], num_web_workers, manager)
    scale(services["mysql"], num_sql, manager)
    time.sleep(7)
    for service_name, service in services.items():
        get_tasks(service, manager)

    # get initial stats
    # get web-worker stats
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
        services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages,
        web_worker_mem_usages, nodes)

    # initialize estimator
    init_x = np.asarray(
        (sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg))
    init_x = init_x.reshape(init_x.size, 1)
    estimator = kalmanEstimator(np.identity(4), np.random.random((4, 3)),
                                init_x)

    # APPROACH:
    # We need at least 4 measurements to ensure that a solution can be found
    # 1st & 2nd containers will remain the same

    # ******************************************************************************************************************
    # ********************************************* 1st DIFF MEASUREMENT ***********************************************

    # store measurements
    prev_sql_cpu_avg = sql_cpu_avg
    prev_sql_mem_avg = sql_mem_avg
    prev_web_worker_cpu_avg = web_worker_cpu_avg
    prev_web_worker_mem_avg = web_worker_mem_avg
    prev_num_requests = num_requests
    prev_num_sql = num_sql
    prev_num_web_workers = num_web_workers
    # Start generating a load
    process_list[0].start()
    # Wait a couple seconds
    time.sleep(5)

    # Send poll request to the process we started
    par_pipes[0].send("poll")
    while not par_pipes[0].poll():
        pass
    # If the loop above has been broken then we can read the information from the pipe
    num_requests = par_pipes[0].recv()
    #print('BOOM {}'.format(num_requests))

    # get the stats
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
        services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages,
        web_worker_mem_usages, nodes)
    # create some np arrays for the regression
    sql_cpu_history = np.asarray(sql_cpu_avg - prev_sql_cpu_avg)
    sql_mem_history = np.asarray(sql_mem_avg - prev_sql_mem_avg)
    web_worker_cpu_history = np.asarray(web_worker_cpu_avg -
                                        prev_web_worker_cpu_avg)
    web_worker_mem_history = np.asarray(web_worker_mem_avg -
                                        prev_web_worker_mem_avg)
    request_history = np.asarray(num_requests - prev_num_requests)
    web_work_history = np.asarray(num_web_workers - prev_num_web_workers)
    sql_history = np.asarray(num_sql - prev_num_sql)
    # As before we store the stats
    prev_sql_cpu_avg = sql_cpu_avg
    prev_sql_mem_avg = sql_mem_avg
    prev_web_worker_cpu_avg = web_worker_cpu_avg
    prev_web_worker_mem_avg = web_worker_mem_avg
    prev_num_requests = num_requests
    prev_num_sql = num_sql
    prev_num_web_workers = num_web_workers
    # Wait a couple more seconds
    time.sleep(5)

    # ******************************************************************************************************************
    # ********************************************* 2nd DIFF MEASUREMENT ***********************************************

    # Send poll request to the process we started
    par_pipes[0].send("poll")
    while not par_pipes[0].poll():
        pass
    # If the loop above has been broken then we can read the information from the pipe
    num_requests = par_pipes[0].recv()
    # get the stats
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
        services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages,
        web_worker_mem_usages, nodes)
    # Append new values to the histories
    sql_cpu_history = np.append(sql_cpu_history,
                                sql_cpu_avg - prev_sql_cpu_avg)
    sql_mem_history = np.append(sql_mem_history,
                                sql_mem_avg - prev_sql_mem_avg)
    web_worker_cpu_history = np.append(
        web_worker_cpu_history, web_worker_cpu_avg - prev_web_worker_cpu_avg)
    web_worker_mem_history = np.append(
        web_worker_mem_history, web_worker_mem_avg - prev_web_worker_mem_avg)
    request_history = np.append(request_history,
                                num_requests - prev_num_requests)
    web_work_history = np.append(web_work_history,
                                 num_web_workers - prev_num_web_workers)
    sql_history = np.append(sql_history, num_sql - prev_num_sql)
    print(web_worker_cpu_avg)
    # Store the stats
    prev_sql_cpu_avg = sql_cpu_avg
    prev_sql_mem_avg = sql_mem_avg
    prev_web_worker_cpu_avg = web_worker_cpu_avg
    prev_web_worker_mem_avg = web_worker_mem_avg
    prev_num_requests = num_requests
    prev_num_sql = num_sql
    prev_num_web_workers = num_web_workers

    print(web_worker_cpu_usages)
    # ******************************************************************************************************************
    # ********************************************* 3rd DIFF MEASUREMENT ***********************************************
    print("Two measurements taken\n")
    # Start 2 new containers
    num_web_workers = num_web_workers + 1
    num_sql = num_sql + 1
    scale(services["web-worker"], num_web_workers, manager)
    scale(services["mysql"], num_sql, manager)
    # We also start another load generator
    process_list[1].start()
    # as before we sleep and will update
    time.sleep(5)

    # poll pipes [0] & [1]
    for i in range(0, 2):
        par_pipes[i].send("poll")
    pipes_ready = poll_pipes(par_pipes, 2)
    # reset number of requests
    num_requests = 0
    for i in range(0, 2):
        num_requests = num_requests + par_pipes[i].recv()

    # update tasks since we scaled
    for service_name, service in services.items():
        get_tasks(service, manager)
    # get the stats
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
        services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages,
        web_worker_mem_usages, nodes)
    # Append new values to the histories
    sql_cpu_history = np.append(sql_cpu_history,
                                sql_cpu_avg - prev_sql_cpu_avg)
    sql_mem_history = np.append(sql_mem_history,
                                sql_mem_avg - prev_sql_mem_avg)
    web_worker_cpu_history = np.append(
        web_worker_cpu_history, web_worker_cpu_avg - prev_web_worker_cpu_avg)
    web_worker_mem_history = np.append(
        web_worker_mem_history, web_worker_mem_avg - prev_web_worker_mem_avg)
    request_history = np.append(request_history,
                                num_requests - prev_num_requests)
    web_work_history = np.append(web_work_history,
                                 num_web_workers - prev_num_web_workers)
    sql_history = np.append(sql_history, num_sql - prev_num_sql)
    # Store the stats
    prev_sql_cpu_avg = sql_cpu_avg
    prev_sql_mem_avg = sql_mem_avg
    prev_web_worker_cpu_avg = web_worker_cpu_avg
    prev_web_worker_mem_avg = web_worker_mem_avg
    prev_num_requests = num_requests
    prev_num_sql = num_sql
    prev_num_web_workers = num_web_workers

    # ******************************************************************************************************************
    # ********************************************* 4th DIFF MEASUREMENT ***********************************************
    print("3 measurements taken\n")
    # Now we get the 4th measurement
    # Scale down the number of sql containers and scale up web-worker
    num_sql = num_sql - 1
    num_web_workers = num_web_workers + 1
    scale(services["web-worker"], num_web_workers, manager)
    scale(services["mysql"], num_sql, manager)
    # as before we sleep and will update
    time.sleep(5)
    for service_name, service in services.items():
        get_tasks(service, manager)
    for i in range(0, 2):
        par_pipes[i].send("poll")
    pipes_ready = poll_pipes(par_pipes, 2)
    # reset number of requests
    num_requests = 0
    for i in range(0, 2):
        num_requests = num_requests + par_pipes[i].recv()

    # get the stats
    sql_cpu_usages = []
    sql_mem_usages = []
    web_worker_cpu_usages = []
    web_worker_mem_usages = []
    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
        services, sql_cpu_usages, sql_mem_usages, web_worker_cpu_usages,
        web_worker_mem_usages, nodes)

    # Append new values to the histories
    sql_cpu_history = np.append(sql_cpu_history,
                                sql_cpu_avg - prev_sql_cpu_avg)
    sql_mem_history = np.append(sql_mem_history,
                                sql_mem_avg - prev_sql_mem_avg)
    web_worker_cpu_history = np.append(
        web_worker_cpu_history, web_worker_cpu_avg - prev_web_worker_cpu_avg)
    web_worker_mem_history = np.append(
        web_worker_mem_history, web_worker_mem_avg - prev_web_worker_mem_avg)
    request_history = np.append(request_history,
                                num_requests - prev_num_requests)
    web_work_history = np.append(web_work_history,
                                 num_web_workers - prev_num_web_workers)
    sql_history = np.append(sql_history, num_sql - prev_num_sql)
    # Store the stats
    prev_sql_cpu_avg = sql_cpu_avg
    prev_sql_mem_avg = sql_mem_avg
    prev_web_worker_cpu_avg = web_worker_cpu_avg
    prev_web_worker_mem_avg = web_worker_mem_avg
    prev_num_requests = num_requests
    prev_num_sql = num_sql
    prev_num_web_workers = num_web_workers

    # ******************************************************************************************************************
    # ********************************************* REGRESSION *********************************************************

    # Use these lines whenever we update the regression
    # TODO put this into a function
    target_mat = np.vstack([
        sql_cpu_history, web_worker_cpu_history, sql_mem_history,
        web_worker_mem_history
    ]).T
    design_mat = np.vstack([sql_history, web_work_history, request_history]).T
    control_matrix = regularized_lin_regression(design_mat, target_mat, 0.0001)
    #print(control_matrix)
    estimator.update_B(control_matrix.T)
    #print(control_matrix.T)
    obs = np.array(
        [[sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg]]).T
    estimator.update(obs, np.identity(4))
    #Helper vars
    polls_since_update = 0
    processes_started = 2
    delta_web = 0
    delta_sql = 0
    delta_requests = 0
    scaling_triggered = False
    # TODO We have generated an initial estimate
    # Begin by starting up the rest of the load generators and then monitoring and adjust
    close_flag = False
    #print("Experiment Started\n")
    output_pipe, log_pipe = multiprocessing.Pipe()
    close_pipe, log_close_pipe = multiprocessing.Pipe()
    startTime = time.time()
    log_process = multiprocessing.Process(
        target=logger,
        args=(log_pipe, log_file, node_list, manager, startTime,
              polling_interval / 4.0, nodes, services, log_close_pipe))
    log_process.start()
    iteration_count = 0
    #old_time = datetime.datetime.now()
    output_pipe.send([
        estimator.x[0][0], estimator.x[1][0], estimator.x[2][0],
        estimator.x[3][0], num_sql, num_web_workers, delta_requests,
        num_requests, iteration_count, 0.0, 0.0, True
    ])
    print("Experiment Started")
    spike = False
    spike_number = 0
    while not close_flag:
        #old_time = time.time()
        if input_pipe.poll():
            message = input_pipe.recv()
            if message == "Quit":
                close_flag = True
                print("Shutting down")
                for i in range(0, processes_started):
                    par_pipes[i].send("close")

                    process_list[i].join()
                    print("Load process {0}".format(i))
                print("Loads spun down")
                scale(services["web-worker"], 2, manager)
                scale(services["mysql"], 1, manager)
                output_pipe.send("close")
                close_pipe.send("close")
                log_process.join()
                print("Logger shut down")
                print(estimator.B)
                break
        if (processes_started < number_of_processes
                and (iteration_count % 20 == 0)):
            #We haven't started all of the load generators
            #So start another
            process_list[processes_started].start()
            processes_started = processes_started + 1
        #Sleep at the start since we need to sleep on first entry
        time.sleep(polling_interval)
        if scaling_triggered:
            for service_name, service in services.items():
                get_tasks(service, manager)

            output_pipe.send([
                estimator.x[0][0], estimator.x[1][0], estimator.x[2][0],
                estimator.x[3][0], num_sql, num_web_workers, delta_requests,
                num_requests, iteration_count, minutes, seconds,
                scaling_triggered
            ])
            scaling_triggered = False
        iteration_count = iteration_count + 1

        ###################################### TEST ABILITY TO REACT TO A SPIKE
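        # Inject two synthetic load spikes: the first batch of spike generators runs from
        # iteration 120 to 175, the second batch from iteration 230 to 280.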
        if (iteration_count == 120):
            for i in range(0, spike_size):
                spike_list[i].start()
                #processes_started = processes_started + 1
            spike = True
            spike_number = 1

        if (iteration_count == 175):
            for i in range(0, spike_size):
                spike_par_pipes[i].send("close")
                #processes_started = processes_started - 1
            spike = False

        if (iteration_count == 230):
            for i in range(spike_size, 2 * spike_size):
                spike_list[i].start()
                #processes_started = processes_started + 1
            spike = True
            spike_number = 2

        if (iteration_count == 280):
            for i in range(spike_size, 2 * spike_size):
                spike_par_pipes[i].send("close")
                #processes_started = processes_started - 1
            spike = False

        for i in range(0, processes_started):
            par_pipes[i].send("poll")
        if spike:
            if spike_number == 1:
                for i in range(0, spike_size):
                    spike_par_pipes[i].send("poll")
                for i in range(0, spike_size):
                    while not spike_par_pipes[i].poll():
                        pass
            if spike_number == 2:
                for i in range(spike_size, 2 * spike_size):
                    spike_par_pipes[i].send("poll")
                for i in range(spike_size, 2 * spike_size):
                    while not spike_par_pipes[i].poll():
                        pass
        pipes_ready = poll_pipes(par_pipes, processes_started)
        # reset number of requests
        num_requests = 0
        for i in range(0, processes_started):
            num_requests = num_requests + par_pipes[i].recv()

        if spike:
            if spike_number == 1:
                for i in range(0, spike_size):
                    num_requests = num_requests + spike_par_pipes[i].recv()
            if spike_number == 2:
                for i in range(spike_size, 2 * spike_size):
                    num_requests = num_requests + spike_par_pipes[i].recv()
        delta_requests = num_requests - prev_num_requests
        #We've slept so poll
        sql_cpu_usages = []
        sql_mem_usages = []
        web_worker_cpu_usages = []
        web_worker_mem_usages = []

        #Check to see if we need to update the estimator
        if polls_since_update == polls_per_update:
            sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg, web_worker_mem_avg = get_stats(
                services, sql_cpu_usages, sql_mem_usages,
                web_worker_cpu_usages, web_worker_mem_usages, nodes)
            #need to update the estimator
            #Check to see if we have 100 entries in the history list
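            # The history arrays act as a fixed-size sample buffer: once they hold 100 deltas,
            # a randomly chosen entry is overwritten instead of letting the arrays grow.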
            if sql_cpu_history.size == 100:
                #We have 100 entries randomly replace one of them
                replacement_index = random.randint(0, 99)
                #Use np.put to insert new value at index replacement_index, overwriting previous value
                np.put(sql_cpu_history, replacement_index,
                       sql_cpu_avg - prev_sql_cpu_avg)
                np.put(sql_mem_history, replacement_index,
                       sql_mem_avg - prev_sql_mem_avg)
                np.put(web_worker_cpu_history, replacement_index,
                       web_worker_cpu_avg - prev_web_worker_cpu_avg)
                np.put(web_worker_mem_history, replacement_index,
                       web_worker_mem_avg - prev_web_worker_mem_avg)
                np.put(request_history, replacement_index,
                       num_requests - prev_num_requests)
                np.put(web_work_history, replacement_index,
                       num_web_workers - prev_num_web_workers)
                np.put(sql_history, replacement_index, num_sql - prev_num_sql)

            else:
                #Don't have 100 entries. Append new values
                sql_cpu_history = np.append(sql_cpu_history,
                                            sql_cpu_avg - prev_sql_cpu_avg)
                sql_mem_history = np.append(sql_mem_history,
                                            sql_mem_avg - prev_sql_mem_avg)
                web_worker_cpu_history = np.append(
                    web_worker_cpu_history,
                    web_worker_cpu_avg - prev_web_worker_cpu_avg)
                web_worker_mem_history = np.append(
                    web_worker_mem_history,
                    web_worker_mem_avg - prev_web_worker_mem_avg)
                request_history = np.append(request_history,
                                            num_requests - prev_num_requests)
                web_work_history = np.append(
                    web_work_history, num_web_workers - prev_num_web_workers)
                sql_history = np.append(sql_history, num_sql - prev_num_sql)
            #Do regression
            target_mat = np.vstack([
                sql_cpu_history, web_worker_cpu_history, sql_mem_history,
                web_worker_mem_history
            ]).T
            design_mat = np.vstack(
                [sql_history, web_work_history, request_history]).T
            control_matrix = regularized_lin_regression(
                design_mat, target_mat, 0.0001)
            estimator.update_B(control_matrix.T)
            #Also need to correct Kalman gain
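            # Feed the latest averaged observations back into the filter; note that the
            # measurement-noise covariance passed here is a random matrix (0.002 * randn),
            # unlike the identity matrix used for the initial update above.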
            estimator.update(
                np.array([[
                    sql_cpu_avg, web_worker_cpu_avg, sql_mem_avg,
                    web_worker_mem_avg
                ]]).T, 0.002 * np.random.randn(4, 4))
            polls_since_update = 0
        else:
            polls_since_update = polls_since_update + 1
        #TODO For Carl: Get Estimate from Estimator, make scaling decision, send values to logger
        prev_sql_cpu_avg = sql_cpu_avg
        prev_sql_mem_avg = sql_mem_avg
        prev_web_worker_cpu_avg = web_worker_cpu_avg
        prev_web_worker_mem_avg = web_worker_mem_avg
        prev_num_requests = num_requests
        prev_num_sql = num_sql
        prev_num_web_workers = num_web_workers
        estimate = estimator.estimate(np.array([[0, 0, delta_requests]]).T)
        #print(estimate)
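        # Greedy threshold search: add or remove one container at a time, re-estimating after
        # each step, until the predicted CPU falls back inside the thresholds, the search range
        # is exhausted, or the container limits (1..max_containers) would be violated.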
        if (estimate[1] >= cpu_upper_threshold):
            #We assume the web worker needs scaling most of the time

            while not (estimate[1] < cpu_upper_threshold
                       or delta_web == search_range or num_web_workers +
                       (delta_web + 1) > max_containers):
                delta_web = delta_web + 1
                estimate = estimator.estimate(
                    np.array([[0, delta_web, delta_requests]]).T)
                scaling_triggered = True

        if (estimate[0] >= cpu_upper_threshold):

            while not (estimate[0] < cpu_upper_threshold
                       or delta_sql == search_range or num_sql +
                       (delta_sql + 1) > max_containers):
                delta_sql = delta_sql + 1
                estimate = estimator.estimate(
                    np.array([[delta_sql, delta_web, delta_requests]]).T)
                scaling_triggered = True
        if not scaling_triggered:
            #just to prevent two cases triggering
            if estimate[1] <= cpu_lower_threshold:

                while not (estimate[1] > cpu_lower_threshold
                           or abs(delta_web) == search_range
                           or num_web_workers + (delta_web - 1) < 1):
                    delta_web = delta_web - 1
                    estimate = estimator.estimate(
                        np.array([[0, delta_web, delta_requests]]).T)
                    #Again try scaling the web workers first when scaling down
                    scaling_triggered = True

            if (estimate[0] <= cpu_lower_threshold):

                while not (estimate[0] > cpu_lower_threshold
                           or abs(delta_sql) == search_range or num_sql +
                           (delta_sql - 1) < 1):
                    delta_sql = delta_sql - 1
                    estimate = estimator.estimate(
                        np.array([[delta_sql, delta_web, delta_requests]]).T)
                    scaling_triggered = True
        #We have made our decision actually update estimator
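        # predict() advances the Kalman state using the chosen control input (container and
        # request deltas) before the scale commands are actually issued.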
        estimator.predict(np.array([[delta_sql, delta_web, delta_requests]]).T)
        if scaling_triggered:
            #Actually do the scaling here
            num_web_workers = num_web_workers + delta_web
            num_sql = num_sql + delta_sql
            scale(services["web-worker"], num_web_workers, manager)
            scale(services["mysql"], num_sql, manager)
            delta_web = 0
            delta_sql = 0
            #scaling_triggered = 0
            #time.sleep(0.05)

        #Send the estimated values and counters to the logger, in this order:
        #estimated sql_cpu, web_worker_cpu, sql_mem, web_worker_mem, then num_sql,
        #num_web_workers, delta_requests, num_requests, iteration, minutes, seconds, scaling flag
        diff_time = time.time() - startTime
        minutes, seconds = diff_time // 60, diff_time % 60
        if not scaling_triggered:
            #diff_time = time.time() - startTime
            #minutes, seconds = diff_time // 60, diff_time % 60
            output_pipe.send([
                estimator.x[0][0], estimator.x[1][0], estimator.x[2][0],
                estimator.x[3][0], num_sql, num_web_workers, delta_requests,
                num_requests, iteration_count, minutes, seconds,
                scaling_triggered
            ])
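
The control matrix above is refit with regularized_lin_regression, whose implementation is not shown in this snippet. A minimal sketch of such a helper, assuming it computes the standard closed-form ridge-regression solution (the name and lambda argument mirror the call above; the body is illustrative, not the original implementation):

import numpy as np

def regularized_lin_regression(design_mat, target_mat, lam):
    # Solve (X^T X + lam * I) W = X^T Y for the weight matrix W.
    # design_mat: (n_samples, n_features); target_mat: (n_samples, n_targets)
    n_features = design_mat.shape[1]
    gram = design_mat.T @ design_mat + lam * np.identity(n_features)
    return np.linalg.solve(gram, design_mat.T @ target_mat)
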
Example #18
0
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--cuda", help="CUDA id to use", type=int, default=0)
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--use_pytorch",
                        help="1 to use PyTorch",
                        type=int,
                        default=1)
    parser.add_argument("--out_dir",
                        help="Dir to write preds to",
                        type=str,
                        default='')
    parser.add_argument("--log_file", help="File to log to", type=str)
    parser.add_argument("--load_data",
                        help="0 to read data from scratch",
                        type=int,
                        default=1)

    # Task options
    parser.add_argument("--tasks",
                        help="Tasks to evaluate on, as a comma separated list",
                        type=str)
    parser.add_argument("--max_seq_len",
                        help="Max sequence length",
                        type=int,
                        default=40)

    # Model options
    parser.add_argument("--model_checkpoint",
                        help="Model checkpoint to use",
                        type=str,
                        default='')
    parser.add_argument("--word_vec_file",
                        help="Word vector file to use",
                        type=str)
    parser.add_argument("--batch_size",
                        help="Batch size to use",
                        type=int,
                        default=64)

    # Classifier options
    parser.add_argument("--cls_batch_size",
                        help="Batch size to use",
                        type=int,
                        default=64)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s',
                        level=logging.DEBUG)
    if args.out_dir and not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    log_file = os.path.join(args.out_dir, "results.log")
    fileHandler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(fileHandler)
    logging.info(args)

    # define senteval params
    params_senteval = {
        'task_path': PATH_TO_DATA,
        'usepytorch': args.use_pytorch,
        'kfold': 10,
        'max_seq_len': args.max_seq_len,
        'batch_size': args.batch_size,
        'load_data': args.load_data,
        'seed': args.seed
    }
    params_senteval['classifier'] = {
        'nhid': 0,
        'optim': 'rmsprop',
        'batch_size': 128,
        'tenacity': 3,
        'epoch_size': 2
    }

    # Load InferSent model
    params_model = {
        'bsize': 64,
        'word_emb_dim': 300,
        'enc_lstm_dim': 2048,
        'pool_type': 'max',
        'dpout_model': 0.0,
        'version': V
    }
    model = InferSent(params_model)
    model.load_state_dict(torch.load(args.model_checkpoint))
    model.set_w2v_path(args.word_vec_file)

    params_senteval['infersent'] = model.cuda()
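    # Stash the encoder in params_senteval so the batcher/prepare callbacks can reach it.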

    # Do SentEval stuff
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    tasks = get_tasks(args.tasks)
    results = se.eval(tasks)
    write_results(results, args.out_dir)
    logging.info(results)
Example #19
0
def main(arguments):
    parser = argparse.ArgumentParser(description=__doc__,
                    formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--cuda", help="CUDA id to use", type=int, default=0)
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--use_pytorch", help="1 to use PyTorch", type=int, default=1)
    parser.add_argument("--out_dir", help="Dir to write preds to", type=str, default='')
    parser.add_argument("--log_file", help="File to log to", type=str)
    parser.add_argument("--load_data", help="0 to read data from scratch", type=int, default=1)

    # Task options
    parser.add_argument("--tasks", help="Tasks to evaluate on, as a comma separated list", type=str)
    parser.add_argument("--max_seq_len", help="Max sequence length", type=int, default=40)

    # Model options
    parser.add_argument("--ckpt_path", help="Path to ckpt to load", type=str,
                        default=PATH_PREFIX + 'ckpts/svae/glue_svae/best.mdl')
    parser.add_argument("--vocab_path", help="Path to vocab to use", type=str,
                        default=PATH_PREFIX + 'processed_data/svae/glue_v2/vocab.json')
    parser.add_argument("--model", help="Word emb dim", type=str, default='vae')
    parser.add_argument("--embedding_size", help="Word emb dim", type=int, default=300)
    parser.add_argument("--word_dropout", help="Word emb dim", type=float, default=0.5)
    parser.add_argument("--hidden_size", help="RNN size", type=int, default=512)
    parser.add_argument("--latent_size", help="Latent vector dim", type=int, default=16)
    parser.add_argument("--num_layers", help="Number of encoder layers", type=int, default=1)
    parser.add_argument("--bidirectional", help="1 for bidirectional", type=bool, default=False)
    parser.add_argument("--rnn_type", help="Type of rnn", type=str, choices=['rnn', 'gru'],
                        default='gru')
    parser.add_argument("--batch_size", help="Batch size to use", type=int, default=64)

    # Classifier options
    parser.add_argument("--cls_batch_size", help="Batch size to use", type=int, default=64)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
    if args.log_file:
        fileHandler = logging.FileHandler(args.log_file)
        logging.getLogger().addHandler(fileHandler)
    logging.info(args)

    # define senteval params
    params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': args.use_pytorch, 'kfold': 10,
            'max_seq_len': args.max_seq_len, 'batch_size': args.batch_size, 'load_data': args.load_data,
            'seed': args.seed}
    params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': args.cls_batch_size,
            'tenacity': 5, 'epoch_size': 4, 'cudaEfficient': True}

    # Load InferSent model
    vocab = json.load(open(args.vocab_path, 'r'))
    args.denoise = False
    args.prob_swap, args.prob_drop = 0.0, 0.0
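    # Evaluation runs with input denoising disabled (no word swaps or drops).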
    if args.model == 'vae':
        model = SentenceVAE(args, vocab['w2i'],
                            #sos_idx=w2i['<sos>'], eos_idx=w2i['<eos>'], pad_idx=w2i['<pad>'],
                            #max_sequence_length=args.max_seq_len,
                            embedding_size=args.embedding_size,
                            rnn_type=args.rnn_type, hidden_size=args.hidden_size,
                            word_dropout=args.word_dropout, latent_size=args.latent_size,
                            num_layers=args.num_layers, bidirectional=args.bidirectional)
    elif args.model == 'ae':
        model = SentenceAE(args, vocab['w2i'],
                           embedding_size=args.embedding_size,
                           rnn_type=args.rnn_type, hidden_size=args.hidden_size,
                           word_dropout=args.word_dropout, latent_size=args.latent_size,
                           num_layers=args.num_layers, bidirectional=args.bidirectional)
    else:
        raise ValueError("Unknown --model type: {}".format(args.model))

    model.load_state_dict(torch.load(args.ckpt_path))
    model = model.cuda()
    model.eval()
    params_senteval['model'] = model

    # Do SentEval stuff
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    tasks = get_tasks(args.tasks)
    results = se.eval(tasks)
    if args.out_dir:
        write_results(results, args.out_dir)
    if not args.log_file:
        print(results)
    else:
        logging.info(results)
Example #20
0
def main(arguments):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Logistics
    parser.add_argument("--seed", help="Random seed", type=int, default=19)
    parser.add_argument("--cuda", help="CUDA id to use", type=int, default=0)
    parser.add_argument("--use_pytorch",
                        help="1 to use PyTorch",
                        type=int,
                        default=1)
    parser.add_argument("--out_dir",
                        help="Dir to write preds to",
                        type=str,
                        default='')
    parser.add_argument("--log_file", help="File to log to", type=str)
    parser.add_argument("--load_data",
                        help="0 to read data from scratch",
                        type=int,
                        default=1)

    # Task options
    parser.add_argument("--tasks",
                        help="Tasks to evaluate on, as a comma separated list",
                        type=str)
    parser.add_argument("--max_seq_len",
                        help="Max sequence length",
                        type=int,
                        default=40)

    # Model options
    parser.add_argument("--model_dir",
                        help="Path to skipthoughts folder",
                        type=str)
    parser.add_argument("--dict_file", help="File to load dict from", type=str)
    parser.add_argument("--model_file",
                        help="File to load model from",
                        type=str)
    parser.add_argument("--batch_size",
                        help="Batch size to use",
                        type=int,
                        default=64)

    # Classifier options
    parser.add_argument("--cls_batch_size",
                        help="Batch size to use for classifiers",
                        type=int,
                        default=64)

    args = parser.parse_args(arguments)
    logging.basicConfig(format='%(asctime)s : %(message)s',
                        level=logging.DEBUG)
    if args.out_dir and not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    log_file = os.path.join(args.out_dir, "results.log")
    fileHandler = logging.FileHandler(log_file)
    logging.getLogger().addHandler(fileHandler)
    logging.info(args)

    # skipthoughts.load_model() is called below, so these imports need to be live
    # (assumes PATH_TO_SKIPTHOUGHTS is defined at module level, like PATH_TO_DATA)
    import sys
    os.environ["MKL_THREADING_LAYER"] = "GNU"
    sys.path.insert(0, PATH_TO_SKIPTHOUGHTS)
    import skipthoughts

    # Set params for SentEval
    params_senteval = {
        'task_path': PATH_TO_DATA,
        'usepytorch': args.use_pytorch,
        'kfold': 10,
        'max_seq_len': args.max_seq_len,
        'batch_size': args.batch_size,
        'load_data': args.load_data,
        'seed': args.seed
    }
    params_senteval['classifier'] = {
        'nhid': 0,
        'optim': 'adam',
        'batch_size': args.cls_batch_size,
        'tenacity': 5,
        'epoch_size': 4,
        'cudaEfficient': True
    }
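    # Load the pre-trained skip-thought encoder once and expose it to the batcher via params.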
    params_senteval['encoder'] = skipthoughts.load_model()

    se = senteval.engine.SE(params_senteval, batcher, prepare)
    tasks = get_tasks(args.tasks)
    results = se.eval(tasks)
    write_results(results, args.out_dir)
    logging.info(results)