import yaml

# Settings, LocalStorage, SmbStorage, Server, Job, FileTask and MySQLDumpTask
# are assumed to be importable from the surrounding project.


def main():
    # Load the backup configuration from conf.yaml.
    conf_f = open('conf.yaml', 'r')
    config = yaml.safe_load(conf_f)
    conf_f.close()
    servers = {}
    print(config)

    settings = Settings(config['config']['remote_tmp_dir'],
                        config['config']['remote_tmp_dir'],
                        config['config']['archive_type'],
                        True)

    # Pick the storage backend for the first configured store.
    if config['store'][0]['type'] == 'localstore':
        store = LocalStorage(config['store'][0]['name'],
                             config['store'][0]['path'])
    elif config['store'][0]['type'] == 'smb':
        store = SmbStorage(config['store'][0]['name'],
                           config['store'][0]['host'],
                           config['store'][0]['share'],
                           config['store'][0]['path'],
                           config['store'][0]['user'],
                           config['store'][0]['password'])

    # Build the servers and attach the jobs assigned to each of them.
    for s in config['servers']:
        servers[s['name']] = Server(s['name'], s['user'], password='', key=s['key'])
        for j in config['jobs']:
            job = Job(j['name'])
            for t in j['tasks']:
                if t['type'] == 'dir':
                    job.add_task(FileTask(t['name'], t['target_list'], settings))
                if t['type'] == 'mysql':
                    job.add_task(MySQLDumpTask(t['name'], t['user'], t['password'],
                                               t['host'], t['target_list'], settings))
                # tasks[t['name']].execute()
                # tasks[t['name']].transfer()
                # task[t['name']].clean()
            print('Job: ', job.name)
            if job.name in s['jobs']:
                servers[s['name']].add_job(job)

    # Execute the jobs on every server and transfer the artifacts to the store.
    for s in servers:
        servers[s].connect()
        servers[s].prepare()
        servers[s].execute_jobs()
        servers[s].add_storage(store)
        servers[s].transfer_artifacts()
        servers[s].clean()
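# Illustrative conf.yaml layout, inferred only from the keys read above; the
# project's actual schema may differ, and all values here are made up.
#
# config:
#   remote_tmp_dir: /tmp/backup
#   archive_type: tar.gz
# store:
#   - type: localstore          # or: smb (with host, share, user, password)
#     name: local
#     path: /var/backups
# servers:
#   - name: web1
#     user: backup
#     key: ~/.ssh/id_rsa
#     jobs: [nightly]
# jobs:
#   - name: nightly
#     tasks:
#       - {type: dir, name: www, target_list: [/var/www]}
#       - {type: mysql, name: db, user: root, password: secret,
#          host: localhost, target_list: [appdb]}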
import os
import re

# Job, Task, Operation, Machine and Problem are assumed to be importable from
# the surrounding project.


def parseFile(path):
    problem = None
    with open(os.path.join(os.getcwd(), path), "r") as data:
        # The first line holds the number of jobs and the number of machines.
        number_jobs, number_machines = [
            int(element) for element in re.findall(r"\S+", data.readline())[0:2]
        ]
        jobs = []
        for job_id, current_job in enumerate(data):
            if job_id >= number_jobs:
                break
            # Every remaining line describes one job.
            job = Job(job_id + 1)
            # The first number in the line is the number of tasks.
            id_task = 1
            # Successive numbers describe the possible operations of each task.
            operation = 1
            current_job = re.findall(r'\S+', current_job)
            while operation < len(current_job):
                task = Task(id_task, job)
                # Number of alternative operations for this task.
                number_operations = int(current_job[operation])
                for current_tuple in range(0, number_operations):
                    id_machine = current_job[operation + 1 + current_tuple * 2]
                    processing_time = current_job[operation + 2 + current_tuple * 2]
                    task.add_operation(Operation(int(id_machine), int(processing_time)))
                # The next task starts after this task's (machine, time) pairs.
                operation += number_operations * 2 + 1
                id_task += 1
                job.add_task(task)
            jobs.append(job)
    machines = []
    for i in range(1, number_machines + 1):
        machines.append(Machine(i))
    problem = Problem(jobs, machines)
    return problem
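# Expected input format, inferred from the parsing logic above rather than from
# any project documentation: the first line gives "<number_jobs> <number_machines>";
# each following line is one job, starting with its task count, then for each
# task the number of alternative operations followed by (machine, time) pairs.
# For example, the job line
#     2 1 1 3 2 1 2 2 4
# describes two tasks: task 1 runs only on machine 1 for 3 time units, and
# task 2 runs either on machine 1 for 2 units or on machine 2 for 4 units.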
import sys
import time
from threading import Thread

# config, framework, Job, RUN_PATH and the *_PER_TASK_RUNNING_TIME constants
# are assumed to be defined in the surrounding project.


def main():
    # The total deadline and the gap between the two batches come from the
    # command line.
    total_deadline = int(sys.argv[1])
    delay = int(sys.argv[2])

    wc_job = Job('word count')
    bzip2_job = Job('bzip2')
    h264_job = Job('h264')

    for i in range(20):
        wc_job.add_task(RUN_PATH + '/word_count.sh')
    for i in range(10):
        bzip2_job.add_task(RUN_PATH + '/bzip2.sh')
    for i in range(4):
        h264_job.add_task(RUN_PATH + '/h264.sh')

    # Expected per-task running time on every worker.
    for worker in config.workers:
        wc_job.set_per_server_time(worker, WC_PER_TASK_RUNNING_TIME)
        bzip2_job.set_per_server_time(worker, BZIP2_PER_TASK_RUNNING_TIME)
        h264_job.set_per_server_time(worker, H264_PER_TASK_RUNNING_TIME)

    wc_job.set_priority(3)
    bzip2_job.set_priority(2)
    h264_job.set_priority(1)

    # First batch
    print('Dispatching first batch')
    job_set = [wc_job]
    for j in job_set:
        j.set_deadline(total_deadline)
    schedule = framework.get_dispatcher().schedule_jobs(job_set)
    print(schedule)
    t1 = Thread(target=framework.run_job_set, args=(job_set,))
    t1.start()

    # Simulate the gap between job arrivals
    time.sleep(delay)

    # Second batch
    print('Dispatching second batch')
    job_set = [bzip2_job, h264_job]
    for j in job_set:
        j.set_deadline(total_deadline - delay)
    schedule = framework.get_dispatcher().schedule_jobs(job_set)
    print(schedule)
    t2 = Thread(target=framework.run_job_set, args=(job_set,))
    t2.start()

    # Wait until both batches finish
    t1.join()
    t2.join()
    return 0
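# Example invocation (the script name and values are illustrative, not taken
# from the project):
#     python run_two_batches.py 600 30
# gives the first batch a deadline of 600 and dispatches the second batch,
# with a deadline of 570, after sleeping 30 seconds.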