# Dispatch on the requested job name to pick the Sahara job template id and
# the MapReduce key/value output class names, then create Swift-backed input
# and output data sources and launch the job on the given cluster.
if job_name == 'Job1':
    job_id = 'job1_id'
    map_output_key = 'map_output_key'
    map_output_value = 'map_output_value'  # dont need the hadoop.org....
elif job_name == 'Job2':
    job_id = 'job2_id'
    map_output_key = 'map_output_key'
    map_output_value = 'map_output_value'
else:
    # Unknown job name: nothing sensible to run, abort with a non-zero status.
    print("Job doesn't exist in the script")
    exit(1)

# Authenticate against Keystone and build a Sahara client scoped to the token.
connector = ConnectionGetter(user, password, project_name, project_id, main_ip)
keystone_util = UtilKeystone(connector.keystone())
# BUGFIX: was `passwordstone_util.getTokenRef(...)` — that name never existed;
# the Keystone helper created above is `keystone_util`.
token_ref_id = keystone_util.getTokenRef(user, password, project_name).id
sahara_util = UtilSahara(connector.sahara(token_ref_id))

# Timestamp makes each run's data-source names unique.
exec_date = time.strftime('%Y%m%d%H%M%S')

input_ds_name = 'input_' + file_name + '_' + exec_date
# BUGFIX: was assigned to misspelled `input_ds_utl`, leaving `input_ds_url`
# (read below) undefined.
input_ds_url = 'swift://' + input_container + '.sahara/' + file_name
output_ds_name = 'output_' + file_name + '_' + exec_date
output_ds_url = 'swift://' + output_container + '.sahara/' + output_ds_name

# Register both Swift locations with Sahara, then kick off the MapReduce job.
input_ds = sahara_util.createDataSource(input_ds_name, input_ds_url, 'swift',
                                        user, password)
output_ds = sahara_util.createDataSource(output_ds_name, output_ds_url,
                                         'swift', user, password)
sahara_util.runMapReduceJob(job_name, job_id, cluster_id, map_output_key,
                            map_output_value, input_ds.id, output_ds.id)