Code Example #1
File: mr.py  Project: mnielsen/mr_py
 def get_dict_value(self, dict_name, key):
     # Find the worker holding this key: dictionaries are partitioned
     # across the cluster by hashing the key modulo the cluster size.
     instance = self.workers.instances[hash(key) % self.size]
     # Copy the pickled dictionary from that worker to the local machine.
     instance_filename = "root@" + instance.public_dns_name + ":" + dict_name
     mr_lib.scp(instance_filename, dict_name)
     # Unpickle it, look up the value, and delete the local copy.
     d = mr_lib.read_pickle(dict_name)
     value = d[key]
     os.remove(dict_name)
     return value
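
The lookup above only works because reads use the same routing rule that was used to distribute the dictionary across the workers in the first place. A minimal sketch of that partitioning invariant, with a hypothetical cluster size and key set:

# Hypothetical illustration of the hash partitioning used by get_dict_value.
# Writer and reader must agree on worker_index(key, size) for every key.
def worker_index(key, size):
    return hash(key) % size

size = 4  # hypothetical 4-worker cluster
for key in ["apple", "banana", "cherry"]:
    print key, "-> worker", worker_index(key, size)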
Code Example #2
File: map.py  Project: mnielsen/mr_py
# map.py
#
# Part of the mr.py library.  Run on worker machines by mr.py when a
# map-only job is called.

import itertools, mr_lib, sys

mr_lib.set_flag("map_done",False) # map phase on this worker not yet done
filename,input_dict = sys.argv[1:]
exec("from "+filename+" import mapper") # import map job
i = mr_lib.read_pickle(input_dict) # Read the input dictionary
mapper_params = mr_lib.read_pickle("mapper_params.mr") # Get the parameters for the mapper
for k in i.keys(): i[k] = mapper(k,i[k],mapper_params) # Run the mapper
mr_lib.write_pickle(i,input_dict) # Write the input dictionary back out
mr_lib.set_flag("map_done",True) # map phase is done

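For context, map.py expects the job module named on its command line to export a mapper function called as mapper(key, value, params), whose return value replaces the original value in the input dictionary. A hypothetical job module in that shape (the module name and the word-count logic are illustrative, not part of the library):

# wordcount_map.py -- a hypothetical map-only job module for mr.py.
# map.py does `from wordcount_map import mapper` and calls
# mapper(key, value, params) for every entry in the input dictionary.
def mapper(key, value, params):
    # Illustrative: replace each document with its word count.
    return len(value.split())
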
Code Example #3
File: map_combine.py  Project: mnielsen/mr_py
# map_combine.py
#
# Part of the mr.py library.
#
# This program is automatically run on worker machines by mr.py, at the start of
# a MapReduce job.

import itertools, mr_lib, sys

mr_lib.set_flag("map_combine_done",False) # flag indicating map_combine not yet done

# Read in the IP address of the current worker, and a description of the whole cluster
my_number,my_ip = mr_lib.read_pickle("my_details.mr")
ip = mr_lib.read_pickle("cluster_description.mr")

filename,input_dict = sys.argv[1:]
exec("from "+filename+" import mapper, reducer")

# Get the parameters for the MapReduce job
mapper_params,reducer_params = mr_lib.read_pickle("params.mr")

# Read the input dictionary into local memory
i = mr_lib.read_pickle(input_dict)

# Run MapReduce over the local input dictionary.
intermediate = []
for (key,value) in i.items():
  intermediate.extend(mapper(key,value,mapper_params))
groups = {}
for key, group in itertools.groupby(sorted(intermediate),
                                    lambda x: x[0]):
  groups[key] = list([y for x, y in group]) # collect all values emitted for each key
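
The sorted-then-groupby idiom above is how the worker gathers all intermediate values sharing a key before combining them. The same step in isolation, on hypothetical data:

import itertools

# Hypothetical (key, value) pairs emitted by the mappers.
intermediate = [("a", 1), ("b", 2), ("a", 3), ("b", 4)]

# groupby only merges adjacent items, hence the sort by key first.
groups = {}
for key, group in itertools.groupby(sorted(intermediate), lambda x: x[0]):
    groups[key] = [y for x, y in group]

print groups  # -> {'a': [1, 3], 'b': [2, 4]}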
Code Example #4
File: reduce.py  Project: mnielsen/mr_py
# reduce.py
#
# Part of the mr.py library.
#
# This program is automatically run on worker machines by mr.py, to conclude
# a MapReduce job.

import itertools, mr_lib, os, sys

# Sets a flag, visible to the client, saying that the reduce phase on this worker 
# is not yet done.
mr_lib.set_flag("reduce_done",False)

# Read in the IP address of the current worker, and a description of the whole cluster
my_number,my_ip = mr_lib.read_pickle("my_details.mr")
ip = mr_lib.read_pickle("cluster_description.mr")

# Read the job filename, output dictionary name, and output field from the command line
filename,output_dict,output_field = sys.argv[1:]

# Get the parameters for the MapReduce job
mapper_params,reducer_params = mr_lib.read_pickle("params.mr")

# import the reducer
module = filename[:-3] # strip the trailing ".py" to get the module name
exec("from "+module+" import reducer")

# read in all the intermediate data
intermediate = []
for machine in xrange(len(ip)):
  name = "inter.dict."+str(machine)+"."+str(my_number)
  # Assumed continuation: each file unpickles to a list of (key, value) pairs
  # destined for this worker, accumulated for the reduce step.
  intermediate.extend(mr_lib.read_pickle(name))
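
The full source continues by grouping these intermediate pairs and applying the imported reducer. For context, a hypothetical job-module reducer, assuming it is called as reducer(key, values, params) by symmetry with the mapper(key, value, params) convention in map.py:

# wordcount_reduce.py -- a hypothetical job module for mr.py.
# Assumes reduce.py calls reducer(key, values, params) once per key.
def reducer(key, values, params):
    # Illustrative: sum the counts emitted for each word.
    return (key, sum(values))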