Code example #1
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    sol = Sliding.solution(WIDTH, HEIGHT)

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, sol)
    new_visited = [(sol, level)]
    
    new_visited = sc.parallelize(new_visited)
    num = 1

    #while there are still (k, v) pairs at the current level
    while num:
        #use += (RDD union) because the map function only emits boards at the
        #current level, so earlier levels are kept by unioning them back in
        new_visited += new_visited.flatMap(bfs_map)
        if level % 4 == 3: # only reduce and filter every fourth iteration, for performance
            new_visited = new_visited.reduceByKey(bfs_reduce)
            new_visited = new_visited.partitionBy(PARTITION_COUNT) #figure out how to use hash
            num = new_visited.filter(filter_func).count() # count the number of elements in the RDD at the current level
        level += 1
        # For debugging: print("\n\n\nLevel " + str(level) + '\n\n\n')

    """ YOUR OUTPUT CODE HERE """
    new_visited.coalesce(slaves).saveAsTextFile(output)

    sc.stop()
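Note that every example in this collection calls helper functions such as bfs_map, bfs_reduce, and (in example #1) filter_func that are defined elsewhere in each project's file and are not shown in these excerpts; the snippets also assume `from pyspark import SparkContext` and the course's Sliding module. The following is a rough, hypothetical sketch of what such helpers might look like, working directly on board tuples (hashed variants convert with Sliding.board_to_hash / Sliding.hash_to_board); each project below defines its own version, so treat this only as orientation.

# Hypothetical helpers only; actual signatures vary between the projects below.
def bfs_map(kv):
    """Re-emit the pair and, if it lies on the current frontier, its children one level deeper."""
    board, lvl = kv
    pairs = [(board, lvl)]
    if lvl == level:  # 'level' is the module-level global set by the driver
        for child in Sliding.children(WIDTH, HEIGHT, board):
            pairs.append((child, lvl + 1))
    return pairs

def bfs_reduce(a, b):
    """Keep the smallest level at which a board was first reached."""
    return min(a, b)

def filter_func(kv):
    """True for pairs whose level equals the current global level."""
    return kv[1] == level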
Code example #2
File: SlidingBfsSpark(new).py Project: warlck/cs61c
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """


    """ YOUR OUTPUT CODE HERE """

    sc.stop()
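Example #2 is the unmodified assignment skeleton, with the processing and output sections left blank. A minimal sketch of what typically fills them, following the same pattern as the surrounding examples (bfs_map and bfs_reduce are the assumed helpers sketched above, not part of this snippet):

    # expand the BFS frontier until a pass adds no new boards
    rdd = sc.parallelize([(sol, level)])
    prev_count, count = 0, rdd.count()
    while prev_count != count:
        rdd = rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
        level += 1
        prev_count, count = count, rdd.count()

    # write one "level board" line per reachable position
    for board, lvl in sorted(rdd.collect(), key=lambda kv: kv[1]):
        output(str(lvl) + " " + str(board))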
Code example #3
File: SlidingBfsSpark.py Project: poywoo/61cproj2-2
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol_board = Sliding.solution(WIDTH, HEIGHT)
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, sol_board)
    all_sols = sc.parallelize([(sol, level)]) #create an RDD 
    before_count = 1
    k = 0 #counter for iterations of partitionBy
    c = 0 #counter for iterations of count()
    while True:
        level += 1
        all_sols = all_sols.flatMap(bfs_map)
        if k%4 == 0: #every 4 iterations, use partitionBy
            all_sols = all_sols.partitionBy(PARTITION_COUNT)
        all_sols = all_sols.reduceByKey(bfs_reduce)
        if c%2 == 0: #every 2 iterations, use count()
            after_count = all_sols.count()
            if before_count == after_count:
                break
            before_count = after_count
        k += 1
        c += 1

    """ YOUR OUTPUT CODE HERE """
    all_sols = all_sols.map(lambda a: (a[1], a[0])).sortByKey()
    all_sols.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #4
File: SlidingBfsSpark.py Project: shauryakalsi/CS61C
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))    
    RDD = sc.parallelize([(sol,level)]) 
    count = RDD.count()
    RDD_count = 0
    search = True
    k = 1
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    while search:
        if k % 3== 0:
            RDD = RDD.flatMap(bfs_map).partitionBy(PARTITION_COUNT).reduceByKey(bfs_reduce) #PUT PARTITION_COUNT FOR 16
        else:
            RDD = RDD.flatMap(bfs_map).reduceByKey(bfs_reduce) 
        if k % 2 == 0:
            RDD_count = RDD.count() 
            if RDD_count == count: 
                search = False
            count = RDD_count
        k = k + 1
        level = level + 1
    """ YOUR OUTPUT CODE HERE """
    RDD = RDD.map(swap_map)  
    RDD.coalesce(slaves).saveAsTextFile(output)    
    #outputLst = RDD.collect()
    #for elem in outputLst:
       #output(str(elem[0]) + " " + str(elem[1])) #output the elements
    sc.stop()
Code example #5
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    solution=Sliding.solution(WIDTH, HEIGHT)
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, solution)
    data = sc.parallelize([(sol,level),])
    counter = 0
    curLen = 1 
    while(counter < curLen):
        level += 1
        data = data.flatMap(bfs_flat_map)
        

        if (level% 12 == 0):
            data = data.partitionBy(PARTITION_COUNT)
        data = data.reduceByKey(bfs_reduce)
        if (level% 6 == 0):
            counter = curLen
            curLen = data.count()
        
        
    """ YOUR OUTPUT CODE HERE """
    data.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #6
File: SlidingBfsSpark.py Project: hansongcal/CS61C
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    NUM_WORKERS = slaves

    sol = Sliding.solution(WIDTH, HEIGHT)
    """ MAP REDUCE PROCESSING CODE HERE """
    level_pos = sc.parallelize((make_state(level, sol),))
    prev_size, size = 0, 1

    while prev_size != size:
        level += 1
        if level % 10 == 0:
            level_pos = level_pos.partitionBy(PARTITION_COUNT)
        level_pos = level_pos.flatMap(bfs_flat_map).reduceByKey(bfs_reduce)
        prev_size = size
        size = level_pos.count()

    """ OUTPUT CODE HERE """
    level_pos = level_pos.map(unhash_board)
    level_pos.coalesce(NUM_WORKERS).saveAsTextFile(output)

    sc.stop()
Code example #7
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
	
    myRDD = sc.parallelize([(sol, level)])
    counter = 0
    counter = myRDD.count()
    k = 0
    comp = 0
    repar = 0
    while k <= (math.sqrt(WIDTH * HEIGHT)-1) * math.log(math.factorial(WIDTH * HEIGHT),2):
        myRDD = myRDD.flatMap(sol_map)
        if (repar % 8 == 0):
            myRDD = myRDD.partitionBy(6)
        myRDD = myRDD.reduceByKey(bfs_reduce)
        repar += 1
        level += 1
        k += 1
    k = 0

    while True:
        myRDD = myRDD.flatMap(sol_map)
        myRDD = myRDD.reduceByKey(bfs_reduce)
        if k % 3 == 0:
            comp = myRDD.count()
            if comp == counter:
                break
            else: 
                counter = comp
        level += 1
        k += 1

    myRDD = myRDD.map(bfs_map).collect()
    result = ""
    for each in myRDD:
        result += str(each) + "\n"
    output(result)
    sc.stop()
Code example #8
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level, prev_len, PARTITION_COUNT

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    level_nodes = sc.parallelize([(Sliding.board_to_hash(WIDTH, HEIGHT, sol), 0)])

    PARTITION_COUNT = 16
    prev_len = 0
    count = 0
    while True:
        level_nodes = level_nodes.flatMap(bfs_map).reduceByKey(bfs_reduce)
        next_len = level_nodes.count()
        if next_len == prev_len:
            break
        prev_len = next_len

        count += 1
        if count == 10:
            count = 0
            level_nodes = level_nodes.partitionBy(PARTITION_COUNT)

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    # level = []
    # def add_to_string(obj):
    #     output(str(obj))
    level_nodes = level_nodes.map(lambda x : (x[1], x[0]))
    output_string = ""
    for l in level_nodes.sortByKey(True).collect():
        output_string += str(l) + "\n"
    output(output_string)
    # level_nodes.sortByKey(True).coalesce(1).saveAsTextFile("output")
    # level_nodes.foreach(add_to_string)


    """ YOUR OUTPUT CODE HERE """
    sc.stop()
Code example #9
File: SlidingBfsSpark.py Project: leoleblanc/CS61C
def solve_sliding_puzzle(master, output, height, width, slaves):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job
    
    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    #cores = multiprocessing.cpu_count() #OPTIMIZATION, gives cpu count for this machine, for partitionBy
    constant = 8
    sol_hash = board_to_hash(WIDTH, HEIGHT, sol) #this is the initial hash
    lst = sc.parallelize([(sol_hash, level)]).partitionBy(PARTITION_COUNT) #creates initial RDD of [(hash, level)]
    #lst = sc.parallelize([(sol, level)]).partitionBy(PARTITION_COUNT) #this creates the initial (K, V) RDD comprised of: (0, ('A', 'B', 'C', '-'))
    
    lst = lst.flatMap(bfs_map).reduceByKey(bfs_reduce)
    level+=1 #this is so that repartition doesn't run right when level = 0
    while (True): #continually loop
        if (level % constant == 0):
            new_lst = lst.flatMap(bfs_map).repartition(PARTITION_COUNT).reduceByKey(bfs_reduce)
            #new_lst is going to be lst + the new children in lst
            if (new_lst.count() == lst.count()):
                break
        else:
            new_lst = lst.flatMap(bfs_map).reduceByKey(bfs_reduce)
        lst = new_lst #set lst to equal the new list + non-duplicate children
        level+=1 #increment level
    
    """ YOUR OUTPUT CODE HERE """
    lst.coalesce(slaves).saveAsTextFile(output) #I guess this is supposed to... write lst to output

    
    # toPrint = "" #set the empty string
    # for pair in lst.collect():
    #     toPrint += (str(pair[1]) + " " + str(pair[0]) + "\n") #get the elements to add to the string
    # output(toPrint) #write the string

    sc.stop()
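Unlike most of the other examples, example #9 calls repartition() rather than partitionBy(). In Spark the two are not interchangeable: repartition() only rebalances partition sizes and leaves the result with no partitioner, while partitionBy(n) on a pair RDD hash-partitions by key, so a following reduceByKey that keeps the same partitioner can combine values without another full shuffle. A small illustration of the distinction, reusing lst and PARTITION_COUNT from the example above:

    # keyed shuffle: equal keys land in the same partition, so the next
    # reduceByKey with the same partition count avoids a second full shuffle
    lst = lst.partitionBy(PARTITION_COUNT)

    # size-only shuffle: partitions are balanced, but keys are not co-located
    # lst = lst.repartition(PARTITION_COUNT)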
Code example #10
def main():
    parser = argparse.ArgumentParser(
            description="Returns back the entire solution graph.")
    parser.add_argument("-H", "--height", type=int, default=2,
            help="height of the puzzle")
    parser.add_argument("-W", "--width", type=int, default=2,
            help="width of the puzzle")
    args = parser.parse_args()

    p = Sliding.solution(args.width, args.height)
    slidingBfsSolver(p, args.width, args.height)
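Examples #10, #27, and #40 are just the argument-parsing entry points of serial (non-Spark) solvers; the slidingBfsSolver they call is not included in these excerpts (example #27's version takes only the board and reads W and H from globals). A purely hypothetical serial sketch of such a solver, handy for checking the Spark output on tiny boards:

def slidingBfsSolver(puzzle, width, height):
    """Plain breadth-first search from the solved board; prints one 'level board' line per position."""
    seen = {puzzle: 0}            # board tuple -> first level reached
    frontier, level = [puzzle], 0
    while frontier:
        next_frontier = []
        for board in frontier:
            for child in Sliding.children(width, height, board):
                if child not in seen:
                    seen[child] = level + 1
                    next_frontier.append(child)
        frontier, level = next_frontier, level + 1
    for board, lvl in sorted(seen.items(), key=lambda kv: kv[1]):
        print(str(lvl) + " " + str(board))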
Code example #11
File: SlidingBfsSpark.py Project: warlck/cs61c
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.solution(WIDTH, HEIGHT)
    rdd = sc.parallelize([(sol, level)])

    prev_count = 0
    count = rdd.count()

    k = 0
    i = 0
    #put this here since I am assuming this part?
    #hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            rdd = rdd.partitionBy(16, partitionHash)
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()

        k += 1
        i += 1
    #nodes is an rdd
    #nodes.coalesce(NUM_WORKERS).saveAsTextFile(str(WIDTH) + "x" + str(HEIGHT) + "-output") # Let NUM_WORKERS be the number of workers (6 or 12)
    # replace num_workers with slaves?
    #rdd.coalesce(slaves).saveAsTextFile(output) # Let NUM_WORKERS be the number of workers (6 or 12), this is the new way IS IT SLAVES
    # for top line is NUM_WORKERS GLOBAL VARIABLE PARTITION_COUNT, or is it 6, 12 depending on some sort of if condition
    # ask manny for clarification

    #hash_to_board(WIDTH, HEIGHT, hashID) #hash(int) to board(obj) #should be what we stored in hashID, this should be at top in map function right
    # do I save this instead as rdd? ask manny
    #hashID = board_to_hash(WIDTH, HEIGHT, value[0]) #board(obj) to hash(int) #either sol or value[0], is this here?
    #not sure if need to do
    #rdd = rdd.collect()
    #positions = rdd.collect()
    #positions = sorted(positions, key=lambda kv: kv[1])
    #for pos in positions:
    	#output = 
    hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?
    slaves = 6
    rdd.coalesce(slaves).saveAsTextFile(output) # Let NUM_WORKERS be the number of workers (6 or 12), this is the new way IS IT SLAVES
    sc.stop()
Code example #12
File: SlidingBfsSpark.py Project: warlck/cs61c
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    rdd = sc.parallelize([(sol, level)])

    prev_count = 0
    count = rdd.count()
    # while rdd.filter(lambda x: x[1] == level).count() != 0: 
    k = 0
    i = 0
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            rdd = rdd.partitionBy(16, partitionHash)
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()
        k += 1
        i += 1

    """ YOUR OUTPUT CODE HERE """
    positions = rdd.collect()
    positions = sorted(positions, key=lambda kv: kv[1]) #sort k, v pairs by level
    for pos in positions:
        output(str(pos[1]) + " " + str(pos[0]))
    sc.stop()
Code example #13
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. 
    sc = SparkContext(master, "python")


    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT= height
    WIDTH= width
    level = 0 

    sol = Sliding.solution(WIDTH, HEIGHT)

    # Create a list of just the solution
    solList = []
    solList.append((sol, 0))
    levelList = sc.parallelize(solList)
    counter = 0

    # Continue until all positions have been found.
    while level != -1:
        level += 1
        counter += 1
        levelList = levelList.flatMap(bfs_flat_map) \
                             .reduceByKey(bfs_reduce)

        # Checks if any positions were added
        newList = levelList.filter(lambda x: x[1] == level)
        if newList.count() == 0:
            level = -1

        # Repartitions every 32 steps
        if counter % 32 == 0:
            levelList = levelList.partitionBy(16)

    arr = levelList.collect()

    for elem in arr:
        finalStr = str(elem[1]) + " " + str(elem[0])
        output(finalStr)

    sc.stop()
Code example #14
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    # parallelize 
    job = sc.parallelize([(sol, level)])
    old_result = 1
    # loop until no more children
    while True:
        if level % 8 == 0:
            job = job.partitionBy(16)
        # do the map reduce
        curr_job = job.flatMap(press_map).map(bfs_map).reduceByKey(bfs_reduce)
        # check if no new children found
        num = curr_job.count()
        if num == old_result:
            break
        old_result = num
        job = curr_job
        level += 1
    

    """ YOUR OUTPUT CODE HERE """
    sorts = sorted(curr_job.collect(), key=lambda l: l[1])
    for item in sorts:
        output(str(item[1]) + " " + str(item[0]))
    sc.stop()
Code example #15
File: SlidingBfsSpark.py Project: shauryakalsi/CS61C
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job
 
    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    RDD = sc.parallelize([(sol,level)]) #creates RDD with key puzzle and value level
    count = RDD.count()
    RDD_count = 0
    search = True
    k = 1
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    while search:
        if k % 3 == 0:
            RDD = RDD.flatMap(bfs_map).partitionBy(8).reduceByKey(bfs_reduce) #call bfs_map on RDD - its result is a pair
        else:
            RDD = RDD.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if k % 2 == 0:
            RDD_count = RDD.count()
            if RDD_count == count:
                search = False
            count = RDD_count
        k = k + 1
        level = level + 1        
    """ YOUR OUTPUT CODE HERE """
    RDD = RDD.map(swap_map)#.sortByKey()
    outputLst = RDD.collect()
    for elem in outputLst:
        output(str(elem[0]) + " " + str(elem[1])) #output the elements
    sc.stop()
Code example #16
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))
    RDD = sc.parallelize([(sol, level)])
    counter = RDD.count()
    k, comp, data = 0, 0, 0
    repar = 0
    bound = (math.sqrt(WIDTH * HEIGHT)-1) * math.log(math.factorial(WIDTH * HEIGHT),2)
     
    # running mapreduce under lower bound
    while k <= bound:
        RDD = RDD.flatMap(bfs_map)
        if repar % 8 == 0:
            RDD = RDD.partitionBy(PARTITION_COUNT, hash)
        RDD = RDD.reduceByKey(bfs_reduce)
        level += 1
        k += 1
        repar += 1
    k = 0
    repar = 0
    # running mapreduce until the number of elements in RDD stops increasing
    while True:
        RDD = RDD.flatMap(bfs_map)
        if repar % 8 == 0:
            RDD = RDD.partitionBy(PARTITION_COUNT, hash)
        RDD = RDD.reduceByKey(bfs_reduce)
        if k % 3 == 0:
            comp = RDD.count()
            if comp == counter:
                break
            else: 
                counter = comp
        level += 1
        k += 1
        repar += 1
    # output code
    RDD = RDD.map(revert_back)
    RDD.coalesce(6).saveAsTextFile(output)
    sc.stop()
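Examples #7 and #16 first run a fixed number of cheap iterations (with no count() calls) up to the bound (sqrt(W*H) - 1) * log2((W*H)!) before switching to the counting loop. For a feel of the numbers involved, here is a quick check of that bound (same formula as above, nothing new assumed):

import math

def level_bound(width, height):
    n = width * height
    return (math.sqrt(n) - 1) * math.log(math.factorial(n), 2)

print(level_bound(2, 2))   # 1 * log2(24)  ~ 4.6  -> about 5 count()-free iterations
print(level_bound(3, 3))   # 2 * log2(9!)  ~ 36.9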
Code example #17
File: SlidingBfsSpark.py Project: rlaprade/61c-Spark
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    level_pos = sc.parallelize((make_state(level, sol),))
    prev_size, size = 0, 1
    
    while prev_size != size:
        level += 1
        level_pos = level_pos.flatMap(bfs_flat_map) \
                 .reduceByKey(bfs_reduce)
                 # .map(bfs_map) \
        prev_size = size
        size = level_pos.count()
        # output("level: {}, size: {}, prev_size: {}".format(level, size, prev_size))
        # output(str(level_pos.collect()))

    """ YOUR OUTPUT CODE HERE """
    # Get the list of states from level_pos and sort it by level
    state_space = sorted(level_pos.collect(), key=lambda state: get_level(state))
    for state in state_space:
        output("{lvl} {brd}".format(lvl=get_level(state), brd=get_board(state)))
    
    sc.stop()
Code example #18
File: SlidingBfsSpark.py Project: SanityL/Projects
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    myRdd = sc.parallelize([(sol, level)]) # myRdd = [(('A', 'B', 'C', '-'), 0)]
    myRdd = myRdd.flatMap(bfs_flat_map).reduceByKey(bfs_reduce)
    prev_num = 0
    pos_num = myRdd.count()

    while prev_num != pos_num:
        level+=1
        prev_num = pos_num
        myRdd = myRdd.flatMap(bfs_flat_map)
        if level%4==0:
            myRdd = myRdd.partitionBy(16)
        myRdd = myRdd.reduceByKey(bfs_reduce)
        pos_num = myRdd.count()

    """ YOUR OUTPUT CODE HERE """
    # myRdd = myRdd.map(lambda a: (a[1], a[0])).sortByKey().collect() # myRdd becomes a list
    # for each in myRdd:
    #     output(str(each[0]) + " " + str(each[1]))
    myRdd = myRdd.map(lambda a: (Sliding.hash_to_board(WIDTH, HEIGHT, a[1]), a[0])).sortByKey()
    sc.stop()
Code example #19
File: SlidingBfsSpark.py Project: SanityL/Projects
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0  # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    myRdd = sc.parallelize([(sol, level)
                            ])  # myRdd = [(('A', 'B', 'C', '-'), 0)]
    myRdd = myRdd.flatMap(bfs_flat_map).reduceByKey(bfs_reduce)
    prev_num = 0
    pos_num = myRdd.count()

    while prev_num != pos_num:
        level += 1
        prev_num = pos_num
        myRdd = myRdd.flatMap(bfs_flat_map)
        if level % 4 == 0:
            myRdd = myRdd.partitionBy(16)
        myRdd = myRdd.reduceByKey(bfs_reduce)
        pos_num = myRdd.count()
    """ YOUR OUTPUT CODE HERE """
    # myRdd = myRdd.map(lambda a: (a[1], a[0])).sortByKey().collect() # myRdd becomes a list
    # for each in myRdd:
    #     output(str(each[0]) + " " + str(each[1]))
    myRdd = myRdd.map(lambda a: (Sliding.hash_to_board(WIDTH, HEIGHT, a[1]), a[0])).sortByKey()
    sc.stop()
Code example #20
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT = height
    WIDTH = width
    level = 0

    sc = SparkContext(master, "python")
    """ YOUR CODE HERE """
    sol = Sliding.solution(WIDTH, HEIGHT)
    hashID = Sliding.board_to_hash(
        WIDTH, HEIGHT,
        sol)  #board(obj) to hash(int) #either sol or value[0], is this here?
    rdd = sc.parallelize([(hashID, level)])

    prev_count = 0
    count = rdd.count()

    k = 0
    i = 0
    #put this here since I am assuming this part?   next try uncomment this
    #hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            #board = Sliding.hash_to_board(WIDTH, HEIGHT, hashID)
            rdd = rdd.partitionBy(16, hash)  #else try Sliding.board_to_hash
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()

        k += 1
        i += 1
    boardState = Sliding.board_to_hash(
        WIDTH, HEIGHT, sol
    )  #board(obj) to hash(int) #either sol or value[0], is this here?, so it is an int
    #PARTITION_COUNT = slaves
    #slaves = 12
    #output = str(pos[1]) + " " + str(pos[0])
    rdd.coalesce(slaves).saveAsTextFile(
        output
    )  # Let NUM_WORKERS be the number of workers (6 or 12), this is the new way IS IT SLAVES
    sc.stop()
Code example #21
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 1 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = [ ( Sliding.solution(WIDTH, HEIGHT), 0 ) ]
    sol_rdd = sc.parallelize(sol)
   
    prev_lvl_count = 0
    current_lvl_count = 1
    current_lvl_rdd = sol_rdd

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    while prev_lvl_count != current_lvl_count:
        current_lvl_rdd = current_lvl_rdd.flatMap(bfs_flatmap).reduceByKey(bfs_reduce)
        #print(current_lvl_rdd.collect())
        prev_lvl_count = current_lvl_count
        current_lvl_count = current_lvl_rdd.count()
        level += 1
        if level % 8 == 0:
            current_lvl_rdd = current_lvl_rdd.partitionBy(16)

    """ YOUR OUTPUT CODE HERE """

    current_lvl_rdd = current_lvl_rdd.map(flip_map).sortByKey()
    rdd_list = current_lvl_rdd.collect()
    for board in rdd_list:
        output(str(board[0]) + " " + str(board[1]))
Code example #22
File: SlidingBfsSpark.py Project: 7DBW13/CS61C-3
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    data = sc.parallelize([[sol, 0]])
    count, newCount = 1, 0

    while count != newCount:
        count = newCount
        if level % 10 == 0:
            data = data.partitionBy(16)
        data = data.flatMap(bfs_flatmap).map(bfs_map).reduceByKey(bfs_reduce)
        level += 1
        newCount = data.count()

    """ YOUR OUTPUT CODE HERE """
    data= data.map(kv_switch).sortByKey()
    outdata = data.collect()

    for el in outdata:
        output(str(el[0]) + ' ' + str(el[1]))

    sc.stop()
Code example #23
File: SlidingBfsSpark.py Project: yuluntian/Spark
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    # Map Reduce
    tree = sc.parallelize([(sol, level)]) # all (position, level) pairs visited so far
    

    while True:
        temp = tree.flatMap(bfs_map)
        if level % 16 == 0:
            temp = temp.partitionBy(16)
        temp = temp.reduceByKey(bfs_reduce) # return a new tree that contains the children of the frontier nodes
        if level % 8 == 0:
            if temp.count() == tree.count(): # if no new positions are added, exit
                break
        tree = temp
        level = level + 1


    for s in tree.collect():
        output(str(s[1]) + " " + str(s[0]))
    sc.stop()
Code example #24
File: SlidingBfsSpark.py Project: podfog/SchoolWork
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    count = 1
    new_count = 2
    output_rdd = sc.parallelize([(sol, 0)])
    while count != new_count:
        level = level + 1
        output_rdd = output_rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if level % 8 == 0:
            output_rdd = output_rdd.partitionBy(16)
            count = new_count
            new_count = output_rdd.count()

    """ YOUR OUTPUT CODE HERE """
    end = output_rdd.collect()
    string = ""
    for i in range(0, len(end)):
        output(str(end[i][1]) + " " + str(end[i][0]))
    sc.stop()
Code example #25
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    all_sols = sc.parallelize([(sol, level)])
    last_sols = sc.parallelize([(sol, level)])
    while True:
        last_sols = last_sols.flatMap(bfs_map)
        temp2 = all_sols + last_sols
        before_count = all_sols.count()
        all_sols = temp2.reduceByKey(bfs_reduce)
        if before_count == all_sols.count():
            break

    flipped = all_sols.collect()
    final = []
    for pair in flipped:
        final.append((pair[1], pair[0]))
    print(sorted(final))


    """ YOUR OUTPUT CODE HERE """
    
    sc.stop()
Code example #26
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT = height
    WIDTH = width
    level = 0

    sc = SparkContext(master, "python")
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))
    """ YOUR CODE HERE """
    count = 1
    new_count = 2
    output_rdd = sc.parallelize([(sol, 0)])
    while count != new_count:
        level = level + 1
        output_rdd = output_rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if level % 8 == 0:
            output_rdd = output_rdd.partitionBy(PARTITION_COUNT)
            count = new_count
            new_count = output_rdd.count()
    output_rdd.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #27
def main():
    global W, H
    parser = argparse.ArgumentParser(
        description="Returns back the entire solution graph.")
    parser.add_argument("-H",
                        "--height",
                        type=int,
                        default=2,
                        help="height of the puzzle")
    parser.add_argument("-W",
                        "--width",
                        type=int,
                        default=2,
                        help="width of the puzzle")
    args = parser.parse_args()

    W = args.width
    H = args.height

    p = Sliding.solution(W, H)
    slidingBfsSolver(p)
Code example #28
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level, frontRDD
    HEIGHT=height
    WIDTH=width
    level = 0
    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    print(sol)

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    max_level = 6
    frontRDD = sc.parallelize([(Sliding.board_to_hash(WIDTH, HEIGHT, sol), level)])
    count = 0

    prevCount = -1
    while 1:
        if count % 8 == 0:
           frontRDD = frontRDD.repartition(PARTITION_COUNT)
        frontRDD = frontRDD.flatMap(bfs_map)
        frontRDD = frontRDD.reduceByKey(bfs_reduce)
        count = count + 1
        level = level + 1
        currCount = frontRDD.count()
        if currCount > prevCount:
            prevCount = currCount
        else:
            break
    #frontRDD = frontRDD.map(lambda x: (x[1], x[0]))
    #frontRDD = frontRDD.sortByKey(True, PARTITION_COUNT)
    """ YOUR OUTPUT CODE HERE """
    frontRDD.coalesce(slaves).saveAsTextFile(output)
    #outList = frontRDD.collect()
    #for l in outList:
        #output(str(l[0])+" "+str(l[1]))

    sc.stop()
Code example #29
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0  # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    count = 1
    new_count = 2
    output_rdd = sc.parallelize([(sol, 0)])
    while count != new_count:
        level = level + 1
        output_rdd = output_rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if level % 8 == 0:
            output_rdd = output_rdd.partitionBy(16)
            count = new_count
            new_count = output_rdd.count()
    """ YOUR OUTPUT CODE HERE """
    end = output_rdd.collect()
    string = ""
    for i in range(0, len(end)):
        output(str(end[i][1]) + " " + str(end[i][0]))
    sc.stop()
Code example #30
File: SlidingBfsSpark.py Project: killernye/cs61c
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
   
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    soln = sc.parallelize([(sol, 0)])
    num = 1
    temp = 0
    while (temp != num):
        if (level % 16 == 0):
            soln = soln.partitionBy(16, hashhash)
        level = level + 1
        soln = soln.flatMap(bfs_map).reduceByKey(bfs_reduce)
        temp = num
        num = soln.count()
    """ YOUR OUTPUT CODE HERE """    
    temp = soln.collect()
    for tup in temp:
        output(str(tup[1]) + " " + str(tup[0]))
    sc.stop()
Code example #31
File: SlidingBfsSpark.py Project: warlck/cs61c
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.solution(WIDTH, HEIGHT)
    hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?
    rdd = sc.parallelize([(hashID, level)])

    prev_count = 0
    count = rdd.count()

    k = 0
    i = 0
    #put this here since I am assuming this part?   next try uncomment this
    #hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            #board = Sliding.hash_to_board(WIDTH, HEIGHT, hashID)
            rdd = rdd.partitionBy(16, hash) #else try Sliding.board_to_hash
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()

        k += 1
        i += 1
    boardState = Sliding.board_to_hash(WIDTH, HEIGHT, sol) #board(obj) to hash(int) #either sol or value[0], is this here?, so it is an int
    #PARTITION_COUNT = slaves
    #slaves = 12
    #output = str(pos[1]) + " " + str(pos[0])
    rdd.coalesce(slaves).saveAsTextFile(output) # Let NUM_WORKERS be the number of workers (6 or 12), this is the new way IS IT SLAVES
    sc.stop()
Code example #32
File: copy(more).py Project: lei272/cs61c
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0  # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    rdd = sc.parallelize([(sol, level)])

    prev_count = 0
    count = rdd.count()
    # while rdd.filter(lambda x: x[1] == level).count() != 0:
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        prev_count = count
        count = rdd.count()
    """ YOUR OUTPUT CODE HERE """
    positions = rdd.collect()
    for pos in positions:
        output(str(pos[1]) + " " + str(pos[0]))
    sc.stop()
Code example #33
File: SlidingBfsSpark.py Project: podfog/SchoolWork
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))


    """ YOUR CODE HERE """
    count = 1
    new_count = 2
    output_rdd = sc.parallelize([(sol, 0)])
    while count != new_count:
        level = level + 1
        output_rdd = output_rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if level % 8 == 0:
            output_rdd = output_rdd.partitionBy(PARTITION_COUNT)
            count = new_count
            new_count = output_rdd.count()
    output_rdd.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #34
File: SlidingBfsSpark.py Project: logicx24/proj2-2
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    sol = Sliding.solution(WIDTH, HEIGHT)
    
    frontierRDD = sc.parallelize([(Sliding.board_to_hash(WIDTH, HEIGHT, sol), 0)])
    boardsRDD = sc.parallelize([(Sliding.board_to_hash(WIDTH, HEIGHT, sol), 0)])
    #while frontierRDD.count() != 0:
    while True:
        level += 1
            
        # get all frontier nodes as a flattened list of ONLY (key), NOT (key, value)
        frontierRDD = frontierRDD.flatMap(lambda v: Sliding.children(WIDTH, HEIGHT, Sliding.hash_to_board(WIDTH, HEIGHT, v[0])))
            
        # add new (child, level) pairs to all boards
        boardsRDD = boardsRDD + frontierRDD.map(lambda v: (Sliding.board_to_hash(WIDTH, HEIGHT, v), level))
        #boardsRDD = boardsRDD.partitionBy(8, partitionFunc)
            
        # only keep board seen at lowest level
        boardsRDD = boardsRDD.reduceByKey(lambda v1, v2: min(v1, v2))

        # frontier is only the boards that have the current level
        frontierRDD = boardsRDD.filter(lambda v: v[1] == level)

        # magic voodoo that it doesn't work without
        boardsRDD = boardsRDD.partitionBy(slaves, lambda v: v)
        frontierRDD = frontierRDD.partitionBy(slaves, lambda v: v)
        if level % 4 == 0 and frontierRDD.count() == 0:
            break

    boardsRDD.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #35
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
   
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    soln = sc.parallelize([(Sliding.board_to_hash(WIDTH,HEIGHT,sol), 0)])
    num = 1
    temp = 0
    while (temp != num):
        if (level % 16 == 0):
            soln = soln.partitionBy(PARTITION_COUNT, hash)
        level = level + 1
        soln = soln.flatMap(bfs_map).reduceByKey(bfs_reduce)
        temp = num
        num = soln.count()
    """ YOUR OUTPUT CODE HERE """    
    soln.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Code example #36
File: SlidingBfsSpark.py Project: stupidfive/CS61C-1
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT = height
    WIDTH = width
    level = 0

    sc = SparkContext(master, "python")
    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))
    RDD = sc.parallelize([(sol, level)])
    count = RDD.count()
    RDD_count = 0
    search = True
    k = 1
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    while search:
        if k % 3 == 0:
            RDD = RDD.flatMap(bfs_map).partitionBy(
                PARTITION_COUNT).reduceByKey(
                    bfs_reduce)  #PUT PARTITION_COUNT FOR 16
        else:
            RDD = RDD.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if k % 2 == 0:
            RDD_count = RDD.count()
            if RDD_count == count:
                search = False
            count = RDD_count
        k = k + 1
        level = level + 1
    """ YOUR OUTPUT CODE HERE """
    RDD = RDD.map(swap_map)
    RDD.coalesce(slaves).saveAsTextFile(output)
    #outputLst = RDD.collect()
    #for elem in outputLst:
    #output(str(elem[0]) + " " + str(elem[1])) #output the elements
    sc.stop()
Code example #37
def solve_sliding_puzzle(master, output, height, width):
    """
        Aqui tienen que construir el RDD Spark y todo lo demas

        @param master: el master que tienen que utilizar
        @param output: la funcion que espera un string para escribir en archivo
        @param height: el height
        @param width: el width
    """
    global w, h, level, graph_length

    level = 0
    w = width
    h = height
    graph_length = (math.factorial(w * h) / 2)
    print "graph length %s" % graph_length

    #solucion
    solution = Sliding.solution(w, h)
    # Conf
    conf = SparkConf().setAppName('SlidingBFS').setMaster(master)
    # Spark Context
    sc = SparkContext(conf=conf)

    #######################################
    #       PUT YOUR SOLUTION HERE        #
    #######################################

    graph = sc.parallelize([(solution, 0)])
    out = sorted(bfs_map(sc, solution, graph).collect(),
                 key=lambda tup: tup[1])

    for line in out:
        output(str(line[1]) + " " + str(line[0]))

    sc.stop()
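The graph_length computed in example #37 is the full size of the reachable state space: by the standard parity argument for sliding puzzles, only half of all permutations of the w*h tiles can be reached from the solved board, hence the division by two. A quick check of the sizes this implies:

import math

print(math.factorial(2 * 2) // 2)   # 2x2 puzzle -> 12 reachable positions
print(math.factorial(3 * 3) // 2)   # 3x3 puzzle -> 181440 reachable positions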
Code example #38
File: SlidingBfsSpark.py Project: k7p/PuzzleSolver
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """

    #how can you get the children on the highest level from the solution?

    #how do you get a list of board states




    '''

    Get board and level from the RDD and pass into map and rduce

    reduceByKey(func)

    List of all children board states and pass into map

    while level < height - 1?????; do map and reduce
    '''




    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)

    #curr_level = 0



    """ YOUR MAP REDUCE PROCESSING CODE HERE """  

    #children = Sliding.children(WIDTH, HEIGHT, sol)

    #create the rdd

    rdd = sc.parallelize( [(sol, 0)] )

    #parallelize only takes in a list
    #you pass in the board because you start from there 

    #rdd = hash(rdd)
    #rdd = rdd.partitionBy(116)         

    rdd = rdd.coalesce(4)            
    rdd = rdd.partitionBy(16)           
    #rdd = hash(rdd)  

    while True: 
        #if level%4==0:
            #rdd = rdd.partitionBy(16, hash) 

        curr_size = rdd.count() #tells you how many things are in the rdd

        rdd = rdd.flatMap(bfs_map) #you want to return a list for reduce to reduce    

        rdd = rdd.reduceByKey(bfs_reduce) 

        if rdd.count() == curr_size:  #added nothing new so break 
            break
        
        level = level + 1
        """ YOUR OUTPUT CODE HERE """
        
    result_list = rdd.collect() #collects the things inside the list 

    for stuff in result_list:
        one = stuff[1]
        two = stuff[0]
        output(str(one) + str(two))
        #output(str(stuff))
    #rdd.coalesce(1).saveAsTextFile(output)

        

    sc.stop()
Code example #39
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 1 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    dataAggregate = sc.parallelize([(serializeData(sol), 0)]);
    data = dataAggregate
    curLen = 1

    #addFunc = lambda x, y: x + y
    maximum = math.factorial(HEIGHT*WIDTH)/2
    k=0 #count number
    p=0 #partition number
    if (maximum<13):
        k=16
        p=8
    elif(maximum<362880):
        k=32
        p=8
    else:
        k=56
        p=16
    while curLen < maximum: # not really just temp value
        #data.cache()
        data = data.flatMap(bfs_flat_map) \
                   .distinct() \
                   .map(gen_bfs_map(level))
        dataAggregate += data
        level += 1
        if(level%k == 0):
            data = dataAggregate.partitionBy(p).reduceByKey(bfs_reduce)
            dataAggregate = data
            curLen = data.count()
    # """ YOUR OUTPUT CODE HERE """
    # # sort by value
    # output('\n'.join(data.map(lambda (x,y): str(x) + " " + tuple(str(y))).collect()))
    # # output("\n".join([str(level), str(curLen), str(maximum)]))
    pairs = data.collect()
    pairs=sorted(pairs, key = lambda value : value[1]) 


    """ YOUR OUTPUT CODE HERE """
    # print(pairs)
    for pair in pairs:
        # print (pair)
        string=str(pair[1])+" "+str(tuple(pair[0]))
        output(string)
    sc.stop()
Code example #40
def main(args):
    p = Sliding.solution(args.width, args.height)
    slidingBfsSolver(p, args.width, args.height)
Code example #41
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0  # this "constant" will change, but it remains constant for every MapReduce job
    #THIS MEANS THAT MAPREDUCE FROM LEVEL TO NEXT LEVEL

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    rdd = [(sol, 0)]
    prevcount = 0
    c = 1
    rdd = sc.parallelize(rdd)
    k = 0

    while c != prevcount:
        if k == 10:
            rdd = rdd.partitionBy(8)
            k = 0
        rdd = rdd.flatMap(bfs_map) \
                .reduceByKey(bfs_reduce, numPartitions=16)
        prevcount = c
        c = rdd.count()
        level += 1
        k += 1

    finalsolution = rdd.collect()

    ## SET METHOD: this also works
    # isdone = False
    # level_to_pos = {}
    # pos_to_level = {}
    # visited = [sol]
    # visited = set(visited)
    # currBoard = []
    # currBoard.append(sol)
    # rdd = sc.parallelize(currBoard)
    # level = 0
    # level_to_pos[level] = [sol]
    # level += 1

    # while isdone == False:
    #     rdd = rdd.map(bfs_map) \
    #             .reduce(bfs_reduce)
    #     currlevelset = set(rdd) #gets rid of duplicates
    #     currlevelset = currlevelset.difference(visited) #gets rid of positions visited in previous levels
    #     if not currlevelset:
    #         isdone = True
    #     level_to_pos[level] = list(currlevelset) #adds the remaining positions to current level
    #     visited = visited.union(currlevelset)
    #     rdd = sc.parallelize(list(currlevelset))
    #     level += 1
    """ YOUR OUTPUT CODE HERE """
    #SET METHOD
    # sc.stop()
    # for i in level_to_pos:
    #     output(str(level_to_pos[i]))

    #kv method
    sc.stop()
    for positiontuple in finalsolution:
        output(str(positiontuple[1]) + " " + str(positiontuple[0]))
Code example #42
	def get_solution_hash():
		return Sliding.board_to_hash(width, height, Sliding.solution(width, height))
Code example #43
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    # initialize the RDD with the solution board at level 0
    RDD = sc.parallelize([(sol, level)])
    prev_count = 0
    count = RDD.count()

    # base case: stop when no more boards are added at a level
    while prev_count != count:
        level += 1
        # call map reduce here (careful with collect(): it is serial, not parallel)
        RDD = RDD.flatMap(bfs_map).reduceByKey(bfs_reduce)
        prev_count = count
        count = RDD.count()
    """ YOUR OUTPUT CODE HERE """

    sc.stop()



""" DO NOT EDIT PAST THIS LINE

You are welcome to read through the following code, but you
do not need to worry about understanding it.
"""

def main():
    """
    Parses command line arguments and runs the solver appropriately.
    If nothing is passed in, the default values are used.
    """
    parser = argparse.ArgumentParser(
            description="Returns back the entire solution graph.")
    parser.add_argument("-M", "--master", type=str, default="local[8]",
            help="url of the master for this job")
    parser.add_argument("-O", "--output", type=str, default="solution-out",
            help="name of the output file")
    parser.add_argument("-H", "--height", type=int, default=2,
            help="height of the puzzle")
    parser.add_argument("-W", "--width", type=int, default=2,
            help="width of the puzzle")
    args = parser.parse_args()


    # open file for writing and create a writer function
    output_file = open(args.output, "w")
    writer = lambda line: output_file.write(line + "\n")

    # call the puzzle solver
    solve_sliding_puzzle(args.master, writer, args.height, args.width)

    # close the output file
    output_file.close()

# begin execution if we are running this file directly
if __name__ == "__main__":
    main()