# Imports collected for the snippets below; the project-local names (Job,
# Result, TargetSize, distribute, load_json and the _solve_* internals) come
# from the repo's own modules.
import copy
import random
import time
from itertools import permutations
from pathlib import Path
from typing import List


def post_solve(job: Job):
    # pydantic guarantees type safety
    job.assert_valid()
    solved: Result = distribute(job)
    solved.assert_valid()
    return solved
def test_from_json():
    json_file = Path("./tests/res/in/testjob.json")
    assert json_file.exists()

    with open(json_file, "r") as encoded_job:
        job = Job.parse_raw(encoded_job.read())

    assert isinstance(job, Job)
    assert len(job) > 0
def random_job() -> Job:
    max_length = random.randint(1000, 2000)
    cut_width = random.randint(0, 10)
    n_sizes = random.randint(5, 10)

    sizes = []
    for _ in range(n_sizes):
        sizes.append(TargetSize(length=random.randint(10, 1000),
                                quantity=random.randint(1, 20)))

    return Job(max_length=max_length, target_sizes=sizes, cut_width=cut_width)
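# A hypothetical property-style check (not part of the original suite) showing
# how random_job can exercise the solvers: every requested piece must appear
# in the output exactly once. Assumes sizes_as_list() and the solver internals
# imported above.
def test_solvers_conserve_pieces():
    job = random_job()
    expected = sum(size.quantity for size in job.sizes_as_list())
    for solver in (_solve_gapfill, _solve_FFD):
        stocks = solver(job)
        assert sum(len(stock) for stock in stocks) == expected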
def _solve_gapfill(job: Job) -> List[List[int]]:
    # 1. sort by magnitude (largest first)
    # 2. stack until the limit is reached
    # 3. try smaller sizes as long as possible
    # 4. open a new bar
    # TODO: rewrite to use native map instead

    # we are mutating the target sizes, so copy to prevent leaking changes into job
    mutable_sizes = copy.deepcopy(job.sizes_as_list())
    targets = sorted(mutable_sizes, reverse=True)

    stocks = []
    current_size = 0
    current_stock = []

    i_target = 0
    while len(targets) > 0:
        # nothing fit, move on to the next stock
        if i_target >= len(targets):
            # save the local result
            stocks.append(current_stock)
            # reset
            current_stock = []
            current_size = 0
            i_target = 0

        current_target = targets[i_target]
        # target fits inside the current stock, transfer it to the result
        if (current_size + current_target.length + job.cut_width) < job.max_length:
            current_stock.append(current_target.length)
            current_size += current_target.length + job.cut_width

            # remove exhausted entries
            if current_target.quantity <= 1:
                targets.remove(current_target)
            else:
                current_target.quantity -= 1
        # try a smaller target
        else:
            i_target += 1

    # append the last, still open stock
    if current_stock:
        stocks.append(current_stock)

    # trimmings could be calculated as len(stocks) * job.max_length - sum of all lengths
    return stocks
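# Worked example (illustrative, not in the original suite): with cut_width=0,
# gapfill packs both 500s together, cannot fit a 300 next to them
# (500 + 500 + 300 > 1200) and opens a second stock for the 300s.
def test_gapfill_example():
    job = Job(max_length=1200,
              target_sizes=(TargetSize(length=500, quantity=2),
                            TargetSize(length=300, quantity=2)),
              cut_width=0)
    assert _solve_gapfill(job) == [[500, 500], [300, 300]]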
def test_full_model():
    json_job = Path("./tests/res/in/testjob.json")
    assert json_job.exists()
    json_result = Path("./tests/res/out/testresult.json")
    assert json_result.exists()

    with open(json_job, "r") as encoded_job:
        job = Job.parse_raw(encoded_job.read())

    solved = distribute(job)
    encoded_solved = solved.json()
    assert len(encoded_solved) > 20

    with open(json_result, "r") as encoded_result:
        result = Result.parse_raw(encoded_result.read())

    assert solved == result
def test_benchmark():
    job = Job(max_length=1200,
              target_sizes=(TargetSize(length=300, quantity=3),
                            TargetSize(length=200, quantity=3),
                            TargetSize(length=100, quantity=3)),
              cut_width=0)

    # record timestamps between runs; durations are computed from consecutive
    # timestamps, not by subtracting a duration from a timestamp
    start = time.perf_counter()
    solved_bruteforce = _solve_bruteforce(job)
    after_bruteforce = time.perf_counter()
    solved_gapfill = _solve_gapfill(job)
    after_gapfill = time.perf_counter()
    solved_FFD = _solve_FFD(job)
    after_FFD = time.perf_counter()

    t_bruteforce = after_bruteforce - start
    t_gapfill = after_gapfill - after_bruteforce
    t_FFD = after_FFD - after_gapfill

    # bruteforce should be better at the cost of increased runtime
    print(f"[Runtime] Bruteforce: {t_bruteforce:.2f}s, "
          f"Gapfill: {t_gapfill:.2f}s, FFD: {t_FFD:.2f}s")
def _solve_bruteforce(job: Job) -> List[List[int]]:
    # failsafe: n! orderings explode quickly
    if len(job) > 12:
        raise OverflowError("Input too large")

    # find every possible ordering (n! elements)
    all_orderings = permutations(job.iterate_sizes())
    # TODO: remove duplicates (due to "quantity")

    # "infinity"
    minimal_trimmings = len(job) * job.max_length
    best_stock: List[List[int]] = []

    # possible improvement: distribute orderings to multiprocessing workers
    for combination in all_orderings:
        stocks, trimmings = _split_combination(combination, job.max_length, job.cut_width)
        if trimmings < minimal_trimmings:
            best_stock = stocks
            minimal_trimmings = trimmings

    return best_stock
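# _split_combination is referenced above but not shown in this section; the
# repo presumably has its own implementation. A minimal sketch consistent with
# its call site, assuming iterate_sizes() yields one length per physical piece:
def _split_combination(combination, max_length: int, cut_width: int):
    stocks: List[List[int]] = []
    current: List[int] = []
    used = 0
    for length in combination:
        # close the current stock if the next piece (plus cut) would not fit
        if current and (used + length + cut_width) >= max_length:
            stocks.append(current)
            current = []
            used = 0
        current.append(length)
        used += length + cut_width
    if current:
        stocks.append(current)
    trimmings = len(stocks) * max_length - sum(sum(stock) for stock in stocks)
    return stocks, trimmings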
def _solve_FFD(job: Job) -> List[List[int]]:
    # iterate over the list of stocks and put each size into the first stock it fits into
    # 1. sort by magnitude (largest first)
    # 2. stack until the limit is reached
    # 3. try smaller sizes as long as possible
    # 4. open a new bar
    # TODO: rewrite to use native map instead

    mutable_sizes = copy.deepcopy(job.sizes_as_list())
    sizes = sorted(mutable_sizes, reverse=True)

    stocks: List[List[int]] = [[]]
    stock_lengths: List[int] = [0]

    i_target = 0
    while i_target < len(sizes):
        current_size = sizes[i_target]
        for i, stock in enumerate(stocks):
            # step through existing stocks until the current size (plus cut) fits
            if (stock_lengths[i] + current_size.length + job.cut_width) < job.max_length:
                # add the size
                stock.append(current_size.length)
                stock_lengths[i] += current_size.length + job.cut_width
                break
        else:
            # nothing fit, open the next bin with this size already placed
            stocks.append([current_size.length])
            stock_lengths.append(current_size.length + job.cut_width)

        # decrease the quantity or move to the next size
        if current_size.quantity <= 1:
            i_target += 1
        else:
            current_size.quantity -= 1

    return stocks
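# _solve_FFD is the classic First-Fit Decreasing heuristic, known to need at
# most roughly 11/9 of the optimal number of stocks plus a small constant.
# A quick illustrative usage with the helpers defined above (cut widths are
# counted as part of the waste here):
def demo_ffd_waste():
    job = random_job()
    stocks = _solve_FFD(job)
    waste = len(stocks) * job.max_length - sum(sum(stock) for stock in stocks)
    print(f"{len(stocks)} stocks, {waste} units of waste")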
def test_to_json():
    job = Job(max_length=1200, cut_width=5, target_sizes={"300": 4, "200": 3})
    assert job.json() == '{"max_length": 1200, "cut_width": 5, ' \
                         '"target_sizes": {"300": 4, "200": 3}}'
def generate_testjob():
    return Job.parse_raw(load_json(Path("./tests/res/in/testjob.json")))