async def part2():
    """Solve part 2: maximize the feedback-loop signal over all orderings
    of the phase settings 5..9, then print and time the result."""
    memory = intcode.init(data.strip().split(','))
    perf()
    best = 0
    # Try every ordering of the five phase settings and keep the strongest signal.
    for phase_order in permutations(range(5, 10), 5):
        best = max(best, await run(memory, phase_order))
    print(f'part 2: {best}')  # part 2: 2645740
    perf()
async def main():
    """Run the intcode program for both puzzle parts.

    Builds the program memory as an address->value dict (sparse memory model),
    runs it with input 1 (part 1) and input 2 (part 2), printing and timing
    each, and returns the pair of answers.

    Returns:
        tuple: (part_a, part_b) puzzle answers.
    """
    tokens = data.strip().split(',')
    # enumerate() instead of range(len(...)): same dict, idiomatic iteration.
    memory = {i: int(tok) for i, tok in enumerate(tokens)}
    # Non-integer key holding the relative base alongside the int addresses
    # — presumably read by the intcode interpreter in run(); TODO confirm.
    memory['rb'] = 0
    perf()
    part_a = await run(memory, 1)
    print(f'part 1: {part_a}')  # part 1: 1 -> 2316632620
    perf()
    part_b = await run(memory, 2)
    print(f'part 2: {part_b}')  # part 2: 2 -> 78869
    perf()
    return part_a, part_b
# NOTE(review): fragment of a larger evaluation loop — the enclosing function
# and the definitions of border, z, y, x, cm, cm1, globalcm, globalcm1,
# globalresize are outside this view. Indentation reconstructed from syntax.

# Invert the 0/1 mask — presumably border==1 marked border pixels and the
# flipped mask selects them for the weighted tally below; TODO confirm.
border = 1 - border
# Accumulate 2x2 confusion counts over prediction z vs. label y:
# cm counts all pixels, cm1 weights each pixel by the (flipped) border mask.
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    cm[a][b] += torch.sum((z == a).float() * (y == b).float())
    cm1[a][b] += torch.sum(
        (z == a).float() * (y == b).float() * border)
# Fold this sample's counts into the running totals across the whole run.
globalcm += cm
globalcm1 += cm1

# Dump debug images for at most the first 100 samples; nextI is the next
# free index derived from how many files are already in build/.
if len(os.listdir("build")) < 100:
    nextI = len(os.listdir("build"))
    debug = util.torchTOpil(globalresize(x))
    debug = PIL.Image.fromarray(numpy.uint8(debug))
    debug.save("build/" + str(nextI) + "_x.png")
    # Label visualization: maps y in {0,1} to {-127,+127} offset by 127,
    # zeroed where the mask is 0 (mid-gray 127 there).
    debug = (2.0 * y - 1) * border * 127 + 127
    debug = debug.cpu().numpy()
    debug = PIL.Image.fromarray(numpy.uint8(debug))
    debug.save("build/" + str(nextI) + "_y.png")
    # Prediction visualization: assumes z is 0/1 so *255 yields black/white.
    debug = z.cpu().numpy() * 255
    debug = PIL.Image.fromarray(numpy.uint8(debug))
    debug.save("build/" + str(nextI) + "_z.png")

# Per-sample metrics, then the aggregated metrics over all samples so far;
# cm - cm1 isolates the pixels excluded by the mask weighting ("bords").
print("perf0=", util.perf(cm))
print("perf1=", util.perf(cm1))
print("bords=", util.perf(cm - cm1))
print("global result")
print("perf0=", util.perf(globalcm))
print("perf1=", util.perf(globalcm1))
print("bords=", util.perf(globalcm - globalcm1))
# NOTE(review): fragment of a training loop body — the enclosing function and
# the definitions of i, nbbatchs, z, y, border, stats, printloss, loss, net
# are outside this view. Indentation reconstructed from syntax.

# Accumulate 2x2 confusion counts (prediction z vs. label y), weighting each
# pixel by (1 - border) — i.e. border==1 pixels are excluded; TODO confirm
# border semantics against the rest of the file.
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    stats[a][b] += torch.sum(
        (z == a).float() * (y == b).float() * (1 - border))

# Progress logging at three cadences: every batch for the first 10, every
# 100 batches up to 1000, every 300 afterwards; printloss is the running
# loss sum, averaged over the window and reset after each report.
if i < 10:
    print(i, "/", nbbatchs, printloss)
if i < 1000 and i % 100 == 99:
    print(i, "/", nbbatchs, printloss / 100)
    printloss = torch.zeros(1).cuda()
if i >= 1000 and i % 300 == 299:
    print(i, "/", nbbatchs, printloss / 300)
    printloss = torch.zeros(1).cuda()

# Every 1000 batches: checkpoint the model, report training accuracy, and
# either hard-exit (os._exit skips cleanup/atexit) on >96 or reset the stats
# window for the next 1000 batches.
if i % 1000 == 999:
    torch.save(net, "build/model.pth")
    print(i, "perf", util.perf(stats))
    if util.perf(stats)[0] > 96:
        print("training stops after reaching high training accuracy")
        os._exit(0)
    else:
        stats = torch.zeros(2, 2).cuda()

# Cumulative loss annealing: the conditions stack, so past 80% of training
# the loss is scaled by 0.5**4 = 1/16.
if i > nbbatchs * 0.1:
    loss = loss * 0.5
if i > nbbatchs * 0.2:
    loss = loss * 0.5
if i > nbbatchs * 0.5:
    loss = loss * 0.5
if i > nbbatchs * 0.8:
    loss = loss * 0.5