def test():
    """Load the card database and the cube, then print the cube's price."""
    database = DbLoader().load()
    loaded_cube = CubeLoader(database).load()
    print(Pricer.price(loaded_cube))
def init(cls):
    """Set up the shared pixmap loader, card database, and serialization strategy."""
    cls.pixmap_loader = PixmapLoader(
        pixmap_executor=30,
        printing_executor=30,
        imageable_executor=30,
    )
    # If the first load fails, refresh the database once and retry.
    try:
        database = Loader.load()
    except DBLoadException:
        update()
        database = Loader.load()
    cls.db = database
    cls.serialization_strategy = JsonId(database)
def run():
    """Interactive search REPL over the cube's unique cardboards."""
    db = Loader.load()
    cube = CubeLoader(db).load()
    cardboards = {printing.cardboard for printing in cube.all_printings}
    search_parser = SearchParser(db)
    print(f'cube loaded, {len(cardboards)} unique cardboards')
    while True:
        query = input(': ')
        try:
            pattern = search_parser.parse(query)
        except ParseException as e:
            print(f'Invalid query "{e}"')
            continue
        results = list(pattern.matches(cardboards))
        names = sorted(cardboard.name for cardboard in results)
        suffix = "" if len(results) == 1 else "s"
        print(
            '\n'.join(names)
            + f'\n\n-------\n{len(results)} result{suffix}'
        )
def proxy_recent_distribution():
    """Print the minimal string list of the most recently persisted trap collection.

    Fix: the original also constructed an ``ImageLoader`` and a ``CubeLoader``
    that were never used; both dead locals are removed.
    """
    db = Loader.load()
    trap_collection_persistor = TrapCollectionPersistor(db)
    recent_winner_trap_collection = trap_collection_persistor.get_most_recent_trap_collection()
    print(recent_winner_trap_collection.minimal_string_list)
def count():
    """Tally front-face artists across the cube's unique printings and print them, least common first."""
    db = Loader.load()
    cube = CubeLoader(db).load()
    unique_printings = set(cube.all_printings)
    artists = Multiset(p.front_face.artist for p in unique_printings)
    for artist, multiplicity in sorted(artists.items(), key=lambda item: item[1]):
        print(artist, multiplicity)
def run():
    """Update the cube, render all proxy images to PDF, and print the difference report."""
    from mtgorp.db.load import Loader

    db = Loader.load()
    cube_loader = CubeLoader(db)
    cube_loader.check_and_update()
    proxyer = LapProxyer(
        cube_loader,
        margin_size=.7,
        card_margin_size=.05,
    )
    proxyer.pdf_all_images()
    print(proxyer.difference_report().report)
def init(cls):
    # Load the shared card database once onto the class.
    cls.db = Loader.load()
def calculate(
        generations: int,
        trap_amount: int,
        max_delta: t.Optional[int] = None,
        create_proxy_pdfs: bool = True,
        persist_traps: bool = True,
):
    """Evolve a new garbage-trap distribution for the cube and report the result.

    Runs a genetic distributor for ``generations`` generations, compares the
    winner against the cube's current garbage traps, writes the winning
    distribution to ``old_distribution.json``, and optionally persists the new
    trap collection and renders proxy PDFs.

    :param generations: number of generations to evolve.
    :param trap_amount: number of traps the distribution should contain.
    :param max_delta: if a positive value is given, use a DeltaDistributor
        constrained to at most this many trap changes from the current cube.
    :param create_proxy_pdfs: render full/new/removed proxy PDFs when True.
    :param persist_traps: persist the winning trap collection when True.

    Fix: in the original text the ``print('Random fitness:', ...)`` statement
    was garbled (split in the middle of its string literal); it is
    reconstructed here as a single statement.
    """
    random.seed()

    db = Loader.load()
    image_loader = ImageLoader()
    fetcher = ConstrainedNodeFetcher(db)
    cube_loader = CubeLoader(db)
    trap_collection_persistor = TrapCollectionPersistor(db)

    constrained_nodes = fetcher.fetch_garbage()
    print(f'loaded {len(constrained_nodes)} nodes')

    cube = cube_loader.load()
    # Only the garbage-intentioned traps of the current cube are compared against.
    cube_traps = FrozenMultiset(
        trap
        for trap in cube.traps
        if (
            trap.intention_type == IntentionType.GARBAGE
            or trap.intention_type == IntentionType.LAND_GARBAGE
        )
    )

    blue_print = ConstraintSetBluePrint(
        (algorithm.ValueDistributionHomogeneityConstraint, 2, {}),
        (algorithm.GroupExclusivityConstraint, 2, {'group_weights': GROUP_WEIGHTS}),
        (algorithm.SizeHomogeneityConstraint, 1, {}),
    )

    if max_delta is not None and max_delta > 0:
        distributor = DeltaDistributor(
            constrained_nodes=constrained_nodes,
            trap_amount=trap_amount,
            origin_trap_collection=cube_traps,
            constraint_set_blue_print=blue_print,
            max_trap_delta=max_delta,
            mate_chance=.45,
            mutate_chance=.35,
            tournament_size=3,
            population_size=600,
        )
    else:
        distributor = Distributor(
            constrained_nodes=constrained_nodes,
            trap_amount=trap_amount,
            constraint_set_blue_print=blue_print,
            mate_chance=.5,
            mutate_chance=.45,
            tournament_size=4,
            population_size=400,
        )

    # Baseline: mean fitness of a random population, for comparison below.
    random_fitness = statistics.mean(
        map(distributor.constraint_set.total_score, distributor.sample_random_population)
    )

    st = time.time()
    winner = distributor.evaluate(generations).best
    print(f'Done in {time.time() - st} seconds')

    distribution_model = DistributionModel(
        tuple(
            tuple(
                NewConstrainedNode(node.value, node.node, node.groups)
                for node in trap
            )
            for trap in winner.traps
        )
    )
    print('saved nodes:', sum(map(len, distribution_model.traps)))
    with open(os.path.join(paths.OUT_DIR, 'old_distribution.json'), 'w') as f:
        f.write(JsonId(db).serialize(distribution_model))

    print('Random fitness:', random_fitness)
    try:
        print('Current cube fitness:', distributor.evaluate_cube(cube_traps))
    except ValueError:
        # NOTE(review): `added`/`removed` are only defined on this failure
        # path, so these prints must live inside the handler.
        print('Nodes does not match current cube')
        _, added, removed = distributor.trap_collection_to_trap_distribution(
            cube_traps, constrained_nodes
        )
        print('added:', added)
        print('removed:', removed)

    print('Winner fitness:', winner.fitness.values[0])
    distributor.show_plot()

    winner_traps = winner.as_trap_collection
    # Stamp every winning trap with the garbage intention before diffing.
    for trap in winner_traps:
        trap._intention_type = IntentionType.GARBAGE

    new_traps = winner_traps - cube_traps
    removed_traps = cube_traps - winner_traps
    print('New traps', len(new_traps))

    trap_collection = TrapCollection(winner_traps)
    print('\n------------------------------------------------\n')
    print(trap_collection.minimal_string_list)
    print('\n------------------------------------------------\n')

    if persist_traps:
        trap_collection_persistor.persist(trap_collection)
        print('traps persisted')

    if create_proxy_pdfs:
        proxy_laps(
            laps=winner_traps,
            image_loader=image_loader,
            file_name=GARBAGE_OUT_PATH,
        )
        proxy_laps(
            laps=new_traps,
            image_loader=image_loader,
            file_name=GARBAGE_NEW_OUT_PATH,
        )
        proxy_laps(
            laps=removed_traps,
            image_loader=image_loader,
            file_name=GARBAGE_REMOVED_OUT_PATH,
        )
        print('proxying done')
def test():
    """Build a small CubePatch against the loaded cube and print its update report."""
    db = Loader.load()
    strategy = JsonId(db)
    cube = CubeLoader(db).load()
    constrained_nodes = NodeCollection(ConstrainedNodeFetcher(db).fetch_garbage())
    groups = GroupMap(_GROUP_WEIGHTS)

    brainstorm = db.cardboards['Brainstorm']
    patch = CubePatch(
        CubeDeltaOperation(
            {
                brainstorm.from_expansion('ICE'): -1,
                brainstorm.from_expansion('EMA'): 1,
            }
        ),
        NodesDeltaOperation(
            {
                ConstrainedNode(
                    node=AllNode(
                        (
                            brainstorm.from_expansion('ICE'),
                            db.cardboards['Web'].from_expansion('LEA'),
                        )
                    ),
                    groups=['lolHAHA'],
                    value=1,
                ): 1,
            }
        ),
    )
    print(patch)

    meta_cube = MetaCube(cube, constrained_nodes, groups)
    verbose_patch = patch.as_verbose(meta_cube)
    print(verbose_patch)

    updater = CubeUpdater(meta_cube, patch)
    print(updater)

    report = UpdateReport(updater)
    for notification in report.notifications:
        print(notification.title + '\n' + notification.content + '\n\n')
from configparser import ConfigParser

from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.orm import scoped_session

from mtgorp.db.load import Loader

from mkmcheck import paths


# Load the card database once at import time.
_loader = Loader()
db = _loader.load()

# Database connection settings read from the on-disk config file.
_parser = ConfigParser()
_parser.read(paths.DATABASE_CONFIG_PATH)
_keys = _parser['DEFAULT']

# Spreadsheet identifiers/names read from the values config file.
_values_parser = ConfigParser()
_values_parser.read(paths.VALUES_CONFIG_PATH)
_values = _values_parser['DEFAULT']

SHEET_ID = _values['target_sheet_id']
INPUT_SHEET_NAME = _values['input_sheet_name']
OUTPUT_SHEET_NAME = _values['output_sheet_name']
OUTPUT_SHEET_ID = int(_values['output_sheet_id'])
OUTPUT_KNAPSACK_SHEET_NAME = _values['output_knapsack_sheet_name']