def run(versions=None, runBurger=True, runBE=True, runJarExtractor=True):
    # Avoid a mutable default argument; fall back to an empty list
    versions = versions or []
    install_node_jar_extractor()
    install_node_burger_extractor()

    utils.fetch_manifest()
    _vers = utils.extrapolate_versions(versions)

    os.makedirs("output/minecraft-data", exist_ok=True)

    print("Extracting", versions, _vers)

    for version in _vers:
        if version in ('latest', 'snapshot', 'release'):
            version = utils.get_latest_version(version)
        print(c.BOLD, "Extracting", version, c.RESET)
        good = True
        if runBurger:
            good = good and run_burger(version)
        if runBE:
            good = good and run_burger_extractor(version)
        if runJarExtractor:
            good = good and run_prismarine_jar_extractor(version)

        if good:
            print(c.OKGREEN, f"{version} was successfully extracted", c.RESET)
        else:
            print(c.FAIL, f"{version} failed one or more extractions", c.RESET)

Example #2
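# Remove every versioned file in `path` except those matching the newest
# version found there.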
def clean_binaries_and_plugins(path):
    versioned_files = list(filter(
        lambda file_name: re.match(Constants.VERSION_REGEX, file_name),
        listdir(path)
    ))
    versions = list(map(
        lambda file_name: re.search(Constants.VERSION_REGEX, file_name).group(0),
        versioned_files
    ))
    latest_version = get_latest_version(versions)
    for file_name in versioned_files:
        if latest_version not in file_name:
            remove_unless_dry_run(path, file_name)
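
# The cleanup examples (#2, #3, #9) pass version strings to get_latest_version
# and expect the newest one back, or None when nothing matches. A minimal
# sketch under that assumption -- the numeric-split comparison below is
# hypothetical, not the project's confirmed implementation:
import re

def get_latest_version(versions):
    versions = [v for v in versions if v]
    if not versions:
        return None
    # '1.10.2' -> (1, 10, 2): compare numerically so '1.10' sorts above '1.9'
    return max(versions, key=lambda v: tuple(int(n) for n in re.findall(r'\d+', v)))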
Example #3
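# Recursively prune versioned directories under a Maven-style (.m2) repository,
# keeping only the newest version of each artifact.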
def check_and_clean(path):
    files = listdir(path)
    if contains_only_files(files, path):
        return
    directories = filter_out_nones(files)
    latest_version = get_latest_version(directories)
    if latest_version is None:
        clean_recursively(path, directories)
    elif len(directories) == 1:
        return
    else:
        print('Update ' + path.split(Constants.M2_PATH)[1])
        for directory_name in directories:
            if latest_version in directory_name:
                continue
            print(directory_name + ' (Has newer version: ' + latest_version +
                  ')')
            remove_unless_dry_run(path, directory_name)
Example #4
def train(epochs, batch_size, world_count, sz=64, version_name=None):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('animator', all_models_dir)

    utils.delete_empty_versions(model_dir, 1)
    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    previews_dir = utils.check_or_create_local_path('previews', version_dir)
    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    print('Loading minimap values...')
    mm_values = utils.load_minimap_values(res_dir)

    print('Loading block images...')
    block_images = utils.load_block_images(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    print('Building model from scratch...')
    animator = build_basic_animator(sz)
    animator.compile(loss='mse', optimizer='adam')

    translator = load_model(
        f'{all_models_dir}\\translator\\ver15\\models\\best_loss.h5')
    translator.trainable = False

    animator_minimap = Sequential()
    animator_minimap.add(animator)
    animator_minimap.add(translator)
    animator_minimap.compile(loss='mse', optimizer='adam')

    print('Saving model images...')
    keras.utils.plot_model(animator,
                           to_file=f'{version_dir}\\animator.png',
                           show_shapes=True,
                           show_layer_names=True)

    print('Loading worlds...')
    x_train = load_minimaps(world_count, f'{res_dir}\\worlds\\', (sz, sz),
                            block_forward, mm_values)

    world_count = x_train.shape[0]
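    # Number of complete batches; leftover worlds that don't fill a full batch are dropped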
    batch_cnt = (world_count - (world_count % batch_size)) // batch_size

    # Set up tensorboard
    print('Setting up tensorboard...')
    tb_manager = TensorboardManager(graph_version_dir, batch_cnt)

    for epoch in range(epochs):

        # Create directories for current epoch
        cur_previews_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', previews_dir)
        cur_models_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', model_save_dir)

        print('Shuffling data...')
        np.random.shuffle(x_train)

        for batch in range(batch_cnt):
            minimaps = x_train[batch * batch_size:(batch + 1) * batch_size]
            # actual = y_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]

            # Train animator
            # world_loss = animator.train_on_batch(minimaps, actual)
            minimap_loss = animator_minimap.train_on_batch(minimaps, minimaps)
            tb_manager.log_var('mm_loss', epoch, batch, minimap_loss)

            print(f"Epoch = {epoch}/{epochs} :: Batch = {batch}/{batch_cnt} "
                  f":: MMLoss = {minimap_loss}")

            # Save previews and models
            if batch == batch_cnt - 1:
                print('Saving previews...')
                worlds = animator.predict(minimaps)
                trained = animator_minimap.predict(minimaps)
                for i in range(batch_size):
                    world_decoded = utils.decode_world_sigmoid(
                        block_backward, worlds[i])
                    utils.save_world_preview(
                        block_images, world_decoded,
                        f'{cur_previews_dir}\\animated{i}.png')
                    utils.save_world_minimap(
                        mm_values, world_decoded,
                        f'{cur_previews_dir}\\actual{i}.png')
                    utils.save_rgb_map(utils.decode_world_minimap(trained[i]),
                                       f'{cur_previews_dir}\\trained{i}.png')

                    mm_decoded = utils.decode_world_minimap(minimaps[i])
                    utils.save_rgb_map(mm_decoded,
                                       f'{cur_previews_dir}\\target{i}.png')

                print('Saving models...')
                try:
                    animator.save(f'{cur_models_dir}\\animator.h5')
                    animator.save_weights(
                        f'{cur_models_dir}\\animator.weights')
                except ImportError:
                    print('Failed to save data.')
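
# The train() examples call utils.get_latest_version(model_dir) and then build
# f'ver{latest + 1}', so the helper presumably returns the highest N among
# 'verN' subdirectory names (and 0 when none exist). A minimal sketch under
# that assumption -- hypothetical, not the project's confirmed implementation:
import os
import re

def get_latest_version(model_dir):
    latest = 0
    for name in os.listdir(model_dir):
        match = re.fullmatch(r'ver(\d+)', name)
        if match and os.path.isdir(os.path.join(model_dir, name)):
            latest = max(latest, int(match.group(1)))
    return latest

Example #5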
def main(max_rows, cache, name, version, threshold):
    print("Download %r" % name)
    pd.set_option("max_rows", max_rows)
    expire_after = datetime.timedelta(days=cache)
    session = init_session(expire_after)
    df = download_index(session)
    # df_first = df.groupby('name').last()
    # df_last = df.groupby('name').first()

    if name not in df['name'].values:
        print("%r is not a valid library name" % name)
        print()
        print("Possible library names are:")
        names = find_nearest_names(name, threshold=threshold, df=df)
        print(names[0:10])
        find_nearest_name = names.iloc[0]
        if find_nearest_name.score == 1:
            print()
            new_name = find_nearest_name['name']
            print("Case is important! Autofixing it as %r instead of %r" %
                  (new_name, name))
            name = new_name
            input("")
        else:
            print()
            sys.exit("Please correct library name!")

    df_all_versions = df[df['name'].str.upper() == name.upper()]
    name = df_all_versions.name.unique()[0]
    print(df_all_versions.set_index('version'))

    print("")

    versions = get_versions(name, df)
    print("versions: %s" % versions.map(str).values)

    if version == 'latest':
        version = get_latest_version(name, df)

    print()

    print("version: %s" % str(version))

    url = get_archive_url(name, version, df)

    print()

    print("Downloading from %r" % url)
    assert url[-4:] == '.zip', "URL must finish with .zip"
    print()

    show_licence_from_archive_url(url)

    print()

    print("(possible) repository url")
    print()
    try:
        repository_url = repository_url_from_archive_url(url)
        print(repository_url)
    except Exception:
        print("Can't find repository url")
        print()
        traceback.print_exc()
Example #6
def train(epochs, batch_size, world_count, size, version_name=None):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('translator', all_models_dir)

    utils.delete_empty_versions(model_dir, 1)
    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    print('Loading minimap values...')
    minimap_values = utils.load_minimap_values(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    print('Building model from scratch...')
    translator = build_translator(size)
    translator.compile(optimizer='adam', loss='mse')

    print('Loading worlds...')
    x_train, y_train = load_worlds_with_minimaps(world_count,
                                                 f'{res_dir}\\worlds\\',
                                                 (size, size), block_forward,
                                                 minimap_values)

    best_loss_callback = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\best_loss.h5',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='min',
        period=1,
        monitor='loss')

    # Create callback for automatically saving latest model so training can be resumed. Saves every epoch
    latest_h5_callback = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\latest.h5',
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)

    # Create callback for automatically saving latest weights so training can be resumed. Saves every epoch
    latest_weights_callback = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\latest.weights',
        verbose=0,
        save_best_only=False,
        save_weights_only=True,
        mode='auto',
        period=1)

    # Create callback for tensorboard
    tb_callback = keras.callbacks.TensorBoard(log_dir=graph_version_dir,
                                              batch_size=batch_size,
                                              write_graph=False,
                                              write_grads=True)

    callback_list = [
        latest_h5_callback, latest_weights_callback, best_loss_callback,
        tb_callback
    ]
    translator.fit(x_train,
                   y_train,
                   batch_size,
                   epochs,
                   callbacks=callback_list)
Example #7
def train(epochs, batch_size, world_count, version_name=None):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('auto_encoder',
                                                 all_models_dir)
    utils.delete_empty_versions(model_dir, 1)

    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    worlds_dir = utils.check_or_create_local_path('worlds', version_dir)
    previews_dir = utils.check_or_create_local_path('previews', version_dir)
    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    latest_epoch = utils.get_latest_epoch(model_save_dir)
    initial_epoch = latest_epoch + 1

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    # Load block images
    print('Loading block images...')
    block_images = utils.load_block_images(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    # Load model and existing weights
    print('Loading model...')

    # Try to load full model, otherwise try to load weights
    loaded_model = False
    if not no_version and latest_epoch != -1:
        if os.path.exists(
                f'{version_dir}\\models\\epoch{latest_epoch}\\autoencoder.h5'):
            print('Found models.')
            ae = load_model(
                f'{version_dir}\\models\\epoch{latest_epoch}\\autoencoder.h5')
            loaded_model = True
        elif os.path.exists(
                f'{version_dir}\\models\\epoch{latest_epoch}\\autoencoder.weights'
        ):
            print('Found weights.')
            ae = autoencoder_model(112)
            ae.load_weights(
                f'{version_dir}\\models\\epoch{latest_epoch}\\autoencoder.weights'
            )

            print('Compiling model...')
            ae_optim = Adam(lr=0.0001)
            ae.compile(loss='binary_crossentropy', optimizer=ae_optim)
            loaded_model = True

    # Model was not loaded, compile new one
    if not loaded_model:
        print('Building model from scratch...')
        ae = autoencoder_model(112)
        print('Compiling model...')
        ae_optim = Adam(lr=0.0001)
        ae.compile(loss='binary_crossentropy', optimizer=ae_optim)

    if no_version:
        # Delete existing worlds and previews if any
        print('Checking for old generated data...')
        utils.delete_files_in_path(worlds_dir)
        utils.delete_files_in_path(previews_dir)

    print('Saving model images...')
    keras.utils.plot_model(ae,
                           to_file=f'{version_dir}\\autoencoder.png',
                           show_shapes=True,
                           show_layer_names=True)

    # Load Data
    print('Loading worlds...')
    x_train = load_worlds(world_count, f'{res_dir}\\worlds\\', (112, 112),
                          block_forward)

    # Start Training loop
    world_count = x_train.shape[0]
    batch_cnt = (world_count - (world_count % batch_size)) // batch_size

    # Set up tensorboard
    print('Setting up tensorboard...')
    tb_manager = TensorboardManager(graph_version_dir, batch_cnt)

    for epoch in range(initial_epoch, epochs):

        # Create directories for current epoch
        cur_worlds_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', worlds_dir)
        cur_previews_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', previews_dir)
        cur_models_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', model_save_dir)

        print('Shuffling data...')
        np.random.shuffle(x_train)

        for batch in range(batch_cnt):

            # Get real set of images
            world_batch = x_train[batch * batch_size:(batch + 1) * batch_size]

            # Train
            loss = ae.train_on_batch(world_batch, world_batch)

            # Save snapshot of generated images on last batch
            if batch == batch_cnt - 1:

                # Generate samples
                generated = ae.predict(world_batch)

                # Save samples
                for image_num in range(batch_size):
                    generated_world = generated[image_num]
                    decoded_world = utils.decode_world_sigmoid(
                        block_backward, generated_world)
                    utils.save_world_data(
                        decoded_world,
                        f'{cur_worlds_dir}\\world{image_num}.world')
                    utils.save_world_preview(
                        block_images, decoded_world,
                        f'{cur_previews_dir}\\preview{image_num}.png')

                # Save actual worlds
                for image_num in range(batch_size):
                    actual_world = world_batch[image_num]
                    decoded_world = utils.decode_world_sigmoid(
                        block_backward, actual_world)
                    utils.save_world_preview(
                        block_images, decoded_world,
                        f'{cur_previews_dir}\\actual{image_num}.png')

            # Write loss
            tb_manager.log_var('ae_loss', epoch, batch, loss)

            print(
                f'epoch [{epoch}/{epochs}] :: batch [{batch}/{batch_cnt}] :: loss = {loss}'
            )

            # Save models
            if batch % 100 == 99 or batch == batch_cnt - 1:
                print('Saving models...')
                try:
                    ae.save(f'{cur_models_dir}\\autoencoder.h5')
                    ae.save_weights(f'{cur_models_dir}\\autoencoder.weights')
                except ImportError:
                    print('Failed to save data.')
Example #8
def train(epochs, batch_size, world_count, version_name=None, initial_epoch=0):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('helper', all_models_dir)

    utils.delete_empty_versions(model_dir, 1)

    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    worlds_dir = utils.check_or_create_local_path('worlds', version_dir)
    previews_dir = utils.check_or_create_local_path('previews', version_dir)
    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    # Load block images
    print('Loading block images...')
    block_images = utils.load_block_images(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    if no_version:
        # Delete existing worlds and previews if any
        print('Checking for old generated data...')
        utils.delete_files_in_path(worlds_dir)
        utils.delete_files_in_path(previews_dir)

    # Load model and existing weights
    print('Loading models...')

    judge = build_judge_model(32)
    judge_optimizer = Adam(lr=0.0001)
    judge.compile(loss='binary_crossentropy',
                  optimizer=judge_optimizer,
                  metrics=['accuracy'])

    helper_optimizer = Adam(lr=0.001)
    helper = build_helper_model(32)
    helper_feedback = build_helper_feedback_model(helper, judge, 32)
    helper_feedback.compile(loss='binary_crossentropy',
                            optimizer=helper_optimizer)

    # Load Data
    print('Loading worlds...')
    x_train = load_worlds(world_count, f'{res_dir}\\worlds\\', (32, 32),
                          block_forward)

    # Start Training loop
    world_count = x_train.shape[0]
    batch_cnt = (world_count - (world_count % batch_size)) // batch_size
    tb_manager = TensorboardManager(graph_version_dir, batch_cnt)

    for epoch in range(initial_epoch, epochs):

        print(f'Epoch = {epoch} ')
        # Create directories for current epoch
        cur_worlds_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', worlds_dir)
        cur_previews_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', previews_dir)
        cur_models_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', model_save_dir)

        print('Shuffling data...')
        np.random.shuffle(x_train)

        for batch in range(batch_cnt):

            # Get real set of worlds
            world_batch = x_train[batch * batch_size:(batch + 1) * batch_size]
            world_batch_masked, world_masks = utils.mask_batch_low(world_batch)
            world_masks_reshaped = np.reshape(world_masks[:, :, :, 0],
                                              (batch_size, 32 * 32, 1))

            # Get fake set of worlds
            noise = np.random.normal(0, 1, size=(batch_size, 128))
            generated = helper.predict([world_batch_masked, noise])

            real_labels = np.ones((batch_size, 32 * 32, 1))
            fake_labels = np.zeros((batch_size, 32 * 32, 1))
            masked_labels = 1 - world_masks_reshaped  # note: currently unused below

            judge.trainable = True
            j_real = judge.train_on_batch([world_batch_masked, world_batch],
                                          real_labels)
            j_fake = judge.train_on_batch([world_batch_masked, generated[1]],
                                          fake_labels)

            tb_manager.log_var('j_loss_real', epoch, batch, j_real[0])
            tb_manager.log_var('j_loss_fake', epoch, batch, j_fake[0])
            tb_manager.log_var('j_acc_real', epoch, batch, j_real[1])
            tb_manager.log_var('j_acc_fake', epoch, batch, j_fake[1])

            judge.trainable = False
            h_loss = helper_feedback.train_on_batch(
                [world_batch_masked, noise], real_labels)
            tb_manager.log_var('h_loss', epoch, batch, h_loss)

            print(
                f'epoch [{epoch}/{epochs}] :: batch [{batch}/{batch_cnt}] :: fake_loss = {j_fake[0]} :: fake_acc = '
                f'{j_fake[1]} :: real_loss = {j_real[0]} :: real_acc = {j_real[1]} :: h_loss = {h_loss}'
            )

            if batch % 1000 == 999 or batch == batch_cnt - 1:

                # Save generated batch
                for i in range(batch_size):
                    actual_world = world_batch_masked[i]
                    a_decoded = utils.decode_world_sigmoid(
                        block_backward, actual_world)
                    utils.save_world_preview(
                        block_images, a_decoded,
                        f'{cur_previews_dir}\\actual{i}.png')

                    gen_world = generated[1][i]
                    decoded = utils.decode_world_sigmoid(
                        block_backward, gen_world)
                    utils.save_world_preview(
                        block_images, decoded,
                        f'{cur_previews_dir}\\preview{i}.png')

                # Save models
                try:
                    judge.save(f'{cur_models_dir}\\judge.h5')
                    helper.save(f'{cur_models_dir}\\helper.h5')
                    judge.save_weights(f'{cur_models_dir}\\judge.weights')
                    helper.save_weights(f'{cur_models_dir}\\helper.weights')
                except ImportError:
                    print('Failed to save data.')
Example #9
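# Keep only the IDE artifacts carrying the newest version key; remove the rest.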
def clean_data_for_ide(path, files, prefix):
    ide_versions = to_versions_map(files, prefix)
    latest_version = get_latest_version(ide_versions.keys())
    for current_version, current_name in ide_versions.items():
        if current_version != latest_version:
            remove_unless_dry_run(path, current_name)
Example #10
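# Parse CLI arguments, load a YAML config, and prepare versioned log/weight
# directories before training.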
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", required=True, help="Config file path.")
    parser.add_argument("-ch", "--checkpoint", required=False, help="Checkpoint file path.", default=None)
    parser.add_argument("-d", "--debug", required=False, type=bool, help="Checkpoint file path.", default=False)

    args = parser.parse_args()

    with open(args.config, 'r') as stream:
        configs = yaml.safe_load(stream)

    root_dir = configs['log_dir']
    if not os.path.isdir(root_dir):
        os.makedirs(root_dir)

    version = utils.get_latest_version(root_dir)

    version_directory = os.path.join(root_dir, "version_" + str(version))
    if not os.path.isdir(version_directory):
        os.makedirs(version_directory)

    weights_directory = os.path.join(version_directory, "weights")
    if not os.path.isdir(weights_directory):
        os.makedirs(weights_directory)

    assert isinstance(args.debug, bool)
    debug = args.debug

    start = datetime.datetime.now()
    configs['start'] = start
    configs['version'] = version
Example #11
def decompile_version(version, client=True, ignoreMappings=False):
    print("\n-------")

    print(c.BOLD, "Decompiling", version, "this will take a while", c.RESET)

    failedBecauseNoMappings = False

    # If you crash here, you set an invalid version to decompile
    if version == "latest" or version == "snapshot":
        version = utils.get_latest_version(version)
    else:
        dn = utils.get_date_for_version(version)

    # We have Mojang mappings since 1.14.4
    # For both 1.14.4 and 1.15, we include both MCP and Mojang mappings (this just makes it easier to diff)

    D = "./DecompilerMC"

    # DecompilerMC spits out a lot of input()s which are annoying; --quiet removes them. Unfortunately this
    # also removes some useful debugging info, so run this script with --verbose to get all the messages.
    quiet = '' if verbose else '--quiet'

    # uses MCP mappings
    isLegacy = False

    if utils.mc_version_cmp(version, '1.14.4') < 0:
        print(f"{version} < 1.14.4 -- legacy version so using MCP mappings")
        isLegacy = True

    if isLegacy:
        mapping = get_mcp_mappings_for(version)
        if not mapping:
            print("No MCP mappings for", version)
            failedBecauseNoMappings = True

    if failedBecauseNoMappings or utils.mc_version_cmp(version, '1.7.10') < 0:
        if not ignoreMappings:
            print(
                c.FAIL,
                f"{version}: no mapping data! Skipping because ignoreMappings is false.",
                c.RESET)
            return
        print(
            f"{version}: no mapping data! JAR will only be decompiled. Abort now if you expected deobfuscation."
        )
        input("Press enter to continue.")
        # do not remap and basically just run fernflower
        s = '' if client else '--side server'
        l = f"python3 {D}/main.py -mcv {version} -na -rmap false --download_mapping false -dj true -rjar false -dec true -dd false -d fernflower {s}"
        print("> ", l)
        os.system(l)
        return

    # MCP mappings
    if (isLegacy or utils.mc_version_cmp(version, '1.15') == 0
            or utils.mc_version_cmp(version, '1.14.4') == 0):
        mapping = get_mcp_mappings_for(version)
        print(f'{version}: Using MCP mappings at', mapping)

        os.chdir(D)
        s = '' if client else '--side server'
        l = f"python3 main.py -mcv {version} -na -rmap false --download_mapping false -dj true -rjar true -dec true -dd false -d fernflower {s} -m ../{mapping} {quiet}"
        print(c.WARNING, ">", l, c.RESET)
        ret = os.system(l)
        if ret:
            raise "Decompile failed - non-zero exit code"

        print(c.WARNING, f"> move {D}/src/{version} {D}/src/{version}_mcp/",
              c.RESET)
        utils.move(f"src/{version}/", f"src/{version}_mcp/")
        print(
            c.OKGREEN,
            f"{version} was successfully decompiled to {D}/src/{version}_mcp/",
            c.RESET)
        os.chdir('..')

    # Mojang mappings
    if not isLegacy:
        print(f"{version}: Using Mojang mappings")
        os.chdir(D)
        s = '' if client else '--side server'
        l = f"python3 main.py -mcv {version} -d fernflower {quiet} {s}"
        print(c.WARNING, "> ", l, c.RESET)
        os.system(l)
        utils.move(f"src/{version}/", f"src/{version}_mojang/")
        print(
            c.OKGREEN,
            f"{version} was successfully decompiled to {D}/src/{version}_mojang/",
            c.RESET)
        os.chdir('..')
Example #12
def train(epochs,
          batch_size,
          world_count,
          dict_src_name,
          version_name=None,
          initial_epoch=0):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('pro_classifier',
                                                 all_models_dir)

    utils.delete_empty_versions(model_dir, 1)
    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    print('Building model from scratch...')
    c_optim = Adam(lr=0.0001)

    size = 64
    c = build_classifier(size)
    # c = build_resnet50(1)
    # c = build_wide_resnet(input_dim=(size, size, 10), nb_classes=1, N=2, k=1, dropout=0.1)

    c.summary()
    c.compile(loss='binary_crossentropy',
              optimizer=c_optim,
              metrics=['accuracy'])

    print('Loading labels...')
    label_dict = utils.load_label_dict(res_dir, dict_src_name)

    print('Loading worlds...')
    x, y_raw = load_worlds_with_labels(world_count, f'{res_dir}\\worlds\\',
                                       label_dict, (size, size), block_forward)

    y = utils.convert_labels_binary(y_raw, epsilon=0)

    # Create callback for automatically saving best model based on highest regular accuracy
    check_best_acc = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\best_acc.h5',
        monitor='acc',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='max',
        period=1)

    # Create callback for automatically saving latest model so training can be resumed. Saves every epoch
    latest_h5_callback = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\latest.h5',
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)

    # Create callback for automatically saving latest weights so training can be resumed. Saves every epoch
    latest_weights_callback = keras.callbacks.ModelCheckpoint(
        f'{model_save_dir}\\latest.weights',
        verbose=0,
        save_best_only=False,
        save_weights_only=True,
        mode='auto',
        period=1)

    # Create callback for tensorboard
    tb_callback = keras.callbacks.TensorBoard(log_dir=graph_version_dir,
                                              batch_size=batch_size,
                                              write_graph=False,
                                              write_grads=True)

    callback_list = [
        check_best_acc, latest_h5_callback, latest_weights_callback,
        tb_callback
    ]

    # Train model
    c.fit(x,
          y,
          batch_size,
          epochs,
          initial_epoch=initial_epoch,
          callbacks=callback_list,
          validation_split=0.2)
Example #13
def train(epochs,
          batch_size,
          world_count,
          latent_dim,
          version_name=None,
          initial_epoch=0):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('gan', all_models_dir)

    utils.delete_empty_versions(model_dir, 1)
    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest + 1}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(
        version_name, graph_dir)

    worlds_dir = utils.check_or_create_local_path('worlds', version_dir)
    previews_dir = utils.check_or_create_local_path('previews', version_dir)
    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    print('Loading block images...')
    block_images = utils.load_block_images(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(
        res_dir, 'blocks_optimized')

    # Load model and existing weights
    print('Loading model...')

    # Try to load full model, otherwise try to load weights
    size = 64
    cur_models = f'{model_save_dir}\\epoch{initial_epoch - 1}'
    if os.path.exists(f'{cur_models}\\discriminator.h5') and os.path.exists(
            f'{cur_models}\\generator.h5'):
        print('Building model from files...')
        d = load_model(f'{cur_models}\\discriminator.h5')
        g = load_model(f'{cur_models}\\generator.h5')

        if os.path.exists(f'{cur_models}\\d_g.h5'):
            d_on_g = load_model(f'{cur_models}\\d_g.h5')
        else:
            g_optim = Adam(lr=0.0001, beta_1=0.5)
            d_on_g = generator_containing_discriminator(g, d)
            d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    elif os.path.exists(
            f'{cur_models}\\discriminator.weights') and os.path.exists(
                f'{cur_models}\\generator.weights'):
        print('Building model with weights...')
        d_optim = Adam(lr=0.00001)
        d = build_discriminator(size)
        d.load_weights(f'{cur_models}\\discriminator.weights')
        d.compile(loss='binary_crossentropy',
                  optimizer=d_optim,
                  metrics=['accuracy'])

        g = build_generator(size, latent_dim)
        g.load_weights(f'{cur_models}\\generator.weights')

        g_optim = Adam(lr=0.0001, beta_1=0.5)
        d_on_g = generator_containing_discriminator(g, d)
        d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    else:
        print('Building model from scratch...')
        d_optim = Adam(lr=0.00001)
        g_optim = Adam(lr=0.0001, beta_1=0.5)

        d = build_discriminator(size)
        d.compile(loss='binary_crossentropy',
                  optimizer=d_optim,
                  metrics=['accuracy'])
        d.summary()

        g = build_generator(size, latent_dim)
        g.summary()

        d_on_g = generator_containing_discriminator(g, d)
        d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)

    if no_version:
        # Delete existing worlds and previews if any
        print('Checking for old generated data...')
        utils.delete_files_in_path(worlds_dir)
        utils.delete_files_in_path(previews_dir)

        print('Saving model images...')
        keras.utils.plot_model(d,
                               to_file=f'{version_dir}\\discriminator.png',
                               show_shapes=True,
                               show_layer_names=True)
        keras.utils.plot_model(g,
                               to_file=f'{version_dir}\\generator.png',
                               show_shapes=True,
                               show_layer_names=True)

    # Load Data
    print('Loading worlds...')
    label_dict = utils.load_label_dict(res_dir, 'pro_labels_b')
    x_train = load_worlds_with_label(world_count,
                                     f'{res_dir}\\worlds\\',
                                     label_dict,
                                     1, (size, size),
                                     block_forward,
                                     overlap_x=0.1,
                                     overlap_y=0.1)

    world_count = x_train.shape[0]
    batch_cnt = (world_count - (world_count % batch_size)) // batch_size

    # Set up tensorboard
    print('Setting up tensorboard...')
    tb_manager = TensorboardManager(graph_version_dir, batch_cnt)

    preview_frequency_sec = 5 * 60.0
    for epoch in range(initial_epoch, epochs):

        # Create directories for current epoch
        cur_worlds_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', worlds_dir)
        cur_previews_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', previews_dir)
        cur_models_dir = utils.check_or_create_local_path(
            f'epoch{epoch}', model_save_dir)

        print('Shuffling data...')
        np.random.shuffle(x_train)

        last_save_time = time.time()
        for batch in range(batch_cnt):

            # Get real set of images
            real_worlds = x_train[batch * batch_size:(batch + 1) * batch_size]

            # Get fake set of images
            noise = np.random.normal(0, 1, size=(batch_size, latent_dim))
            fake_worlds = g.predict(noise)

            # Soft-label alternative: np.random.uniform(0.9, 1.1) for real,
            # np.random.uniform(-0.1, 0.1) for fake
            real_labels = np.ones((batch_size, 1))
            fake_labels = np.zeros((batch_size, 1))

            # Train discriminator on real worlds
            d.trainable = True
            d_loss = d.train_on_batch(real_worlds, real_labels)
            acc_real = d_loss[1]
            loss_real = d_loss[0]
            tb_manager.log_var('d_acc_real', epoch, batch, d_loss[1])
            tb_manager.log_var('d_loss_real', epoch, batch, d_loss[0])

            # Train discriminator on fake worlds
            d_loss = d.train_on_batch(fake_worlds, fake_labels)
            d.trainable = False
            acc_fake = d_loss[1]
            loss_fake = d_loss[0]
            tb_manager.log_var('d_acc_fake', epoch, batch, d_loss[1])
            tb_manager.log_var('d_loss_fake', epoch, batch, d_loss[0])

            # Training generator on X data, with Y labels
            # noise = np.random.normal(0, 1, (batch_size, 256))

            # Train generator to generate real
            g_loss = d_on_g.train_on_batch(noise, real_labels)
            tb_manager.log_var('g_loss', epoch, batch, g_loss)

            print(
                f'epoch [{epoch}/{epochs}] :: batch [{batch}/{batch_cnt}] :: fake_acc = {acc_fake} :: '
                f'real_acc = {acc_real} :: fake_loss = {loss_fake} :: real_loss = {loss_real} :: gen_loss = {g_loss}'
            )

            # Save models
            time_since_save = time.time() - last_save_time
            if time_since_save >= preview_frequency_sec or batch == batch_cnt - 1:
                print('Saving previews...')
                for i in range(batch_size):
                    generated_world = fake_worlds[i]
                    decoded_world = utils.decode_world_sigmoid(
                        block_backward, generated_world)
                    utils.save_world_data(decoded_world,
                                          f'{cur_worlds_dir}\\world{i}.world')
                    utils.save_world_preview(
                        block_images, decoded_world,
                        f'{cur_previews_dir}\\preview{i}.png')

                print('Saving models...')
                try:
                    d.save(f'{cur_models_dir}\\discriminator.h5')
                    g.save(f'{cur_models_dir}\\generator.h5')
                    d_on_g.save(f'{cur_models_dir}\\d_g.h5')
                    d.save_weights(f'{cur_models_dir}\\discriminator.weights')
                    g.save_weights(f'{cur_models_dir}\\generator.weights')
                    d_on_g.save_weights(f'{cur_models_dir}\\d_g.weights')
                except ImportError:
                    print('Failed to save data.')

                last_save_time = time.time()
Example #14
def train(epochs, batch_size, world_count, version_name=None, initial_epoch=0):
    cur_dir = os.getcwd()
    res_dir = os.path.abspath(os.path.join(cur_dir, '..', 'res'))
    all_models_dir = os.path.abspath(os.path.join(cur_dir, '..', 'models'))
    model_dir = utils.check_or_create_local_path('inpainting', all_models_dir)

    utils.delete_empty_versions(model_dir, 0)

    no_version = version_name is None
    if no_version:
        latest = utils.get_latest_version(model_dir)
        version_name = f'ver{latest}'

    version_dir = utils.check_or_create_local_path(version_name, model_dir)
    graph_dir = utils.check_or_create_local_path('graph', model_dir)
    graph_version_dir = utils.check_or_create_local_path(version_name, graph_dir)

    worlds_dir = utils.check_or_create_local_path('worlds', version_dir)
    previews_dir = utils.check_or_create_local_path('previews', version_dir)
    model_save_dir = utils.check_or_create_local_path('models', version_dir)

    print('Saving source...')
    utils.save_source_to_dir(version_dir)

    # Load block images
    print('Loading block images...')
    block_images = utils.load_block_images(res_dir)

    print('Loading encoding dictionaries...')
    block_forward, block_backward = utils.load_encoding_dict(res_dir, 'blocks_optimized')

    # Load model
    print('Loading model...')
    feature_model = auto_encoder.autoencoder_model()
    feature_model.load_weights(f'{all_models_dir}\\auto_encoder\\ver15\\models\\epoch28\\autoencoder.weights')
    feature_layers = [7, 14, 21]

    contextnet = PConvUnet(feature_model, feature_layers, inference_only=False)
    unet = contextnet.build_pconv_unet(train_bn=True, lr=0.0001)
    unet.summary()
    # pconv_unet.load_weights(f'{contextnet_dir}\\ver43\\models\\epoch4\\unet.weights')

    if no_version:
        # Delete existing worlds and previews if any
        print('Checking for old generated data...')
        utils.delete_files_in_path(worlds_dir)
        utils.delete_files_in_path(previews_dir)

    print('Saving model images...')
    keras.utils.plot_model(unet, to_file=f'{version_dir}\\unet.png', show_shapes=True,
                           show_layer_names=True)

    # Set up tensorboard
    print('Setting up tensorboard...')
    tb_writer = tf.summary.FileWriter(logdir=graph_version_dir)
    unet_loss_summary = tf.Summary()
    unet_loss_summary.value.add(tag='unet_loss', simple_value=None)

    # Load Data
    x_train = load_worlds(world_count, f'{res_dir}\\worlds\\', (128, 128), block_forward)

    # Start Training loop
    world_count = x_train.shape[0]
    batch_cnt = (world_count - (world_count % batch_size)) // batch_size

    for epoch in range(initial_epoch, epochs):

        print(f'Epoch = {epoch}')
        # Create directories for current epoch
        cur_worlds_dir = utils.check_or_create_local_path(f'epoch{epoch}', worlds_dir)
        cur_previews_dir = utils.check_or_create_local_path(f'epoch{epoch}', previews_dir)
        cur_models_dir = utils.check_or_create_local_path(f'epoch{epoch}', model_save_dir)

        print('Shuffling data...')
        np.random.shuffle(x_train)

        for batch in range(batch_cnt):

            # Get real set of images
            world_batch = x_train[batch * batch_size:(batch + 1) * batch_size]
            world_batch_masked, world_masks = utils.mask_batch_high(world_batch)

            if batch % 1000 == 999 or batch == batch_cnt - 1:

                # Save model
                try:
                    unet.save(f'{cur_models_dir}\\unet.h5')
                    unet.save_weights(f'{cur_models_dir}\\unet.weights')
                except ImportError:
                    print('Failed to save data.')

                # Save previews
                test = unet.predict([world_batch_masked, world_masks])

                d0 = utils.decode_world_sigmoid(block_backward, world_batch[0])
                utils.save_world_preview(block_images, d0, f'{cur_previews_dir}\\{batch}_orig.png')

                d1 = utils.decode_world_sigmoid(block_backward, test[0])
                utils.save_world_preview(block_images, d1, f'{cur_previews_dir}\\{batch}_fixed.png')

                d2 = utils.decode_world_sigmoid(block_backward, world_batch_masked[0])
                utils.save_world_preview(block_images, d2, f'{cur_previews_dir}\\{batch}_masked.png')

            loss = unet.train_on_batch([world_batch_masked, world_masks], world_batch)

            unet_loss_summary.value[0].simple_value = loss / 1000.0  # Divide by 1000 for better Y-Axis values
            tb_writer.add_summary(unet_loss_summary, (epoch * batch_cnt) + batch)
            tb_writer.flush()

            print(f'epoch [{epoch}/{epochs}] :: batch [{batch}/{batch_cnt}] :: unet_loss = {loss}')