Code Example #1
File: train.py  Project: redxtreme/Halite
import argparse
import os

import numpy as np
import pandas as pd  # DataFrame.plot below also requires matplotlib

# Assumed to be provided elsewhere in the project:
# NeuralNet, fetch_data_zip, fetch_data_dir, parse


def main():
    parser = argparse.ArgumentParser(description="Halite II training")
    parser.add_argument("--model_name", help="Name of the model")
    parser.add_argument("--minibatch_size", type=int, help="Size of the minibatch", default=100)
    parser.add_argument("--steps", type=int, help="Number of steps in the training", default=100)
    parser.add_argument("--data", help="Data directory or zip file containing uncompressed games")
    parser.add_argument("--cache", help="Location of the model we should continue to train")
    parser.add_argument("--games_limit", type=int, help="Train on up to games_limit games", default=1000)
    parser.add_argument("--seed", type=int, help="Random seed to make the training deterministic")
    parser.add_argument("--bot_to_imitate", help="Name of the bot whose strategy we want to learn")
    parser.add_argument("--dump_features_location", help="Location of hdf file where the features should be stored")

    args = parser.parse_args()

    # Make deterministic if needed
    if args.seed is not None:
        np.random.seed(args.seed)
    nn = NeuralNet(cached_model=args.cache, seed=args.seed)

    if args.data.endswith('.zip'):
        raw_data = fetch_data_zip(args.data, args.games_limit)
    else:
        raw_data = fetch_data_dir(args.data, args.games_limit)

    data_input, data_output = parse(raw_data, args.bot_to_imitate, args.dump_features_location)
    data_size = len(data_input)
    split_index = int(0.85 * data_size)
    training_input, training_output = data_input[:split_index], data_output[:split_index]
    validation_input, validation_output = data_input[split_index:], data_output[split_index:]

    training_data_size = len(training_input)

    # Randomly permute the training examples (the validation split keeps its original order)
    permutation = np.random.permutation(training_data_size)
    training_input, training_output = training_input[permutation], training_output[permutation]

    print("Initial, cross validation loss: {}".format(nn.compute_loss(validation_input, validation_output)))

    curves = []

    for s in range(args.steps):
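        # Slide a minibatch window over the shuffled data, wrapping around;
        # note numpy slicing silently truncates the window when end runs past
        # the array, so the last batch before a wrap may be smaller.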
        start = (s * args.minibatch_size) % training_data_size
        end = start + args.minibatch_size
        training_loss = nn.fit(training_input[start:end], training_output[start:end])
        if s % 200 == 0 or s == args.steps - 1:
            validation_loss = nn.compute_loss(validation_input, validation_output)
            print("Step: {}, cross validation loss: {}, training_loss: {}".format(s, validation_loss, training_loss))
            curves.append((s, training_loss, validation_loss))

    cf = pd.DataFrame(curves, columns=['step', 'training_loss', 'cv_loss'])
    fig = cf.plot(x='step', y=['training_loss', 'cv_loss']).get_figure()

    # Save the trained model, so it can be used by the bot
    current_directory = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + ".ckpt")
    print("Training finished, serializing model to {}".format(model_path))
    nn.save(model_path)
    print("Model serialized")

    curve_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + "_training_plot.png")
    fig.savefig(curve_path)
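
A minimal entry-point and invocation sketch (the model name and data path are hypothetical; the flags come from the parser above):

if __name__ == "__main__":
    main()

# Example invocation (hypothetical values):
#   python train.py --model_name demo --data games.zip --steps 2000 --seed 7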
Code Example #2
File: train.py  Project: IrinaArmstrong/Halite-II_ML
import argparse
import sys

import numpy as np

# Assumed to be provided elsewhere in the project:
# CNN_Net, PLANET_MAX_NUM, fetch_data_zip, fetch_data_dir, parse, load_parsed


def main():
    parser = argparse.ArgumentParser(description="Halite II training")
    parser.add_argument("--model_name", help="Name of the model")
    parser.add_argument(
        "--features_ready",
        action="store_true",
        help="Set if the data was already preprocessed into features and saved")
    parser.add_argument(
        "--data",
        help="Data directory or zip file containing uncompressed games")
    parser.add_argument(
        "--cache", help="Location of the model we should continue to train")
    parser.add_argument("--games_limit",
                        type=int,
                        help="Train on up to games_limit games",
                        default=1000)
    parser.add_argument("--seed",
                        type=int,
                        help="Random seed to make the training deterministic")
    parser.add_argument("--bot_to_imitate",
                        help="Name of the bot whose strategy we want to learn")
    parser.add_argument(
        "--dump_features_location",
        help="Location of hdf file where the features should be stored")
    parser.add_argument(
        "--dump_features_fn",
        help="File name of file where the features should be stored")

    args = parser.parse_args()

    if args.features_ready:
        # Features were preprocessed earlier; --data may be omitted entirely
        data_input, data_output = load_parsed(args.dump_features_location,
                                              args.dump_features_fn)
    else:
        if args.data.endswith('.zip'):
            raw_data = fetch_data_zip(args.data, args.games_limit)
        else:
            raw_data = fetch_data_dir(args.data, args.games_limit)
        data_input, data_output = parse(raw_data, args.bot_to_imitate,
                                        args.dump_features_location,
                                        args.dump_features_fn)

    input_data_size = data_input.shape[1:]
    # Make deterministic if needed
    if args.seed is not None:
        np.random.seed(args.seed)

    # Redirect stdout and stderr to log files while the network is built
    stderr_fn = sys.stderr
    stdout_fn = sys.stdout
    sys.stdout = open('./LogSTDOUT.txt', 'w')
    sys.stderr = open('./LogSTDERR.txt', 'w')

    net = CNN_Net(input_size=input_data_size,
                  output_size=PLANET_MAX_NUM,
                  cached_model=args.cache,
                  cached_model_path="",
                  seed=args.seed)

    sys.stdout.close()
    sys.stderr.close()
    sys.stderr = stderr_fn
    sys.stdout = stdout_fn

    net.train(data_input,
              data_output,
              validation_split=0.2,
              n_epochs=28,
              batch_size=100,
              verbose=1,
              model_version="v0")
Code Example #3
File: train_boost.py  Project: DrMatters/sc2_agents
import argparse
import os
import pickle
import random

import numpy as np
import xgboost
from sklearn import model_selection

# Assumed to be provided elsewhere in the project:
# fetch_data_zip, fetch_data_dir, parse


def main():
    parser = argparse.ArgumentParser(description="Halite II training")
    parser.add_argument("--model_name",
                        help="Name of the model",
                        default='xgb_model')
    parser.add_argument(
        "--data",
        help="Data directory or zip file containing uncompressed games")
    parser.add_argument("--games_limit",
                        type=int,
                        help="Train on up to games_limit games")
    parser.add_argument("--seed",
                        type=int,
                        help="Random seed to make the training deterministic",
                        default=53)
    parser.add_argument("--bot_to_imitate",
                        help="Name of the bot whose strategy we want to learn")
    parser.add_argument("--cache_train_data",
                        help="Location of where to store/read cache data")
    parser.add_argument('--only_cache_data', action='store_true')

    args = parser.parse_args()

    # Make deterministic if needed
    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)

    if args.cache_train_data:
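        # np.save appends '.npy' to paths that lack it, so the cache path
        # given here should already end in '.npy' for np.load below to find
        # the same files np.save writes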
        base_cache_path = os.path.splitext(args.cache_train_data)[0]
        ext = os.path.splitext(args.cache_train_data)[1]
        path_data_input = base_cache_path + '_input' + ext
        path_data_output = base_cache_path + '_output' + ext

    if not args.cache_train_data or not os.path.exists(path_data_input):
        # No cache path given, or the cache files are missing: fetch the
        # games and build the training data from scratch
        if args.data.endswith('.zip'):
            raw_data = fetch_data_zip(args.data, args.games_limit)
        else:
            raw_data = fetch_data_dir(args.data, args.games_limit)
        data_input, data_output = parse(raw_data, args.bot_to_imitate)
        if args.cache_train_data:
            np.save(path_data_input, data_input)
            np.save(path_data_output, data_output)
    else:
        # Cache path given and the cached arrays exist: read them back
        print('Reading cache')
        data_input = np.load(path_data_input)
        data_output = np.load(path_data_output)

    if args.only_cache_data:
        return

    # Flatten each example to a feature vector and turn the one-hot
    # targets into integer class labels
    data_input_p = np.reshape(data_input, (data_input.shape[0], -1))
    data_output_p = np.argmax(data_output, axis=1)

    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        data_input_p, data_output_p, train_size=0.85,
        random_state=args.seed, shuffle=True)

    # Note: the sklearn regressor wrapper is used with a multiclass
    # objective here; num_class is forwarded to the booster via set_params
    classifier = xgboost.XGBRegressor(
        objective='multi:softprob',
        n_jobs=-1,
        random_state=args.seed,
    )
    classifier.set_params(**{'num_class': data_output.shape[1]})
    classifier.fit(X_train,
                   y_train,
                   eval_set=[(X_train, y_train), (X_test, y_test)],
                   eval_metric='mlogloss')

    # Save the trained model, so it can be used by the bot
    current_directory = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_directory, os.path.pardir, "models",
                              args.model_name + ".pickle")
    print("Training finished, serializing model to {}".format(model_path))
    with open(model_path, 'wb') as f:
        pickle.dump(classifier, f)
    print("Model serialized")