Code Example #1
import tensorflow as tf

import create_model  # local module providing get_model()


def run_training(log_dir, checkpoint_path, train_dataset, valid_dataset,
                 epochs, batch_size, model_name, regularization, unit_size):

    # Log metrics to TensorBoard and keep only the best checkpoint
    # (save_best_only=True).
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir=log_dir),
        tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                           verbose=1,
                                           save_best_only=True)
    ]

    # 23331 is presumably the total number of training examples, so one
    # epoch covers the full dataset.
    steps_per_epoch = 23331 // batch_size
    # Per-class loss weights that compensate for class imbalance.
    class_weights = {
        0: 2.8008624502432555,
        1: 0.9837281553398058,
        2: 3.8114655431838704,
        3: 14.608419838523645,
        4: 4.826791158536586,
        5: 52.99372384937238,
        6: 50.06126482213438,
        7: 20.16799363057325
    }

    model = create_model.get_model(regularization, unit_size)

    model.fit(train_dataset,
              epochs=epochs,
              steps_per_epoch=steps_per_epoch,
              verbose=1,
              callbacks=callbacks,
              validation_data=valid_dataset,
              validation_steps=None,
              class_weight=class_weights)

    return model
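
The class_weight dictionaries in these examples look like scikit-learn's 'balanced' heuristic, n_samples / (n_classes * count_per_class), which up-weights rare classes. A sketch of how such a dictionary is commonly derived; train_labels is a hypothetical stand-in for the label array behind train_dataset:

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

# Hypothetical label array; in the real pipeline this would be the
# integer class ids of the training examples.
train_labels = np.array([0, 0, 1, 1, 1, 1, 2])

classes = np.unique(train_labels)
weights = compute_class_weight(class_weight='balanced',
                               classes=classes, y=train_labels)
class_weights = dict(zip(classes.tolist(), weights.tolist()))
print(class_weights)  # e.g. {0: 1.1667, 1: 0.5833, 2: 2.3333}
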
Code Example #2
import tensorflow as tf

import create_model  # local module providing get_model()


def run_training(log_dir, checkpoint_path, train_dataset, valid_dataset,
                 epochs, batch_size, model_name, regularization):

    # Log metrics to TensorBoard and save a checkpoint after every epoch
    # (save_best_only=False), not only when validation improves.
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir=log_dir),
        tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                           verbose=1,
                                           save_best_only=False)
    ]

    # 23331 is presumably the total number of training examples, so one
    # epoch covers the full dataset.
    steps_per_epoch = 23331 // batch_size
    # Per-class loss weights that compensate for class imbalance,
    # listed in class order for readability.
    class_weights = {
        0: 0.7001191371958866,
        1: 0.24593612334801762,
        2: 0.9530129737111642,
        3: 3.6536321989528795,
        4: 1.2068201469952442,
        5: 13.229265402843602,
        6: 12.51737668161435,
        7: 5.038583032490974
    }

    # Hidden-layer sizes are hard-coded here; Code Example #1 takes them
    # as the unit_size parameter instead.
    model = create_model.get_model(regularization, [2048, 1024])

    model.fit(train_dataset,
              epochs=epochs,
              steps_per_epoch=steps_per_epoch,
              verbose=1,
              callbacks=callbacks,
              validation_data=valid_dataset,
              validation_steps=None,
              class_weight=class_weights)

    return model
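
Neither example shows create_model.get_model. A minimal sketch of the interface they assume, guessing that unit_size is a list of hidden-layer widths, regularization is an L2 factor, and there are eight output classes (to match the eight class weights); the real module may differ:

import tensorflow as tf


def get_model(regularization, unit_size, n_classes=8):
    # Dense ReLU stack with L2 regularization on every hidden layer.
    # Optimizer and loss choices here are assumptions.
    reg = tf.keras.regularizers.l2(regularization)
    layers = [tf.keras.layers.Dense(units, activation='relu',
                                    kernel_regularizer=reg)
              for units in unit_size]
    layers.append(tf.keras.layers.Dense(n_classes, activation='softmax'))
    model = tf.keras.Sequential(layers)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
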
Code Example #3
def __init__(self,
             predict_path=PREDICT_FOLDER,
             train_pickle_path=TRAIN_PICKLE,
             width=WIDTH,
             height=HEIGHT):
    self.width = width
    self.height = height
    self.predict_path = predict_path
    # Training images loaded from a pickle, presumably used as reference
    # examples at predict time.
    self.train_images = load_pickle(train_pickle_path)
    # Build the network for single-channel (grayscale) inputs and restore
    # the previously saved weights.
    self.model = get_model((self.width, self.height, 1))
    self.model.load_weights(os.path.join(SAVE_PATH, 'weights.h5'))
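
load_pickle is not shown; a minimal sketch of the helper this snippet assumes:

import pickle


def load_pickle(path):
    # Deserialize a pickled object from disk.
    with open(path, 'rb') as f:
        return pickle.load(f)
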
Code Example #4
import os
import time


def train_model(train_data_path,
                batch_size=BATCH_SIZE,
                n_iter=N_ITER,
                evaluate_every=EVALUATE_EVERY,
                eval_batch_size=EVAL_BATCH_SIZE,
                width=WIDTH,
                height=HEIGHT,
                model_path=SAVE_PATH,
                eval_data_path=None):

    input_shape = (width, height, 1)

    model = get_model(input_shape)

    gd = GenerateBatch(batch_size, train_data_path, width, height)
    if eval_data_path:
        gd_eval = GenerateBatch(eval_batch_size, eval_data_path, width, height)

    t_start = time.time()
    print("Starting training process!")
    print("-------------------------------------")
    # Each iteration draws one batch and takes a single gradient step.
    for i in range(1, n_iter + 1):
        (inputs, targets) = gd.get_batch()
        loss = model.train_on_batch(inputs, targets)
        if i % evaluate_every == 0:
            print("\n ------------- \n")
            print("Time for {0} iterations: {1} mins".format(
                i, (time.time() - t_start) / 60.0))
            print("Train Loss: {0}".format(loss))
            if eval_data_path:
                (eval_inputs, eval_targets) = gd_eval.get_batch()
                probs = model.predict(eval_inputs)
                got_right = 0
                # Use a separate index so the outer iteration counter i
                # is not shadowed.
                for j in range(len(probs)):
                    # A hit: the thresholded prediction matches the
                    # binary target.
                    predicted_positive = probs[j][0] >= CLASSIFIER_THRESHOLD
                    if predicted_positive == bool(eval_targets[j]):
                        got_right += 1
                print("Eval accuracy: {}".format(got_right / len(probs)))
            model.save_weights(os.path.join(model_path, 'weights.h5'))
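
The per-element accuracy loop above can be collapsed into a single vectorized expression; a sketch, assuming probs is the (n, 1) array returned by model.predict and eval_targets holds binary labels:

import numpy as np

# A hit: the thresholded prediction agrees with the binary target.
predicted = probs[:, 0] >= CLASSIFIER_THRESHOLD
accuracy = np.mean(predicted == np.asarray(eval_targets, dtype=bool))
print("Eval accuracy: {}".format(accuracy))
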
Code Example #5
parser.add_argument(
    '--hidden_units',
    nargs=3,
    help='Number of neurons in the three hidden layers of the classifier',
    default=[4096, 2048, 1024],
    type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--gpu', action="store_true", default=False)

args = parser.parse_args()
state_dict_checkpoint = 'state_dict_checkpoint.pt'

image_datasets = get_image_datasets(args.data_directory)
dataloaders = get_dataloaders(image_datasets)

model_arch = get_model_arch_from_model_name(args.arch)
model = get_model(model_arch, args.hidden_units)

device = 'cuda' if torch.cuda.is_available() and args.gpu else 'cpu'
print("Running in {}".format(device))

train_classifier(model,
                 device,
                 dataloaders,
                 lr=args.learning_rate,
                 epochs=args.epochs,
                 model_checkpoint=state_dict_checkpoint)

# Restore the weights saved by train_classifier, then attach the
# class-to-index mapping so predictions can be decoded later.
model.load_state_dict(torch.load(state_dict_checkpoint))
model.class_to_idx = image_datasets['train'].class_to_idx

store_checkpoint(model, model_arch, checkpoint_path=args.save_dir)
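
store_checkpoint is defined elsewhere; a minimal sketch of the kind of helper this script assumes, bundling everything needed to rebuild the model later (the field names and file name are assumptions):

import os

import torch


def store_checkpoint(model, model_arch, checkpoint_path='.'):
    # Persist the architecture, class mapping, and weights together so
    # the model can be reconstructed without the training script.
    checkpoint = {
        'arch': model_arch,
        'class_to_idx': model.class_to_idx,
        'state_dict': model.state_dict(),
    }
    torch.save(checkpoint, os.path.join(checkpoint_path, 'checkpoint.pt'))
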
Code Example #6
import json
import os

utils_files_dir = 'util_files'
batch_size = 16

if not os.path.exists(utils_files_dir):
    os.mkdir(utils_files_dir)

if not os.path.exists(os.path.join(utils_files_dir, "train.json")):
    logger.info("creating and preparing data for training and testing.")
    create_dataset()

class_weights = get_class_weights()

if not os.path.exists(os.path.join(utils_files_dir, "model.h5")):
    logger.info("creating model.")
    model = get_model()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])

    # json.load already returns a dict, and the context manager makes
    # sure the file handles are closed.
    with open(os.path.join(utils_files_dir, 'train.json')) as f:
        train_data = json.load(f)['data']
    len_training_data = len(train_data)

    with open(os.path.join(utils_files_dir, 'valid.json')) as f:
        valid_data = json.load(f)['data']
    len_validation_data = len(valid_data)

    training_generator = generator(batch_size=batch_size)
    validation_generator = generator(batch_size=batch_size, mode='valid')
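
The snippet ends before the training call, but every ingredient for one has been prepared. A sketch of how they would typically be wired together in Keras (a guess at the elided step, not necessarily the author's code):

    model.fit(training_generator,
              steps_per_epoch=len_training_data // batch_size,
              validation_data=validation_generator,
              validation_steps=len_validation_data // batch_size,
              class_weight=class_weights,
              epochs=10)  # epoch count is an assumption
    model.save(os.path.join(utils_files_dir, 'model.h5'))
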
Code Example #7
    # Plot cumulative reward, smoothed with a 1000-episode rolling mean.
    # Monitor prefixes the CSV with a '#' JSON metadata line, so skip it
    # and read only the per-episode reward column 'r'.
    with open(os.path.join(log_dir, "monitor.csv"), 'rt') as fh:
        firstline = fh.readline()
        assert firstline[0] == '#'
        df = pd.read_csv(fh, index_col=None)['r']
    df.rolling(window=1000).mean().plot()
    plt.show()
    return model


if __name__ == '__main__':
    env = ConnectFourGym(agent2="random")
    log_dir = "ppo/"
    os.makedirs(log_dir, exist_ok=True)

    # Logging progress
    monitor_env = Monitor(env, log_dir, allow_early_resets=True)

    # Create a vectorized environment
    vec_env = DummyVecEnv([lambda: monitor_env])

    # Initialize agent
    model = get_model(vec_env)

    # Train agent
    model = train_model(model)

    # Play one ConnectX game against the built-in random agent, then
    # report win percentages over repeated games.
    env_game = make("connectx")
    env_game.run([agent1, "random"])
    get_win_percentages(agent1=agent1, agent2="random")
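
get_model and train_model are defined elsewhere; a minimal sketch of plausible implementations on top of stable-baselines3 (the policy choice and step count are assumptions):

from stable_baselines3 import PPO


def get_model(vec_env):
    # PPO with a plain MLP policy on the vectorized ConnectFour env.
    return PPO('MlpPolicy', vec_env, verbose=0)


def train_model(model, total_timesteps=100_000):
    # Run PPO for a fixed number of environment steps.
    model.learn(total_timesteps=total_timesteps)
    return model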