Example #1
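All of the examples in this section are excerpts and omit their module-level context. A minimal sketch of that context follows, assuming Keras-style APIs; the import paths and flag values here are assumptions, not part of the original code.

# Assumed module-level context for the snippets below (import paths and
# flag values are assumptions, not part of the original examples).
import os
from time import time
from typing import Optional

import numpy as np
import wandb
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from wandb.keras import WandbCallback

# Project-local types and callbacks (hypothetical import paths):
# from text_recognizer.datasets.dataset import Dataset
# from text_recognizer.models.base import Model
# from training.gpu_util_sampler import GPUUtilizationSampler
# from training.wandb_image_logger import WandbImageLogger

EARLY_STOPPING = True      # toggles the EarlyStopping callback
GPU_UTIL_SAMPLER = True    # toggles GPU utilization sampling
MODEL_CHECKPOINT = True    # toggles checkpointing (used in Example #6)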
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                gpu_ind: Optional[int] = None,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode='auto')
        callbacks.append(early_stopping)

    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilization = GPUUtilizationSampler(gpu_ind)
        callbacks.append(gpu_utilization)

    ##### Hide lines below until Lab 4
    if use_wandb:
        wandb_callback = WandbCallback()  # avoid shadowing the `wandb` module
        callbacks.append(wandb_callback)
    ##### Hide lines above until Lab 4

    # Pick up after a good run.
    new_fname = (
        '/home/timehaven/github.com/gradescope/fsdl-text-recognizer-project/' +
        'lab5/wandb/run-20180804_234736-uc49b8km/model-best.h5')
    old_fname = model.weights_filename
    model.weights_filename = new_fname
    model.load_weights()
    print("Loaded weights!")
    model.weights_filename = old_fname
    from time import sleep  # `import time` here would shadow the module-level `time()` used below
    sleep(5)

    model.network.summary()

    t = time()
    _history = model.fit(dataset, batch_size, epochs, callbacks)
    print('Training took {:.2f} s'.format(time() - t))

    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilizations = gpu_utilization.samples
        print(
            f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}'
        )

    return model
Example #2
def train_model(
        model: Model,
        dataset: Dataset,
        epochs: int,
        batch_size: int,
        gpu_ind: Optional[int] = None,
        use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3, verbose=1, mode='auto')
        callbacks.append(early_stopping)

    # Hide lines below until Lab 3
    if use_wandb:
        wandb.init(config={'gpu_ind': gpu_ind})
        wandb_callback = WandbCallback()
        callbacks.append(wandb_callback)
    # Hide lines above until Lab 3

    model.network.summary()

    t = time()
    _history = model.fit(dataset=dataset, batch_size=batch_size, epochs=epochs, callbacks=callbacks)
    print('Training took {:.2f} s'.format(time() - t))

    return model
Example #3
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode='auto')
        callbacks.append(early_stopping)

    model.network.summary()

    t = time()
    _history = model.fit(dataset=dataset,
                         batch_size=batch_size,
                         epochs=epochs,
                         callbacks=callbacks)
    print('Training took {:.2f} s'.format(time() - t))

    return model
Example #4
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                gpu_ind: Optional[int] = None,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode='auto')
        callbacks.append(early_stopping)

    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilization = GPUUtilizationSampler(gpu_ind)
        callbacks.append(gpu_utilization)

    model.network.summary()

    t = time()
    _history = model.fit(dataset, batch_size, epochs, callbacks)
    print('Training took {:.2f} s'.format(time() - t))

    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilizations = gpu_utilization.samples
        print(
            f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}'
        )

    return model
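Examples #1, #4, and #8 pass a `GPUUtilizationSampler` callback whose definition the excerpts omit. The following is a hypothetical sketch of such a callback, assuming the `pynvml` NVIDIA bindings and the Keras `Callback` interface; it is an illustration of the idea, not the project's actual implementation.

import pynvml
from tensorflow.keras.callbacks import Callback

class GPUUtilizationSampler(Callback):
    """Sample utilization of one GPU after every batch (hypothetical sketch)."""

    def __init__(self, gpu_ind: int):
        super().__init__()
        self.gpu_ind = gpu_ind
        self.samples = []

    def on_train_begin(self, logs=None):
        pynvml.nvmlInit()
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(self.gpu_ind)
        self.samples = []

    def on_batch_end(self, batch, logs=None):
        # .gpu is the percentage of time the device was busy since the last query
        self.samples.append(pynvml.nvmlDeviceGetUtilizationRates(self.handle).gpu)

    def on_train_end(self, logs=None):
        pynvml.nvmlShutdown()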
Example #5
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                gpu_ind: Optional[int] = None,
                use_wandb: bool = False,
                **train_args) -> Model:
    """Train model."""
    callbacks = []

    # TODO: keras specific callbacks
    # if EARLY_STOPPING:
    #     early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3, verbose=1, mode='auto')
    #     callbacks.append(early_stopping)

    # if GPU_UTIL_SAMPLER and gpu_ind is not None:
    #     gpu_utilization = GPUUtilizationSampler(gpu_ind)
    #     callbacks.append(gpu_utilization)

    # print(model.network)

    t = time()
    _history = model.fit(dataset=dataset,
                         batch_size=batch_size,
                         epochs=epochs,
                         callbacks=callbacks,
                         **train_args)
    print('Training took {:.2f} s'.format(time() - t))

    # TODO: util functions
    # if GPU_UTIL_SAMPLER and gpu_ind is not None:
    #     gpu_utilizations = gpu_utilization.samples
    #     print(f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}')

    return model
Example #6
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if MODEL_CHECKPOINT:
        model_checkpoint = ModelCheckpoint(
            filepath=os.path.join(wandb.run.dir, model.weights_filename_only),
            verbose=1)
        callbacks.append(model_checkpoint)

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor="val_loss",
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode="auto")
        callbacks.append(early_stopping)

    if use_wandb:
        image_callback = WandbImageLogger(model, dataset)
        wandb_callback = WandbCallback(save_model=True)
        callbacks.append(image_callback)
        callbacks.append(wandb_callback)

    model.network.summary()
    t = time()
    _history = model.fit(dataset=dataset,
                         batch_size=batch_size,
                         initial_epoch=wandb.run.step if wandb.run.resumed else 0,
                         epochs=epochs,
                         callbacks=callbacks)
    print("Training took {:.2f} s".format(time() - t))

    return model
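Example #6 resumes interrupted runs by setting `initial_epoch` from `wandb.run.step`. For that to work, the run has to be initialized as resumable before `train_model` is called. A sketch, assuming the classic `wandb.init` resume API:

import wandb

RUN_ID = 'uc49b8km'  # placeholder; e.g. the run id visible in Example #1's path

# `resume` and `id` are real wandb.init parameters.
wandb.init(id=RUN_ID, resume='allow')
if wandb.run.resumed:
    print(f'Resuming run {wandb.run.id} from step {wandb.run.step}')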
Example #7
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []

    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor="val_loss",
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode="auto")
        callbacks.append(early_stopping)

    # Hide lines below until Lab 4
    if use_wandb:
        image_callback = WandbImageLogger(model, dataset)
        wandb_callback = WandbCallback()
        callbacks.append(image_callback)
        callbacks.append(wandb_callback)
    # Hide lines above until Lab 4

    model.network.summary()

    t = time()
    _history = model.fit(dataset=dataset,
                         batch_size=batch_size,
                         epochs=epochs,
                         callbacks=callbacks)
    print("Training took {:2f} s".format(time() - t))

    return model
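Examples #6 and #7 use a `WandbImageLogger` whose definition is not included; it is presumably a custom callback that logs sample predictions as images each epoch. A hypothetical sketch follows, using the real `wandb.Image`/`wandb.log` APIs; the `x_test` attribute and `predict_on_image` method are assumptions about the project's Dataset and Model wrappers.

import wandb
from tensorflow.keras.callbacks import Callback

class WandbImageLogger(Callback):
    """Log predictions on a few test images each epoch (hypothetical sketch)."""

    def __init__(self, model_wrapper, dataset, num_examples: int = 4):
        super().__init__()
        self.model_wrapper = model_wrapper
        # Assumes the Dataset exposes test images as an array; `x_test` is an assumption.
        self.examples = dataset.x_test[:num_examples]

    def on_epoch_end(self, epoch, logs=None):
        # `predict_on_image` is a placeholder for whatever per-image inference
        # method the project's Model wrapper provides.
        images = [
            wandb.Image(image, caption=str(self.model_wrapper.predict_on_image(image)))
            for image in self.examples
        ]
        wandb.log({'examples': images}, commit=False)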
Example #8
def train_model(model: Model,
                dataset: Dataset,
                epochs: int,
                batch_size: int,
                gpu_ind: Optional[int] = None,
                use_wandb: bool = False) -> Model:
    """Train model."""
    callbacks = []
    # Append early stopping callback if enabled
    if EARLY_STOPPING:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.01,
                                       patience=3,
                                       verbose=1,
                                       mode='auto')
        callbacks.append(early_stopping)
    # Monitor GPU utilization of the GPU in use
    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilization = GPUUtilizationSampler(gpu_ind)
        callbacks.append(gpu_utilization)

    # Print model structure
    model.network.summary()
    # Start timer
    t = time()
    # Train model on dataset
    _history = model.fit(dataset, batch_size, epochs, callbacks)
    # Print training time
    print('Training took {:.2f} s'.format(time() - t))
    # Report GPU utilization statistics if sampling was enabled
    if GPU_UTIL_SAMPLER and gpu_ind is not None:
        gpu_utilizations = gpu_utilization.samples
        print(
            f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}'
        )
    # Return trained model
    return model
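For context, a typical call site for any of these variants might look like the following; `EmnistDataset` and `CharacterModel` are stand-ins for whatever concrete `Dataset` and `Model` subclasses the project defines, and `load_or_generate_data`/`save_weights` are assumed methods on those wrappers.

dataset = EmnistDataset()          # hypothetical Dataset subclass
dataset.load_or_generate_data()    # assumed data-loading method

model = CharacterModel()           # hypothetical Model subclass
model = train_model(model, dataset, epochs=10, batch_size=32, use_wandb=False)
model.save_weights()               # assumed persistence method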