# Example #1 (0)
def train(model: Hidden, device: torch.device,
          hidden_config: HiDDenConfiguration, train_options: TrainingOptions,
          this_run_folder: str, tb_logger):
    """
    Trains the HiDDeN model and tracks the best epoch, where "best" means the
    lowest sum of validation encoder MSE and bitwise error.

    :param model: The model (exposes train_on_batch / validate_on_batch).
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
    :param hidden_config: The network configuration
    :param train_options: The training settings (including resume state:
                start_epoch, best_epoch, best_cond).
    :param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
    :param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
                Pass None to disable TensorboardX logging
    :return: None
    """

    train_data, val_data = utils.get_data_loaders(hidden_config, train_options)

    # How many validation patches to dump to disk per epoch, and at what size.
    images_to_save = 8
    saved_images_size = (512, 512)

    # Best-model tracking carries over from a resumed run (best_cond may be
    # None on a fresh run; the None-check below handles that case).
    best_epoch = train_options.best_epoch
    best_cond = train_options.best_cond
    for epoch in range(train_options.start_epoch,
                       train_options.number_of_epochs + 1):
        logging.info(
            f'\nStarting epoch {epoch}/{train_options.number_of_epochs} [{best_epoch}]'
        )
        training_losses = defaultdict(functions.AverageMeter)
        epoch_start = time.time()
        for image, _ in tqdm(train_data, ncols=80):
            image = image.to(device)  #.squeeze(0)
            # Fresh random binary message of shape (batch, message_length).
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, _ = model.train_on_batch([image, message])

            # Accumulate running averages of every reported loss.
            for name, loss in losses.items():
                training_losses[name].update(loss)

        train_duration = time.time() - epoch_start
        logging.info('Epoch {} training duration {:.2f} sec'.format(
            epoch, train_duration))
        logging.info('-' * 40)
        utils.write_losses(os.path.join(this_run_folder, 'train.csv'),
                           training_losses, epoch, train_duration)
        if tb_logger is not None:
            tb_logger.save_losses('train_loss', training_losses, epoch)
            tb_logger.save_grads(epoch)
            tb_logger.save_tensors(epoch)
            tb_logger.writer.flush()

        validation_losses = defaultdict(functions.AverageMeter)
        logging.info('Running validation for epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        # One randomly chosen patch per validation batch is collected for the
        # per-epoch image dump (original / encoded / noised side by side).
        val_image_patches = ()
        val_encoded_patches = ()
        val_noised_patches = ()
        for image, _ in tqdm(val_data, ncols=80):
            image = image.to(device)  #.squeeze(0)
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images,
                     decoded_messages) = model.validate_on_batch(
                         [image, message])
            for name, loss in losses.items():
                validation_losses[name].update(loss)

            if hidden_config.enable_fp16:
                # Convert back to fp32 so interpolation/saving work as usual.
                image = image.float()
                encoded_images = encoded_images.float()
            # Pick one random sample from the batch and resize it to the
            # configured patch size.
            # NOTE(review): F.interpolate's `size` is (H, W); passing
            # (W, H) is only correct when the config is square -- confirm.
            pick = np.random.randint(0, image.shape[0])
            val_image_patches += (F.interpolate(
                image[pick:pick + 1, :, :, :].cpu(),
                size=(hidden_config.W, hidden_config.H)), )
            val_encoded_patches += (F.interpolate(
                encoded_images[pick:pick + 1, :, :, :].cpu(),
                size=(hidden_config.W, hidden_config.H)), )
            val_noised_patches += (F.interpolate(
                noised_images[pick:pick + 1, :, :, :].cpu(),
                size=(hidden_config.W, hidden_config.H)), )

        if tb_logger is not None:
            tb_logger.save_losses('val_loss', validation_losses, epoch)
            tb_logger.writer.flush()

        # Stack the collected (1, C, H, W) patches into (N, C, H, W) batches.
        val_image_patches = torch.stack(val_image_patches).squeeze(1)
        val_encoded_patches = torch.stack(val_encoded_patches).squeeze(1)
        val_noised_patches = torch.stack(val_noised_patches).squeeze(1)
        utils.save_images(val_image_patches[:images_to_save, :, :, :],
                          val_encoded_patches[:images_to_save, :, :, :],
                          val_noised_patches[:images_to_save, :, :, :],
                          epoch,
                          os.path.join(this_run_folder, 'images'),
                          resize_to=saved_images_size)

        # "Best" epoch = lowest (encoder MSE + bitwise error) on validation.
        curr_cond = validation_losses['encoder_mse'].avg + validation_losses[
            'bitwise-error'].avg
        if best_cond is None or curr_cond < best_cond:
            best_cond = curr_cond
            best_epoch = epoch

        utils.log_progress(validation_losses)
        logging.info('-' * 40)
        # Checkpoint every epoch, recording the best-so-far epoch/condition.
        utils.save_checkpoint(model, train_options.experiment_name, epoch,
                              best_epoch, best_cond,
                              os.path.join(this_run_folder, 'checkpoints'))
        logging.info(
            f'Current best epoch = {best_epoch}, loss = {best_cond:.6f}')
        utils.write_losses(os.path.join(this_run_folder, 'validation.csv'),
                           validation_losses, epoch,
                           time.time() - epoch_start)
def blocking_averages(patch_imgs, patch_size=32, margin=10, boundaries=12):
    """
    Measure the blocking (seam) artifact between neighbouring encoded patches.

    The patches are assumed to come from a 4x4 grid (row-major order) of
    ``patch_size`` x ``patch_size`` crops.  For every vertically adjacent pair
    (i, i+1 within the same grid row) and every horizontally adjacent pair
    (i, i+4), the absolute difference between the summed ``margin``-pixel
    strips on either side of the shared boundary is normalised by the strip
    area and averaged over the ``boundaries`` seams per direction (12 for a
    4x4 grid).

    :param patch_imgs: sequence of 2-D arrays (indexable as img[row][col]).
    :param patch_size: side length of each square patch.
    :param margin: width of the strip sampled on each side of a seam.
    :param boundaries: number of seams per direction used as the divisor.
    :return: (vertical_average, horizontal_average) blocking scores.
    """
    strip_area = patch_size * margin  # samples per boundary strip
    v_average = 0.0
    for i in range(len(patch_imgs) - 1):
        # (i + 1) % 4 == 0 means patch i is the last in its grid row, so
        # patch i + 1 starts a new row and shares no vertical seam with it.
        if (i + 1) % 4 != 0:
            left = patch_imgs[i]
            right = patch_imgs[i + 1]
            left_sum = sum(left[j][patch_size - 1 - k]
                           for j in range(patch_size) for k in range(margin))
            right_sum = sum(right[j][k]
                            for j in range(patch_size) for k in range(margin))
            v_average += np.abs(left_sum - right_sum) / strip_area
    v_average /= boundaries
    h_average = 0.0
    for i in range(len(patch_imgs) - 4):
        top = patch_imgs[i]
        bottom = patch_imgs[i + 4]
        top_sum = sum(top[patch_size - 1 - k][j]
                      for j in range(patch_size) for k in range(margin))
        bottom_sum = sum(bottom[k][j]
                         for j in range(patch_size) for k in range(margin))
        h_average += np.abs(top_sum - bottom_sum) / strip_area
    h_average /= boundaries
    return v_average, h_average


def train(model: "Hidden", device: "torch.device",
          hidden_config: "HiDDenConfiguration",
          train_options: "TrainingOptions",
          this_run_folder: str, tb_logger):
    """
    Trains the HiDDeN model on 32x32 crops of each input image, tracking a
    blocking-effect metric that measures visible seams between the encoded
    patches.

    :param model: The model
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
    :param hidden_config: The network configuration
    :param train_options: The training settings
    :param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
    :param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
                Pass None to disable TensorboardX logging
    :return: None
    """

    train_data, val_data = utils.get_data_loaders(hidden_config, train_options)
    file_count = len(train_data.dataset)
    # Ceiling division: a partial final batch still counts as a step.
    steps_in_epoch = (file_count + train_options.batch_size -
                      1) // train_options.batch_size

    print_each = 10
    images_to_save = 8
    saved_images_size = (512, 512)

    for epoch in range(train_options.start_epoch,
                       train_options.number_of_epochs + 1):
        logging.info('\nStarting epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        logging.info('Batch size = {}\nSteps in epoch = {}'.format(
            train_options.batch_size, steps_in_epoch))
        training_losses = defaultdict(AverageMeter)
        epoch_start = time.time()
        step = 1
        # ---- training ----
        for image, _ in train_data:
            image = image.to(device)
            # Crop each image into a grid of 32x32 patches and train on every
            # patch with its own random binary message.
            imgs = cropImg(32, image)
            bitwise_arr = []
            main_losses = None
            encoded_imgs = []
            for img in imgs:
                img = img.to(device)
                message = torch.Tensor(
                    np.random.choice(
                        [0, 1], (img.shape[0],
                                 hidden_config.message_length))).to(device)
                losses, (encoded_images, noised_images,
                         decoded_messages) = model.train_on_batch(
                             [img, message])
                # First channel of the first sample, as a numpy array, for
                # the blocking-effect computation below.
                encoded_imgs.append(
                    encoded_images[0][0].cpu().detach().numpy())
                # Losses of the last patch; the bitwise/blocking entries are
                # replaced with the aggregated values below.
                main_losses = losses
                for name, loss in losses.items():
                    # NB: this loss-dict key is padded with trailing spaces.
                    if name == 'bitwise-error  ':
                        bitwise_arr.append(loss)

            # Aggregate the per-patch metrics.  (The original code also
            # counted per-pixel seam jumps -- Vcount/Hcount/Total -- but
            # never used them, so that computation was dropped.)
            V_average, H_average = blocking_averages(encoded_imgs)
            bitwise_avg = np.average(np.array(bitwise_arr))
            blocking_loss = (H_average + V_average) / 2

            for name, loss in main_losses.items():
                if name == 'bitwise-error  ':
                    training_losses[name].update(bitwise_avg)
                elif name == 'blocking_effect':
                    training_losses[name].update(blocking_loss)
                else:
                    training_losses[name].update(loss)

            if step % print_each == 0 or step == steps_in_epoch:
                logging.info('Epoch: {}/{} Step: {}/{}'.format(
                    epoch, train_options.number_of_epochs, step,
                    steps_in_epoch))
                utils.log_progress(training_losses)
                logging.info('-' * 40)
            step += 1

        train_duration = time.time() - epoch_start
        logging.info('Epoch {} training duration {:.2f} sec'.format(
            epoch, train_duration))
        logging.info('-' * 40)
        utils.write_losses(os.path.join(this_run_folder, 'train.csv'),
                           training_losses, epoch, train_duration)
        if tb_logger is not None:
            tb_logger.save_losses(training_losses, epoch)
            tb_logger.save_grads(epoch)
            tb_logger.save_tensors(epoch)

        first_iteration = True
        validation_losses = defaultdict(AverageMeter)
        logging.info('Running validation for epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))

        # ---- validation ----
        for image, _ in val_data:
            image = image.to(device)
            imgs = cropImg(32, image)
            bitwise_arr = []
            main_losses = None
            encoded_imgs = []   # full encoded tensors, re-assembled for saving
            blocking_imgs = []  # numpy patches for the blocking metric
            for img in imgs:
                img = img.to(device)
                message = torch.Tensor(
                    np.random.choice(
                        [0, 1], (img.shape[0],
                                 hidden_config.message_length))).to(device)
                losses, (encoded_images, noised_images,
                         decoded_messages) = model.validate_on_batch(
                             [img, message])
                encoded_imgs.append(encoded_images)
                blocking_imgs.append(
                    encoded_images[0][0].cpu().detach().numpy())
                main_losses = losses
                for name, loss in losses.items():
                    if name == 'bitwise-error  ':
                        bitwise_arr.append(loss)

            # BUG FIX: the original validation path re-ran the horizontal
            # blocking loop without resetting its accumulators, carrying
            # sums over from the previous patch pair; the shared helper
            # resets them correctly for every pair.
            V_average, H_average = blocking_averages(blocking_imgs)
            bitwise_avg = np.average(np.array(bitwise_arr))
            blocking_loss = (H_average + V_average) / 2
            for name, loss in main_losses.items():
                if name == 'bitwise-error  ':
                    validation_losses[name].update(bitwise_avg)
                elif name == 'blocking_effect':
                    validation_losses[name].update(blocking_loss)
                else:
                    validation_losses[name].update(loss)
            # Re-assemble the encoded patches into full images.
            encoded_images = concatImgs(encoded_imgs)

            if first_iteration:
                # Save a sample of original vs. encoded images once per epoch.
                if hidden_config.enable_fp16:
                    image = image.float()
                    encoded_images = encoded_images.float()
                utils.save_images(
                    image.cpu()[:images_to_save, :, :, :],
                    encoded_images[:images_to_save, :, :, :].cpu(),
                    epoch,
                    os.path.join(this_run_folder, 'images'),
                    resize_to=saved_images_size)
                first_iteration = False

        utils.log_progress(validation_losses)
        logging.info('-' * 40)
        utils.save_checkpoint(model, train_options.experiment_name, epoch,
                              os.path.join(this_run_folder, 'checkpoints'))
        utils.write_losses(os.path.join(this_run_folder, 'validation.csv'),
                           validation_losses, epoch,
                           time.time() - epoch_start)
# Example #3 (0)
def train(model: Hidden, device: torch.device,
          hidden_config: HiDDenConfiguration, train_options: TrainingOptions,
          this_run_folder: str, tb_logger, vocab):
    """
    Trains the HiDDeN model on (image, keys, caption) batches.

    :param model: The model (exposes train_on_batch / validate_on_batch).
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
    :param hidden_config: The network configuration
    :param train_options: The training settings
    :param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
    :param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
                Pass None to disable TensorboardX logging
    :param vocab: Vocabulary used by the data loaders and for decoding the
                predicted sentences (exposes idx2word).
    :return: None
    """

    train_data, val_data = utils.get_data_loaders(hidden_config, train_options,
                                                  vocab)
    file_count = len(train_data.dataset)
    # Number of steps per epoch (ceiling division by batch size).
    if file_count % train_options.batch_size == 0:
        steps_in_epoch = file_count // train_options.batch_size
    else:
        steps_in_epoch = file_count // train_options.batch_size + 1

    print_each = 10                 # log progress every N training steps
    images_to_save = 8              # validation images dumped per epoch
    saved_images_size = (512, 512)  # on-disk size of the saved images

    for epoch in range(train_options.start_epoch,
                       train_options.number_of_epochs + 1):
        logging.info('\nStarting epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        logging.info('Batch size = {}\nSteps in epoch = {}'.format(
            train_options.batch_size, steps_in_epoch))
        training_losses = defaultdict(AverageMeter)
        epoch_start = time.time()
        step = 1
        # ekeys/dkeys: presumably per-sample encoder/decoder keys -- TODO
        # confirm against the data loader.  `length` stays on the CPU.
        for image, ekeys, dkeys, caption, length in train_data:
            image, caption, ekeys, dkeys = image.to(device), caption.to(
                device), ekeys.to(device), dkeys.to(device)

            losses, _ = model.train_on_batch(
                [image, ekeys, dkeys, caption, length])

            for name, loss in losses.items():
                training_losses[name].update(loss)
            if step % print_each == 0 or step == steps_in_epoch:
                logging.info('Epoch: {}/{} Step: {}/{}'.format(
                    epoch, train_options.number_of_epochs, step,
                    steps_in_epoch))
                utils.log_progress(training_losses)
                logging.info('-' * 40)
            step += 1

        train_duration = time.time() - epoch_start
        logging.info('Epoch {} training duration {:.2f} sec'.format(
            epoch, train_duration))
        logging.info('-' * 40)
        utils.write_losses(os.path.join(this_run_folder, 'train.csv'),
                           training_losses, epoch, train_duration)
        if tb_logger is not None:
            tb_logger.save_losses(training_losses, epoch)
            tb_logger.save_grads(epoch)
            tb_logger.save_tensors(epoch)

        first_iteration = True
        validation_losses = defaultdict(AverageMeter)
        logging.info('Running validation for epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        for image, ekeys, dkeys, caption, length in val_data:
            image, caption, ekeys, dkeys = image.to(device), caption.to(
                device), ekeys.to(device), dkeys.to(device)

            losses, (encoded_images, noised_images, decoded_messages, predicted_sents) = \
                model.validate_on_batch([image, ekeys, dkeys, caption, length])

            #print(predicted)
            #exit()
            # Decode and print the first sample whose predicted sentence can
            # be decoded (the `break` stops after one success); an IndexError
            # falls through to the next sample in the batch.
            predicted_sents = predicted_sents.cpu().numpy()
            for i in range(train_options.batch_size):
                try:
                    #print(''.join([vocab.idx2word[int(w)] + ' ' for w in predicted.cpu().numpy()[i::train_options.batch_size]][1:length[i]-1]))
                    print("".join([
                        vocab.idx2word[int(idx)] + ' '
                        for idx in predicted_sents[i]
                    ]))
                    break
                except IndexError:
                    print(f'{i}th batch does not have enough length.')

            for name, loss in losses.items():
                validation_losses[name].update(loss)
            if first_iteration:
                # Save a sample of original vs. encoded images once per epoch.
                if hidden_config.enable_fp16:
                    image = image.float()
                    encoded_images = encoded_images.float()
                utils.save_images(
                    image.cpu()[:images_to_save, :, :, :],
                    encoded_images[:images_to_save, :, :, :].cpu(),
                    epoch,
                    os.path.join(this_run_folder, 'images'),
                    resize_to=saved_images_size)
                first_iteration = False

        utils.log_progress(validation_losses)
        logging.info('-' * 40)
        utils.save_checkpoint(model, train_options.experiment_name, epoch,
                              os.path.join(this_run_folder, 'checkpoints'))
        utils.write_losses(os.path.join(this_run_folder, 'validation.csv'),
                           validation_losses, epoch,
                           time.time() - epoch_start)
def train(model: Hidden, device: torch.device,
          hidden_config: HiDDenConfiguration, train_options: TrainingOptions,
          this_run_folder: str, tb_logger):
    """
    Trains the HiDDeN model, capping each epoch at a fixed number of steps
    and validating on roughly a tenth of that.

    :param model: The model
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
    :param hidden_config: The network configuration
    :param train_options: The training settings
    :param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
    :param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
                Pass None to disable TensorboardX logging
    :return: None
    """

    train_data, val_data = utils.get_data_loaders(hidden_config, train_options)
    file_count = len(train_data.dataset)
    # Ceiling division: a partial final batch still counts as a step.
    steps_in_epoch = (file_count + train_options.batch_size -
                      1) // train_options.batch_size
    # Cap the epoch length for faster experiments.  The original code
    # overwrote the computed value with a hard-coded 313 unconditionally,
    # which misreported the step count for smaller datasets.
    max_steps_per_epoch = 313
    steps_in_epoch = min(steps_in_epoch, max_steps_per_epoch)

    print_each = 10
    images_to_save = 8
    saved_images_size = (512, 512)

    for epoch in range(train_options.start_epoch,
                       train_options.number_of_epochs + 1):
        logging.info('\nStarting epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        logging.info('Batch size = {}\nSteps in epoch = {}'.format(
            train_options.batch_size, steps_in_epoch))
        training_losses = defaultdict(AverageMeter)
        epoch_start = time.time()
        step = 1
        for image, _ in train_data:
            image = image.to(device)
            # Fresh random binary message for every image in the batch.
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, _ = model.train_on_batch([image, message])

            for name, loss in losses.items():
                training_losses[name].update(loss)
            if step % print_each == 0 or step == steps_in_epoch:
                logging.info('Epoch: {}/{} Step: {}/{}'.format(
                    epoch, train_options.number_of_epochs, step,
                    steps_in_epoch))
                utils.log_progress(training_losses)
                logging.info('-' * 40)
            # Break AFTER processing the final step.  The original code
            # incremented first and then broke, so it trained one batch
            # short of steps_in_epoch and the final-step log line above
            # could never fire.
            if step == steps_in_epoch:
                break
            step += 1

        train_duration = time.time() - epoch_start
        logging.info('Epoch {} training duration {:.2f} sec'.format(
            epoch, train_duration))
        logging.info('-' * 40)
        utils.write_losses(os.path.join(this_run_folder, 'train.csv'),
                           training_losses, epoch, train_duration)
        if tb_logger is not None:
            tb_logger.save_losses(training_losses, epoch)
            tb_logger.save_grads(epoch)
            tb_logger.save_tensors(epoch)

        first_iteration = True
        validation_losses = defaultdict(AverageMeter)
        logging.info('Running validation for epoch {}/{}'.format(
            epoch, train_options.number_of_epochs))
        # Validate on about a tenth of an epoch's batches; when the quotient
        # is zero the break never triggers and all batches are used.
        val_steps = steps_in_epoch // 10
        step = 1
        for image, _ in val_data:
            image = image.to(device)
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images,
                     decoded_messages) = model.validate_on_batch(
                         [image, message])
            for name, loss in losses.items():
                validation_losses[name].update(loss)
            if first_iteration:
                # Save a sample of original vs. encoded images once per epoch.
                if hidden_config.enable_fp16:
                    image = image.float()
                    encoded_images = encoded_images.float()
                utils.save_images(
                    image.cpu()[:images_to_save, :, :, :],
                    encoded_images[:images_to_save, :, :, :].cpu(),
                    epoch,
                    os.path.join(this_run_folder, 'images'),
                    resize_to=saved_images_size)
                first_iteration = False
            # Same off-by-one fix as the training loop above.
            if step == val_steps:
                break
            step += 1

        utils.log_progress(validation_losses)
        logging.info('-' * 40)
        utils.save_checkpoint(model, train_options.experiment_name, epoch,
                              os.path.join(this_run_folder, 'checkpoints'))
        utils.write_losses(os.path.join(this_run_folder, 'validation.csv'),
                           validation_losses, epoch,
                           time.time() - epoch_start)
def main():
    """
    Re-run validation over every completed experiment found under
    --runs_root: load each run's saved options and last checkpoint, rebuild
    the model, validate on the 'val' split of --data-dir, and append the
    accumulated losses to a shared validation_run.csv.
    """
    # device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    device = torch.device('cpu')  # forced to CPU; see the commented line above

    parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    # parser.add_argument('--size', '-s', default=128, type=int, help='The size of the images (images are square so this is height and width).')
    parser.add_argument('--data-dir',
                        '-d',
                        required=True,
                        type=str,
                        help='The directory where the data is stored.')
    parser.add_argument(
        '--runs_root',
        '-r',
        default=os.path.join('.', 'experiments'),
        type=str,
        help='The root folder where data about experiments are stored.')

    args = parser.parse_args()
    print_each = 25  # print progress every N validation batches

    # Every subdirectory of runs_root is treated as a completed run, except
    # the 'no-noise-defaults' baseline.
    completed_runs = [
        o for o in os.listdir(args.runs_root)
        if os.path.isdir(os.path.join(args.runs_root, o))
        and o != 'no-noise-defaults'
    ]

    print(completed_runs)

    write_csv_header = True  # only the first run writes the CSV header
    for run_name in completed_runs:
        current_run = os.path.join(args.runs_root, run_name)
        print(f'Run folder: {current_run}')
        options_file = os.path.join(current_run, 'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(
            options_file)
        # Point both folders at the validation split and use a small batch.
        train_options.train_folder = os.path.join(args.data_dir, 'val')
        train_options.validation_folder = os.path.join(args.data_dir, 'val')
        train_options.batch_size = 4
        checkpoint = utils.load_last_checkpoint(
            os.path.join(current_run, 'checkpoints'))

        # Rebuild the model from the run's saved configs and load its weights.
        noiser = Noiser(noise_config, device)
        model = Hidden(hidden_config, device, noiser, tb_logger=None)
        utils.model_from_checkpoint(model, checkpoint)

        print('Model loaded successfully. Starting validation run...')
        _, val_data = utils.get_data_loaders(hidden_config, train_options)
        file_count = len(val_data.dataset)
        # Ceiling division: a partial final batch still counts as a step.
        if file_count % train_options.batch_size == 0:
            steps_in_epoch = file_count // train_options.batch_size
        else:
            steps_in_epoch = file_count // train_options.batch_size + 1

        losses_accu = {}
        step = 0
        for image, _ in val_data:
            step += 1
            image = image.to(device)
            # Fresh random binary message per image in the batch.
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images,
                     decoded_messages) = model.validate_on_batch(
                         [image, message])
            if not losses_accu:  # dict is empty, initialize
                for name in losses:
                    losses_accu[name] = []
            for name, loss in losses.items():
                losses_accu[name].append(loss)
            if step % print_each == 0:
                print(f'Step {step}/{steps_in_epoch}')
                utils.print_progress(losses_accu)
                print('-' * 40)

        utils.print_progress(losses_accu)
        # Append this run's losses to the shared CSV (header written once).
        write_validation_loss(os.path.join(args.runs_root,
                                           'validation_run.csv'),
                              losses_accu,
                              run_name,
                              checkpoint['epoch'],
                              write_header=write_csv_header)
        write_csv_header = False