Code Example #1
def load_model(config_file, device):
    """Rebuild a Hidden model from a run's saved options and checkpoint."""
    config_folder = os.path.join('config_folder', config_file)
    option_file = os.path.join(config_folder, 'options.pickle')
    checkpoint_file = os.path.join(config_folder, 'checkpoint.pyt')

    checkpoint = torch.load(checkpoint_file, map_location='cpu')
    _, hidden_config, noise_config = utils.load_options(option_file)
    noiser = Noiser(noise_config, device)
    model = Hidden(hidden_config, device, noiser, tb_logger=None)
    utils.model_from_checkpoint(model, checkpoint)

    return model
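The examples on this page call utils.load_options but never define it. A minimal sketch consistent with how these projects persist their configuration (three sequential pickle.dump calls, as in examples #4, #6 and #11) could look like the following; the exact signature and any post-load fix-ups vary per project, so treat this as an assumption:

import pickle

def load_options(options_file_name):
    # Objects were dumped in the order train_options, noise_config, hidden_config
    # (see the pickle.dump calls in examples #4, #6 and #11).
    with open(options_file_name, 'rb') as f:
        train_options = pickle.load(f)
        noise_config = pickle.load(f)
        hidden_config = pickle.load(f)
    # Returned in the order the call sites unpack them.
    return train_options, hidden_config, noise_config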
Code Example #2
def system_setup(run_data):
    """Set up the system."""
    run_data["options"] = utils.load_options()
    if run_data["options"].verbosity > 0:
        print("Options set as:")
        print(run_data["options"])
    if 'cuda' in (dev_name := run_data["options"].model_params['device']):
        cuda_unavailable = not torch.cuda.is_available()
        # Parse the zero-based index from 'cuda:N'; +1 turns it into the number
        # of devices that would be required for this index to be valid.
        required_devices = int(dev_name[dev_name.find(':') + 1:]) + 1
        index_out_of_range = required_devices > torch.cuda.device_count()
        if cuda_unavailable or index_out_of_range:
            raise utils.errors.MisconfigurationError(
                f"Device '{dev_name}' is not available on this machine"
            )
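For reference, the device check above can be extracted into a standalone helper. This is a hypothetical sketch (validate_cuda_device is not part of any of these projects) that also handles a bare 'cuda' string, which the original parsing would choke on:

import torch

def validate_cuda_device(dev_name):
    """Raise if a 'cuda' or 'cuda:N' device string cannot be used."""
    if 'cuda' not in dev_name:
        return
    if not torch.cuda.is_available():
        raise RuntimeError(f"Device '{dev_name}' is not available on this machine")
    # A bare 'cuda' defaults to device 0.
    index = int(dev_name.split(':')[1]) if ':' in dev_name else 0
    if index >= torch.cuda.device_count():
        raise RuntimeError(f"Device '{dev_name}' is not available on this machine")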
Code Example #3
    '--device',
    '-d',
    type=str,
    required=False,
    default="cameras",
    help='Device used to collect data. It can be either "webcam" or "cameras"')
args = parser.parse_args()

# unpack parsed args
exp = args.exp
subject_id = args.subject
label_id = args.label
device = args.device

# load camera info --> options.json
op, cam = utils.load_options(device)
# load experiment labels --> labels.json
labels = utils.load_labels(exp)

log = Logger(name="Capture")

# get folder to store the collected data
if exp == "emotions":
    folder = op.folder_emotions
elif exp == "signals":
    folder = op.folder_signals
elif exp == "gestures":
    folder = op.folder_gestures
elif exp == "adl":
    folder = op.folder_adl
elif exp == "falls":
    folder = op.folder_falls  # assumed attribute; the original snippet is truncated here
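The elif ladder above maps each experiment name to an attribute of the loaded options; a dictionary lookup is a compact equivalent (attribute names taken from the branches shown, with folder_falls assumed from the truncated last branch):

folders = {
    "emotions": op.folder_emotions,
    "signals": op.folder_signals,
    "gestures": op.folder_gestures,
    "adl": op.folder_adl,
    "falls": op.folder_falls,  # assumed; the original snippet is cut off at this branch
}
folder = folders[exp]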
Code Example #4
File: main.py Project: dlshu/RS-GAN-v1
def main():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    parent_parser = argparse.ArgumentParser(
        description='Training of HiDDeN nets')
    subparsers = parent_parser.add_subparsers(dest='command',
                                              help='Sub-parser for commands')
    new_run_parser = subparsers.add_parser('new', help='starts a new run')
    new_run_parser.add_argument('--data-dir',
                                '-d',
                                required=True,
                                type=str,
                                help='The directory where the data is stored.')
    # Anno dir
    new_run_parser.add_argument(
        '--anno-dir',
        '-a',
        type=str,
        help=
        'The directory where the annotations are stored. Specify only if you have annotations in a different folder.'
    )

    new_run_parser.add_argument('--batch-size',
                                '-b',
                                required=True,
                                type=int,
                                help='The batch size.')
    new_run_parser.add_argument('--epochs',
                                '-e',
                                default=300,
                                type=int,
                                help='Number of epochs to run the simulation.')
    new_run_parser.add_argument('--name',
                                required=True,
                                type=str,
                                help='The name of the experiment.')

    new_run_parser.add_argument(
        '--size',
        '-s',
        default=128,
        type=int,
        help=
        'The size of the images (images are square so this is height and width).'
    )
    new_run_parser.add_argument('--message',
                                '-m',
                                default=256,
                                type=int,
                                help='The length in bits of the watermark.')
    new_run_parser.add_argument(
        '--continue-from-folder',
        '-c',
        default='',
        type=str,
        help=
        'The folder from where to continue a previous run. Leave blank if you are starting a new experiment.'
    )
    # parser.add_argument('--tensorboard', dest='tensorboard', action='store_true',
    #                     help='If specified, adds a Tensorboard log. On by default')
    new_run_parser.add_argument('--tensorboard',
                                action='store_true',
                                help='Use to switch on Tensorboard logging.')
    new_run_parser.add_argument('--enable-fp16',
                                dest='enable_fp16',
                                action='store_true',
                                help='Enable mixed-precision training.')

    new_run_parser.add_argument(
        '--noise',
        nargs='*',
        action=NoiseArgParser,
        help=
        "Noise layers configuration. Use quotes when specifying configuration, e.g. 'cropout((0.55, 0.6), (0.55, 0.6))'"
    )

    new_run_parser.set_defaults(tensorboard=False)
    new_run_parser.set_defaults(enable_fp16=False)
    new_run_parser.add_argument('--vocab-path',
                                '-v',
                                type=str,
                                default='./data/vocab.pkl',
                                help='load the vocab')

    continue_parser = subparsers.add_parser('continue',
                                            help='Continue a previous run')
    continue_parser.add_argument(
        '--folder',
        '-f',
        required=True,
        type=str,
        help='Continue from the last checkpoint in this folder.')
    continue_parser.add_argument(
        '--data-dir',
        '-d',
        required=False,
        type=str,
        help=
        'The directory where the data is stored. Specify a value only if you want to override the previous value.'
    )
    # Anno dir
    continue_parser.add_argument(
        '--anno-dir',
        '-a',
        required=False,
        type=str,
        help=
        'The directory where the annotations are stored. Specify a value only if you want to override the previous value.'
    )
    continue_parser.add_argument(
        '--epochs',
        '-e',
        required=False,
        type=int,
        help=
        'Number of epochs to run the simulation. Specify a value only if you want to override the previous value.'
    )

    args = parent_parser.parse_args()
    checkpoint = None
    loaded_checkpoint_file_name = None

    # --vocab-path is only defined on the 'new' sub-parser, so fall back to its
    # default when the 'continue' command is used.
    vocab_path = getattr(args, 'vocab_path', './data/vocab.pkl')
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    if args.command == 'continue':
        this_run_folder = args.folder
        options_file = os.path.join(this_run_folder,
                                    'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(
            options_file)
        checkpoint, loaded_checkpoint_file_name = utils.load_last_checkpoint(
            os.path.join(this_run_folder, 'checkpoints'))
        train_options.start_epoch = checkpoint['epoch'] + 1
        if args.data_dir is not None:
            train_options.train_folder = os.path.join(args.data_dir, 'train')
            train_options.validation_folder = os.path.join(
                args.data_dir, 'val')
        if args.epochs is not None:
            if train_options.start_epoch < args.epochs:
                train_options.number_of_epochs = args.epochs
            else:
                print(
                    f'Command line specifies number of epochs = {args.epochs}, but folder={args.folder} '
                    f'already contains a checkpoint for epoch = {train_options.start_epoch}.'
                )
                exit(1)

    else:
        assert args.command == 'new'
        start_epoch = 1

        train_options = TrainingOptions(
            batch_size=args.batch_size,
            number_of_epochs=args.epochs,
            train_folder=os.path.join(args.data_dir, 'train'),
            validation_folder=os.path.join(args.data_dir, 'val'),
            ann_train=os.path.join(args.data_dir, 'ann_train.json'),
            ann_val=os.path.join(args.data_dir, 'ann_val.json'),
            runs_folder=os.path.join('.', 'runs'),
            start_epoch=start_epoch,
            experiment_name=args.name)

        noise_config = args.noise if args.noise is not None else []
        hidden_config = HiDDenConfiguration(H=args.size,
                                            W=args.size,
                                            message_length=args.message,
                                            encoder_blocks=4,
                                            encoder_channels=64,
                                            decoder_blocks=7,
                                            decoder_channels=64,
                                            use_discriminator=True,
                                            use_vgg=False,
                                            discriminator_blocks=3,
                                            discriminator_channels=64,
                                            decoder_loss=1,
                                            encoder_loss=0.7,
                                            adversarial_loss=1e-3,
                                            vocab_size=len(vocab),
                                            enable_fp16=args.enable_fp16)

        this_run_folder = utils.create_folder_for_run(
            train_options.runs_folder, args.name)
        with open(os.path.join(this_run_folder, 'options-and-config.pickle'),
                  'wb+') as f:
            pickle.dump(train_options, f)
            pickle.dump(noise_config, f)
            pickle.dump(hidden_config, f)

    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        handlers=[
                            logging.FileHandler(
                                os.path.join(
                                    this_run_folder,
                                    f'{train_options.experiment_name}.log')),
                            logging.StreamHandler(sys.stdout)
                        ])
    if (args.command == 'new' and args.tensorboard) or \
            (args.command == 'continue' and os.path.isdir(os.path.join(this_run_folder, 'tb-logs'))):
        logging.info('Tensorboard is enabled. Creating logger.')
        from tensorboard_logger import TensorBoardLogger
        tb_logger = TensorBoardLogger(os.path.join(this_run_folder, 'tb-logs'))
    else:
        tb_logger = None

    noiser = Noiser(noise_config, device)

    model = Hidden(hidden_config, device, noiser, tb_logger)

    if args.command == 'continue':
        # if we are continuing, we have to load the model params
        assert checkpoint is not None
        logging.info(
            f'Loading checkpoint from file {loaded_checkpoint_file_name}')
        utils.model_from_checkpoint(model, checkpoint)

    logging.info('HiDDeN model: {}\n'.format(model.to_stirng()))
    logging.info('Model Configuration:\n')
    logging.info(pprint.pformat(vars(hidden_config)))
    logging.info('\nNoise configuration:\n')
    logging.info(pprint.pformat(str(noise_config)))
    logging.info('\nTraining options:\n')
    logging.info(pprint.pformat(vars(train_options)))

    train(model, device, hidden_config, train_options, this_run_folder,
          tb_logger, vocab)
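utils.load_last_checkpoint is also not shown. In examples #4, #14 and #15 it returns a (checkpoint, file_name) pair, while examples #6 and #11 unpack a single return value, so the projects clearly carry different variants. A sketch of the pair-returning version, assuming checkpoint file names sort by epoch:

import os
import torch

def load_last_checkpoint(checkpoint_folder):
    # Assumes file names such as 'checkpoint-001.pyt' that sort by epoch.
    last_file = os.path.join(checkpoint_folder,
                             sorted(os.listdir(checkpoint_folder))[-1])
    checkpoint = torch.load(last_file, map_location='cpu')
    return checkpoint, last_file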
Code Example #5
def main():

    service_name = 'GestureRecognizer.Recognition'
    log = Logger(name=service_name)

    op = load_options()

    channel = Channel(op.broker_uri)
    log.info('Connected to broker {}', op.broker_uri)

    exporter = create_exporter(service_name=service_name, uri=op.zipkin_uri)

    subscription = Subscription(channel=channel, name=service_name)
    for group_id in list(op.group_ids):
        subscription.subscribe(
            'SkeletonsGrouper.{}.Localization'.format(group_id))

    model = GestureRecognizer("model_gesture1_72.00.pth")
    log.info('Initialized the model')

    unc = Gauge('uncertainty_total', "Uncertainty of the prediction")
    unc.set(0.0)
    start_http_server(8000)

    buffer = list()
    predict_flag = False

    def mean(values):
        return sum(values) / len(values)

    while True:

        msg = channel.consume()

        tracer = Tracer(exporter, span_context=msg.extract_tracing())
        span = tracer.start_span(name='detection_and_info')

        annotations = msg.unpack(ObjectAnnotations)
        skeleton = select_skeletons(annotations=annotations,
                                    min_keypoints=op.skeletons.min_keypoints,
                                    x_range=op.skeletons.x_range,
                                    y_range=op.skeletons.y_range)

        if skeleton is None:
            tracer.end_span()
            continue

        skl = Skeleton(skeleton)
        skl_normalized = skl.normalize()
        pred, prob, uncertainty = model.predict(skl_normalized)

        # The four branches below form a small state machine over
        # (pred, predict_flag); see the distilled sketch after this example.
        if pred == 0 and predict_flag is False:
            # idle: no gesture in progress
            pass

        elif pred != 0 and predict_flag is False:
            # a gesture starts
            initial_time = time.time()
            predict_flag = True
            buffer.append(uncertainty)

        elif pred != 0 and predict_flag is True:
            # the gesture continues
            buffer.append(uncertainty)

        elif pred == 0 and predict_flag is True:
            # the gesture ended: report the mean uncertainty if it lasted long enough
            predict_flag = False
            exec_time = time.time() - initial_time
            if exec_time >= op.exec_time:
                unc.set(mean(buffer))
                log.info("execution_ms: {}, buffer_mean: {}",
                         (exec_time * 1000), mean(buffer))
            buffer = []

        tracer.end_span()

        info = {
            'prediction': pred,
            'probability': prob,
            'uncertainty': uncertainty,
            'took_ms': {
                'service': round(span_duration_ms(span), 2)
            }
        }
        log.info('{}', str(info).replace("'", '"'))
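The branch logic above distils to a small state machine: buffer uncertainty values while a non-idle gesture (pred != 0) is being observed, then flush the statistics once the prediction returns to idle. A self-contained sketch of the same logic (update_gesture_state is hypothetical, not part of the service):

import time

def update_gesture_state(pred, uncertainty, state):
    """state is a dict with keys 'active', 'start' and 'buffer'. Returns
    (elapsed_seconds, mean_uncertainty) when a gesture ends, else None."""
    if pred != 0 and not state['active']:
        # a gesture starts
        state.update(active=True, start=time.time(), buffer=[uncertainty])
    elif pred != 0 and state['active']:
        # the gesture continues
        state['buffer'].append(uncertainty)
    elif pred == 0 and state['active']:
        # the gesture ended
        state['active'] = False
        elapsed = time.time() - state['start']
        mean_uncertainty = sum(state['buffer']) / len(state['buffer'])
        state['buffer'] = []
        return elapsed, mean_uncertainty
    return None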
Code Example #6
def main():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    parser.add_argument('--size', '-s', default=128, type=int)
    parser.add_argument('--data-dir', '-d', required=True, type=str)

    parser.add_argument('--runs-folder',
                        '-sf',
                        default=os.path.join('.', 'runs'),
                        type=str)
    parser.add_argument('--message', '-m', default=30, type=int)
    parser.add_argument('--epochs', '-e', default=400, type=int)
    parser.add_argument('--batch-size', '-b', required=True, type=int)
    parser.add_argument('--continue-from-folder', '-c', default='', type=str)
    parser.add_argument('--tensorboard',
                        dest='tensorboard',
                        action='store_true')
    parser.add_argument('--no-tensorboard',
                        dest='tensorboard',
                        action='store_false')
    parser.set_defaults(tensorboard=True)

    args = parser.parse_args()

    checkpoint = None
    if args.continue_from_folder != '':
        this_run_folder = args.continue_from_folder
        train_options, hidden_config, noise_config = utils.load_options(
            this_run_folder)
        checkpoint = utils.load_last_checkpoint(
            os.path.join(this_run_folder, 'checkpoints'))
        train_options.start_epoch = checkpoint['epoch']
    else:
        start_epoch = 1
        train_options = TrainingOptions(
            batch_size=args.batch_size,
            number_of_epochs=args.epochs,
            train_folder=os.path.join(args.data_dir, 'train'),
            validation_folder=os.path.join(args.data_dir, 'val'),
            runs_folder=os.path.join('.', 'runs'),
            start_epoch=start_epoch)

        # noise_config = [
        #     {
        #         'type': 'resize',
        #         'resize_ratio': 0.4
        # }]
        noise_config = []
        hidden_config = HiDDenConfiguration(H=args.size,
                                            W=args.size,
                                            message_length=args.message,
                                            encoder_blocks=4,
                                            encoder_channels=64,
                                            decoder_blocks=7,
                                            decoder_channels=64,
                                            use_discriminator=True,
                                            use_vgg=False,
                                            discriminator_blocks=3,
                                            discriminator_channels=64,
                                            decoder_loss=1,
                                            encoder_loss=0.7,
                                            adversarial_loss=1e-3)

        this_run_folder = utils.create_folder_for_run(train_options)
        with open(os.path.join(this_run_folder, 'options-and-config.pickle'),
                  'wb+') as f:
            pickle.dump(train_options, f)
            pickle.dump(noise_config, f)
            pickle.dump(hidden_config, f)

    noiser = Noiser(noise_config, device)

    if args.tensorboard:
        print('Tensorboard is enabled. Creating logger.')
        from tensorboard_logger import TensorBoardLogger
        tb_logger = TensorBoardLogger(os.path.join(this_run_folder, 'tb-logs'))
    else:
        tb_logger = None

    model = Hidden(hidden_config, device, noiser, tb_logger)

    if args.continue_from_folder != '':
        # if we are continuing, we have to load the model params
        assert checkpoint is not None
        utils.model_from_checkpoint(model, checkpoint)

    print('HiDDeN model: {}\n'.format(model.to_stirng()))
    print('Model Configuration:\n')
    pprint.pprint(vars(hidden_config))
    print('\nNoise configuration:\n')
    pprint.pprint(str(noise_config))
    print('\nTraining options:\n')
    pprint.pprint(vars(train_options))
    print()

    train(model, device, hidden_config, train_options, this_run_folder,
          tb_logger)
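Examples #6 and #11 use the paired store_true/store_false pattern to get a boolean flag that defaults to on but can be switched off. On Python 3.9+, argparse.BooleanOptionalAction produces the same --tensorboard/--no-tensorboard pair from a single declaration:

import argparse

parser = argparse.ArgumentParser()
# One declaration generates both --tensorboard and --no-tensorboard.
parser.add_argument('--tensorboard',
                    action=argparse.BooleanOptionalAction,
                    default=True,
                    help='Switch Tensorboard logging on or off.')
print(parser.parse_args([]))                    # Namespace(tensorboard=True)
print(parser.parse_args(['--no-tensorboard']))  # Namespace(tensorboard=False)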
Code Example #7
    return resize(im, config)


def show_image(dataset, emotion, config=None):
    """
  Display an image from an emotion given a dataset
  """

    im = get_image(dataset, emotion, config)

    if im is not None:
        destroyWindows()
        cv2.imshow(emotion.upper(), im)


dt, config = utils.load_options()
keys = utils.load_keymap()

# show command screen
cv2.imshow("COMMANDS", command_screen(keys))

while True:
    k = cv2.waitKey(1)

    if k == ord(keys.neutral):
        show_image(choice(dt), "neutral", config)

    elif k == ord(keys.angry):
        show_image(choice(dt), "angry", config)

    elif k == ord(keys.contempt):
        show_image(choice(dt), "contempt", config)  # assumed; the snippet is truncated here
Code Example #8
    g = cv2.merge([zero_array, g, zero_array])
    r = cv2.merge([zero_array, zero_array, r])
    # Pick a single colour channel for the output depending on the frame index k:
    # red for the first sixth of the frames, green for the next sixth, blue afterwards.
    if k <= num_frames // 6:
        resultado = r
    elif k <= 2 * num_frames // 6:
        resultado = g
    else:
        resultado = b
    return resultado


log = Logger(name='WatchVideos')
with open('keymap.json', 'r') as f:
    keymap = json.load(f)
options = load_options(print_options=False)

#categorias=['TIME_UP_GO_GRAYSCALE']
#categorias=['CAMINHADA_EM_CIRCULO_GRAYSCALE','IDA_E_VOLTA__DUAS_VEZES_GRAYSCALE']
categorias = [
    'TIME_UP_GO_GRAYSCALE', 'CAMINHADA_EM_CIRCULO_GRAYSCALE',
    'IDA_E_VOLTA__DUAS_VEZES_GRAYSCALE',
    'ELEVACAO_EXCESSIVA_DO_CALCANHAR_GRASCALE', 'CIRCUNDACAO_DO_PE_GRAYSCALE',
    'ASSIMETRIA_DO_COMPRIMENTO_DE_PASSO_GRAYSCALE'
]
for categoria in categorias:
    t = 0
    for l in range(0, 13):
        aux_diferenca_frame = [0]
        aux_soma_frame = [0]
        frame_anterior = [0]
Code Example #9
def main():
    # device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    device = torch.device('cpu')

    parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    # parser.add_argument('--size', '-s', default=128, type=int, help='The size of the images (images are square so this is height and width).')
    parser.add_argument('--data-dir',
                        '-d',
                        required=True,
                        type=str,
                        help='The directory where the data is stored.')
    parser.add_argument(
        '--runs_root',
        '-r',
        default=os.path.join('.', 'experiments'),
        type=str,
        help='The root folder where data about experiments are stored.')

    args = parser.parse_args()
    print_each = 25

    completed_runs = [
        o for o in os.listdir(args.runs_root)
        if os.path.isdir(os.path.join(args.runs_root, o))
        and o != 'no-noise-defaults'
    ]

    print(completed_runs)

    write_csv_header = True
    for run_name in completed_runs:
        current_run = os.path.join(args.runs_root, run_name)
        print(f'Run folder: {current_run}')
        options_file = os.path.join(current_run, 'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(
            options_file)
        train_options.train_folder = os.path.join(args.data_dir, 'val')
        train_options.validation_folder = os.path.join(args.data_dir, 'val')
        train_options.batch_size = 4
        checkpoint = utils.load_last_checkpoint(
            os.path.join(current_run, 'checkpoints'))

        noiser = Noiser(noise_config, device)
        model = Hidden(hidden_config, device, noiser, tb_logger=None)
        utils.model_from_checkpoint(model, checkpoint)

        print('Model loaded successfully. Starting validation run...')
        _, val_data = utils.get_data_loaders(hidden_config, train_options)
        file_count = len(val_data.dataset)
        if file_count % train_options.batch_size == 0:
            steps_in_epoch = file_count // train_options.batch_size
        else:
            steps_in_epoch = file_count // train_options.batch_size + 1

        losses_accu = {}
        step = 0
        for image, _ in val_data:
            step += 1
            image = image.to(device)
            message = torch.Tensor(
                np.random.choice(
                    [0, 1],
                    (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images,
                     decoded_messages) = model.validate_on_batch(
                         [image, message])
            if not losses_accu:  # dict is empty, initialize
                for name in losses:
                    losses_accu[name] = []
            for name, loss in losses.items():
                losses_accu[name].append(loss)
            if step % print_each == 0:
                print(f'Step {step}/{steps_in_epoch}')
                utils.print_progress(losses_accu)
                print('-' * 40)

        utils.print_progress(losses_accu)
        write_validation_loss(os.path.join(args.runs_root,
                                           'validation_run.csv'),
                              losses_accu,
                              run_name,
                              checkpoint['epoch'],
                              write_header=write_csv_header)
        write_csv_header = False
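write_validation_loss is not defined in the snippet. A sketch consistent with its call site here, where losses_accu maps loss names to lists of per-step values (example #15 passes AverageMeter objects instead, so its averaging line would differ):

import csv

def write_validation_loss(file_name, losses_accu, experiment_name, epoch,
                          write_header=False):
    # Append one row of averaged validation losses to a CSV file.
    with open(file_name, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        if write_header:
            writer.writerow(['experiment_name', 'epoch'] + list(losses_accu))
        row = [experiment_name, epoch]
        row += [sum(values) / len(values) for values in losses_accu.values()]
        writer.writerow(row)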
Code Example #10
        title = "No Attack"
    title = f"Performance of {options.fit_fun} under {title}"
    for k in stats.keys():
        if filter_fn(k):
            ax.plot(epochs, stats[k], label=k.replace('_', ' '))
    plt.xlabel("Epochs")
    plt.ylabel("Rate")
    plt.title(title.title(), fontdict={'fontsize': 7})
    plt.legend(loc=1, fontsize=5, framealpha=0.4)
    plt.savefig(img_name, dpi=320, metadata={'comment': str(options)})
    print(f"Done. Saved plot as {img_name}")


if __name__ == '__main__':
    print("Calculating statistics and generating plots...")
    options = utils.load_options()
    stats = gen_stats(options)
    img_name = "{}_{}_{:.1f}_{}_{}_{}{}".format(
        options.dataset, options.fit_fun,
        options.adversaries['percent_adv'] * 100, options.adversaries['type'],
        'optimized' if options.adversaries['optimized'] else f"{tt}" if
        (tt := options.adversaries['toggle_times']) else 'no_toggle',
        f"{d}_delay" if (d := options.adversaries['delay']) is not None
        and d > 0 else 'no_delay',
        "_scaled" if options.adversaries['scale_up'] else '').replace(
            ' ', '_')
    match_acc = lambda k: re.match(r'accuracy_\d', k)  # raw string avoids an invalid-escape warning
    save_plot(options, stats, lambda k: match_acc(k) is None,
              f"{img_name}.png")
    save_plot(options, stats, lambda k: match_acc(k) is not None,
              f"{img_name}_accuracies.png")
Code Example #11
File: main.py Project: Dinamite1990/HiDDeN
def main():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    parser.add_argument('--data-dir',
                        '-d',
                        required=True,
                        type=str,
                        help='The directory where the data is stored.')
    parser.add_argument('--batch-size',
                        '-b',
                        required=True,
                        type=int,
                        help='The batch size.')
    parser.add_argument('--epochs',
                        '-e',
                        default=400,
                        type=int,
                        help='Number of epochs to run the simulation.')
    parser.add_argument('--name',
                        required=True,
                        type=str,
                        help='The name of the experiment.')

    parser.add_argument(
        '--runs-folder',
        '-sf',
        default=os.path.join('.', 'runs'),
        type=str,
        help='The root folder where data about experiments are stored.')
    parser.add_argument(
        '--size',
        '-s',
        default=128,
        type=int,
        help=
        'The size of the images (images are square so this is height and width).'
    )
    parser.add_argument('--message',
                        '-m',
                        default=30,
                        type=int,
                        help='The length in bits of the watermark.')
    parser.add_argument(
        '--continue-from-folder',
        '-c',
        default='',
        type=str,
        help=
        'The folder from where to continue a previous run. Leave blank if you are starting a new experiment.'
    )
    parser.add_argument(
        '--tensorboard',
        dest='tensorboard',
        action='store_true',
        help='If specified, adds a Tensorboard log. On by default.')
    parser.add_argument('--no-tensorboard',
                        dest='tensorboard',
                        action='store_false',
                        help='Use to switch off Tensorboard logging.')

    parser.add_argument(
        '--noise',
        nargs='*',
        action=NoiseArgParser,
        help=
        "Noise layers configuration. Use quotes when specifying configuration, e.g. 'cropout((0.55, 0.6), (0.55, 0.6))'"
    )

    parser.set_defaults(tensorboard=True)
    args = parser.parse_args()

    checkpoint = None
    if args.continue_from_folder != '':
        this_run_folder = args.continue_from_folder
        options_file = os.path.join(this_run_folder,
                                    'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(
            options_file)
        checkpoint = utils.load_last_checkpoint(
            os.path.join(this_run_folder, 'checkpoints'))
        train_options.start_epoch = checkpoint['epoch'] + 1
    else:
        start_epoch = 1
        train_options = TrainingOptions(
            batch_size=args.batch_size,
            number_of_epochs=args.epochs,
            train_folder=os.path.join(args.data_dir, 'train'),
            validation_folder=os.path.join(args.data_dir, 'val'),
            runs_folder=os.path.join('.', 'runs'),
            start_epoch=start_epoch,
            experiment_name=args.name)

        noise_config = args.noise if args.noise is not None else []
        hidden_config = HiDDenConfiguration(H=args.size,
                                            W=args.size,
                                            message_length=args.message,
                                            encoder_blocks=4,
                                            encoder_channels=64,
                                            decoder_blocks=7,
                                            decoder_channels=64,
                                            use_discriminator=True,
                                            use_vgg=False,
                                            discriminator_blocks=3,
                                            discriminator_channels=64,
                                            decoder_loss=1,
                                            encoder_loss=0.7,
                                            adversarial_loss=1e-3)

        this_run_folder = utils.create_folder_for_run(
            train_options.runs_folder, args.name)
        with open(os.path.join(this_run_folder, 'options-and-config.pickle'),
                  'wb+') as f:
            pickle.dump(train_options, f)
            pickle.dump(noise_config, f)
            pickle.dump(hidden_config, f)

    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        handlers=[
                            logging.FileHandler(
                                os.path.join(this_run_folder,
                                             f'{args.name}.log')),
                            logging.StreamHandler(sys.stdout)
                        ])
    noiser = Noiser(noise_config, device)

    if args.tensorboard:
        logging.info('Tensorboard is enabled. Creating logger.')
        from tensorboard_logger import TensorBoardLogger
        tb_logger = TensorBoardLogger(os.path.join(this_run_folder, 'tb-logs'))
    else:
        tb_logger = None

    model = Hidden(hidden_config, device, noiser, tb_logger)

    if args.continue_from_folder != '':
        # if we are continuing, we have to load the model params
        assert checkpoint is not None
        utils.model_from_checkpoint(model, checkpoint)

    logging.info('HiDDeN model: {}\n'.format(model.to_stirng()))
    logging.info('Model Configuration:\n')
    logging.info(pprint.pformat(vars(hidden_config)))
    logging.info('\nNoise configuration:\n')
    logging.info(pprint.pformat(str(noise_config)))
    logging.info('\nTraining options:\n')
    logging.info(pprint.pformat(vars(train_options)))

    train(model, device, hidden_config, train_options, this_run_folder,
          tb_logger)
Code Example #12
def main(options=None):
    args = get_args()
    if options is not None:
        args = utils.load_options(args, options)

    seed = 1  # Do NOT modify the seed. The captions have been generated from images generated from this seed.
    torch.cuda.manual_seed_all(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # -------------------------------- INSTANTIATE MAIN ACTORS ----------------------------- #

    # --------------- Create dataset ---------------- #
    print('Creating dataset', flush=True)
    image_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(128),  # Smaller edge will be matched to this number
        transforms.CenterCrop((128, 128)),
        transforms.ToTensor(),
        image_normalize,
    ])

    train_dataset = dataset.ImageAudioDataset(args.folder_dataset +
                                              args.name_dataset,
                                              split='train',
                                              random_sampling=True,
                                              transform=transform,
                                              loading_image=args.loading_image)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    val_dataset = dataset.ImageAudioDataset(args.folder_dataset +
                                            args.name_dataset,
                                            split='val',
                                            transform=transform,
                                            loading_image=args.loading_image)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    test_dataset = dataset.ImageAudioDataset(args.folder_dataset +
                                             args.name_dataset,
                                             split='test',
                                             transform=transform,
                                             loading_image=args.loading_image)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    # -------------- Create model --------------- #
    print('Creating model', flush=True)
    module = __import__('models')
    model_class = getattr(module, args.model)
    model = model_class(args)
    model = torch.nn.DataParallel(model).cuda()
    # Print model information
    utils.print_model_report(model)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Load model. Note: args.seed is reused here as a checkpoint specifier
    # ('EXPDIR' or an explicit path), unrelated to the RNG seed set above.
    resume_epoch = 0
    if args.seed:
        if args.seed == 'EXPDIR':
            if args.name_checkpoint == '':
                name = args.model + '_' + args.name_dataset
            else:
                name = args.name_checkpoint
            path_load = args.expdir + 'model_best_' + name + '.pth.tar'
        else:
            path_load = args.seed
        utils.load_from_checkpoint(model,
                                   path_load,
                                   submodels_load=args.submodels_load,
                                   optimizer=None)
        if args.resume:
            checkpoint = torch.load(path_load)
            resume_epoch = checkpoint['epoch']

    # --------------- Instantiate trainer --------------- #
    print('Instantiating trainer', flush=True)
    all_loaders = {
        'val': val_loader,
        'train': train_loader,
        'test': test_loader
    }
    trainer = Trainer(model,
                      optimizer,
                      all_loaders,
                      args,
                      resume_epoch=resume_epoch)

    # ------------------------- Others ----------------------- #
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    log_dir = os.path.join(
        args.results, 'runs',
        args.name_checkpoint + '_' + current_time + '_' + socket.gethostname())
    args.writer = SummaryWriter(log_dir=log_dir)

    # ----------------------------------- TRAIN ------------------------------------------ #
    if args.experiment:
        print("Running experiment", flush=True)
        experiments.experiment(args.experiment_name, trainer)
    elif args.evaluate:
        print("Performing evaluation epoch", flush=True)
        trainer.eval()
    elif args.generate_active_learning:
        print("Generating active learning samples", flush=True)
        active_learning.generate_active_learning(trainer)
    else:
        print("Beginning training", flush=True)
        trainer.train()
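Note that utils.load_options has a different shape in this project: it merges an options object into the already-parsed args instead of reading a pickle file. A hypothetical sketch, assuming options is a mapping of argument names to override values:

def load_options(args, options):
    # Overwrite attributes of the argparse.Namespace with the given overrides.
    for key, value in options.items():
        setattr(args, key, value)
    return args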
Code Example #13
File: test_model.py Project: dlshu/RS-GAN-v1
def main():
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    parser = argparse.ArgumentParser(description='Test trained models')
    parser.add_argument(
        '--options-file',
        '-o',
        default='options-and-config.pickle',
        type=str,
        help='The file where the simulation options are stored.')
    parser.add_argument('--checkpoint-file',
                        '-c',
                        required=True,
                        type=str,
                        help='Model checkpoint file')
    parser.add_argument('--batch-size',
                        '-b',
                        default=12,
                        type=int,
                        help='The batch size.')
    parser.add_argument('--source-image',
                        '-s',
                        required=True,
                        type=str,
                        help='The image to watermark')
    # Note: the defaults below are never used because both arguments are required.
    parser.add_argument('--source-text',
                        '-t',
                        required=True,
                        type=str,
                        help='The text to watermark',
                        default='data/val_captions.txt')
    parser.add_argument('--vocab-path',
                        '-v',
                        required=True,
                        type=str,
                        help='The path of the vocabulary',
                        default='data/vocab.pkl')

    # parser.add_argument('--times', '-t', default=10, type=int,
    #                     help='Number iterations (insert watermark->extract).')

    args = parser.parse_args()

    train_options, hidden_config, noise_config = utils.load_options(
        args.options_file)
    noiser = Noiser(noise_config, device)

    checkpoint = torch.load(args.checkpoint_file)
    hidden_net = Hidden(hidden_config, device, noiser, None)
    utils.model_from_checkpoint(hidden_net, checkpoint)

    image_pil = Image.open(args.source_image)
    image = randomCrop(np.array(image_pil), hidden_config.H, hidden_config.W)
    image_tensor = TF.to_tensor(image).to(device)
    image_tensor = image_tensor * 2 - 1  # transform from [0, 1] to [-1, 1]
    images = torch.stack([image_tensor for _ in range(args.batch_size)], 0)

    # for t in range(args.times):
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    with open(args.source_text) as f:
        captions = f.readlines()
        captions = random.sample(captions, args.batch_size)

    targets = []
    for i, caption in enumerate(captions):
        tokens = nltk.tokenize.word_tokenize(str(caption).lower())
        caption = []
        caption.append(vocab('<start>'))
        caption.extend([vocab(token) for token in tokens])
        caption.append(vocab('<end>'))
        target = torch.Tensor(caption)
        targets.append(target)
    targets.sort(key=lambda x: len(x), reverse=True)

    lengths = [len(cap) for cap in targets]
    captions = torch.zeros(len(targets), max(lengths)).long()

    for i, target in enumerate(targets):
        end = lengths[i]
        captions[i, :end] = target[:end]
    captions = captions.to(device)

    keys = np.random.permutation(512)
    ekeys = torch.Tensor(np.eye(512)[keys])
    dkeys = torch.Tensor(np.transpose(ekeys))
    ekeys = torch.stack([ekeys for _ in range(args.batch_size)]).to(device)
    dkeys = torch.stack([dkeys for _ in range(args.batch_size)]).to(device)
    #print(f'sizes: images-{len(images)}, ekeys-{len(ekeys)}, dkeys-{len(dkeys)}, captions-{len(captions)}, lengths-{lengths}')

    losses, (encoded_images, noised_images, decoded_messages, predicted_sents) = \
        hidden_net.validate_on_batch([images, ekeys, dkeys, captions, lengths])
    predicted_sents = predicted_sents.cpu().numpy()
    for i in range(args.batch_size):
        try:
            print("predict     : " +
                  ' '.join(vocab.idx2word[int(idx)] for idx in predicted_sents[i]))
            print("ground truth: " +
                  ' '.join(vocab.idx2word[int(idx)] for idx in captions[i]))
        except IndexError:
            print(f'batch element {i} is too short.')

    noise_images = images - encoded_images

    utils.save_images_with_noise(images.cpu(),
                                 encoded_images.cpu(),
                                 noise_images.cpu(),
                                 'test_%d' % i,
                                 '.',
                                 resize_to=(256, 256))
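randomCrop is used above but not defined. A plausible sketch that crops a random H x W window out of a NumPy image (semantics assumed from the call site):

import numpy as np

def randomCrop(image, height, width):
    # Assumes the image is at least height x width pixels.
    top = np.random.randint(0, image.shape[0] - height + 1)
    left = np.random.randint(0, image.shape[1] - width + 1)
    return image[top:top + height, left:left + width]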
Code Example #14
File: main.py Project: chenhsiu48/HiDDeN
def main():
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    parent_parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    subparsers = parent_parser.add_subparsers(dest='command', help='Sub-parser for commands')
    new_run_parser = subparsers.add_parser('new', help='starts a new run')
    new_run_parser.add_argument('--data-dir', '-d', required=True, type=str,
                                help='The directory where the data is stored.')
    new_run_parser.add_argument('--batch-size', '-b', default=30, type=int, help='The batch size.')
    new_run_parser.add_argument('--epochs', '-e', default=300, type=int, help='Number of epochs to run the simulation.')
    new_run_parser.add_argument('--name', required=True, type=str, help='The name of the experiment.')

    new_run_parser.add_argument('--size', '-s', default=128, type=int, help='The size of the images (images are square so this is height and width).')
    new_run_parser.add_argument('--in_channels', default=3, type=int, help='input channel size')
    new_run_parser.add_argument('--message', '-m', default=32, type=int, help='The length in bits of the watermark.')
    new_run_parser.add_argument('--ratio', default=0.2, type=float, help='ratio of dataset.')
    new_run_parser.add_argument('--continue-from-folder', '-c', default='', type=str,
                                help='The folder from where to continue a previous run. Leave blank if you are starting a new experiment.')
    new_run_parser.add_argument('--enable-fp16', dest='enable_fp16', action='store_true',
                                help='Enable mixed-precision training.')

    new_run_parser.add_argument('--noise', nargs='*', action=NoiseArgParser,
                                help="Noise layers configuration. Use quotes when specifying configuration, e.g. 'cropout((0.55, 0.6), (0.55, 0.6))'")

    new_run_parser.set_defaults(enable_fp16=False)

    continue_parser = subparsers.add_parser('continue', help='Continue a previous run')
    continue_parser.add_argument('--folder', '-f', required=True, type=str,
                                 help='Continue from the last checkpoint in this folder.')
    continue_parser.add_argument('--data-dir', '-d', required=False, type=str,
                                 help='The directory where the data is stored. Specify a value only if you want to override the previous value.')
    continue_parser.add_argument('--epochs', '-e', required=False, type=int,
                                help='Number of epochs to run the simulation. Specify a value only if you want to override the previous value.')

    args = parent_parser.parse_args()
    checkpoint = None
    loaded_checkpoint_file_name = None

    if args.command == 'continue':
        options_file = os.path.join(args.folder, 'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(options_file)
        checkpoint, loaded_checkpoint_file_name = utils.load_last_checkpoint(os.path.join(args.folder, 'checkpoints'))
        train_options.start_epoch = checkpoint['epoch'] + 1
        train_options.best_epoch = checkpoint['best_epoch']
        train_options.best_cond = checkpoint['best_cond']
        if args.epochs is not None:
            if train_options.start_epoch < args.epochs:
                train_options.number_of_epochs = args.epochs
            else:
                print(f'Command line specifies number of epochs = {args.epochs}, but folder={args.folder} '
                      f'already contains a checkpoint for epoch = {train_options.start_epoch}.')
                exit(1)

    else:
        assert args.command == 'new'
        start_epoch = 1
        train_options = TrainingOptions(
            batch_size=args.batch_size,
            number_of_epochs=args.epochs, data_ratio=args.ratio,
            data_dir=args.data_dir,
            runs_folder='./runs', tb_logger_folder='./logger',
            start_epoch=start_epoch, experiment_name=f'{args.name}_r{int(100*args.ratio):03d}b{args.size}ch{args.in_channels}m{args.message}')

        noise_config = args.noise if args.noise is not None else []
        hidden_config = HiDDenConfiguration(H=args.size, W=args.size,input_channels=args.in_channels,
                                            message_length=args.message,
                                            encoder_blocks=4, encoder_channels=64,
                                            decoder_blocks=7, decoder_channels=64,
                                            use_discriminator=True,
                                            use_vgg=False,
                                            discriminator_blocks=3, discriminator_channels=64,
                                            decoder_loss=1,
                                            encoder_loss=0.7,
                                            adversarial_loss=1e-3,
                                            enable_fp16=args.enable_fp16
                                            )

        utils.create_folder_for_run(train_options)
        with open(train_options.options_file, 'wb+') as f:
            pickle.dump(train_options, f)
            pickle.dump(noise_config, f)
            pickle.dump(hidden_config, f)


    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        handlers=[
                            logging.FileHandler(os.path.join(train_options.this_run_folder, f'{train_options.experiment_name}.log')),
                            logging.StreamHandler(sys.stdout)
                        ])
    logging.info(f'Tensorboard is enabled. Creating logger at {train_options.tb_logger_dir}')
    tb_logger = TensorBoardLogger(train_options.tb_logger_dir)

    noiser = Noiser(noise_config, device)
    model = Hidden(hidden_config, device, noiser, tb_logger)

    if args.command == 'continue':
        # if we are continuing, we have to load the model params
        assert checkpoint is not None
        logging.info(f'Loading checkpoint from file {loaded_checkpoint_file_name}')
        utils.model_from_checkpoint(model, checkpoint)

    logging.info('HiDDeN model: {}\n'.format(model.to_stirng()))
    logging.info('Model Configuration:\n')
    logging.info(pprint.pformat(vars(hidden_config)))
    logging.info('\nNoise configuration:\n')
    logging.info(pprint.pformat(str(noise_config)))
    logging.info('\nTraining options:\n')
    logging.info(pprint.pformat(vars(train_options)))

    train(model, device, hidden_config, train_options, train_options.this_run_folder, tb_logger)
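This variant also reads extra bookkeeping keys ('best_epoch', 'best_cond') back from the checkpoint. The saving side is not shown anywhere on this page; a sketch of what a matching save_checkpoint might look like (the function name, file naming and weight layout are assumptions):

import os
import torch

def save_checkpoint(model_state, epoch, checkpoint_folder,
                    best_epoch=None, best_cond=None):
    checkpoint = {
        'epoch': epoch,
        'best_epoch': best_epoch,
        'best_cond': best_cond,
        'model': model_state,  # e.g. the state_dicts of the HiDDeN sub-networks
    }
    torch.save(checkpoint,
               os.path.join(checkpoint_folder, f'checkpoint-{epoch:03d}.pyt'))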
Code Example #15
def main():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    parser = argparse.ArgumentParser(description='Training of HiDDeN nets')
    parser.add_argument('--hostname',
                        default=socket.gethostname(),
                        help='the host name of the running server')
    # parser.add_argument('--size', '-s', default=128, type=int, help='The size of the images (images are square so this is height and width).')
    parser.add_argument('--data-dir',
                        '-d',
                        required=True,
                        type=str,
                        help='The directory where the data is stored.')
    parser.add_argument(
        '--runs_root',
        '-r',
        default=os.path.join('.', 'experiments'),
        type=str,
        help='The root folder where data about experiments are stored.')
    parser.add_argument('--batch-size',
                        '-b',
                        default=1,
                        type=int,
                        help='Validation batch size.')

    args = parser.parse_args()

    if args.hostname == 'ee898-System-Product-Name':
        args.data_dir = '/home/ee898/Desktop/chaoning/ImageNet'
        args.hostname = 'ee898'
    elif args.hostname == 'DL178':
        args.data_dir = '/media/user/SSD1TB-2/ImageNet'
    else:
        args.data_dir = '/workspace/data_local/imagenet_pytorch'
    assert args.data_dir

    print_each = 25

    completed_runs = [
        o for o in os.listdir(args.runs_root)
        if os.path.isdir(os.path.join(args.runs_root, o))
        and o != 'no-noise-defaults'
    ]

    print(completed_runs)

    write_csv_header = True
    current_run = args.runs_root
    print(f'Run folder: {current_run}')
    options_file = os.path.join(current_run, 'options-and-config.pickle')
    train_options, hidden_config, noise_config = utils.load_options(
        options_file)
    train_options.train_folder = os.path.join(args.data_dir, 'val')
    train_options.validation_folder = os.path.join(args.data_dir, 'val')
    train_options.batch_size = args.batch_size
    checkpoint, chpt_file_name = utils.load_last_checkpoint(
        os.path.join(current_run, 'checkpoints'))
    print(f'Loaded checkpoint from file {chpt_file_name}')

    noiser = Noiser(noise_config, device, 'jpeg')
    model = Hidden(hidden_config, device, noiser, tb_logger=None)
    utils.model_from_checkpoint(model, checkpoint)

    print('Model loaded successfully. Starting validation run...')
    _, val_data = utils.get_data_loaders(hidden_config, train_options)
    file_count = len(val_data.dataset)
    if file_count % train_options.batch_size == 0:
        steps_in_epoch = file_count // train_options.batch_size
    else:
        steps_in_epoch = file_count // train_options.batch_size + 1

    with torch.no_grad():
        noises = ['webp_10', 'webp_25', 'webp_50', 'webp_75', 'webp_90']
        for noise in noises:
            losses_accu = {}
            step = 0
            for image, _ in val_data:
                step += 1
                image = image.to(device)
                message = torch.Tensor(
                    np.random.choice(
                        [0, 1], (image.shape[0],
                                 hidden_config.message_length))).to(device)
                losses, (
                    encoded_images, noised_images,
                    decoded_messages) = model.validate_on_batch_specific_noise(
                        [image, message], noise=noise)
                if not losses_accu:  # dict is empty, initialize
                    for name in losses:
                        losses_accu[name] = AverageMeter()
                for name, loss in losses.items():
                    losses_accu[name].update(loss)
                if step % print_each == 0 or step == steps_in_epoch:
                    print(f'Step {step}/{steps_in_epoch}')
                    utils.print_progress(losses_accu)
                    print('-' * 40)

            # utils.print_progress(losses_accu)
            write_validation_loss(os.path.join(args.runs_root,
                                               'validation_run.csv'),
                                  losses_accu,
                                  noise,
                                  checkpoint['epoch'],
                                  write_header=write_csv_header)
            write_csv_header = False
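AverageMeter is used above without a definition; the conventional helper from the PyTorch examples tracks a running average. A minimal version with the interface this loop relies on (update plus a readable average):

class AverageMeter:
    """Tracks the running average of a scalar value."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / self.count if self.count else 0.0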
Code Example #16
File: service.py Project: luizcarloscf/mock-cameras
def main():

    service_name = "CameraGateway"
    log = Logger(service_name)
    options = load_options()
    camera = CameraGateway(fps=options["fps"])

    publish_channel = Channel(options['broker_uri'])
    rpc_channel = Channel(options['broker_uri'])
    server = ServiceProvider(rpc_channel)
    log_interceptor = LogInterceptor()
    server.add_interceptor(log_interceptor)

    server.delegate(topic=service_name + ".*.GetConfig",
                    request_type=FieldSelector,
                    reply_type=CameraConfig,
                    function=camera.get_config)

    server.delegate(topic=service_name + ".*.SetConfig",
                    request_type=CameraConfig,
                    reply_type=Empty,
                    function=camera.set_config)

    exporter = create_exporter(service_name=service_name,
                               uri=options["zipkin_uri"])

    while True:

        # iterate through videos listed
        for video in options['videos']:

            # person and gesture ids identifying this video sequence
            person_id = video['person_id']
            gesture_id = video['gesture_id']

            # build the path of one video file per camera
            video_files = {
                cam_id: os.path.join(
                    options['folder'],
                    'p{:03d}g{:02d}c{:02d}.mp4'.format(person_id, gesture_id,
                                                       cam_id))
                for cam_id in options["cameras_id"]
            }

            for iteration in range(video['iterations']):

                info = {
                    "person": person_id,
                    "gesture": gesture_id,
                    "iteration": iteration
                }
                log.info('{}', str(info).replace("'", '"'))

                # object that reads frames from multiple video files at once
                video_loader = FramesLoader(video_files)

                # iterate through all samples on video
                while True:

                    time_initial = time.time()

                    # serve any pending RPC requests without blocking
                    try:
                        message = rpc_channel.consume(timeout=0)
                        if server.should_serve(message):
                            server.serve(message)
                    except socket.timeout:
                        pass

                    frame_id, frames = video_loader.read()

                    for cam in sorted(frames.keys()):
                        tracer = Tracer(exporter)
                        span = tracer.start_span(name='frame')
                        pb_image = to_pb_image(frames[cam])
                        msg = Message(content=pb_image)
                        msg.inject_tracing(span)
                        topic = 'CameraGateway.{}.Frame'.format(cam)
                        publish_channel.publish(msg, topic=topic)
                        tracer.end_span()

                    took_ms = (time.time() - time_initial) * 1000

                    dt = (1 / camera.fps) - (took_ms / 1000)
                    if dt > 0:
                        time.sleep(dt)
                        info = {
                            "sample": frame_id,
                            "took_ms": took_ms,
                            "wait_ms": dt * 1000
                        }
                        log.info('{}', str(info).replace("'", '"'))

                    if frame_id >= (video_loader.num_samples - 1):
                        video_loader.release()
                        del video_loader
                        gc.collect()
                        break

        if options['loop'] is False:
            break
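to_pb_image is not shown. In the is-msgs ecosystem this conversion is usually a JPEG encode into the Image protobuf's data field; a sketch under that assumption:

import cv2
from is_msgs.image_pb2 import Image

def to_pb_image(frame, encode_format='.jpeg', quality=80):
    # Encode an OpenCV BGR frame into an is-msgs Image protobuf.
    ok, buffer = cv2.imencode(encode_format, frame,
                              [cv2.IMWRITE_JPEG_QUALITY, quality])
    return Image(data=buffer.tobytes())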
Code Example #17
def main():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    parent_parser = argparse.ArgumentParser(
        description='Training of HiDDeN nets')
    subparsers = parent_parser.add_subparsers(dest='command',
                                              help='Sub-parser for commands')
    new_run_parser = subparsers.add_parser('new', help='starts a new run')
    new_run_parser.add_argument('--data-dir',
                                '-d',
                                required=True,
                                type=str,
                                help='The directory where the data is stored.')
    new_run_parser.add_argument('--batch-size',
                                '-b',
                                required=True,
                                type=int,
                                help='The batch size.')
    new_run_parser.add_argument('--epochs',
                                '-e',
                                default=300,
                                type=int,
                                help='Number of epochs to run the simulation.')
    new_run_parser.add_argument('--name',
                                required=True,
                                type=str,
                                help='The name of the experiment.')
    new_run_parser.add_argument('--adv_loss',
                                default=0,
                                required=False,
                                type=float,
                                help='Coefficient of the adversarial loss.')
    new_run_parser.add_argument('--residual',
                                default=0,
                                required=False,
                                type=int,
                                help='Whether to use residual connections (0 or 1).')
    new_run_parser.add_argument('--video_dataset',
                                default=0,
                                required=False,
                                type=int,
                                help='Whether to train on the video dataset (0 or 1).')
    new_run_parser.add_argument(
        '--save-dir',
        '-sd',
        default='runs',
        type=str,
        help='The directory where run results are stored.')

    new_run_parser.add_argument(
        '--size',
        '-s',
        default=128,
        type=int,
        help=
        'The size of the images (images are square so this is height and width).'
    )
    new_run_parser.add_argument('--message',
                                '-m',
                                default=30,
                                type=int,
                                help='The length in bits of the watermark.')
    new_run_parser.add_argument(
        '--continue-from-folder',
        '-c',
        default='',
        type=str,
        help=
        'The folder from where to continue a previous run. Leave blank if you are starting a new experiment.'
    )
    # parser.add_argument('--tensorboard', dest='tensorboard', action='store_true',
    #                     help='If specified, use adds a Tensorboard log. On by default')
    new_run_parser.add_argument('--tensorboard',
                                action='store_true',
                                help='Use to switch on Tensorboard logging.')
    new_run_parser.add_argument('--enable-fp16',
                                dest='enable_fp16',
                                action='store_true',
                                help='Enable mixed-precision training.')

    new_run_parser.add_argument(
        '--noise',
        nargs='*',
        action=NoiseArgParser,
        help=
        "Noise layers configuration. Use quotes when specifying configuration, e.g. 'cropout((0.55, 0.6), (0.55, 0.6))'"
    )
    new_run_parser.add_argument('--hostname',
                                default=socket.gethostname(),
                                help='The host name of the machine running the training.')
    new_run_parser.add_argument(
        '--cover-dependent',
        default=1,
        required=False,
        type=int,
        help='Whether to use the cover-dependent architecture (0 or 1).')
    new_run_parser.add_argument('--jpeg_type',
                                '-j',
                                required=False,
                                type=str,
                                default='jpeg',
                                help='JPEG type used in the combined2 noise.')

    new_run_parser.set_defaults(tensorboard=False)
    new_run_parser.set_defaults(enable_fp16=False)

    continue_parser = subparsers.add_parser('continue',
                                            help='Continue a previous run')
    continue_parser.add_argument(
        '--folder',
        '-f',
        required=True,
        type=str,
        help='Continue from the last checkpoint in this folder.')
    continue_parser.add_argument(
        '--data-dir',
        '-d',
        required=False,
        type=str,
        help=
        'The directory where the data is stored. Specify a value only if you want to override the previous value.'
    )
    continue_parser.add_argument(
        '--epochs',
        '-e',
        required=False,
        type=int,
        help=
        'Number of epochs to run the simulation. Specify a value only if you want to override the previous value.'
    )

    # continue_parser.add_argument('--tensorboard', action='store_true',
    #                             help='Override the previous setting regarding tensorboard logging.')

    # Fix the random seed for reproducible debugging runs
    seed = 123
    torch.manual_seed(seed)
    np.random.seed(seed)

    args = parent_parser.parse_args()
    checkpoint = None
    loaded_checkpoint_file_name = None
    # debug output; cover_dependent is only defined for the 'new' command
    print(getattr(args, 'cover_dependent', None))

    # Host-specific dataset locations. Note that this overrides any --data-dir
    # passed on the command line. getattr is used because --hostname and
    # --video_dataset only exist on the 'new' sub-parser, so plain attribute
    # access would raise AttributeError for the 'continue' command.
    hostname = getattr(args, 'hostname', socket.gethostname())
    if hostname == 'ee898-System-Product-Name':
        args.data_dir = '/home/ee898/Desktop/chaoning/ImageNet'
        args.hostname = 'ee898'
    elif hostname == 'DL178':
        args.data_dir = '/media/user/SSD1TB-2/ImageNet'
    elif getattr(args, 'video_dataset', 0):
        args.data_dir = './oops_dataset/oops_video'
    else:
        args.data_dir = '/workspace/data_local/imagenet_pytorch'
    assert args.data_dir

    if args.command == 'continue':
        this_run_folder = args.folder
        options_file = os.path.join(this_run_folder,
                                    'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(
            options_file)
        checkpoint, loaded_checkpoint_file_name = utils.load_last_checkpoint(
            os.path.join(this_run_folder, 'checkpoints'))
        train_options.start_epoch = checkpoint['epoch'] + 1
        if args.data_dir is not None:
            train_options.train_folder = os.path.join(args.data_dir, 'train')
            train_options.validation_folder = os.path.join(
                args.data_dir, 'val')
        if args.epochs is not None:
            if train_options.start_epoch < args.epochs:
                train_options.number_of_epochs = args.epochs
            else:
                print(
                    f'Command line specifies number of epochs = {args.epochs}, but folder={args.folder} '
                    f'already contains a checkpoint for epoch = {train_options.start_epoch}.'
                )
                exit(1)

    else:
        assert args.command == 'new'
        start_epoch = 1
        train_options = TrainingOptions(
            batch_size=args.batch_size,
            number_of_epochs=args.epochs,
            train_folder=os.path.join(args.data_dir, 'train'),
            validation_folder=os.path.join(args.data_dir, 'val'),
            runs_folder=os.path.join('.', args.save_dir),
            start_epoch=start_epoch,
            experiment_name=args.name,
            video_dataset=args.video_dataset)

        noise_config = args.noise if args.noise is not None else []
        hidden_config = HiDDenConfiguration(
            H=args.size,
            W=args.size,
            message_length=args.message,
            encoder_blocks=4,
            encoder_channels=64,
            decoder_blocks=7,
            decoder_channels=64,
            use_discriminator=True,
            use_vgg=False,
            discriminator_blocks=3,
            discriminator_channels=64,
            decoder_loss=1,
            encoder_loss=0.7,
            adversarial_loss=args.adv_loss,
            cover_dependent=args.cover_dependent,
            residual=args.residual,
            enable_fp16=args.enable_fp16)

        this_run_folder = utils.create_folder_for_run(
            train_options.runs_folder, args.name)
        with open(os.path.join(this_run_folder, 'options-and-config.pickle'),
                  'wb+') as f:
            pickle.dump(train_options, f)
            pickle.dump(noise_config, f)
            pickle.dump(hidden_config, f)

    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        handlers=[
                            logging.FileHandler(
                                os.path.join(
                                    this_run_folder,
                                    f'{train_options.experiment_name}.log')),
                            logging.StreamHandler(sys.stdout)
                        ])
    if (args.command == 'new' and args.tensorboard) or \
            (args.command == 'continue' and os.path.isdir(os.path.join(this_run_folder, 'tb-logs'))):
        logging.info('Tensorboard is enabled. Creating logger.')
        from tensorboard_logger import TensorBoardLogger
        tb_logger = TensorBoardLogger(os.path.join(this_run_folder, 'tb-logs'))
    else:
        tb_logger = None

    noiser = Noiser(noise_config, device, args.jpeg_type)
    model = Hidden(hidden_config, device, noiser, tb_logger)

    if args.command == 'continue':
        # if we are continuing, we have to load the model params
        assert checkpoint is not None
        logging.info(
            f'Loading checkpoint from file {loaded_checkpoint_file_name}')
        utils.model_from_checkpoint(model, checkpoint)

    # note: to_stirng is the (misspelled) method name defined on the Hidden class
    logging.info('HiDDeN model: {}\n'.format(model.to_stirng()))
    logging.info('Model Configuration:\n')
    logging.info(pprint.pformat(vars(hidden_config)))
    logging.info('\nNoise configuration:\n')
    logging.info(pprint.pformat(str(noise_config)))
    logging.info('\nTraining train_options:\n')
    logging.info(pprint.pformat(vars(train_options)))

    # train(model, device, hidden_config, train_options, this_run_folder, tb_logger)
    # train_other_noises(model, device, hidden_config, train_options, this_run_folder, tb_logger)

    # Map the first configured noise layer to the short name expected by
    # train_own_noise. noise_config is used rather than args.noise because
    # --noise only exists on the 'new' sub-parser.
    noise_names = {
        'WebP()': 'webp',
        'JpegCompression2000()': 'jpeg2000',
        'MPEG4()': 'mpeg4',
        'H264()': 'h264',
        'XVID()': 'xvid',
        'DiffQFJpegCompression2()': 'diff_qf_jpeg2',
        'DiffCorruptions()': 'diff_corruptions',
    }
    first_noise = str(noise_config[0]) if noise_config else ''
    noise = noise_names.get(first_noise, 'jpeg')
    train_own_noise(model, device, hidden_config, train_options,
                    this_run_folder, tb_logger, noise)
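
The three configuration objects are pickled above in the order train_options, noise_config, hidden_config, so utils.load_options has to unpickle them in that same order. Below is a minimal sketch of such a loader, consistent with the dump above and with the (train_options, hidden_config, noise_config) return order used at the call sites in these examples; this is an assumption about the project's utils module, not its actual code.

import pickle

def load_options(options_file_name):
    with open(options_file_name, 'rb') as f:
        train_options = pickle.load(f)  # dumped first
        noise_config = pickle.load(f)   # dumped second
        hidden_config = pickle.load(f)  # dumped third
    return train_options, hidden_config, noise_config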
Code Example #18
from is_wire.core import Channel, Message, Subscription, Logger
from is_wire.core import Tracer, ZipkinExporter, BackgroundThreadTransport
from is_wire.core.utils import now
from utils import load_options
from heatmap import SkeletonsHeatmap

from builtins import super

class MyChannel(Channel):
    """Channel whose consume blocks until an absolute deadline rather than for a fixed timeout."""

    def consume_until(self, deadline):
        timeout = max([deadline - now(), 0.0])
        return super().consume(timeout=timeout)

service_name = 'Skeletons.Heatmap'
log = Logger(name=service_name)
ops = load_options()

channel = MyChannel(ops.broker_uri)
subscription = Subscription(channel)
exporter = ZipkinExporter(
    service_name=service_name,
    host_name=ops.zipkin_host,
    port=ops.zipkin_port,
    transport=BackgroundThreadTransport(max_batch_size=20),
)

subscription.subscribe('Skeletons.Localization')

sks_hm = SkeletonsHeatmap(ops)

period = ops.period_ms / 1000.0  # publication period in seconds
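
This example is cut off before its main loop, but consume_until and period point at a fixed-rate drain-and-publish pattern. A hedged sketch of how such a loop is typically written with is_wire follows; the body of the inner loop is an assumption.

import socket

deadline = now()
while True:
    deadline += period
    # drain every message that arrives before the next deadline
    while True:
        try:
            message = channel.consume_until(deadline=deadline)
            # ... feed the localization message to sks_hm here ...
        except socket.timeout:
            # deadline reached: render and publish the current heatmap
            break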
Code Example #19
def main():
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    parser = argparse.ArgumentParser(description='Test trained models')
    parser.add_argument(
        '--options-file',
        '-o',
        default='options-and-config.pickle',
        type=str,
        help='The file where the simulation options are stored.')
    parser.add_argument('--checkpoint-file',
                        '-c',
                        required=True,
                        type=str,
                        help='Model checkpoint file')
    parser.add_argument('--batch-size',
                        '-b',
                        default=12,
                        type=int,
                        help='The batch size.')
    parser.add_argument('--source-image',
                        '-s',
                        required=True,
                        type=str,
                        help='The image to watermark')
    # parser.add_argument('--times', '-t', default=10, type=int,
    #                     help='Number iterations (insert watermark->extract).')

    args = parser.parse_args()

    train_options, hidden_config, noise_config = utils.load_options(
        args.options_file)
    noiser = Noiser(noise_config, device)

    checkpoint = torch.load(args.checkpoint_file)
    hidden_net = Hidden(hidden_config, device, noiser, None)
    utils.model_from_checkpoint(hidden_net, checkpoint)

    image_pil = Image.open(args.source_image)
    image = randomCrop(np.array(image_pil), hidden_config.H, hidden_config.W)
    image_tensor = TF.to_tensor(image).to(device)
    image_tensor = image_tensor * 2 - 1  # transform from [0, 1] to [-1, 1]
    image_tensor.unsqueeze_(0)

    # for t in range(args.times):
    message = torch.Tensor(
        np.random.choice(
            [0, 1],
            (image_tensor.shape[0], hidden_config.message_length))).to(device)
    losses, (encoded_images, noised_images,
             decoded_messages) = hidden_net.validate_on_batch(
                 [image_tensor, message])
    decoded_rounded = decoded_messages.detach().cpu().numpy().round().clip(
        0, 1)
    message_detached = message.detach().cpu().numpy()
    print('original: {}'.format(message_detached))
    print('decoded : {}'.format(decoded_rounded))
    # mean absolute difference between the bit vectors = bit error rate
    print('error   : {:.3f}'.format(
        np.mean(np.abs(decoded_rounded - message_detached))))
    utils.save_images(image_tensor.cpu(),
                      encoded_images.cpu(),
                      'test',
                      '.',
                      resize_to=(256, 256))
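
randomCrop is called above but not defined in this snippet. Below is a minimal implementation consistent with its usage (crop a height x width window at a random position from a NumPy image), offered as an assumption rather than the original helper.

import numpy as np

def randomCrop(img, height, width):
    # img is an H x W x C array at least as large as the requested crop
    assert img.shape[0] >= height and img.shape[1] >= width
    y = np.random.randint(0, img.shape[0] - height + 1)
    x = np.random.randint(0, img.shape[1] - width + 1)
    return img[y:y + height, x:x + width]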