Example #1
def morph(args):
    # Context
    extension_module = "cudnn"
    ctx = get_extension_context(extension_module, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Args
    latent = args.latent
    maps = args.maps
    batch_size = args.batch_size
    image_size = args.image_size
    n_classes = args.n_classes
    not_sn = args.not_sn
    threshold = args.truncation_threshold

    # Model
    nn.load_parameters(args.model_load_path)
    z = nn.Variable([batch_size, latent])
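    # Interpolation coefficients: alpha is ramped in the morph loop below and
    # beta = 1 - alpha, so the two class conditions always sum to one.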
    alpha = nn.Variable.from_numpy_array(np.zeros([1, 1]))
    beta = (nn.Variable.from_numpy_array(np.ones([1, 1])) - alpha)
    y_fake_a = nn.Variable([batch_size])

    y_fake_b = nn.Variable([batch_size])
    x_fake = generator(z, [y_fake_a, y_fake_b],
                       maps=maps,
                       n_classes=n_classes,
                       test=True,
                       sn=not_sn,
                       coefs=[alpha, beta]).apply(persistent=True)
    b, c, h, w = x_fake.shape

    # Monitor
    monitor = Monitor(args.monitor_path)
    name = "Morphed Image {} {}".format(args.from_class_id, args.to_class_id)
    monitor_image = MonitorImage(name,
                                 monitor,
                                 interval=1,
                                 num_images=1,
                                 normalize_method=normalize_method)

    # Morph
    images = []
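    # Draw a single latent and keep it fixed; only the class mix changes below.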
    z_data = resample(batch_size, latent, threshold)
    z.d = z_data
    for i in range(args.n_morphs):
        alpha.d = 1.0 * i / args.n_morphs
        y_fake_a.d = generate_one_class(args.from_class_id, batch_size)
        y_fake_b.d = generate_one_class(args.to_class_id, batch_size)
        x_fake.forward(clear_buffer=True)
        monitor_image.add(i, x_fake.d)
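
Example #1 morphs between two classes by conditioning the generator on a convex
combination of two class labels: alpha ramps from 0 to 1 across n_morphs steps
and beta = 1 - alpha. A minimal numpy sketch of that blend, assuming the coefs
simply weight the corresponding class embeddings (embed_a and embed_b are
hypothetical stand-ins for the generator's internal embeddings):

import numpy as np

n_morphs = 8
embed_a = np.random.randn(128)  # hypothetical embedding of from_class_id
embed_b = np.random.randn(128)  # hypothetical embedding of to_class_id
for i in range(n_morphs):
    alpha = i / n_morphs        # same schedule as alpha.d in morph()
    beta = 1.0 - alpha
    blended = alpha * embed_a + beta * embed_b  # convex combination of classes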
Example #2
def generate(args):
    # Context
    extension_module = "cudnn"
    ctx = get_extension_context(extension_module, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Args
    latent = args.latent
    maps = args.maps
    batch_size = args.batch_size
    image_size = args.image_size
    n_classes = args.n_classes
    not_sn = args.not_sn
    threshold = args.truncation_threshold

    # Model
    nn.load_parameters(args.model_load_path)
    z = nn.Variable([batch_size, latent])
    y_fake = nn.Variable([batch_size])
    x_fake = generator(z, y_fake, maps=maps, n_classes=n_classes, test=True, sn=not_sn)\
        .apply(persistent=True)

    # Generate All
    if args.generate_all:
        # Monitor
        monitor = Monitor(args.monitor_path)
        name = "Generated Image Tile All"
        monitor_image = MonitorImageTile(name,
                                         monitor,
                                         interval=1,
                                         num_images=args.batch_size,
                                         normalize_method=normalize_method)

        # Generate images for all classes
        for class_id in range(args.n_classes):
            # Generate
            z_data = resample(batch_size, latent, threshold)
            y_data = generate_one_class(class_id, batch_size)

            z.d = z_data
            y_fake.d = y_data
            x_fake.forward(clear_buffer=True)
            monitor_image.add(class_id, x_fake.d)
        return

    # Generate Individually
    monitor = Monitor(args.monitor_path)
    name = "Generated Image Tile {}".format(
        args.class_id) if args.class_id != -1 else "Generated Image Tile"
    monitor_image_tile = MonitorImageTile(name,
                                          monitor,
                                          interval=1,
                                          num_images=args.batch_size,
                                          normalize_method=normalize_method)
    name = "Generated Image {}".format(
        args.class_id) if args.class_id != -1 else "Generated Image"
    monitor_image = MonitorImage(name,
                                 monitor,
                                 interval=1,
                                 num_images=args.batch_size,
                                 normalize_method=normalize_method)
    z_data = resample(batch_size, latent, threshold)
    y_data = generate_random_class(n_classes, batch_size) if args.class_id == -1 else \
        generate_one_class(args.class_id, batch_size)
    z.d = z_data
    y_fake.d = y_data
    x_fake.forward(clear_buffer=True)
    monitor_image.add(0, x_fake.d)
    monitor_image_tile.add(0, x_fake.d)
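
Both examples draw latents with resample(batch_size, latent, threshold), where
threshold is args.truncation_threshold. The helper itself is not shown here;
under the usual reading of truncation for GAN sampling, a plausible stand-in
redraws every latent component whose magnitude exceeds the threshold:

import numpy as np

def truncated_sample(batch_size, latent, threshold, rng=np.random):
    # Draw z ~ N(0, I), then redraw components until all satisfy |z| <= threshold.
    z = rng.randn(batch_size, latent)
    mask = np.abs(z) > threshold
    while mask.any():
        z[mask] = rng.randn(int(mask.sum()))
        mask = np.abs(z) > threshold
    return z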
Example #3
                # Either get the exact number of frames from the wav file and
                # upsample the emotion labels to match, OR just tweak
                # OUT_FRAMERATE until it works...
                #
                # train_audio002_sent10 has 644 frames according to merlin
                # (TODO: where can we get this number from directly?), while the
                # emotion labels have 652 frames after being resampled from the
                # input to the output frame rate. Srikanth said the number of
                # frames should be within 5 of 644.
                #
                # Using sox to get the sampling rate and the number of samples
                # gives a more accurate figure, though still not the same one:
                #   number_of_samples / sampling_rate * desired_frame_rate
                #   = 155610 * (1 / 48000) * 200
                #   = 648.375
                # NB!
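                # A stdlib alternative to sox for the sample count and rate
                # (sketch only; `wav_path` is hypothetical and not defined in
                # this snippet):
                #     import wave
                #     with wave.open(wav_path, 'rb') as w:
                #         n_samples = w.getnframes()   # e.g. 155610
                #         rate = w.getframerate()      # e.g. 48000
                #     expected = n_samples / rate * OUT_FRAMERATE  # 648.375 at 200 fps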

                #upsample
                sent_data = resample(sent_data, IN_FRAMERATE,
                                     OUT_FRAMERATE)  #returns numpy.ndarray
                # except:
                #     raise ValueError(tf, sent_timestamps, len(data), i, start_time, end_time, 'start frame', start_frame, end_frame, sent_data)

                sent_data = [str(float(num)) for num in sent_data]

                #create filename
                #format tests_audio002_sent1_16bit.lab
                if inner_folder == 'test':
                    renamed_inner_folder = 'tests'  #as folder is called 'test' but files have name 'tests'
                else:
                    renamed_inner_folder = inner_folder

                file_name = renamed_inner_folder + '_audio' + dialog_num + '_sent' + str(
                    i) + '_16bit.' + emot_dim
                full_path_to_file = OUT_DIR + emot_dim + '/' + file_name
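
In this example, resample converts a 1-D emotion-label track from IN_FRAMERATE
to OUT_FRAMERATE (unlike the latent resample in the other examples). A minimal
sketch of such a converter using linear interpolation, assuming uniformly
spaced frames (resample_track is a hypothetical stand-in, not the project's
helper):

import numpy as np

def resample_track(data, in_rate, out_rate):
    # Interpolate a track sampled at in_rate fps onto an out_rate fps grid.
    data = np.asarray(data, dtype=float)
    n_out = int(round(len(data) / in_rate * out_rate))
    t_in = np.arange(len(data)) / in_rate
    t_out = np.arange(n_out) / out_rate
    return np.interp(t_out, t_in, data)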
Example #4
def match(args):
    # Context
    extension_module = "cudnn"
    ctx = get_extension_context(extension_module, device_id=args.device_id,
                                type_config=args.type_config)
    nn.set_default_context(ctx)

    # Args
    latent = args.latent
    maps = args.maps
    batch_size = 1
    image_size = args.image_size
    n_classes = args.n_classes
    not_sn = args.not_sn
    threshold = args.truncation_threshold

    # Model (SAGAN)
    nn.load_parameters(args.model_load_path)
    z = nn.Variable([batch_size, latent])
    y_fake = nn.Variable([batch_size])
    x_fake = generator(z, y_fake, maps=maps, n_classes=n_classes, test=True, sn=not_sn)\
        .apply(persistent=True)

    # Model (Inception model) from nnp file
    nnp = NnpLoader(args.nnp_inception_model_load_path)
    x, h = get_input_and_output(nnp, batch_size, args.variable_name)

    # DataIterator for a given class_id
    di = data_iterator_imagenet(args.train_dir, args.dirname_to_label_path,
                                batch_size=batch_size, n_classes=args.n_classes,
                                noise=False,
                                class_id=args.class_id)

    # Monitor
    monitor = Monitor(args.monitor_path)
    name = "Matched Image {}".format(args.class_id)
    monitor_image = MonitorImage(name, monitor, interval=1,
                                 num_images=batch_size,
                                 normalize_method=lambda x: (x + 1.) / 2. * 255.)
    name = "Matched Image Tile {}".format(args.class_id)
    monitor_image_tile = MonitorImageTile(name, monitor, interval=1,
                                          num_images=batch_size + args.top_n,
                                          normalize_method=lambda x: (x + 1.) / 2. * 255.)

    # Generate and p(h|x).forward
    # generate
    z_data = resample(batch_size, latent, threshold)
    y_data = generate_one_class(args.class_id, batch_size)
    z.d = z_data
    y_fake.d = y_data
    x_fake.forward(clear_buffer=True)
    # p(h|x).forward: extract Inception features of the generated image
    x_fake_d = x_fake.d.copy()
    x_fake_d = preprocess(
        x_fake_d, (args.image_size, args.image_size), args.nnp_preprocess)
    x.d = x_fake_d
    h.forward(clear_buffer=True)
    h_fake_d = h.d.copy()

    # Feature matching: compare the generated image against every real image of
    # the class in the Inception feature space.
    norm2_list = []
    x_data_list = []
    x_data_list.append(x_fake.d)  # index 0 holds the generated image
    for i in range(di.size):
        # forward for real data
        x_d, _ = di.next()
        x_data_list.append(x_d)
        x_d = preprocess(
            x_d, (args.image_size, args.image_size), args.nnp_preprocess)
        x.d = x_d
        h.forward(clear_buffer=True)
        h_real_d = h.d.copy()
        # norm computation
        axis = tuple(np.arange(1, len(h.shape)).tolist())
        norm2 = np.sum((h_real_d - h_fake_d) ** 2.0, axis=axis)
        norm2_list.append(norm2)

    # Save top-n images; x_data_list[0] is the generated image, so the real
    # images are offset by one relative to norm2_list.
    argmins = np.argsort(np.asarray(norm2_list).flatten())
    for i in range(args.top_n):
        monitor_image.add(i, x_data_list[argmins[i] + 1])
    matched_images = np.concatenate(
        [x_data_list[0]] + [x_data_list[j + 1] for j in argmins[:args.top_n]])
    monitor_image_tile.add(0, matched_images)
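
The matching step above is a nearest-neighbour search in Inception feature
space. The same selection, factored into a standalone helper (a sketch under
the shapes used above, with the batch axis first; h_real_list is hypothetical,
as the loop above does not keep the real features):

import numpy as np

def top_n_matches(h_fake, h_reals, n):
    # Squared L2 distance between one fake feature and each real feature,
    # reduced over all non-batch axes; closest indices first.
    axes = tuple(range(1, h_reals.ndim))
    d2 = ((h_reals - h_fake) ** 2).sum(axis=axes)
    return np.argsort(d2)[:n]

# Usage sketch: indices = top_n_matches(h_fake_d, np.concatenate(h_real_list), args.top_n)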