Example #1
def run(
    model_name='alexnet',
    layer_names=[
        'data',
        'conv1',
        'conv2',
        'conv3',
        'conv4',
        'conv5',
        'fc6', 'fc7', 'fc8'
    ],
    image_size=(227, 227),
):
    directory = './theanomodel'
    filename_save = '%s/%s_mean_std_%s.pkl'
    filename_train = './image/valid/*.JPEG'
    filename_model = '%s/%s.model' % (directory, model_name)

    # load model
    img_mean = utils.load_mean_image()
    model = pickle.load(open(filename_model))

    # get filename
    filename_train = glob.glob(filename_train)

    # get function to compute mean and mean**2
    func = get_function_mean_var(model, layer_names)

    # for all images
    means = {layer_name: [] for layer_name in layer_names}
    vars_ = {layer_name: [] for layer_name in layer_names}
    for i, fn in enumerate(filename_train):
        print 'computing mean: %d / %d' % (i, len(filename_train))
        img = utils.load_image(fn, img_mean)
        img = utils.clip_image(img, image_size)
        mvs = func(img[None, :, :, :])
        for j, layer_name in enumerate(layer_names):
            means[layer_name].append(mvs[j*2])
            vars_[layer_name].append(mvs[j*2+1])

    # save
    for layer_name in layer_names:
        mean = np.vstack(means[layer_name]).mean(0)
        std = (np.vstack(vars_[layer_name]).mean(0) - mean**2)**0.5
        data = {
            'mean': mean,
            'std': std,
        }
        filename = filename_save % (directory, model_name, layer_name)
        pickle.dump(
            data,
            open(filename, 'wb'),
            protocol=pickle.HIGHEST_PROTOCOL,
        )
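
The save step above combines per-image statistics through the identity Var[x] = E[x**2] - E[x]**2. A minimal NumPy sketch (made-up data, not part of the original code) of that combination:

import numpy as np

# stand-ins for the per-image activations of one layer
chunks = [np.random.randn(1000) for _ in range(10)]
means = np.array([c.mean() for c in chunks])            # per-image E[x]
sq_means = np.array([(c ** 2).mean() for c in chunks])  # per-image E[x**2]

mean = means.mean()
std = (sq_means.mean() - mean ** 2) ** 0.5              # same formula as in the save loop

print(np.allclose(std, np.concatenate(chunks).std()))   # True (chunks have equal size)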
Example #2
def run(
    model_name='alexnet',
    layer_names=['data', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
    image_size=(227, 227),
    num_samples=100000,
):
    directory = './theanomodel'
    filename_save = '%s/%s_pca_%s.pkl'
    filename_mean_std = '%s/%s_mean_std_%s.pkl'
    filename_train = './image/valid/*.JPEG'
    filename_model = '%s/%s.model' % (directory, model_name)

    # load model
    img_mean = utils.load_mean_image()
    model = pickle.load(open(filename_model))

    # get filename
    filename_train = glob.glob(filename_train)
    num_samples_per_image = num_samples / len(filename_train) + 1

    # get function to sample
    func = get_function_sample(model, layer_names, num_samples_per_image)

    # sample
    samples = {layer_name: [] for layer_name in layer_names}
    for i, filename in enumerate(filename_train):
        print 'training PCA: %d / %d' % (i, len(filename_train))
        img = utils.load_image(filename, img_mean)
        img = utils.clip_image(img, image_size)
        s = func(img[None, :, :, :])
        for j, layer_name in enumerate(layer_names):
            samples[layer_name].append(s[j])

    # PCA
    for layer_name in layer_names:
        filename = filename_mean_std % (directory, model_name, layer_name)
        mean_std = pickle.load(open(filename))
        samples[layer_name] = np.vstack(samples[layer_name])
        samples[layer_name] = (
            (samples[layer_name] - mean_std['mean'][None, :]) /
            mean_std['std'][None, :]
        )
        pca = sklearn.decomposition.PCA(whiten=False)
        pca.fit(samples[layer_name])
        filename = filename_save % (directory, model_name, layer_name)
        pickle.dump(
            pca,
            open(filename, 'wb'),
            protocol=pickle.HIGHEST_PROTOCOL,
        )
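
Downstream code would presumably load these pickles back and apply them to new activations. A minimal sketch under that assumption (project_activations is a hypothetical helper; the filename patterns match the ones used above):

import pickle

def project_activations(activations, directory, model_name, layer_name):
    # standardize with the stored per-dimension mean/std, then project with the fitted PCA
    mean_std = pickle.load(open('%s/%s_mean_std_%s.pkl' % (directory, model_name, layer_name), 'rb'))
    pca = pickle.load(open('%s/%s_pca_%s.pkl' % (directory, model_name, layer_name), 'rb'))
    normalized = (activations - mean_std['mean'][None, :]) / mean_std['std'][None, :]
    return pca.transform(normalized)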
Example #3
def train_loop():

    ds = get_dataset()
    contentstylemodel = get_content_style_model()
    network = custom_unet_model()
    network.summary()

    progbar = tf.keras.utils.Progbar(len(ds))

    style_image = load_image(STYLE_IMAGE_PATH, add_dim=True)
    # load_image returns a normalized image with values between 0 and 1;
    # the VGG model expects pixel values scaled back up, so we multiply by 255
    # (its preprocess_input could be used instead)
    style_target, _ = contentstylemodel(style_image * 255.0)

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

    loss_metric = tf.keras.metrics.Mean()
    style_loss_metric = tf.keras.metrics.Mean()
    content_loss_metric = tf.keras.metrics.Mean()
    total_loss_metric = tf.keras.metrics.Mean()

    metrics = {
        'style': style_loss_metric,
        'content': content_loss_metric,
        'total': total_loss_metric,
        'loss': loss_metric
    }

    for e in range(EPOCHS):
        for i, batch in enumerate(ds):
            train_step(batch, style_target, network, contentstylemodel,
                       optimizer, metrics)
            progbar.update(i + 1)
        if e % 1 == 0:  # save weights every epoch (increase the modulus to save less often)
            print(
                f'epoch end: saving weights, style loss: {style_loss_metric.result()}, content loss: {content_loss_metric.result()}, total loss: {total_loss_metric.result()}'
            )
            network.save_weights(WEIGHTS_PATH, save_format='tf')
        print(f"EPOCH -- {e + 1}: loss--{loss_metric.result()}")
        if e % 2 == 0:
            # check that the generated image looks good every 2 epochs
            image = load_image(TEST_IMAGE_IMAGE_PATH,
                               add_dim=True,
                               resize=False)
            pred = network(image)
            clipped = clip_image(pred)
            tensor_to_imgarr(clipped).save(PREDICTED_FILE_NAME)
            print('Predicted image')

    network.save_weights(WEIGHTS_PATH, save_format='tf')
Example #4
def run(
        model_name='alexnet',
        layer_names=['data', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
        image_size=(227, 227),
        num_samples=100000,
):
    directory = './theanomodel'
    filename_save = '%s/%s_pca_%s.pkl'
    filename_mean_std = '%s/%s_mean_std_%s.pkl'
    filename_train = './image/valid/*.JPEG'
    filename_model = '%s/%s.model' % (directory, model_name)

    # load model
    img_mean = utils.load_mean_image()
    model = pickle.load(open(filename_model))

    # get filename
    filename_train = glob.glob(filename_train)
    num_samples_per_image = num_samples / len(filename_train) + 1

    # get function to sample
    func = get_function_sample(model, layer_names, num_samples_per_image)

    # sample
    samples = {layer_name: [] for layer_name in layer_names}
    for i, filename in enumerate(filename_train):
        print 'training PCA: %d / %d' % (i, len(filename_train))
        img = utils.load_image(filename, img_mean)
        img = utils.clip_image(img, image_size)
        s = func(img[None, :, :, :])
        for j, layer_name in enumerate(layer_names):
            samples[layer_name].append(s[j])

    # PCA
    for layer_name in layer_names:
        filename = filename_mean_std % (directory, model_name, layer_name)
        mean_std = pickle.load(open(filename))
        samples[layer_name] = np.vstack(samples[layer_name])
        samples[layer_name] = (
            (samples[layer_name] - mean_std['mean'][None, :]) /
            mean_std['std'][None, :])
        pca = sklearn.decomposition.PCA(whiten=False)
        pca.fit(samples[layer_name])
        filename = filename_save % (directory, model_name, layer_name)
        pickle.dump(
            pca,
            open(filename, 'wb'),
            protocol=pickle.HIGHEST_PROTOCOL,
        )
Example #5
    def fit(self, image, content_targets, style_targets, content_layer_weights,
            style_layer_weights, content_weight, style_weight,
            variation_weight):
        with tf.GradientTape() as tape:
            output = self(image)
            loss = style_content_loss(
                generated_outputs=output,
                content_targets=content_targets,
                style_targets=style_targets,
                content_layer_weights=content_layer_weights,
                style_layer_weights=style_layer_weights,
                alpha=content_weight,
                beta=style_weight)

            variation_loss = calculate_variation_loss(image)
            loss = loss + variation_weight * variation_loss

        gradient = tape.gradient(loss, image)
        self.optimizer.apply_gradients([(gradient, image)])
        image.assign(clip_image(image))
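
calculate_variation_loss is not shown in this snippet; a minimal sketch, assuming it is the standard total-variation penalty (implemented here with tf.image.total_variation, which may differ from the original helper):

import tensorflow as tf

def calculate_variation_loss(image):
    # sum of absolute differences between neighbouring pixels, averaged over the batch
    return tf.reduce_mean(tf.image.total_variation(image))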
Example #6
def predict_using_tflite(checkpoint, labels):
    model = TFModel(checkpoint)
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error opening video stream or file")
    fps_time = 0
    use_cv2 = True
    display_size = (640, 640)  # w, h
    start = time.time()
    while cap.isOpened():
        ret_val, img = cap.read()
        img = clip_image(img)
        top_k, prediction = model(img)
        img = cv2.resize(img, display_size)
        elapsed = time.time() - start
        canvas = create_canvas(labels, top_k, prediction, display_size,
                               elapsed)
        start = time.time()
        cv2.imshow(model.title, cv2.hconcat([img, canvas]))

        if cv2.waitKey(1) == 27:  # exit on ESC
            break

    cap.release()
    cv2.destroyAllWindows()
Example #7
    def generate_smoothgrad(self,
                            input_images,
                            target_labels,
                            DEVICE,
                            iter_smoothgrad,
                            MAG,
                            further_grad=False):
        gradients = torch.zeros_like(input_images)
        for t in range(iter_smoothgrad):
            noise = torch.randn(input_images.shape) * MAG
            noise = noise.to(DEVICE)
            noisy_images = (input_images + noise).requires_grad_()
            noisy_images = clip_image(noisy_images)

            model_output = self.model(noisy_images)
            self.model.zero_grad()
            grad_t = grad(model_output[torch.arange(model_output.size()[0]),
                                       target_labels].sum(),
                          noisy_images,
                          create_graph=further_grad)[0]
            gradients += grad_t
        gradients /= iter_smoothgrad
        return gradients
Example #8
def run(
        model_name='alexnet',
        layer_names=['data', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
        layer_sizes=[3, 96, 256, 384, 384, 256],
        batch_size=16,
        lr=1e+1,
        max_iter=20000,
        image_size=(227, 227),
        save_interval=1000,
):
    directory = './theanomodel'
    filename_train = './image/valid/*.JPEG'
    filename_save = '%s/%s_vlm_%s.pkl' % (directory, model_name,
                                          '_'.join(layer_names))
    lr_reduce_factor = 0.1
    lr_reduce_interval = 5000

    # load model
    img_mean = utils.load_mean_image()
    model, tparams = build_model(model_name, layer_names, layer_sizes)

    # build solver
    solver = solvers.SGD(
        model['loss_lm'],
        [model['data']],
        tparams.values(),
        lr=lr,
    )

    # get filename
    filename_train = glob.glob(filename_train)

    # train
    loss = []
    for iter_ in range(max_iter + 1):
        # load images
        imgs = []
        for filename in np.random.choice(filename_train, batch_size):
            img = utils.load_image(filename, img_mean)
            img = utils.clip_image(img, image_size)
            imgs.append(img)
        imgs = np.array(imgs)

        # update
        l = solver.update(imgs)
        loss.append(l)
        print 'training VLM: %d / %d %f' % (iter_, max_iter, l)

        # reduce learning rate
        if iter_ != 0 and iter_ % lr_reduce_interval == 0:
            solver.reset()
            solver.lr.set_value(solver.lr.get_value() *
                                np.array(lr_reduce_factor).astype(np.float32))

        # save
        if iter_ % save_interval == 0:
            print 'average loss:', np.mean(loss[-save_interval:])
            sys.setrecursionlimit(100000)
            pickle.dump(
                model,
                open(filename_save, 'wb'),
                protocol=pickle.HIGHEST_PROTOCOL,
            )
Example #9
def run(
    model_name='alexnet',
    layer_names=['data', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
    layer_sizes=[3, 96, 256, 384, 384, 256],
    batch_size=16,
    lr=1e+1,
    max_iter=20000,
    image_size=(227, 227),
    save_interval=1000,
):
    directory = './theanomodel'
    filename_train = './image/valid/*.JPEG'
    filename_save = '%s/%s_vlm_%s.pkl' % (directory, model_name, '_'.join(layer_names))
    lr_reduce_factor = 0.1
    lr_reduce_interval = 5000

    # load model
    img_mean = utils.load_mean_image()
    model, tparams = build_model(model_name, layer_names, layer_sizes)

    # build solver
    solver = solvers.SGD(
        model['loss_lm'],
        [model['data']],
        tparams.values(),
        lr=lr,
    )

    # get filename
    filename_train = glob.glob(filename_train)

    # train
    loss = []
    for iter_ in range(max_iter+1):
        # load images
        imgs = []
        for filename in np.random.choice(filename_train, batch_size):
            img = utils.load_image(filename, img_mean)
            img = utils.clip_image(img, image_size)
            imgs.append(img)
        imgs = np.array(imgs)

        # update
        l = solver.update(imgs)
        loss.append(l)
        print 'training VLM: %d / %d %f' % (iter_, max_iter, l)

        # reduce learning rate
        if iter_ != 0 and iter_ % lr_reduce_interval == 0:
            solver.reset()
            solver.lr.set_value(
                solver.lr.get_value() *
                np.array(lr_reduce_factor).astype(np.float32)
            )

        # save
        if iter_ % save_interval == 0:
            print 'average loss:', np.mean(loss[-save_interval:])
            sys.setrecursionlimit(100000)
            pickle.dump(
                model,
                open(filename_save, 'wb'),
                protocol=pickle.HIGHEST_PROTOCOL,
            )
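
The loop above decays the learning rate in steps; the rate in effect once iteration iter_ has finished can be written as a small helper (illustrative only, using the constants from the function above):

def effective_lr(iter_, lr=1e+1, lr_reduce_factor=0.1, lr_reduce_interval=5000):
    # learning rate after iteration `iter_` has completed, with one reduction per interval
    return lr * lr_reduce_factor ** (iter_ // lr_reduce_interval)

# effective_lr(4999) -> 10.0, effective_lr(5000) -> 1.0, effective_lr(15000) -> 0.01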
Example #10
import os
from argparse import ArgumentParser

# custom_unet_model is defined elsewhere in the project (import not shown here)
from utils import tensor_to_imgarr, load_image, clip_image


WEIGHTS_PATH = os.path.join(os.getcwd(), 'weights')
PREDICTED_FILE_NAME = 'new.jpg'


def get_model_with_weights(weights=WEIGHTS_PATH):
    model = custom_unet_model()
    model.load_weights(weights).expect_partial()
    return model


parser = ArgumentParser(description="Arguments to predict style")
parser.add_argument('--image', '-i', required=True,
                    help="content image to evaluate")


if __name__ == '__main__':
    args = parser.parse_args()

    model = get_model_with_weights()
    print('Model created..')
    image = load_image(args.image, add_dim=True, resize=False)
    print('Image loaded', image.shape)
    pred = model(image)
    print('Predicted...')
    clipped = clip_image(pred)
    print('Clipped...')
    tensor_to_imgarr(clipped).save(PREDICTED_FILE_NAME)
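
Assuming the script above is saved as predict.py (the filename is not given here) and trained weights exist under ./weights, it would be invoked as python predict.py --image path/to/content.jpg, writing the stylized result to new.jpg (PREDICTED_FILE_NAME).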