Example #1
def save_action(path, action):
    action_path = path + str(action) + "/"
    utils.make_dir(action_path)
    for i, img in enumerate(action.images):
        img_path = action_path + str(action) + "_" + str(i)
        img = np.reshape(img, (80, 40))
        utils.save_img(img_path, img)
Example #2
    def save(self, path, name):
        for proj, postfix in zip(self.projections, DIRS):
            proj_path = path + "/" + postfix
            utils.make_dir(proj_path)
            full_path = proj_path + name
            print(full_path)
            utils.save_img(full_path, proj)
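Both snippets above lean on a small utils module that this listing never shows; a minimal sketch of what its make_dir and save_img helpers might look like (an assumption, not the actual module):

import os

import numpy as np
from PIL import Image


def make_dir(path):
    # Create the directory if it does not already exist
    os.makedirs(path, exist_ok=True)


def save_img(path, img):
    # Assumed helper: clip to the 8-bit range and write as PNG (format given
    # explicitly because the callers above pass paths without an extension)
    img = np.clip(img, 0, 255).astype(np.uint8)
    Image.fromarray(img).save(path, format='PNG')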
Example #3
def final_to_pca(in_path, out_path):
    # action=td.read_im_action(in_path+"/")
    action = utils.read_object(in_path)
    out_path = out_path.replace(".final", ".pca")
    pca = action.to_pca()
    eigen = td.to_time_serie(pca)
    img = td.to_img(eigen)
    utils.save_img(out_path, img)
Example #4
def main():
    check_version()
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "device": options.device,
        "total_iterations": options.total_iterations,
        "base_model_path": options.base_model_path,
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, preds[0])  # assumed: in slow mode preds holds the stylized frame ("img" was undefined here)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint-dir %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #5
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "device_and_number": options.device_and_number
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    import time
    from datetime import datetime
    start_time = time.time()
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        delta_time, start_time = time.time() - start_time, time.time()
        print(
            'Current Time = {}; Time Elapsed = {}; Epoch = {}; Iteration = {}; Loss = {}'
            .format(datetime.now().strftime("%Y %B %d, %H:%M:%S"), delta_time,
                    epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('Loss values: style = %s; content = %s; tv = %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:  # the GPU path would need more RAM than is available, so this branch is slow
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, preds[0])  # assumed: in slow mode preds holds the stylized frame ("img" was undefined here)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #6
def main():
    print('ml5.js Style Transfer Training!')
    print('Note: This training will take a couple of hours.')
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    print('Training is starting!...')
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, preds[0])  # assumed: in slow mode preds holds the stylized frame ("img" was undefined here)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
    print('Converting model to ml5js')
    dump_checkpoints(kwargs['save_path'], options.model_dir)
    print(
        'Done! Checkpoint saved. Visit https://ml5js.org/docs/StyleTransfer for more information'
    )
Example #7
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    content_img = get_img(options.content, (256, 256, 3)).astype(np.float32)
    content_img = np.reshape(content_img, (1, ) + content_img.shape)
    prediction = ffwd(content_img, options.style)
    save_img(options.output_path, prediction)
    print('Image saved to {}'.format(options.output_path))
Example #8
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    assert len(paths_out) > 0
    is_paths = isinstance(data_in[0], str)
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        assert data_in.shape[0] == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = len(paths_out) // batch_size
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos+batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos+batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' +  \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder:X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])
                
        remaining_in = data_in[num_iters*batch_size:]
        remaining_out = paths_out[num_iters*batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in, remaining_out, checkpoint_dir, 
            device_t=device_t, batch_size=1)
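A usage sketch for the ffwd() above; the directory names and checkpoint path are placeholders, not part of the original source:

import os

# Hypothetical paths: stylize every image in content_imgs/ into stylized_imgs/
in_dir, out_dir = 'content_imgs', 'stylized_imgs'
names = sorted(os.listdir(in_dir))
data_in = [os.path.join(in_dir, n) for n in names]
paths_out = [os.path.join(out_dir, n) for n in names]
ffwd(data_in, paths_out, 'ckpts/fns.ckpt', device_t='/gpu:0', batch_size=4)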
Example #10
    def train(self):
        for n in range(FLAGS.epochs):
            for _ in tqdm(
                    iterable=range(FLAGS.steps_per_epoch),
                    ncols=int(get_terminal_width() * .9),
                    # tqdm.write() prints the epoch banner once and returns None,
                    # so the progress bar itself gets no description
                    desc=tqdm.write(f'Epoch {n + 1}/{FLAGS.epochs}'),
                    unit=' steps',
            ):
                self.train_step(self.img)
            save_img(self.img.read_value(), n + 1)
Example #11
def main():
    parser = build_parser()
    options = parser.parse_args()
    if not os.path.exists(options.test_dir):
        os.mkdir(options.test_dir)
    if not os.path.exists(options.checkpoint_dir):
        os.mkdir(options.checkpoint_dir)

    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "gpu_fraction": options.gpu_fraction
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, preds[0])  # assumed: in slow mode preds holds the stylized frame ("img" was undefined here)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #12
    def _execute(self):

        if not os.path.exists(self.path + 'field_evaluation/'):
            os.makedirs(self.path + 'field_evaluation/')

        print('EVALUATION OF POINT CLOUD STARTED')

        #Initialize CloudFeaturizer class
        cfP = cloud_featurizer_Parameters()
        featurizer = CloudFeaturizer(self.dws_point_cloud, cfP.METRIC,
                                     cfP.DIMENSIONS)

        print('POINT FEATURIZATION')
        #Compute features
        features = featurizer.collect_cloud_features()
        fkeys = list(features.keys())

        print('POINT CLASSIFICATION')
        #Initialize VegetationClassifier class
        classifier = VegetationClassifier(self.dws_point_cloud, features)
        #Compute classification
        crop_label, valid_idx = classifier.classify_vegetation()
        #sample valid (classified) points for further analysis
        classified_points = self.dws_point_cloud[valid_idx, :]
        #Visualize and save classification result
        save_img(np.hstack((classified_points, crop_label.reshape(-1, 1))),
                 'classification', self.path + 'field_evaluation/', 90, 0)

        print('EDGE DETECTION')
        #Initialize EdgeDetector class
        edP = edge_detector_Parameters()
        detector = EdgeDetector(classified_points, crop_label, edP.METRIC,
                                edP.ENTROPY_QUANTILE, edP.K)
        edge_label = detector.get_edge_points()
        save_img(np.hstack((classified_points, edge_label.reshape(-1, 1))),
                 'edge', self.path + 'field_evaluation/', 90, 0)

        valid_points = classified_points
        valid_points[:, 0] = valid_points[:, 0] + self.header.min[0]
        valid_points[:, 1] = valid_points[:, 1] + self.header.min[1]
        valid_points[:, 2] = valid_points[:, 2] + self.header.min[2]

        np.savetxt(
            self.path + 'field_evaluation/' +
            '{}_features.csv'.format(fkeys[0]), features[fkeys[0]])
        np.savetxt(
            self.path + 'field_evaluation/' +
            '{}_features.csv'.format(fkeys[1]), features[fkeys[1]])
        np.savetxt(self.path + 'field_evaluation/' + 'labels.csv',
                   np.hstack((valid_points, crop_label.reshape(-1, 1),
                              edge_label.reshape(-1, 1))),
                   delimiter=",")

        print('EVALUATION OF POINT CLOUD FINISHED')
Example #13
def work(in_files, out_files, device_id, total_device, device_idx):
    """
        Stylize the images.
        This function supports multi-GPU transformation.

        Arg:    in_files        - The path list of input images
                out_files       - The path list of output images
                device_id       - The device name, following TensorFlow's naming convention
                total_device    - The total number of devices
                device_idx      - The index of the current device
    """
    global adopt_revision

    with tf.Graph().as_default():
        tf_config = tf.ConfigProto(allow_soft_placement=True)
        tf_config.gpu_options.allow_growth = True

        # Construct graph
        img_ph = tf.placeholder(tf.float32, shape=image_shape)
        if adopt_revision:
            logits = SmallAutoEncoder(img_ph)
        else:
            logits = AutoEncoder(img_ph)

        # Run
        with tf.Session(config=tf_config) as sess:
            with tf.device(device_id):  # Adopt multi-GPU to transfer the image
                sess.run(tf.global_variables_initializer())
                saver = tf.train.Saver()
                saver.restore(sess, model_path + model_name)

                if total_device <= 1:
                    start = 0
                    end = len(in_files)
                else:
                    per_device = len(in_files) // total_device
                    start = device_idx * per_device
                    end = start + per_device
                conduct_time = time.time()
                for i in range(start, end, 1):
                    # print("progress: ", i, ' / ', end - start, '\t proc: ', device_idx)
                    img_batch = np.ndarray(image_shape)
                    for j, img_path in enumerate(in_files[i:i + 1]):
                        img = get_img(img_path)
                        img_batch[j] = img
                    _style_result = sess.run(
                        [
                            logits,
                        ], feed_dict={img_ph: img_batch / 255.0})
                    for j, img_path in enumerate(out_files[i:i + 1]):
                        save_img(img_path, _style_result[0][j])
                conduct_time = time.time() - conduct_time
                print("Conduct time: ", conduct_time)
Example #14
def main_worker(opt):

    # opt.device = torch.device('cuda')

    model = RACNN(num_classes=opt.num_classes)
    model = model.to(opt.device)
    # model = torch.nn.DataParallel(model,device_ids=[0,1])
    print(model)
    cls_params = list(model.b1.parameters()) + list(
        model.b2.parameters()) + list(model.classifier1.parameters()) + list(
            model.classifier2.parameters())
    apn_params = list(model.apn.parameters())
    # optimizer = model.parameters()
    criterion = CrossEntropyLoss().to(opt.device)

    (train_loader, train_logger, optimizer_cls, optimizer_apn,
     scheduler) = get_train_utils(opt, cls_params, apn_params)
    val_loader, val_logger = get_val_utils(opt)

    test_sample, _ = next(iter(val_loader))

    tb_writer = SummaryWriter(log_dir=opt.result_path)
    pretrainAPN(train_loader, optimizer_apn, opt, model, tb_writer)
    # model = model.to(opt.device)
    for i in range(opt.begin_epoch, opt.n_epochs + 1):

        cls_train_epoch(i, train_loader, model, criterion, optimizer_cls,
                        opt.device, train_logger, tb_writer)
        apn_train_epoch(i, train_loader, model, optimizer_apn, opt.device,
                        tb_writer)

        if i % opt.checkpoint == 0:
            save_file_path = opt.result_path / 'save_{}.pth'.format(i)
            save_checkpoint(save_file_path, i, model, optimizer_cls,
                            optimizer_apn, scheduler)

        # if i % 5 == 0:

        prev_val_loss = val_epoch(i, val_loader, model, criterion, opt.device,
                                  val_logger, tb_writer)

        if opt.lr_scheduler == 'multistep':
            scheduler.step()
        elif opt.lr_scheduler == 'plateau':
            scheduler.step(prev_val_loss)
        test_sample = test_sample.to(opt.device)
        _, _, _, crops = model(test_sample)
        img = crops[0].data
        # pic_path = str(opt.result_path)+'/samples/'
        save_img(
            img,
            path='/home/zhaoliu/car_brand/racnn/results/samples/iter_{}@2x.jpg'
            .format(i),
            annotation=f' 2xstep = {i}')
Example #15
def ffwd(data_in, paths_out, model, device_t='', batch_size=1):
    assert len(paths_out) > 0
    is_paths = isinstance(data_in[0], str)
    # TODO: if data_in is a list of input-image file paths (is_paths is True), read the first
    # image; since the pb model expects input of shape 1 x 256 x 256 x 3, resize it to
    # 256 x 256 and take img_shape from it.
    # If data_in already holds decoded image arrays (is_paths is False), take the shape
    # directly from data_in[0].
    if is_paths:
        # Get the shape when loading the first image.
        img_shape = get_img(data_in[0], (256, 256, 3)).shape
    else:
        # Get the shape from data_in[0] when the images have been loaded.
        img_shape = data_in[0].shape

    g = tf.Graph()
    config = tf.ConfigProto(allow_soft_placement=True,
                            inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)
    with g.as_default():
        with tf.gfile.FastGFile(model, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            input_tensor = sess.graph.get_tensor_by_name('X_content:0')
            output_tensor = sess.graph.get_tensor_by_name('add_37:0')
            batch_size = 1
            # TODO: the input image is loaded as HWC and still has to be expanded to NHWC
            batch_shape = (batch_size, ) + img_shape
            num_iters = int(len(paths_out) / batch_size)
            for i in range(num_iters):
                pos = i * batch_size
                curr_batch_out = paths_out[pos:pos + batch_size]
                # TODO: if data_in is a list of input-image file paths, read the batch_size
                # images of this batch into the array X;
                # if data_in already holds image arrays, assign the slice to X
                if is_paths:
                    curr_batch_in = data_in[pos:pos + batch_size]
                    X = np.zeros(batch_shape)
                    #print(X.shape)
                    for j, path in enumerate(curr_batch_in):
                        img = get_img(path, img_shape)
                        X[j] = img
                else:
                    X = data_in[pos:pos + batch_size]

                start = time.time()
                # TODO: compute output_tensor with sess.run
                _preds = sess.run(output_tensor, feed_dict={input_tensor: X})
                end = time.time()
                for j, path_out in enumerate(curr_batch_out):
                    # TODO: save all stylized images of this batch with save_img() from utils.py
                    save_img(path_out, _preds[j])
                delta_time = end - start
                print("Inference (CPU) processing time: %s" % delta_time)
Example #16
def evaluate_img(img_in, img_path, ckpt):
    img_shape = (256, 256, 3)
    batch_shape = (1, 256, 256, 3)

    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with tf.Graph().as_default(), tf.Session(config=soft_config) as sess:
        # Declare placeholders we'll feed into the graph
        X_inputs = tf.placeholder(
            tf.float32, shape=batch_shape, name='X_inputs')

        # Define output node
        preds = transform.net(X_inputs)  # (1, 720, 720, 3)
        tf.identity(preds[0], name='output')

        # For restore training checkpoints (important)
        saver = tf.train.Saver()
        saver.restore(sess, ckpt)  # run
        """
        saver = tf.train.Saver()
        if os.path.isdir(FLAGS.checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)  # run
            else:
                raise Exception("No checkpoint found...")
        else:
            ckpt = saver.restore(sess, FLAGS.checkpoint_dir)
        """

        X = np.zeros(batch_shape, dtype=np.float32)  # feed

        img = get_img(img_in, img_shape)
        X[0] = img

        _preds = sess.run(preds, feed_dict={X_inputs: X})
        save_img(img_path, _preds[0])

        # Write graph.
        start_time = time.time()
        tf.train.write_graph(
            sess.graph.as_graph_def(),
            FLAGS.model_dir,
            FLAGS.model_name + '.pb',
            as_text=False)
        tf.train.write_graph(
            sess.graph.as_graph_def(),
            FLAGS.model_dir,
            FLAGS.model_name + '.pb.txt',
            as_text=True)
        end_time = time.time()
        delta_time = end_time - start_time
        print('Saved pb and pb.txt, time:', delta_time)
Example #17
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    files = list_files(options.train_path)
    for x in files:
        in_path = os.path.join(options.train_path, x)
        out_path = os.path.join(options.result_path, x)
        img = get_img(in_path, (256, 256, 3))
        save_img(out_path, img)
Example #18
    def debug(self):
        for job in self._memory.get_jobs():
            for i, face in enumerate(job.get_faces()):
                path = "".join([
                    self.config.final_debug_path, job.people_name,
                    job.obj_name,
                    str(i), '.jpg'
                ])
                save_img(path, face)

        for job in self._memory.get_jobs():
            job.debug()
Example #19
def generate(args, print_args=True):
    if print_args:
        print(args)

    model = Model.load(args.model_file)

    image = load_img(args.image_file)
    _, height, width = image.size()
    max_size = max(height, width)

    img = generate_image(model, model.nz, height, width)

    save_img(img, 'generate.png')
Example #20
    def _add_job_with_faces(self, job, faces):
        job.add_faces(faces)
        if self.config.debug:
            for face in faces:
                dict_key = "".join([job.people_name, job.obj_name])
                path = "".join([
                    self.config.debug_path, job.people_name, job.obj_name,
                    str(self.find_obj_times[dict_key]), '.jpg'
                ])
                save_img(path, face)
                self.find_obj_times[dict_key] += 1

        self._memory.add_job(job)
Example #21
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    #    with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session() as sess:
    #        init_op = tf.global_variables_initializer()
    #        saver = tf.train.Saver()
    #        saver.restore(sess, options.checkpoint_dir)

    print('options.test: ' + options.test)
    print('options.checkpoint_dir: ' + options.checkpoint_dir)

    for index_image in range(0, 20):
        options.test = '../image_style_dataset/' + 'structure' + str(
            index_image + 1).zfill(4) + '.jpg'
        print(options.test)
        preds_path = '%s/' % (options.test_dir) + SAVE_NAME + '%s_%s.png' % (
            999, index_image)
        if not options.slow:
            ckpt_dir = os.path.dirname(options.checkpoint_dir)
            evaluate.ffwd_to_img(options.test, preds_path,
                                 options.checkpoint_dir)
        else:
            save_img(preds_path, img)  # note: img is never defined in this script, so the slow path cannot run as-is
Example #22
    def on_epoch_end(self, epoch, logs=None):
        if epoch % self.step != 0:
            return
        result = tf.squeeze(self.model(self.input_net), axis=0)
        if self.rootdir is not None:
            utils.save_img(
                os.path.join(self.rootdir, f"epoch_{epoch}.png"),
                result,
                self.data_format,
            )
        if self.keep_output:
            self.outputs_net.append(result)
        if self.plot:
            utils.plot_img(result, ncols=1, data_format=self.data_format)
Example #23
def get_stylize_image(content_fullpath, style_fullpath, output_path,
                      content_size=256, style_size=256, alpha=0.6,
                      swap5=False, ss_alpha=0.6, adain=False):
    content_img = get_img(content_fullpath)
    content_img = resize_to(content_img, int(content_size))

    style_img = get_img(style_fullpath)
    style_img = resize_to(style_img, int(style_size))

    stylized_rgb = wct_model.predict(
        content_img, style_img, alpha, swap5, ss_alpha, adain)

    save_img(output_path, stylized_rgb)
    print("stylized image saved "+output_path)
Example #24
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    # TODO: load the style image style_target and the list of content images content_targets
    style_target = ___________________
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        #        "slow":options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "type": options.type,
        "save": options.save
    }
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, preds[0])  # assumed: in slow mode preds holds the stylized frame ("img" was undefined here)
    ckpt_dir = options.checkpoint_dir
    print("Training complete.")
Example #25
    def test_single(self, img_fn):
        # networks
        self.model = Net(num_channels=self.num_channels,
                         base_filter=64,
                         num_residuals=18)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load data
        img = Image.open(img_fn)
        img = img.convert('YCbCr')
        y, cb, cr = img.split()
        y = y.resize(
            (y.size[0] * self.scale_factor, y.size[1] * self.scale_factor),
            Image.BICUBIC)

        input = Variable(ToTensor()(y)).view(1, -1, y.size[1], y.size[0])
        if self.gpu_mode:
            input = input.cuda()

        self.model.eval()
        recon_img = self.model(input)

        # save result images
        utils.save_img(recon_img.cpu().data, 1, save_dir=self.save_dir)

        out = recon_img.cpu()
        out_img_y = out.data[0]  # assumed: take the first image of the batch (out.item() only works for 1-element tensors)
        out_img_y = (((out_img_y - out_img_y.min()) * 255) /
                     (out_img_y.max() - out_img_y.min())).numpy()
        # out_img_y *= 255.0
        # out_img_y = out_img_y.clip(0, 255)
        out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

        out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
        out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
        out_img = Image.merge(
            'YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')

        # save img
        result_dir = os.path.join(self.save_dir, 'result')
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
        save_fn = result_dir + '/SR_result.png'
        out_img.save(save_fn)
Example #26
def forward_prop(data_in,
                 paths_out,
                 checkpoint_dir,
                 device_t='/cpu:0',
                 batch_size=1):
    assert len(paths_out) > 0
    is_paths = isinstance(data_in, str)

    img_shape = get_img(data_in).shape

    #print("Batch size: ", batch_size)

    g = tf.Graph()

    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        #print("Batch_shape: ", batch_shape)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        #Restore checkpoint in session
        saver.restore(sess, checkpoint_dir)

        curr_batch_out = paths_out

        if is_paths:
            curr_batch_in = data_in
            print("curr_batch_in: ", curr_batch_in)
            print("curr_batch_out: ", curr_batch_out)
            X = np.zeros(batch_shape, dtype=np.float32)

            img = get_img(curr_batch_in)
            assert img.shape == img_shape, 'Images have different dimensions. Resize images'
            X[0] = img
            #print("Shape: ", X.shape)  #(1,960,960,3)

        _preds = sess.run(preds, feed_dict={img_placeholder: X})

        save_img(curr_batch_out, _preds[0])
        # no explicit sess.close() needed; the with-block closes the session
    print("Done!!")
    return curr_batch_out
Example #27
def create_latent_images():
    model = load_model("model.h5")
    data_dir = TRAIN_PATH
    shutil.rmtree("result/")
    dir_names = sorted(os.listdir(data_dir))
    generator = data_gen(data_dir, 1)
    for i in range(len(dir_names) - 1):
        (encoder_input, decoder_input), _ = next(generator)
        latent_image = model.predict([encoder_input, decoder_input])
        target_dir = os.path.join("result/", dir_names[i + 1])
        os.makedirs(target_dir)
        for j in range(latent_image.shape[1]):
            img = latent_image[0][j]
            save_path = os.path.join(target_dir, "{0:05d}.jpg".format(j + 1))
            save_img(img, save_path)
Example #28
def apply_cls(dim, s_cat, params, actions, extr):
    imgs = data.get_named_projections(dim, actions)
    img_cats = []
    for img_i in imgs:
        img_j = np.reshape(img_i[1], (1, img_i[1].size))
        cat = extr.test(img_j)
        if s_cat == cat:
            img_cats.append(img_i)
    out_path = params['out_path']
    out_path = out_path + str(s_cat) + "/"
    utils.make_dir(out_path)
    for cat, img_i in img_cats:
        full_path = out_path + cat
        img_i = np.reshape(img_i, (60, 60))
        utils.save_img(full_path, img_i)
Example #29
    def __init__(self, n_scans, in_dir, out_dir):
        self.in_dir = in_dir
        self.out_dir = out_dir
        self.in_file_paths = glob(in_dir)
        self.in_file_paths.sort()
        self.rx_number = 1
        self.rx_component = 'Ez'
        self.n_scans = n_scans

        for path in tqdm(self.in_file_paths):
            self._create_ascan(path)
            self._merge_ascan(path)
            outputdata, dt = self._prepare_bscan(path)
            save_img(path, outputdata, dt, self.rx_number, self.rx_component,
                     self.out_dir)
Example #30
    def build(self):

        #mask initialization
        b, c, w, h = self.original_img_tensor.shape
        mask_tensor = torch.rand(
            (b, 1, int(w / self.factor), int(h / self.factor)))

        if cuda_available():
            mask_tensor = mask_tensor.cuda()
        mask_tensor = Variable(mask_tensor, requires_grad=True)

        output = self.model(self.original_img_tensor)
        #target class for explanations
        class_index = np.argmax(output.data.cpu().numpy())

        optimizer = torch.optim.Adam([mask_tensor], self.lr)

        for i in range(self.iter + 1):
            #upsampling mask to fit the shape of mask to the shape of image
            upsampled_mask = self.upsample(mask_tensor)

            #jittering
            jitter = torch.randn((b, c, w, h)) * 0.03
            if cuda_available():
                jitter = jitter.cuda()
            jitter_org_img_tensor = self.original_img_tensor + jitter

            mask_img = torch.mul(upsampled_mask, jitter_org_img_tensor) + \
                       torch.mul((1 - upsampled_mask), self.perturbed_img_tensor)

            mask_output = torch.nn.Softmax(dim=1)(self.model(mask_img))
            mask_prob = mask_output[0, class_index]

            loss = self.l1_coeff * torch.mean(1 - torch.abs(mask_tensor)) + \
                   TV(mask_tensor, self.tv_coeff, self.tv_beta) + mask_prob

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            #keep the mask values in the range [0, 1]
            mask_tensor.data.clamp_(0, 1)

            if i % 20 == 0:
                print(
                    f'[{i}/{self.iter}] Loss: {loss}  Prob for the target class: {mask_prob}'
                )

        save_img(self.upsample(mask_tensor), self.original_img,
                 self.perturbed_img, self.img_path, self.model_path)
Example #31
    def train(self, dataloader, epochs=100):

        optim_D = optim.Adam(self.D.parameters(), lr=1e-4)
        optim_G = optim.Adam(self.G.parameters(), lr=1e-4)

        for epoch in range(epochs):

            bar = tqdm(dataloader)

            for batch, (data, labels) in enumerate(bar):

                ### train_D
                self.D.train()
                self.G.eval()
                optim_D.zero_grad()
                x_real = data.to(self.device)
                x_fake = self.generate_fake(self.batch_size)[0]
                predict_real = self.D(x_real)
                predict_fake = self.D(x_fake)
                loss_D_real = -torch.log(predict_real).mean()
                loss_D_fake = -torch.log(torch.clamp(1 - predict_fake,
                                                     min=eps)).mean()
                loss_D = loss_D_real + loss_D_fake
                loss_D.backward()
                optim_D.step()

                acc_D = (torch.sum(predict_real > 0.5).item() + torch.sum(
                    predict_fake < 0.5).item()) / self.batch_size / 2

                ### train_G
                self.G.train()
                self.D.eval()
                optim_G.zero_grad()
                x_fake = self.generate_fake(self.batch_size)[0]
                predict_fake = self.D(x_fake)
                loss_G = -torch.log(predict_fake).mean()
                loss_G.backward()
                optim_G.step()
                acc_G = torch.sum(
                    predict_fake < 0.5).item() / predict_fake.shape[0]

                bar.set_postfix(loss_D='{:.03f}'.format(loss_D),
                                acc_D='{:.03f}'.format(acc_D),
                                loss_G='{:.03f}'.format(loss_G),
                                acc_G='{:.03f}'.format(acc_G))

            img = self.generate_fake(1)[0][0].detach().cpu()
            save_img(img, f'output/{epoch}.jpg')
Example #32
def storeTensor(tensor, store_path="?"):
    """
        This function assumes the size of tensor is [32, x, x, 1]
    """
    # Tile the 32 HWC images into an 8-row by 4-column grid
    result_img = None
    for i in range(8):
        row = None
        for j in range(4):
            if row is None:
                row = tensor[i * 4 + j]
            else:
                # stitch the images of one row side by side (width axis)
                row = np.concatenate((row, tensor[i * 4 + j]), axis=1)
        if result_img is None:
            result_img = row
        else:
            # stack the finished rows vertically (height axis)
            result_img = np.concatenate((result_img, row), axis=0)
    save_img(store_path, result_img)
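A quick way to exercise storeTensor with dummy data; the shape and file name here are arbitrary:

import numpy as np

batch = np.random.rand(32, 28, 28, 1)  # 32 single-channel 28x28 tiles
storeTensor(batch, 'grid.png')  # writes one 8-row by 4-column image grid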
Example #33
def overlay(img: Image, overlay_color: tuple, orig_file_name: str):
    """
    Place an overlay over an existing image

    :param img: Image opened with PIL.Image
    :param overlay_color: four-tuple with color to add to your image
    :param orig_file_name: name of the original file
    """

    assert len(overlay_color) == 4, 'Overlay color shall be a 4-tuple'

    img_overlay = Image.new(size=img.size, color=overlay_color, mode='RGBA')
    img.paste(img_overlay, None, mask=img_overlay)

    color_string = '_'.join([str(c) for c in overlay_color])
    filename = '{orig}_overlay_{color}.jpg'.format(orig=orig_file_name, color=color_string)
    save_img(img, filename)
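A hedged usage example for overlay(); open_img is assumed to be the companion loader from the same utils module as the neighbouring snippets:

from utils import open_img

img = open_img('amsterdam_190x150.jpg')
# add a translucent red tint; the result is saved with a color-coded suffix
overlay(img, (255, 0, 0, 64), 'amsterdam_190x150')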
Example #34
def save_neg_samples(root_folder, folder_to, train=True, subset_size=None, generate_subset=False):
    """Loads all the negative samples"""
    lst_file_folder = 'Train' if train else 'Test'
    lst_neg_file = os.path.join(lst_file_folder, 'neg.lst')
    content_neg = open(os.path.join(root_folder, lst_neg_file))  # Open the list of negative images
    content_neg_lines = content_neg.readlines()
    if subset_size:
        random.shuffle(content_neg_lines)  # Shuffle when generating a subset
        content_neg_lines = content_neg_lines[0:subset_size]  # If a subset size was given, trim the dataset
    for img_path in content_neg_lines:
        img_path = img_path.rstrip('\n')  # Strip the trailing \n left over from readlines()
        img = skimage.io.imread(os.path.join(root_folder, img_path))  # Load the image
        if generate_subset and not subset_size:
            utils.generate_sub_samples(img, img_path, folder_to)  # Generate new samples from the image
        img = utils.resize(img)  # Rescale the original image
        filename = utils.get_filename(img_path)  # Build the filename for the saved image
        utils.save_img(img, folder_to, filename)  # Save the image into the negatives folder
Example #35
    def test(self):
        print("Begin test...")
        # load trained model
        self.model.load(self.sess)

        # inference validation images & calculate PSNR
        mean_runtime = 0
        mean_psnr = 0
        file_name_list = self.data.file_names_for_dirs[0]
        for (in_img, file_name) in zip(self.data.dataset[0], file_name_list):
            # inference 
            start_time = time.time()
            if self.args.degrade or self.args.no_self_ensemble:
                out_img = chop_forward(in_img, self.sess, self.model, scale=self.data.scale_list[0], shave=10)
                
            # Only for Track 1, we use geometric self-ensemble
            else:
                tmp_img = np.zeros([in_img.shape[0]*self.data.scale_list[0], in_img.shape[1]*self.data.scale_list[0], 3])
                for i in range(2):
                    if i == 0:
                        flip_img = np.fliplr(in_img)
                        for j in range(4):
                            rot_flip_img = np.rot90(flip_img, j)
                            out_img = chop_forward(rot_flip_img, self.sess, self.model, scale=self.data.scale_list[0], shave=10)
                            tmp_img += np.fliplr(np.rot90(out_img, 4-j)) 
                    else:
                        for k in range(4):
                            rot_img = np.rot90(in_img, k)
                            out_img = chop_forward(rot_img, self.sess, self.model, scale=self.data.scale_list[0], shave=10)
                            tmp_img += np.rot90(out_img, 4-k)
                out_img = tmp_img / 8
            end_time = time.time()
            mean_runtime += (end_time - start_time) / self.args.num_test

            # save images
            dir_img = os.path.join(self.args.exp_dir, self.args.exp_name, 'results', file_name)
            save_img(out_img, dir=dir_img)

        # write text file for summarize results
        dir_file = os.path.join(self.args.exp_dir, self.args.exp_name, 'results', 'results.txt')
        with open(dir_file, "w") as f:
            f.write("runtime per image [s] : {0:2.2f}\n".format(mean_runtime))
            f.write("CPU[1] / GPU[0] : 0\n")
            f.write("Extra Data [1] / No Extra Data [0] : 0")
        print("Test is done!")
Example #36
def test(data_loader, model, logger, epoch):
    with torch.no_grad():
        metric = Metric()
        model.train(False)
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = input['img'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test'], input, output)
            logger.append(evaluation, 'test', input_size)
        logger.append(evaluation, 'test')
        info = {'info': ['Model: {}'.format(cfg['model_tag']), 'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test'])
        save_img(input['img'][:100], './output/vis/input_{}.png'.format(cfg['model_tag']), range=(-1, 1))
        save_img(output['img'][:100], './output/vis/output_{}.png'.format(cfg['model_tag']), range=(-1, 1))
    return
Example #37
def save_neg_samples(folder_from, folder_to, subset_size=None):
    """Loads all the negative samples"""
    for dirpath, dirnames, filenames in os.walk(folder_from):  # Collect the file names
        if subset_size:
            random.shuffle(filenames)  # Shuffle when generating a subset
            filenames = filenames[0:subset_size]  # If a subset size was given, trim the dataset
        for filename in filenames:
            img_path = os.path.join(dirpath, filename)
            img = skimage.io.imread(img_path)  # Load the image
            # generate_sub_samples(img, img_path)  # Generate new samples from the image
            img = utils.resize(img)  # Rescale the original image
            basename, extension = utils.get_basename(filename)
            filename_final = basename + '.png'
            utils.save_img(img, folder_to, filename_final)  # Save the image into the negatives folder
Example #38
def make_circular(img: Image) -> Image:
    # Assumed opening (the original snippet is truncated above this loop):
    # build an RGBA canvas the same size as the source image
    width, height = img.size
    result = Image.new('RGBA', (width, height))

    for y in range(height):
        # Get new width at this height
        relative_y = (y / height - 0.5) * 2  # Convert to value between -1 and +1
        angle = math.asin(relative_y)
        relative_x = math.cos(angle)
        new_width = int(math.ceil(relative_x * width))

        # Cut a strip at this height
        left, right = 0, width
        upper, lower = y, y + 1
        box = (left, upper, right, lower)
        img_strip = img.crop(box)

        # Resize the strip
        img_resized = img_strip.resize(size=(new_width, 1))

        # Paste the resized strip in place
        left = int(round(0.5 * (width - new_width)))
        upper = y
        box = (left, upper)
        result.paste(img_resized, box)

    return result


if __name__ == '__main__':
    orig_name = 'amsterdam_190x150'
    ams = open_img('{}.jpg'.format(orig_name))
    ams_circular = make_circular(ams)
    save_img(ams_circular, '{}_circular.png'.format(orig_name))
"""
Created on Mar 17, 2018

Used to answer this question:
https://stackoverflow.com/questions/49263496/pil-image-rotate-center0-0

@author: physicalattraction
"""

from utils import open_img, save_img

if __name__ == '__main__':
    orig_file_name = 'amsterdam_190x150'
    img = open_img('{}.jpg'.format(orig_file_name))
    angle = 45
    # Note: the rotated image has black pixels at the edges
    rotated_img = img.rotate(angle, expand=True)
    save_img(rotated_img, 'rotate_{}_{}.jpg'.format(angle, orig_file_name))
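The black edges appear because rotate() fills the expanded canvas with zeros; recent Pillow versions accept a fillcolor argument instead, as in this sketch:

    # Variant: fill the exposed corners with white (needs Pillow >= 5.2)
    rotated_white = img.rotate(angle, expand=True, fillcolor=(255, 255, 255))
    save_img(rotated_white, 'rotate_{}_{}_white.jpg'.format(angle, orig_file_name))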
Example #40
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints, 
                                relu_targets=args.relu_targets,
                                vgg_path=args.vgg_path, 
                                device=args.device,
                                ss_patch_size=args.ss_patch_size, 
                                ss_stride=args.ss_stride)

    # Create needed dirs
    in_dir = os.path.join(args.tmp_dir, 'input')
    out_dir = os.path.join(args.tmp_dir, 'stylized')
    if not os.path.exists(in_dir):
        os.makedirs(in_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    if os.path.isdir(args.in_path):
        in_path = get_files(args.in_path)
    else: # Single image file
        in_path = [args.in_path]

    if os.path.isdir(args.style_path):
        style_files = get_files(args.style_path)
    else: # Single image file
        style_files = [args.style_path]

    print(style_files)
    import time
    # time.sleep(999)

    in_args = [
        'ffmpeg',
        '-i', args.in_path,
        '%s/frame_%%d.png' % in_dir
    ]

    subprocess.call(" ".join(in_args), shell=True)
    base_names = os.listdir(in_dir)
    in_files = [os.path.join(in_dir, x) for x in base_names]
    out_files = [os.path.join(out_dir, x) for x in base_names]

    s = time.time()
    for content_fullpath in in_path:
        content_prefix, content_ext = os.path.splitext(content_fullpath)
        content_prefix = os.path.basename(content_prefix)


        try:

            for style_fullpath in style_files:
                style_img = get_img(style_fullpath)
                if args.style_size > 0:
                    style_img = resize_to(style_img, args.style_size)
                if args.crop_size > 0:
                    style_img = center_crop(style_img, args.crop_size)

                style_prefix, _ = os.path.splitext(style_fullpath)
                style_prefix = os.path.basename(style_prefix)

                # print("ARRAY:  ", style_img)
                out_v = os.path.join(args.out_path, '{}_{}{}'.format(content_prefix, style_prefix, content_ext))
                print("OUT:",out_v)
                if os.path.isfile(out_v):
                    print("SKIP" , out_v)
                    continue
                
                for in_f, out_f in zip(in_files, out_files):
                    print('{} -> {}'.format(in_f, out_f))
                    content_img = get_img(in_f)

                    if args.keep_colors:
                        style_rgb = preserve_colors_np(style_img, content_img)
                    else:
                        style_rgb = style_img

                    stylized = wct_model.predict(content_img, style_rgb, args.alpha, args.swap5, args.ss_alpha)

                    if args.passes > 1:
                        for _ in range(args.passes-1):
                            stylized = wct_model.predict(stylized, style_rgb, args.alpha)

                    # Stitch the style + stylized output together, but only if there's one style image
                    if args.concat:
                        # Resize style img to same height as frame
                        style_img_resized = scipy.misc.imresize(style_rgb, (stylized.shape[0], stylized.shape[0]))
                        stylized = np.hstack([style_img_resized, stylized])

                    save_img(out_f, stylized)

                fr = 30
                out_args = [
                    'ffmpeg',
                    '-i', '%s/frame_%%d.png' % out_dir,
                    '-f', 'mp4',
                    '-q:v', '0',
                    '-vcodec', 'mpeg4',
                    '-r', str(fr),
                    '"' + out_v + '"'
                ]
                print(out_args)

                subprocess.call(" ".join(out_args), shell=True)
                print('Video at: %s' % out_v)

                if args.keep_tmp is True or len(style_files) > 1:
                    continue
                else:
                    shutil.rmtree(args.tmp_dir)
                print('Processed in:',(time.time() - s))

            print('Processed in:',(time.time() - s))
 
        except Exception as e:
            print("EXCEPTION: ",e)
Example #41
def crop_image(img: Image, xy: tuple, scale_factor: float) -> Image:
    # Assumed signature, inferred from the calls below; the top of this
    # snippet (including the start of the docstring) was truncated
    """
    Crop an image around a relative center point

    Inputs:
    -------
    -------
    img: Image opened with PIL.Image
    xy: tuple with relative (x,y) position of the center of the cropped image
        x and y shall be between 0 and 1
    scale_factor: the ratio between the original image's size and the cropped image's size
    """

    center = (img.size[0] * xy[0], img.size[1] * xy[1])
    new_size = (img.size[0] / scale_factor, img.size[1] / scale_factor)
    left = max(0, int(center[0] - new_size[0] / 2))
    right = min(img.size[0], int(center[0] + new_size[0] / 2))
    upper = max(0, int(center[1] - new_size[1] / 2))
    lower = min(img.size[1], int(center[1] + new_size[1] / 2))
    cropped_img = img.crop((left, upper, right, lower))
    return cropped_img


if __name__ == '__main__':
    orig_file_name = 'amsterdam_190x150'
    ams = open_img('{}.jpg'.format(orig_file_name))

    crop_ams = crop_image(ams, (0.50, 0.50), 1.1)
    save_img(crop_ams, 'crop_{}_01.jpg'.format(orig_file_name))

    crop_ams = crop_image(ams, (0.25, 0.25), 2.5)
    save_img(crop_ams, 'crop_{}_02.jpg'.format(orig_file_name))

    crop_ams = crop_image(ams, (0.75, 0.45), 3.5)
    save_img(crop_ams, 'crop_{}_03.jpg'.format(orig_file_name))
Example #42
def main():
    start = time.time()

    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints, 
                                relu_targets=args.relu_targets,
                                vgg_path=args.vgg_path, 
                                device=args.device,
                                ss_patch_size=args.ss_patch_size, 
                                ss_stride=args.ss_stride)

    # Get content & style full paths
    if os.path.isdir(args.content_path):
        content_files = get_files(args.content_path)
    else: # Single image file
        content_files = [args.content_path]
    if os.path.isdir(args.style_path):
        style_files = get_files(args.style_path)
        if args.random > 0:
            style_files = np.random.choice(style_files, args.random)
    else: # Single image file
        style_files = [args.style_path]

    os.makedirs(args.out_path, exist_ok=True)

    count = 0

    ### Apply each style to each content image
    for content_fullpath in content_files:
        content_prefix, content_ext = os.path.splitext(content_fullpath)
        content_prefix = os.path.basename(content_prefix)  # Extract filename prefix without ext

        content_img = get_img(content_fullpath)
        if args.content_size > 0:
            content_img = resize_to(content_img, args.content_size)
        
        for style_fullpath in style_files: 
            style_prefix, _ = os.path.splitext(style_fullpath)
            style_prefix = os.path.basename(style_prefix)  # Extract filename prefix without ext

            # style_img = get_img_crop(style_fullpath, resize=args.style_size, crop=args.crop_size)
            # style_img = resize_to(get_img(style_fullpath), content_img.shape[0])

            style_img = get_img(style_fullpath)

            if args.style_size > 0:
                style_img = resize_to(style_img, args.style_size)
            if args.crop_size > 0:
                style_img = center_crop(style_img, args.crop_size)

            if args.keep_colors:
                style_img = preserve_colors_np(style_img, content_img)

            # if args.noise:  # Generate textures from noise instead of images
            #     frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
            #     frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            # Run the frame through the style network
            stylized_rgb = wct_model.predict(content_img, style_img, args.alpha, args.swap5, args.ss_alpha, args.adain)

            if args.passes > 1:
                for _ in range(args.passes-1):
                    stylized_rgb = wct_model.predict(stylized_rgb, style_img, args.alpha, args.swap5, args.ss_alpha, args.adain)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat:
                # Resize style img to a square with the frame's height
                # (note: scipy.misc.imresize was removed in SciPy 1.3)
                style_img_resized = scipy.misc.imresize(style_img, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                # margin = np.ones((style_img_resized.shape[0], 10, 3)) * 255
                stylized_rgb = np.hstack([style_img_resized, stylized_rgb])

            # Format for out filename: {out_path}/{content_prefix}_{style_prefix}.{content_ext}
            out_f = os.path.join(args.out_path, '{}_{}{}'.format(content_prefix, style_prefix, content_ext))
            # out_f = f'{content_prefix}_{style_prefix}.{content_ext}'
            
            save_img(out_f, stylized_rgb)

            count += 1
            print("{}: Wrote stylized output image to {}".format(count, out_f))

    print("Finished stylizing {} outputs in {}s".format(count, time.time() - start))
"""
Created on ???

Used to answer this question:
???

@author: physicalattraction
"""

from PIL import Image

from utils import save_img


def create_img(width: int = 200, height: int = 200) -> Image.Image:
    img = Image.new('RGB', (width, height))

    pixel_list = [(i % 256, i % 256, i % 256) for i in range(width * height)]
    i_pixel = 0
    for x in range(width):
        for y in range(height):
            img.putpixel((x, y), pixel_list[i_pixel])
            i_pixel += 1

    return img


if __name__ == '__main__':
    droopy = create_img()
    save_img(droopy, 'droopy.png')
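
putpixel in a double loop is Pillow's slow path; Image.putdata fills the whole image from a flat sequence in one call. A sketch that reproduces the same diagonal gradient (create_img_fast is a hypothetical variant; note that putdata fills row by row, so the index order is flipped):

from PIL import Image

def create_img_fast(width: int = 200, height: int = 200) -> Image.Image:
    img = Image.new('RGB', (width, height))
    # putdata expects row-major order (y outer, x inner); the
    # (x * height + y) index reproduces the gradient of the loop above.
    data = [((x * height + y) % 256,) * 3
            for y in range(height) for x in range(width)]
    img.putdata(data)
    return img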
"""
@author: physicalattraction
"""

import math
from typing import Tuple

from PIL import Image

from utils import open_img, save_img


def spotlight(img: Image.Image, center: Tuple[int, int], radius: int) -> Image.Image:
    width, height = img.size
    overlay_color = (0, 0, 0, 128)
    img_overlay = Image.new(size=img.size, color=overlay_color, mode='RGBA')
    for x in range(width):
        for y in range(height):
            dx = x - center[0]
            dy = y - center[1]
            distance = math.sqrt(dx * dx + dy * dy)
            if distance < radius:
                img_overlay.putpixel((x, y), (0, 0, 0, 0))
    img.paste(img_overlay, None, mask=img_overlay)
    return img


if __name__ == '__main__':
    orig_file_name = 'amsterdam_1900x1500'
    img = open_img('{}.jpg'.format(orig_file_name))
    spotlight_img = spotlight(img, (475, 900), 400)
    save_img(spotlight_img, 'spotlight_{}.jpg'.format(orig_file_name))
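
The per-pixel distance test runs a Python loop over every pixel; the same half-dark overlay with a clear disc can be drawn in two calls with ImageDraw.ellipse on an RGBA overlay. A sketch under the same inputs (spotlight_fast is a hypothetical variant):

from PIL import Image, ImageDraw

def spotlight_fast(img: Image.Image, center, radius: int) -> Image.Image:
    cx, cy = center
    # Half-transparent black everywhere...
    overlay = Image.new('RGBA', img.size, (0, 0, 0, 128))
    draw = ImageDraw.Draw(overlay)
    # ...except a fully transparent disc around the center.
    draw.ellipse((cx - radius, cy - radius, cx + radius, cy + radius),
                 fill=(0, 0, 0, 0))
    img.paste(overlay, None, mask=overlay)
    return img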
Example #45
0
 def save(self, in_path):
     for i, img_i in enumerate(self.imgs):
         full_path = in_path + "_" + str(i)
         if i == 0:  # only the first image is reshaped and written out
             img_i = np.reshape(img_i, (60, 60))
             utils.save_img(full_path, img_i)
Example #46
0
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints, 
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path, 
                    device=args.device,
                    ss_patch_size=args.ss_patch_size, 
                    ss_stride=args.ss_stride)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, 
                               args.style_size, 
                               args.crop_size, 
                               args.scale, 
                               args.alpha, 
                               args.swap5, 
                               args.ss_alpha,
                               args.passes)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0])  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps, out_shape)
    
    fps = FPS().start() # Track FPS processing speed

    # Toggles changed with kb shortcuts
    keep_colors = args.keep_colors
    swap_style = args.swap5
    use_adain = args.adain

    count = 0

    while True:
        ret, frame = cap.read()

        if ret:
            frame_resize = cv2.resize(frame, None, fx=style_window.scale, fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)

            count += 1
            print("Frame:",count,"Orig shape:",frame.shape,"New shape",frame_resize.shape)

            content_rgb = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgb, content_rgb)
            else:
                style_rgb = style_window.style_rgb

            # Run the frame through the style network
            stylized_rgb = wct_model.predict(content_rgb, style_rgb, style_window.alpha, swap_style, style_window.ss_alpha, use_adain)

            # Repeat stylization pipeline
            if style_window.passes > 1:
                for i in range(style_window.passes-1):
                    stylized_rgb = wct_model.predict(stylized_rgb, style_rgb, style_window.alpha, swap_style, style_window.ss_alpha, use_adain)

            # Stitch the style + stylized output together
            if args.concat:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])
            
            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)
                
            if args.video_out is not None:
                stylized_bgr = cv2.resize(stylized_bgr, out_shape) # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('WCT Universal Style Transfer', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10) 
            if key & 0xFF == ord('r'):   # Load new random style
                style_window.set_style(random=True)
            elif key & 0xFF == ord('c'): # Toggle color preservation
                keep_colors = not keep_colors
                print('Switching to keep_colors:',keep_colors)
            elif key & 0xFF == ord('s'): # Toggle style swap
                swap_style = not swap_style
                print('New value for flag swap_style:',swap_style)
            elif key & 0xFF == ord('a'): # Toggle AdaIN
                use_adain = not use_adain
                print('New value for flag use_adain:',use_adain)
            elif key & 0xFF == ord('w'): # Write stylized frame
                out_f = "{}.png".format(time.time())
                save_img(out_f, stylized_rgb)
                print('Saved image to:',out_f)
            elif key & 0xFF == ord('q'): # Quit gracefully
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()
    
    if args.video_out is not None:
        video_writer.release()
    
    cv2.destroyAllWindows()
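
WebcamVideoStream and FPS look like the threaded-capture and throughput helpers from imutils.video. For readers without that package, here is a minimal stand-in showing the start/update/stop/elapsed/fps protocol the loop above relies on (SimpleFPS is hypothetical):

import time

class SimpleFPS:
    """Counts frames between start() and stop() and reports the average rate."""

    def start(self):
        self._start = time.time()
        self._frames = 0
        return self

    def update(self):
        self._frames += 1

    def stop(self):
        self._end = time.time()

    def elapsed(self):
        return self._end - self._start

    def fps(self):
        return self._frames / self.elapsed()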
"""
Created on Oct 13, 2017

Used to answer this question:
https://stackoverflow.com/a/46736330/1469465

@author: physicalattraction
"""

from PIL import ImageDraw

from utils import save_img, open_img, print_pil_version_info

if __name__ == '__main__':
    image = open_img('star_transparent.png')
    width, height = image.size
    center = (int(0.5 * width), int(0.5 * height))
    yellow = (255, 255, 0, 255)
    ImageDraw.floodfill(image, xy=center, value=yellow)
    save_img(image, 'star_yellow.png')

    print_pil_version_info()
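
ImageDraw.floodfill also takes border and thresh arguments; thresh lets pixels whose colour is within a given distance of the seed colour be filled too, which helps when the shape's outline is anti-aliased. A short usage sketch (the thresh value here is chosen arbitrarily):

from PIL import Image, ImageDraw

image = Image.open('star_transparent.png')
seed = (image.width // 2, image.height // 2)
# With thresh=50, near-matching edge pixels no longer block the fill.
ImageDraw.floodfill(image, xy=seed, value=(255, 255, 0, 255), thresh=50)
image.save('star_yellow_thresh.png')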