Example no. 1
 def save_generated(self, data):
     """ Generate images from batch and save them
     """
     self.set_inputs(data["A"])
     self.forward()
     path = self.opt.checkpoints_dir + f"/{self.opt.name}/generated/{data['A_name']}_fake.png"
     save_image(path, tensor_to_image(self.fake)[..., :3])
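
The tensor_to_image helper is not shown in this example. A minimal sketch, assuming it maps a CHW float tensor in [-1, 1] to an HWC uint8 array (which is why the caller can strip an alpha channel with [..., :3]):

import numpy as np

def tensor_to_image(tensor):
    # Assumed helper: CHW float tensor in [-1, 1] -> HWC uint8 array.
    array = tensor.detach().cpu().float().numpy()
    array = (np.transpose(array, (1, 2, 0)) + 1.0) * 127.5
    return np.clip(array, 0.0, 255.0).astype(np.uint8)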
Example no. 2
    def _get_qrcode(self):
        """
        缓存并展示登录二维码
        :return:
        """
        url = 'https://qr.m.jd.com/show'
        payload = {
            'appid': 133,
            'size': 147,
            't': str(int(time.time() * 1000)),
        }
        headers = {
            'User-Agent': self.spider_session.get_user_agent(),
            'Referer': 'https://passport.jd.com/new/login.aspx',
        }
        resp = self.session.get(url=url, headers=headers, params=payload)

        if not response_status(resp):
            logger.info('Failed to fetch the QR code')
            return False

        save_image(resp, self.qrcode_img_file)
        logger.info('QR code fetched; scan it with the JD app')
        open_image(self.qrcode_img_file)
        return True
Example no. 3
    def _get_qrcode(self):
        """
        Cache and display the login QR code.
        :return: True on success, False otherwise
        """
        url = 'https://qr.m.jd.com/show'
        payload = {
            'appid': 133,
            'size': 300,
            't': str(int(time.time() * 1000)),
        }
        headers = {
            'User-Agent': self.spider_session.get_user_agent(),
            'Referer': 'https://passport.jd.com/new/login.aspx',
        }
        resp = self.session.get(url=url, headers=headers, params=payload)

        if not response_status(resp):
            logger.info('Failed to fetch the QR code')
            return False
        save_image(resp, self.qrcode_img_file)
        logger.info('QR code fetched; scan it with the JD app')

        open_image(add_bg_for_qr(self.qrcode_img_file))
        if global_config.getRaw('messenger', 'email_enable') == 'true':
            email.send('QR code fetched; scan it with the JD app', "<img src='cid:qr_code.png'>", [email.mail_user], 'qr_code.png')
        return True
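
Both QR-code variants pass the raw requests response straight to save_image. A minimal sketch of such a helper, assuming a requests.Response whose body is the PNG:

def save_image(resp, image_file):
    # Write the HTTP response body (the QR-code PNG) to disk in chunks.
    with open(image_file, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=1024):
            f.write(chunk)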
Example no. 4
 def save_visuals(self, **kwargs):
     """ Save recent visuals, i.e. real_A, fake_B etc., to disc
     """
     visuals = self.get_visuals()
     for visual, image in visuals.items():
         path = self.opt.checkpoints_dir + f"/{self.opt.name}/images/{visual}_{self.step}.png"
         wandb.log({visual: [wandb.Image(image, caption=visual)]}, step=self.step)
         save_image(path, image)
Example no. 5
def run(path, namespace):
    im = util.open_image(path)
    if im is not None:
        fit_image = fit(im, namespace.width, namespace.height, namespace.color,
                        namespace.resample)
        util.save_image(fit_image, namespace.path, namespace.save_folder,
                        namespace.save_as, namespace.mode, "fit",
                        namespace.optimize, namespace.background)
Example no. 6
def main():
    args = parse_args()
    if args.random_seed is not None:
        # fixed random seeds for reproducibility
        np.random.seed(args.random_seed)
        torch.random.manual_seed(args.random_seed)

    # infer target label from image
    input_img = load_image(args.input_img, size=64)

    labels, confidences = send_query(input_img)
    target_idx = np.argmax(confidences)
    target_class = labels[target_idx]

    # ask the user whether to continue
    print(f'Inferred label: {target_class}, confidence of {np.round(confidences[target_idx], 3)}')
    if not query_yes_no('Continue?'):
        print('Please choose an input image which the API classifies as your target class.')
        sys.exit(0)

    # generate adversarial image
    else:
        if not args.color:
            target_img = image_to_grayscale(input_img)
        else:
            target_img = input_img

        print('Generating adversarial...')
        adversarial, conf, num_queries, conv_images, cppn = generate_adversarial(target_class=target_class,
                                                                                 target_image=target_img,
                                                                                 color=args.color,
                                                                                 target_conf=args.target_conf,
                                                                                 max_queries=args.max_queries,
                                                                                 init=args.init)

        if conf < args.target_conf:
            print(f'Failed to generate an adversarial image after {args.max_queries} queries.')
            # write_to_log('log.tsv', f'{target_class}\t{conf}\t{num_queries}\t{args.color}\t{args.init}')
            sys.exit(0)
        print(f'Found an adversarial image with > {args.target_conf} API confidence after {num_queries} queries.')

        output_dir = Path(args.output_dir)
        print(f'\tSaving results in: {output_dir}/')

        # save adversarial image
        adversarial_fname = str(output_dir / f'adversarial_{clean_filename(target_class)}_{conf}')
        save_image(adversarial_fname + '.png', adversarial)
        if args.high_res:
            cppn.set_img_size(2000)
            adversarial_high_res = cppn.render_image()
            save_image(adversarial_fname + '_HD.png', adversarial_high_res)
        # save convergence gif
        if not args.no_gif:
            conv_gif_fname = str(output_dir / f'convergence_{clean_filename(target_class)}_{conf}.gif')
            save_gif_from_images(conv_gif_fname, conv_images)

        # write_to_log('log.tsv', f'{target_class}\t{conf}\t{num_queries}\t{args.color}\t{args.init}')
        print('Finished.')
Example no. 7
def run(path, namespace):
    im = util.open_image(path)
    if im is not None:
        inverted_im = invert(im)
        if inverted_im is None:
            return
        util.save_image(inverted_im, namespace.path, namespace.save_folder,
                        namespace.save_as, namespace.mode, "inverted",
                        namespace.optimize, namespace.background)
Example no. 8
 def store_prediction(self, tst_batch, name, size=5):
     pred = self.model(tst_batch, training=False)
     pred = tf.math.argmax(pred, axis=-1)
     img = combine_preds(tst_batch,
                         pred,
                         self.n_class,
                         bs=size,
                         resize_out=self.store_resize_f)
     save_image(img, "%s/%s.png" % (self.pred_path, name))
Example no. 9
def run(path, namespace):
    im = util.open_image(path)
    if im is not None:
        resized_image = resize(im, namespace.width, namespace.height,
                               namespace.resample)
        util.save_image(resized_image, path, namespace.save_folder,
                        namespace.save_as, namespace.mode, "resized",
                        namespace.optimize, namespace.background)
Example no. 10
def save_image(result_dir, image, image_name, aspect_ratio=1.0):
    im = util.tensor2im(image)
    save_path = os.path.join(result_dir, image_name)
    h, w, _ = im.shape
    if aspect_ratio > 1.0:
        im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
    if aspect_ratio < 1.0:
        im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
    util.save_image(im, save_path)
    print('saved to:', save_path)
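
scipy.misc.imresize was removed in SciPy 1.3, so this example no longer runs on current SciPy. A drop-in sketch built on Pillow; note imresize took (height, width) while PIL.Image.resize takes (width, height), hence the size[::-1]:

import numpy as np
from PIL import Image

def imresize(arr, size, interp='bicubic'):
    # Pillow-based stand-in for the removed scipy.misc.imresize.
    resample = {'nearest': Image.NEAREST,
                'bilinear': Image.BILINEAR,
                'bicubic': Image.BICUBIC}[interp]
    return np.array(Image.fromarray(arr).resize(size[::-1], resample))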
Example no. 11
    def train(self, x, y, learning_rate=0.01):
        ds = SupervisedDataSet(len(x[0]), len(y[0]))

        util.save_image(util.array_to_image(x[0]), 'out/real.png')
        util.save_image(util.array_to_image(x[-1]), 'out/fake.png')

        for i in range(len(x)):
            ds.addSample(x[i], y[i])

        trainer = BackpropTrainer(self.network, ds, learningrate=learning_rate)
        return trainer.train()
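
The PyBrainMLP wrapper used here (and in the MNIST scripts at the end of this collection) is not shown. A minimal sketch, assuming it wraps PyBrain's buildNetwork and exposes forward; the train method above would live on this class:

from pybrain.tools.shortcuts import buildNetwork

class PyBrainMLP:
    # Assumed wrapper around a PyBrain feed-forward network.
    def __init__(self, layer_sizes):
        self.network = buildNetwork(*layer_sizes)

    def forward(self, xs):
        return [self.network.activate(x) for x in xs]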
Example no. 12
def run(path, namespace):
    im = util.open_image(path)
    if im is not None:
        util.save_image(im,
                        path,
                        namespace.save_folder,
                        namespace.save_as,
                        namespace.mode,
                        "optimized",
                        optimize=True,
                        background=namespace.background)
Example no. 13
def train(epoch):
    model.train()
    train_loss = 0
    train_likelihood = 0
    train_state_kld = 0
    for batch_idx in range(epoch_size // args.batch_size):
        (states, actions) = next(train_iterator)
        states = states.to(device).float()
        actions = actions.to(device).float()

        input_states = states[:, 0, :3]
        pred_states, state_mu, state_logvar = model(input_states)

        likelihood, state_kld = loss_function(pred_states, input_states,
                                              state_mu, state_logvar)
        loss = likelihood + kl_schedule(epoch) * (args.state_kl * state_kld)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        train_likelihood += likelihood.item()
        train_state_kld += state_kld.item()

        if batch_idx > 0 and batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(states), epoch_size,
                100. * batch_idx / (epoch_size // args.batch_size),
                loss.item() / len(states)))

    # render reconstructions
    for i, inputs, pred in zip(range(min(5, len(pred_states))), input_states,
                               pred_states):
        inputs = list(inputs.reshape([-1, 3, 64, 64]))
        img_path = path.join(render_path, "recon{}_{}.png".format(epoch, i))
        util.save_image(img_path, torch.cat([*inputs, pred], dim=2).detach())

    # render some example observations
    if epoch == 1:
        for i in range(min(5, len(states))):
            imgs = states[i]
            img_path = path.join(render_path,
                                 "observations{}_{}.png".format(epoch, i))
            imgs = imgs.reshape([-1, 3, 64, 64])
            tvu.save_image(imgs, img_path, nrow=4)

    print(('====> Epoch: {} Average loss: {:.4f}'
           '\tLL: {:.6f}\tstate KLD: {:.6f}').format(
               epoch,
               train_loss / epoch_size,
               train_likelihood / epoch_size,
               train_state_kld / epoch_size,
           ))
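
The loss scales the state KL term by kl_schedule(epoch), the usual KL-annealing trick for VAEs. One common choice, sketched here with a hypothetical warmup_epochs parameter, ramps the weight linearly from 0 to 1:

def kl_schedule(epoch, warmup_epochs=10):
    # Linear KL annealing: weight 0 at epoch 0, capped at 1.0 after warmup.
    return min(1.0, epoch / float(warmup_epochs))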
Example no. 14
    def test_img_save_and_load(self):
        test_img = util.load_image(test_img_dir / '2.png', size=64)
        self.assertEqual(type(test_img), np.ndarray)
        self.assertEqual(test_img.shape, (64, 64, 3))
        self.assertEqual(test_img.dtype, np.float64)

        util.save_image(test_img_dir / 'test.png', test_img)
        file = (test_img_dir / 'test.png')
        self.assertTrue(file.exists())
        self.assertTrue(file.is_file())
        self.assertTrue(np.allclose(test_img, util.load_image(file)))
        file.unlink()
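
The round trip can only be exact because load_image returns float64 values that sit on the 8-bit grid (multiples of 1/255). A save_image/load_image pair consistent with this test, sketched with Pillow:

import numpy as np
from PIL import Image

def load_image(path, size=None):
    img = Image.open(path).convert('RGB')
    if size is not None:
        img = img.resize((size, size), Image.BICUBIC)
    return np.asarray(img, dtype=np.float64) / 255.0

def save_image(path, img):
    data = (np.clip(img, 0.0, 1.0) * 255).round().astype(np.uint8)
    Image.fromarray(data).save(path)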
Example no. 15
def run(path, namespace):
    # NOTE: By default, the image is grayscaled and saved in the original
    # image's mode. To save as 'L' or 'LA', the user must pass that mode
    # explicitly via the optional 'mode' argument.
    im = util.open_image(path)
    if im is not None:
        dessaturated_im = dessaturate(im)
        if dessaturated_im is None:
            return
        util.save_image(dessaturated_im, path, namespace.save_folder,
                        namespace.save_as, namespace.mode, "dessaturated",
                        namespace.optimize, namespace.background)
Example no. 16
    def load_image(self, shape, path, output_dir='', save=True):
        '''Load an image in BGR format.'''
        images = np.zeros(shape, dtype='float32')
        image_size = shape[2:]
        # NOTE: scipy.misc.imread/imresize were removed from SciPy; see the
        # Pillow-based stand-in after Example no. 10.
        in_image = scipy.misc.imread(path)
        in_image = scipy.misc.imresize(in_image, (image_size[0], image_size[1]))
        images[0] = np.transpose(in_image, (2, 0, 1))   # convert to (3, 227, 227) format

        data = images[:,::-1]   # convert from RGB to BGR
        if save:
            name = "%s/samples/%s.jpg" % (output_dir, 'start')
            util.save_image(data, name)
        return data
Example no. 17
def get_boxes():
    with tempfile.TemporaryDirectory("box-session") as tmpdir:
        if request.mimetype == "image/jpeg":
            image_path = os.path.join(tmpdir, "image.jpeg")
            data = request.get_data()
        elif request.mimetype == "application/json":
            json_data = json.loads(request.get_data())
            data = base64.decodebytes(json_data["data"].encode())
            extension = json_data["type"]
            image_path = os.path.join(tmpdir, "image.png")
        else:
            return "Unsupported MIME type: `{}`".format(request.mimetype)

        util.save_image(image_path, data)
        return flask.jsonify(google_query.get_boxes(image_path))
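
A client-side usage sketch for the JSON branch; the host and route are assumptions, since the Flask route decorator is not shown:

import base64
import requests

with open('photo.png', 'rb') as f:
    payload = {'data': base64.b64encode(f.read()).decode('ascii'),
               'type': 'png'}
# Hypothetical endpoint; substitute the real host and route.
resp = requests.post('http://localhost:5000/get_boxes', json=payload)
print(resp.json())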
Example no. 18
def main(args):
    """
    see https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.html
    """
    if args.like is not None:
        _, newcolors = cluster_image(args.likefile, args.k)
    else:
        newcolors = color_selector(args.colors, args.colorfile)
    K = len(newcolors)

    labels, _ = cluster_image(args.infile, len(newcolors))
    res = newcolors[labels.flatten()]

    img = load_image(args.infile)
    save_image(args.outfile, res, img)
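
Per the OpenCV tutorial cited in the docstring, cluster_image presumably runs cv2.kmeans over the flattened pixels and returns per-pixel labels plus cluster centers. A sketch under that assumption:

import cv2
import numpy as np

def cluster_image(path, k):
    # K-means over flattened BGR pixels, as in the OpenCV k-means tutorial.
    data = cv2.imread(path).reshape((-1, 3)).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(data, k, None, criteria, 10,
                                    cv2.KMEANS_RANDOM_CENTERS)
    return labels, centers.astype(np.uint8)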
Example no. 19
    def validate(self, sess, test_size, x_provider, y_provider, n_epoch):
        self.net.is_training = False
        mean_dice = 0
        mean_iou = 0
        for val_index in range(test_size):
            val_x, val_y = sess.run([x_provider, y_provider])
            prediction, loss = sess.run([self.net.predicter, self.net.loss],
                                        feed_dict={
                                            self.net.x: val_x,
                                            self.net.y: val_y,
                                        })
            img_predict = prediction.reshape((self.nx, self.ny, 4))
            img_predict = self.remove_minor_cc(img_predict, 0.3, 4)
            img_y = val_y.reshape((self.nx * self.ny, 4))
            img_predict = img_predict.reshape((self.nx * self.ny, 4))
            dice, iou = sess.run([self.net.dice, self.net.iou],
                                 feed_dict={
                                     self.net.img_pred: img_predict,
                                     self.net.img_y: img_y
                                 })
            dice = (dice[0] + dice[1] + dice[2]) / 3
            iou = (iou[0] + iou[1] + iou[2]) / 3
            mean_dice += dice
            mean_iou += iou
            if val_index == 0:
                img = util.combine_img_prediction(
                    val_x,
                    img_y[:, 1], img_predict[:, 1],
                    img_y[:, 2], img_predict[:, 2],
                    img_y[:, 3], img_predict[:, 3])
                util.save_image(
                    img,
                    "%s/epoch_%s_%s_%s.jpg" % (self.prediction_path, n_epoch,
                                               val_index, dice))

        mean_dice = mean_dice / test_size
        mean_iou = mean_iou / test_size
        logging.info(
            "Validation Loss: {:.4f}, Dice: {:.4f}, IoU: {:.4f}".format(
                loss, mean_dice, mean_iou))
        return mean_dice, mean_iou
Example no. 20
def run_kmeans(data_path):
    img = imread(data_path)
    img2 = img.reshape(img.shape[0] * img.shape[1], img.shape[2])

    # Cluster for K = 2, 3, ..., 16 and save selected results to see the differences
    for cluster_count in range(2, 17):
        km = KMeans(cluster_count)
        V, cmap = km.fit(img2)

        if cluster_count in [2, 4, 6, 8, 16]:
            save_image(
                V,
                cmap,
                img.shape,
                "result/kmeans_{0}_clusters.png".format(cluster_count),
            )
Example no. 21
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x,
                                                             self.net.y: batch_y,
                                                             self.net.keep_prob: 1.})
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x,
                                                  self.net.y: batch_y,
                                                  self.net.keep_prob: 1.})

        logging.info("Validation loss={:.4f}".format(loss))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example no. 22
def main():
    model_name = 'generator_e_59'
    model_name = os.path.join(pp.MODEL_SAVES, model_name)
    model = Generator()
    chainer.serializers.load_npz(model_name, model)

    num_features = util.get_number_of_features(pp.CELEB_FACES_FC6_TEST)
    all_names = np.array(util.get_names_h5_file(pp.FC6_TEST_H5))

    y_tmp = np.zeros((num_features, 32 * 32 * 3), dtype=np.float32)
    target_tmp = np.zeros((num_features, 32 * 32 * 3), dtype=np.float32)

    save_list_names = os.listdir('/home/gabi/Documents/temp_datasets/test_celeba_reconstruction_m99')
    save_list_names = [i.split('_')[0]+'.jpg' for i in save_list_names]
    # save_list = random.sample(xrange(num_features), 100)
    # save_list_names = [''] * 100
    cnt = 0

    # for i in save_list:
    #     save_list_names[cnt] = util.sed_line(pp.CELEB_FACES_FC6_TEST, i).strip().split(',')[0]
    #     cnt += 1

    cnt = 0
    for i in all_names:
        features = util.get_features_h5_in_batches([i], train=False)
        features = util.to_correct_input(features)
        labels = util.get_labels([i])
        labels = np.asarray(labels, dtype=np.float32)
        target_tmp[cnt] = labels

        with chainer.using_config('train', False):
            f = np.expand_dims(features[0], 0)
            prediction = model(f)
            y_tmp[cnt] = prediction.data[0]
            if i in save_list_names:
                util.save_image(prediction, i, epoch=0)
                print("image '%s' saved" % i)

        cnt += 1

    # calculate validation loss
    y_tmp = y_tmp.astype(np.float32)
    target_tmp = target_tmp.astype(np.float32)
    loss = chainer.functions.mean_absolute_error(y_tmp, target_tmp)
    print('model:', model_name, 'validation loss:', loss)
Example no. 23
def set_question_values(manipulated_question):
    manipulated_question["title"] = request.form["title"]
    manipulated_question["submission_time"] = util.get_new_timestamp()
    manipulated_question["message"] = request.form["description"]
    manipulated_question["user_id"] = data_manager.get_user_data(session['username'])['id']
    if 'file' in request.files:
        file = request.files['file']
        manipulated_question["image"] = util.save_image(file, data_manager.UPLOAD_FOLDER, "question")
    return manipulated_question
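
A sketch of the util.save_image used here, assuming the file is a Werkzeug FileStorage upload; secure_filename guards against path traversal:

import os
from werkzeug.utils import secure_filename

def save_image(file, upload_folder, prefix):
    # Save an uploaded image under the upload dir with a type prefix
    # and return the stored filename.
    filename = '%s_%s' % (prefix, secure_filename(file.filename))
    file.save(os.path.join(upload_folder, filename))
    return filename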
Example no. 24
    def save_images(self, webpage, visuals, image_path):
        image_dir = webpage.get_image_dir()
        name = ntpath.basename(image_path)

        webpage.add_header(name)
        ims = []
        txts = []
        links = []

        for label, image_numpy in visuals.items():
            image_name = '%s_%s.png' % (name, label)
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)

            ims.append(image_name)
            txts.append(label)
            links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)
Example no. 25
    def render(self,
               optimization_tensor,
               input_img,
               output_filename='test.png',
               iterations=20):
        t_score = tf.reduce_mean(
            optimization_tensor)  # defining the optimization objective
        t_grad = tf.gradients(t_score, self.input_tensor)[
            0]  # behold the power of automatic differentiation!

        img = input_img.copy()

        img = self.image_optimize(img, iterations, t_grad)

        if img is not None:
            img = self.norm_visualize(img)
            img = np.clip(img, -1, 1)
            save_image(img, output_filename, color=self.save_color)
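
image_optimize is not shown; in this TF1 setting it is presumably plain gradient ascent on the input image. A method sketch, assuming the class holds self.sess alongside self.input_tensor:

import numpy as np

def image_optimize(self, img, iterations, t_grad, step=1.0):
    # Gradient ascent with a normalized step; real implementations often
    # add jitter or multi-octave rendering on top of this.
    for _ in range(iterations):
        g = self.sess.run(t_grad, feed_dict={self.input_tensor: img})
        img = img + step * g / (np.abs(g).mean() + 1e-8)
    return img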
Example no. 26
def estimate_foregrounds(directory):

    from pymatting import estimate_foreground_ml

    # Add your own method here (method name, estimate_foreground function)
    fg_methods = [
        ("multilevel", estimate_foreground_multilevel),
        #("naive", estimate_foreground_naive),
    ]

    alpha_methods = [
        "gt",
        "cf",
        "idx",
        "ifm",
        "knn",
    ]

    print("Running foreground estimation")
    for index in range(1, 28):
        name = "GT%02d" % index

        path = f"{directory}/converted/image/{name}.bmp"

        image = util.load_image(path)

        for alpha_method in alpha_methods:
            path = f"{directory}/alpha/{alpha_method}/{name}.png"

            alpha = util.load_image(path, "gray")

            for fg_method, estimate_foreground in fg_methods:
                print(
                    f'Processing image {name} with foreground estimation '
                    f'method {fg_method:10} and alpha matte {alpha_method:3}')

                foreground = estimate_foreground(image, alpha)

                path = f"{directory}/fg_methods/{fg_method}/{alpha_method}/{name}.bmp"

                util.save_image(path, foreground)
Example no. 27
    def output_minibatch_stats(self, sess, summary_writer, step, batch_x,
                               batch_y, epoch):
        # Calculate batch loss and accuracy
        summary_str, loss, acc, predictions = sess.run(
            [self.summary_op, self.net.cost, self.net.accuracy,
             self.net.predicter],
            feed_dict={self.net.x: batch_x,
                       self.net.y: batch_y,
                       self.net.keep_prob: 1.})

        img_pred = util.to_rgb(np.asarray(predictions[0], 'float'))
        util.save_image(img_pred, "test_mini/%s_%s_pred.jpg" % (epoch, step))
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
        logging.info(
            "Iter {:}, Minibatch Loss= {:.4f}, Training Accuracy= {:.4f}%, Minibatch error= {:.1f}%"
            .format(step, loss, acc * 100.0, error_rate(predictions, batch_y)))
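
error_rate is a small helper in the same style as the rest of this trainer; a sketch assuming one-hot predictions and labels of shape [N, H, W, C]:

import numpy as np

def error_rate(predictions, labels):
    # Percent of pixels whose argmax class disagrees with the label.
    correct = np.argmax(predictions, axis=-1) == np.argmax(labels, axis=-1)
    return 100.0 * (1.0 - np.mean(correct))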
Example no. 28
    def evaluate(self, net, iteration, noise_func):
        avg_psnr = 0.0
        for idx in range(len(self.images)):
            orig_img = self.images[idx]
            w = orig_img.shape[2]
            h = orig_img.shape[1]

            noisy_img = noise_func(orig_img)
            pred255 = util.infer_image(net, noisy_img)
            orig255 = util.clip_to_uint8(orig_img)
            assert (pred255.shape[2] == w and pred255.shape[1] == h)

            sqerr = np.square(
                orig255.astype(np.float32) - pred255.astype(np.float32))
            s = np.sum(sqerr)
            cur_psnr = 10.0 * np.log10((255 * 255) / (s / (w * h * 3)))
            avg_psnr += cur_psnr

            util.save_image(self.submit_config, pred255,
                            "img_{0}_val_{1}_pred.png".format(iteration, idx))

            if iteration == 0:
                util.save_image(
                    self.submit_config, orig_img,
                    "img_{0}_val_{1}_orig.png".format(iteration, idx))
                util.save_image(
                    self.submit_config, noisy_img,
                    "img_{0}_val_{1}_noisy.png".format(iteration, idx))
        avg_psnr /= len(self.images)
        print('Average PSNR: %.2f' % autosummary('PSNR_avg_psnr', avg_psnr))
Example no. 29
def test2d(config):
    prepare_dirs_and_logger(config)
    tf.set_random_seed(config.random_seed)

    batch_manager = BatchManager(config)

    # thread test
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    sess_config.log_device_placement = False
    sess = tf.Session(config=sess_config)
    batch_manager.start_thread(sess)

    x, y = batch_manager.batch()  # [-1, 1]
    x_ = x.eval(session=sess)
    # y_ = y.eval(session=sess)
    batch_manager.stop_thread()

    x_w = vort_np(x_)
    x_w /= np.abs(x_w).max()
    x_w = (x_w + 1) * 0.5
    x_w = np.uint8(plt.cm.RdBu(x_w[..., 0]) * 255)[..., :3]
    x_ = (x_ + 1) * 127.5  # [0, 255]
    b_ch = np.ones([config.batch_size, config.res_y, config.res_x, 1]) * 127.5
    x_ = np.concatenate((x_, b_ch), axis=-1)
    x_ = np.concatenate((x_, x_w), axis=0)
    save_image(x_, '{}/x_fixed.png'.format(config.model_dir))

    # random pick from parameter space
    x, pi, zi = batch_manager.random_list(config.batch_size)
    x_w = vort_np(x / 127.5 - 1)
    x_w /= np.abs(x_w).max()
    x_w = (x_w + 1) * 0.5
    x_w = np.uint8(plt.cm.RdBu(x_w[..., 0]) * 255)[..., :3]
    x = np.concatenate((x, x_w), axis=0)
    save_image(x, '{}/x.png'.format(config.model_dir))
    with open('{}/x_p.txt'.format(config.model_dir), 'w') as f:
        f.write(str(pi))
        f.write(str(zi))
Example no. 30
def main():
    p = argparse.ArgumentParser(description='Display a kernel.')
    p.add_argument('-out', help='output to *.png file instead of viewing')
    p.add_argument('k', nargs='*', help='path to kernel(s)')
    args = p.parse_args()

    out = None

    for fn in args.k:
        print('Loading', fn)
        step, kernel = util.load_kernel(fn)
        print('  Step', step)
        print('  Kernel shape is', kernel.shape)
        print('  Min', np.min(kernel))
        print('  Max', np.max(kernel))
        print('  Mean', np.mean(kernel))
        print('  Sum', np.sum(kernel))
        print('  Sum of abs', np.sum(np.abs(kernel)))
        print('  RMS', np.sqrt(np.mean(kernel * kernel)))

        render = util.vis_hwoi(kernel, doubles=2)
        render = util.hstack([render, util.make_label(fn)], 5)

        if out is None:
            out = render
        else:
            out = util.vstack([out, render], 5)

    out = util.border(out, 5)

    if args.out is not None:
        util.save_image(args.out, out)
        print('Written to', args.out)
    else:
        print('Press ESC to close window.')

        def render_fn():
            return out

        util.viewer(None, render_fn)
Example no. 31
    def _get_QRcode(self):
        url = 'https://qr.m.jd.com/show'
        payload = {
            'appid': 133,
            'size': 147,
            't': str(int(time.time() * 1000)),
        }
        headers = {
            'User-Agent': self.default_user_agent,
            'Referer': 'https://passport.jd.com/new/login.aspx',
        }
        resp = self.session.get(url=url, headers=headers, params=payload)

        if not resp.ok:
            logger.info('Failed to fetch the QR code')
            return False

        QRCode_file = 'QRcode.png'
        save_image(resp, QRCode_file)
        logger.info('QR code fetched; scan it with the JD app')
        open_image(QRCode_file)
        return True
Example no. 32
def _run_inference(output_dir=None,
                   file_extension='png',
                   depth=True,
                   egomotion=False,
                   model_ckpt=None,
                   input_dir=None,
                   input_list_file=None,
                   batch_size=1,
                   img_height=128,
                   img_width=416,
                   seq_length=3,
                   architecture=nets.RESNET,
                   imagenet_norm=True,
                   use_skip=True,
                   joint_encoder=True,
                   shuffle=False,
                   flip_for_depth=False,
                   inference_mode=INFERENCE_MODE_SINGLE,
                   inference_crop=INFERENCE_CROP_NONE,
                   use_masks=False):
  """Runs inference. Refer to flags in inference.py for details."""
  inference_model = model.Model(is_training=False,
                                batch_size=batch_size,
                                img_height=img_height,
                                img_width=img_width,
                                seq_length=seq_length,
                                architecture=architecture,
                                imagenet_norm=imagenet_norm,
                                use_skip=use_skip,
                                joint_encoder=joint_encoder)
  vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
  saver = tf.train.Saver(vars_to_restore)
  sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
  with sv.managed_session() as sess:
    saver.restore(sess, model_ckpt)
    if not gfile.Exists(output_dir):
      gfile.MakeDirs(output_dir)
    logging.info('Predictions will be saved in %s.', output_dir)

    # Collect all images to run inference on.
    im_files, basepath_in = collect_input_images(input_dir, input_list_file,
                                                 file_extension)
    if shuffle:
      logging.info('Shuffling data...')
      np.random.shuffle(im_files)
    logging.info('Running inference on %d files.', len(im_files))

    # Create missing output folders and pre-compute target directories.
    output_dirs = create_output_dirs(im_files, basepath_in, output_dir)

    # Run depth prediction network.
    if depth:
      im_batch = []
      for i in range(len(im_files)):
        if i % 100 == 0:
          logging.info('%s of %s files processed.', i, len(im_files))

        # Read image and run inference.
        if inference_mode == INFERENCE_MODE_SINGLE:
          if inference_crop == INFERENCE_CROP_NONE:
            im = util.load_image(im_files[i], resize=(img_width, img_height))
          elif inference_crop == INFERENCE_CROP_CITYSCAPES:
            im = util.crop_cityscapes(util.load_image(im_files[i]),
                                      resize=(img_width, img_height))
        elif inference_mode == INFERENCE_MODE_TRIPLETS:
          im = util.load_image(im_files[i], resize=(img_width * 3, img_height))
          im = im[:, img_width:img_width*2]
        if flip_for_depth:
          im = np.flip(im, axis=1)
        im_batch.append(im)

        if len(im_batch) == batch_size or i == len(im_files) - 1:
          # Call inference on batch.
          for _ in range(batch_size - len(im_batch)):  # Fill up batch.
            im_batch.append(np.zeros(shape=(img_height, img_width, 3),
                                     dtype=np.float32))
          im_batch = np.stack(im_batch, axis=0)
          est_depth = inference_model.inference_depth(im_batch, sess)
          if flip_for_depth:
            est_depth = np.flip(est_depth, axis=2)
            im_batch = np.flip(im_batch, axis=2)

          for j in range(len(im_batch)):
            color_map = util.normalize_depth_for_display(
                np.squeeze(est_depth[j]))
            visualization = np.concatenate((im_batch[j], color_map), axis=0)
            # Save raw prediction and color visualization. Extract filename
            # without extension from full path: e.g. path/to/input_dir/folder1/
            # file1.png -> file1
            k = i - len(im_batch) + 1 + j
            filename_root = os.path.splitext(os.path.basename(im_files[k]))[0]
            pref = '_flip' if flip_for_depth else ''
            output_raw = os.path.join(
                output_dirs[k], filename_root + pref + '.npy')
            output_vis = os.path.join(
                output_dirs[k], filename_root + pref + '.png')
            with gfile.Open(output_raw, 'wb') as f:
              np.save(f, est_depth[j])
            util.save_image(output_vis, visualization, file_extension)
          im_batch = []

    # Run egomotion network.
    if egomotion:
      if inference_mode == INFERENCE_MODE_SINGLE:
        # Run regular egomotion inference loop.
        input_image_seq = []
        input_seg_seq = []
        current_sequence_dir = None
        current_output_handle = None
        for i in range(len(im_files)):
          sequence_dir = os.path.dirname(im_files[i])
          if sequence_dir != current_sequence_dir:
            # Assume start of a new sequence, since this image lies in a
            # different directory than the previous ones.
            # Clear egomotion input buffer.
            output_filepath = os.path.join(output_dirs[i], 'egomotion.txt')
            if current_output_handle is not None:
              current_output_handle.close()
            current_sequence_dir = sequence_dir
            logging.info('Writing egomotion sequence to %s.', output_filepath)
            current_output_handle = gfile.Open(output_filepath, 'w')
            input_image_seq = []
          im = util.load_image(im_files[i], resize=(img_width, img_height))
          input_image_seq.append(im)
          if use_masks:
            im_seg_path = im_files[i].replace('.%s' % file_extension,
                                              '-seg.%s' % file_extension)
            if not gfile.Exists(im_seg_path):
              raise ValueError('No segmentation mask %s has been found for '
                               'image %s. If none are available, disable '
                               'use_masks.' % (im_seg_path, im_files[i]))
            input_seg_seq.append(util.load_image(im_seg_path,
                                                 resize=(img_width, img_height),
                                                 interpolation='nn'))

          if len(input_image_seq) < seq_length:  # Buffer not filled yet.
            continue
          if len(input_image_seq) > seq_length:  # Remove oldest entry.
            del input_image_seq[0]
            if use_masks:
              del input_seg_seq[0]

          input_image_stack = np.concatenate(input_image_seq, axis=2)
          input_image_stack = np.expand_dims(input_image_stack, axis=0)
          if use_masks:
            input_image_stack = mask_image_stack(input_image_stack,
                                                 input_seg_seq)
          est_egomotion = np.squeeze(inference_model.inference_egomotion(
              input_image_stack, sess))
          egomotion_str = []
          for j in range(seq_length - 1):
            egomotion_str.append(','.join([str(d) for d in est_egomotion[j]]))
          current_output_handle.write(
              str(i) + ' ' + ' '.join(egomotion_str) + '\n')
        if current_output_handle is not None:
          current_output_handle.close()
      elif inference_mode == INFERENCE_MODE_TRIPLETS:
        written_before = []
        for i in range(len(im_files)):
          im = util.load_image(im_files[i], resize=(img_width * 3, img_height))
          input_image_stack = np.concatenate(
              [im[:, :img_width], im[:, img_width:img_width*2],
               im[:, img_width*2:]], axis=2)
          input_image_stack = np.expand_dims(input_image_stack, axis=0)
          if use_masks:
            im_seg_path = im_files[i].replace('.%s' % file_extension,
                                              '-seg.%s' % file_extension)
            if not gfile.Exists(im_seg_path):
              raise ValueError('No segmentation mask %s has been found for '
                               'image %s. If none are available, disable '
                               'use_masks.' % (im_seg_path, im_files[i]))
            seg = util.load_image(im_seg_path,
                                  resize=(img_width * 3, img_height),
                                  interpolation='nn')
            input_seg_seq = [seg[:, :img_width], seg[:, img_width:img_width*2],
                             seg[:, img_width*2:]]
            input_image_stack = mask_image_stack(input_image_stack,
                                                 input_seg_seq)
          est_egomotion = inference_model.inference_egomotion(
              input_image_stack, sess)
          est_egomotion = np.squeeze(est_egomotion)
          egomotion_1_2 = ','.join([str(d) for d in est_egomotion[0]])
          egomotion_2_3 = ','.join([str(d) for d in est_egomotion[1]])

          output_filepath = os.path.join(output_dirs[i], 'egomotion.txt')
          file_mode = 'w' if output_filepath not in written_before else 'a'
          with gfile.Open(output_filepath, file_mode) as current_output_handle:
            current_output_handle.write(str(i) + ' ' + egomotion_1_2 + ' ' +
                                        egomotion_2_3 + '\n')
          written_before.append(output_filepath)
      logging.info('Done.')
Example no. 33
def finetune_inference(train_model, model_ckpt, output_dir):
  """Train model."""
  vars_to_restore = None
  if model_ckpt is not None:
    vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
    ckpt_path = model_ckpt
  pretrain_restorer = tf.train.Saver(vars_to_restore)
  sv = tf.train.Supervisor(logdir=None, save_summaries_secs=0, saver=None,
                           summary_op=None)
  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  img_nr = 0
  failed_heuristic = []
  with sv.managed_session(config=config) as sess:
    # TODO(casser): Caching the weights would be better to avoid I/O bottleneck.
    while True:  # Loop terminates when all examples have been processed.
      if model_ckpt is not None:
        logging.info('Restored weights from %s', ckpt_path)
        pretrain_restorer.restore(sess, ckpt_path)
      logging.info('Running fine-tuning, image %s...', img_nr)
      img_pred_folder = os.path.join(
          output_dir, FLAGS.ft_name + 'id_' + str(img_nr))
      if not gfile.Exists(img_pred_folder):
        gfile.MakeDirs(img_pred_folder)
      step = 1

      # Run fine-tuning.
      while step <= FLAGS.num_steps:
        logging.info('Running step %s of %s.', step, FLAGS.num_steps)
        fetches = {
            'train': train_model.train_op,
            'global_step': train_model.global_step,
            'incr_global_step': train_model.incr_global_step
        }
        _ = sess.run(fetches)
        if step % SAVE_EVERY == 0:
          # Get latest prediction for middle frame, highest scale.
          pred = train_model.depth[1][0].eval(session=sess)
          if FLAGS.flip:
            pred = np.flip(pred, axis=2)
          input_img = train_model.image_stack.eval(session=sess)
          input_img_prev = input_img[0, :, :, 0:3]
          input_img_center = input_img[0, :, :, 3:6]
          input_img_next = input_img[0, :, :, 6:]
          img_pred_file = os.path.join(
              img_pred_folder,
              str(step).zfill(10) + ('_flip' if FLAGS.flip else '') + '.npy')
          motion = np.squeeze(train_model.egomotion.eval(session=sess))
          # motion of shape (seq_length - 1, 6).
          motion = np.mean(motion, axis=0)  # Average egomotion across frames.

          if SAVE_PREVIEWS or step == FLAGS.num_steps:
            # Also save preview of depth map.
            color_map = util.normalize_depth_for_display(
                np.squeeze(pred[0, :, :]))
            visualization = np.concatenate(
                (input_img_prev, input_img_center, input_img_next, color_map))
            motion_s = [str(m) for m in motion]
            s_rep = ','.join(motion_s)
            with gfile.Open(img_pred_file.replace('.npy', '.txt'), 'w') as f:
              f.write(s_rep)
            util.save_image(
                img_pred_file.replace('.npy', '.%s' % FLAGS.file_extension),
                visualization, FLAGS.file_extension)

          with gfile.Open(img_pred_file, 'wb') as f:
            np.save(f, pred)

        # Apply heuristic to not finetune if egomotion magnitude is too low.
        # NOTE: motion is only refreshed on steps where step % SAVE_EVERY == 0,
        # so this relies on SAVE_EVERY dividing the step count.
        ego_magnitude = np.linalg.norm(motion[:3], ord=2)
        heuristic = ego_magnitude >= FLAGS.egomotion_threshold
        if not heuristic and step == FLAGS.num_steps:
          failed_heuristic.append(img_nr)

        step += 1
      img_nr += 1
  return failed_heuristic
Example no. 34

def log(s):
    print(s)
    sys.stdout.flush()

if __name__ == '__main__':
    log('Loading dataset ...')
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    real_x = mnist.train.images[:1]
    real_y = numpy.array([numpy.array([1.0]) for x in real_x])

    for i in range(1):
        image = util.array_to_image(real_x[i])
        util.save_image(image, 'out/real_%d.png' % i)

    log('Constructing D ...')
    #d = cnn.CNN()
    #d = evo_mlp.EvoMLP()
    d = pybrain_mlp.PyBrainMLP([784, 128, 1])

    log('Constructing G ...')
    def fitness_func(generator, genome):
        x = numpy.array([util.generate_image(genome).flatten() for _ in range(5)])
        y = [v[0] for v in d.forward(x)]
        return sum(y) / float(len(y))

    g = cppn.CPPN('mnist_config', fitness_func)

    log('Initializing G ...')
Example no. 35
from pybrain_mlp import PyBrainMLP
from tensorflow.examples.tutorials.mnist import input_data

import util

if __name__ == '__main__':
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    real = mnist.train.images[0]
    fake = [0.0 for _ in real]

    util.save_image(util.array_to_image(real), 'out/real.png')
    util.save_image(util.array_to_image(fake), 'out/fake.png')

    x = [real, fake]
    y = [[1.0], [0.0]]

    mlp = PyBrainMLP([784, 1])

    for i in range(10000):
        loss = mlp.train(x, y)

        if i % 100 == 0:
            print("%d: %f" % (i, loss))
            result = mlp.forward(x)
            print('Real: %s' % result[0])
            print('Fake: %s' % result[1])
            print('Fake: %s' % result[1])