def main():
    ckpt_filename = './ssd_300_kitti/ssd_model.ckpt'
    isess = tf.InteractiveSession()
    tensors = get_tenors(ckpt_filename, isess)
    # restore the SSD weights into the interactive session
    saver = tf.train.Saver()
    saver.restore(isess, ckpt_filename)

    # Load a sample image.
    # earlier path choices kept for reference; only the last assignment takes effect
    #path = 'test_images/'
    #path = '../kitti/voc_format/VOC2012/JPEGImages/'
    path = '../kitti/testing/image_2/'
    #path = "../PhotographicImageSynthesis/result_512p/final/"
    outpath = 'output_images/'
    image_names = sorted(os.listdir(path))
    for idx, name in enumerate(image_names):
        # file names in this directory start at 007481.png, so offset the loop index
        idx += 7481
        print("%06d.png" % idx)
        img = imread_as_jpg(path + "%06d.png" % idx)
        img = cv2.resize(img, (463, 150))

        img = process_image(img,
                            tensors,
                            isess,
                            select_threshold=0.8,
                            nms_threshold=0.5)
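These snippets come from a larger project and omit their imports. A minimal preamble that would make them runnable is sketched below; the module names are inferred from the calls in the code, and the project-specific helpers (get_tenors, imread_as_jpg, process_image) are assumptions based on how they are used here, not shown implementations.

import os
import re
import cv2
import dill
import numpy as np
import skimage.io
import tensorflow as tf
import matplotlib.image as mpimg

# assumed helper signatures, inferred from usage:
#   get_tenors(ckpt_filename, sess) -> (predictions, localisations, logits, end_points, img_input, ssd)
#   imread_as_jpg(path)             -> H x W x 3 image array
#   process_image(img, tensors, sess, select_threshold, nms_threshold) -> image annotated with detections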
Example n. 2
def main():
    # run on CPU only and silence TensorFlow's C++ logging
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    isess = tf.InteractiveSession()
    ckpt_filename = './ssd_300_kitti/ssd_model.ckpt'
    tensors = get_tenors(ckpt_filename, isess)
    predictions, localisations, logits, end_points, img_input, ssd = tensors

    rs = r".*\/conv[0-9]\/conv[0-9]_[0-9]/Relu$"
    rc = re.compile(rs)
    new_end_points = {}
    for op in tf.get_default_graph().as_graph_def().node:
        gr = rc.match(op.name)
        if gr:
            print(op.name)
            layer_name = op.name.split("/")[-2]
            new_end_points[layer_name] = tf.get_default_graph().get_tensor_by_name(op.name + ":0")
    """
    for n in new_end_points:
        print(n,new_end_points[n])
    """
    path = '../kitti/voc_format/VOC2012/JPEGImages/'
    outpath = 'output_images/'
    image_names = sorted(os.listdir(path))
    dimpkl = {}
    for name in image_names:
        img = imread_as_jpg(path + name)
        img = cv2.resize(img, (993 // 2, 300))
        print(img.shape, name)
        #img = process_image(img, tensors,isess, select_threshold=0.8, nms_threshold=0.5)
        img = process_image(img,
                            tensors,
                            isess,
                            select_threshold=0.8,
                            nms_threshold=0.5)
        #mpimg.imsave(outpath + name, img, format='jpg')
        for n in new_end_points:
            val = isess.run([new_end_points[n]], feed_dict={img_input: img})[0]
            print(n, val.shape[1:3])
            dimpkl[n] = val.shape[1:3]

        dill.dump(dimpkl, open("dim_300.pkl", "wb"))
        # the feature-map sizes are the same for every (resized) image, so execution
        # is deliberately halted after the first one; the imsave below never runs
        assert (False)
        mpimg.imsave(outpath + name, img, format='jpg')
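The pickle written above maps each convN_M layer name to its spatial feature-map size (the height/width slice of the activation shape). A minimal sketch of reading it back, assuming only dill and the dim_300.pkl file produced by this script:

import dill

with open("dim_300.pkl", "rb") as f:
    dims = dill.load(f)
for layer, (h, w) in dims.items():
    print(layer, h, w)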
Example n. 3
def main():
    nf = 256
    zdim = 1024
    filepath = "/workspace2/kitti/testing/image_2/007481.png"
    filepath = "test/test0_2.jpg"
    filepath = "test/test1_2.jpg"
    filepath = "test/noise3.jpg"
    filepath = "test/adversary.png"
    filepath = "/workspace2/kitti/training/image_2/000003.png"
    filepath = "/workspace2/kitti/testing/image_2/007597.png"
    filepath = "/workspace2/kitti/testing/image_2/007662.png"
    filepath = "/workspace2/kitti/testing/image_2/008001.png"
    filepath = "/workspace2/kitti/testing/image_2/007618.png"
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=config)
    images = tf.placeholder(tf.float32, [1, 150, 496, 3])
    # SSD detector; its pool5 feature map feeds the GAN below
    scdobj = scd.SCD(input=images)
    # the GAN maps the feature map to a latent vector and back (rec_x_out),
    # and the image synthesizer decodes the reconstructed features into an image
    rgan = eval_rgan.rgan(images=scdobj.end_points["pool5"],
                          z_dim=zdim,
                          nf=nf,
                          training=True)
    imgsynth_from_feature = eval_imgsynth.ImageSynthesizer(rgan.rec_x_out)

    scd_saver = scd.get_saver()
    scd_saver.restore(sess, "/workspace/imgsynth/ssd_300_kitti/ssd_model.ckpt")
    imgsynth_saver = eval_imgsynth.get_saver()
    imgsynth_saver.restore(
        sess, "/workspace/imgsynth/result_kitti256p_2/model.ckpt-89")
    rgan_saver = eval_rgan.get_saver()
    #rgan_saver.restore(sess,"models/model-99999")
    rgan_saver.restore(sess, "models_doubleres_constr_x_rzx/model-80999")

    image = cv2.resize(scd.imread_as_jpg(filepath), (496, 150))
    image = np.expand_dims(image, 0)
    reses = imgsynth_from_feature.generate_image_from_featuremap(
        sess, image, images)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/single_generated.jpg", a)
Example n. 4
def main():
    filepath = "/workspace2/kitti/testing/image_2/007481.png"
    filepath = "test/test0_2.jpg"
    filepath = "test/test1_2.jpg"
    filepath = "test/noise3.jpg"
    filepath = "test/adversary.png"
    filepath = "/workspace2/kitti/testing/image_2/007489.png"
    filepath = "test/single_generated.jpg"
    filepath = "test/valid_adv7.png"
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=config)
    images = tf.placeholder(tf.float32, [1, 150, 496, 3])
    scdobj = scd.SCD(input=images)
    scd_saver = scd.get_saver()
    scd_saver.restore(sess, "/workspace/imgsynth/ssd_300_kitti/ssd_model.ckpt")

    image = cv2.resize(scd.imread_as_jpg(filepath), (496, 150))
    image = np.expand_dims(image, 0)
    reses = scdobj.get_image(sess, image)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/single_detected%d.jpg" % i, a)
Example n. 5
def main():
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=config)

    # define tensors and models
    zb = 6            # number of latent samples / interpolation steps
    nf = 256
    zdim = 1024
    images = tf.placeholder(tf.float32, [2, 150, 496, 3])
    z = tf.placeholder(tf.float32, [zb, zdim])

    scdobj = scd.SCD(input=images)
    imgsynth = eval_imgsynth.ImageSynthesizer(scdobj.end_points["pool5"])

    rgan = eval_rgan.rgan(images=scdobj.end_points["pool5"],
                          z_dim=zdim,
                          nf=nf,
                          training=True)
    imgsynth_from_feature = eval_imgsynth.ImageSynthesizer(rgan.rec_x_out,
                                                           reuse=True)

    rgan_from_z = eval_rgan.rgan(latent_vec=z,
                                 nf=nf,
                                 z_dim=zdim,
                                 reuse=True,
                                 training=True)
    imgsynth_from_z = eval_imgsynth.ImageSynthesizer(rgan_from_z.rec_x_p_out,
                                                     reuse=True)

    #load weights
    scd_saver = scd.get_saver()
    scd_saver.restore(sess, "/workspace/imgsynth/ssd_300_kitti/ssd_model.ckpt")
    imgsynth_saver = eval_imgsynth.get_saver()
    imgsynth_saver.restore(
        sess, "/workspace/imgsynth/result_kitti256p_2/model.ckpt-89")
    rgan_saver = eval_rgan.get_saver()
    rgan_saver.restore(sess, "models/model-99999")

    co = []
    image = cv2.resize(
        scd.imread_as_jpg("/workspace2/kitti/testing/image_2/007482.png"),
        (496, 150))
    co.append(image)
    image = cv2.resize(
        scd.imread_as_jpg("/workspace2/kitti/testing/image_2/007481.png"),
        (496, 150))
    co.append(image)
    #reses = imgsynth.generate_image_from_featuremap(np.expand_dims(image,0),images)
    image = np.array(co)

    #simply get detection result
    reses = scdobj.get_image(sess, image)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/detected%d.jpg" % i, a)
    #model:image=>feature, image decoder:feature=>image_hat
    reses = imgsynth.generate_image_from_featuremap(sess, image, images)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/test%d.jpg" % i, a)

    #model:image=>feature, gan:feature=>latent vector=>feature_hat, image decoder:feature_hat=>image_hat
    reses = imgsynth_from_feature.generate_image_from_featuremap(
        sess, image, images)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/test%d_2.jpg" % i, a)

    #get detection result thru gan and image decoder
    reses = scdobj.get_image(sess, reses)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/gan_detected%d.jpg" % i, a)

    return 0
    # everything below is disabled by the early return above; remove the return to
    # run the latent-space sampling and interpolation experiments
    #latent vector=>feature_hat, image decoder:feature_hat=>image_hat
    np.random.seed(0)
    zs = []
    zsp = np.random.randn(zb, zdim) + 1
    zs = np.array(zsp)
    reses = imgsynth_from_z.generate_image_from_featuremap(sess, zs, z)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/noise%d.jpg" % i, a)

    #noise interpolation
    zsb = []
    for i in range(0, zb):
        #zs.append(zsp[0]*float(i)/(float(zb)-1.)+zsp[1]*(1.-float(i)/(float(zb)-1.)))
        zsb.append(zsp[i] * (1. - float(i) / float(zb - 1)))  # float casts guard against integer division
    zsb = np.array(zsb)
    reses = imgsynth_from_z.generate_image_from_featuremap(sess, zsb, z)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/noise_interpolate%d.jpg" % i, a)

    #model:images=>features, gan:features=>latent vectors=>features_hat, interpolate:features_hat=>features_interpolated, image decoder:features_interpolated=>image_hat
    zs = []
    zsp = rgan.generate_noise_from_featuremap(sess, image, images)
    for i in range(0, zb):
        zs.append(zsp[0] * float(i) / zb + zsp[1] * (1 - float(i) / zb))
    zs = np.array(zs)
    reses = imgsynth_from_z.generate_image_from_featuremap(sess, zs, z)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/interpolate%d.jpg" % i, a)

    zs = []
    zsp = rgan.generate_noise_from_featuremap(sess, image, images)
    for i in range(0, zb):
        zs.append(zsp[0] * float(i) / zb + zsp[1] * (1 - float(i) / zb) + 3)
    zs = np.array(zs)
    reses = imgsynth_from_z.generate_image_from_featuremap(sess, zs, z)
    for i, a in enumerate(reses):
        print(i)
        skimage.io.imsave("test/interpolate2%d.jpg" % i, a)
Example n. 6
    label_images = [None] * NUM_TRAINING_IMAGES
    for epoch in range(1, 201):
        if os.path.isdir("result_256p/%04d" % epoch):
            continue
        cnt = 0
        for ind in np.random.permutation(NUM_TRAINING_IMAGES - 25) + 1:  # shuffled 1-based training indices; the highest 25 are left out
            st = time.time()
            cnt += 1
            if input_images[ind] is None:
                #label_images[ind]=helper.get_semantic_map("data/cityscapes/Label256Full/%08d.png"%ind)#training label
                #input_images[ind]=np.expand_dims(np.float32(scipy.misc.imread("data/cityscapes/RGB256Full/%08d.png"%ind)),axis=0)#training image
                label_images[ind] = np.load("hmlabels/%06d.npz" % ind)["arr_0"]  # training label

                path = '../kitti/voc_format/VOC2012/JPEGImages/%06d.png' % ind
                img = imread_as_jpg(path)
                #img = cv2.resize(img, (993,300))
                img = cv2.resize(img, (512, 256))
                input_images[ind] = np.expand_dims(np.float32(img), axis=0)  # training image
            _, G_current, l0, l1, l2, l3, l4, l5 = sess.run(
                [G_opt, G_loss, p0, p1, p2, p3, p4, p5],
                feed_dict={
                    label:
                    np.concatenate(
                        (label_images[ind],
                         np.expand_dims(1 - np.sum(label_images[ind], axis=3),
                                        axis=3)),
                        axis=3),
                    real_image:
                    input_images[ind],
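The feed_dict above appends a background channel to the one-hot label map: 1 minus the channel-wise sum, so every pixel's channels sum to one. A minimal numpy sketch of that construction, with a hypothetical class count of 8:

import numpy as np

label_map = np.zeros((1, 256, 512, 8), dtype=np.float32)            # hypothetical one-hot label map
background = np.expand_dims(1 - np.sum(label_map, axis=3), axis=3)  # 1 wherever no class is set
full_label = np.concatenate((label_map, background), axis=3)        # shape (1, 256, 512, 9)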
Example n. 7
    parser.add_argument("--decay_factor",
                        type=float,
                        default=0.01,
                        help="exponential annealing decay rate [0.01]")
    parser.add_argument("--doubleres",
                        type=bool,
                        default=False,
                        help="exponential annealing decay rate [0.01]")
    parser.add_argument('--exdir', type=str, default='examples')
    parser.add_argument('--mddir', type=str, default='models')
    args = parser.parse_args()

    fixed_images = np.zeros([8, args.input_h, args.input_w, args.input_c])
    for i in range(8):
        fixed_images[i, :, :, :] = cv2.resize(
            scd.imread_as_jpg(
                os.path.join(args.dataset_test_path, "%06d.png" % (i + 7481))),
            (args.input_w, args.input_h))

    mnistWganInv = pxhdgan(x_dim=784,
                           z_dim=args.z_dim,
                           w=args.input_w,
                           h=args.input_h,
                           c=args.input_c,
                           latent_dim=args.latent_dim,
                           nf=256,
                           batch_size=args.batch_size,
                           c_gp_x=args.c_gp_x,
                           lamda=args.lamda,
                           output_path=args.output_path,
                           args=args)
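As noted above, argparse's type=bool converts any non-empty string to True, so passing --doubleres False on the command line would still enable the flag. A minimal sketch of the usual alternative:

import argparse

parser = argparse.ArgumentParser()
# store_true gives a real on/off switch: absent -> False, present -> True
parser.add_argument("--doubleres", action="store_true",
                    help="use the double-resolution model")
args = parser.parse_args(["--doubleres"])
print(args.doubleres)  # True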
Example n. 8
        return x_p

    def inv_fn(x):
        # invert an image batch to its latent vector via the rgan encoder
        z_p = rgan.generate_noise_from_featuremap(sess, x, images)
        return z_p

    if args.iterative:
        search = iterative_search
    else:
        search = recursive_search

    _, _, test_data = tflib.mnist.load_data()

    i = 0
    co = []
    image = cv2.resize(scd.imread_as_jpg(path), (496, 150))
    co.append(image)
    image = np.array(co)
    x = image
    y = 1
    adversary = search(gen_fn,
                       inv_fn,
                       cla_fn,
                       x,
                       y,
                       y_t=y_t,
                       h=upl,
                       nsamples=args.nsamples,
                       step=args.step,
                       verbose=args.verbose)
    if args.iterative: