Example No. 1
def main(_):
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels))
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model.loader(sess)
        gal_0 = Gallery('gallery_0', ext='png')
        gal_1 = Gallery('gallery_1', ext='png')
        gal_2 = Gallery('gallery_2', ext='png')
        for img in glob(os.path.join(FLAGS.image, "*/*.jpg")):
            filename = img.split("/")[-1]
            image = cv2.imread(img, cv2.IMREAD_COLOR)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            probs = sess.run(model.probs, feed_dict={X: batch})
            cls = np.argmax(probs[0])
            if cls == 0:
                cv2.imwrite(gal_0.next(filename=filename), image)
            if cls == 1:
                cv2.imwrite(gal_1.next(filename=filename), image)
            if cls == 2:
                cv2.imwrite(gal_2.next(filename=filename), image)
            '''
            if cls == 1:
                cv2.imwrite('gallery_1/'+filename,image)
                gal_1.next(filename=filename)
            if cls == 2:
                cv2.imwrite('gallery_2/'+filename,image)
                gal_2.next(filename=filename)
            '''
        gal_0.flush()
        gal_1.flush()
        gal_2.flush()
Example No. 2
    def __init__(self, controller):
        self.controller = controller  # function to call

        self.root = Tk()  # create main window called root
        self.root.state('zoomed')  # have root be full screen
        self.root.title('Picasa')  # name the display
        self.root.protocol(
            'WM_DELETE_WINDOW',
            lambda: self.controller('stop'))  # Trap close window

        self.menu = PicasaMenu(self.root, controller)  # create main menu
        self.root.config(menu=self.menu)  # add menu to window

        self.TAB = PanedWindow(self.root, orient=VERTICAL)  # Top And Bottom
        self.TAB.grid(sticky='nsew')
        self.LAR = PanedWindow(self.root, orient=HORIZONTAL)  # Left And Right
        self.LAR.grid(sticky='nsew')
        self.TAB.add(self.LAR)

        self.tree = Tree(self.root, controller)  # create tree
        self.LAR.add(self.tree)

        self.gallery = Gallery(self.root, controller)  # create all pics
        self.LAR.add(self.gallery)

        self.status = Status(self.TAB, controller)  # create status text
        self.TAB.add(self.status)

        self.TAB.config(sashrelief='raised')  # make sash visible
        self.LAR.config(sashrelief='raised')

        self.root.grid_columnconfigure(0, weight=1)  # make all resizeable
        self.root.grid_rowconfigure(0, weight=1)

        self.TAB.sash_place(0, 1, 1000)
Example No. 3
def write_dicom_volume_html_flip(volume, path, title):
    gal = Gallery('pethtmlview/' + path, score=False, title=title)
    for i in range(volume.shape[0]):
        transposedImage = cv2.transpose(volume[i])
        flippedImage = cv2.flip(transposedImage, 1)
        cv2.imwrite(gal.next(), flippedImage)
        pass
    gal.flush()
Example No. 4
def main(_):
    assert FLAGS.out
    assert FLAGS.db and os.path.exists(FLAGS.db)

    picpac_config = dict(
        seed=2016,
        #loop=True,
        shuffle=True,
        reshuffle=True,
        #resize_width=256,
        #resize_height=256,
        round_div=FLAGS.stride,
        batch=1,
        split=1,
        split_fold=0,
        annotate='json',
        channels=FLAGS.channels,
        stratify=True,
        pert_color1=20,
        pert_angle=20,
        pert_min_scale=0.8,
        pert_max_scale=1.2,
        #pad=False,
        pert_hflip=True,
        pert_vflip=True,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )

    stream = picpac.ImageStream(FLAGS.db,
                                perturb=False,
                                loop=False,
                                **picpac_config)

    gal = Gallery(FLAGS.out)
    cc = 0
    with Model(FLAGS.model, name=FLAGS.name, prob=True) as model:
        for images, _, _ in stream:
            #images *= 600.0/1500
            #images -= 800
            #images *= 3000 /(2000-800)
            _, H, W, _ = images.shape
            if FLAGS.max_size:
                if max(H, W) > FLAGS.max_size:
                    continue
            if FLAGS.patch:
                stch = Stitcher(images, FLAGS.patch)
                probs = stch.stitch(model.apply(stch.split()))
            else:
                probs = model.apply(images)
            cc += 1
            save(gal.next(), images, probs)
            if FLAGS.max and cc >= FLAGS.max:
                break
    gal.flush()
    pass
Example No. 5
def main(_):
    X = tf.placeholder(tf.float32, shape=(None, None, None, 3), name="images")
    is_training = tf.placeholder(tf.bool, name="is_training")
    anchor_th = tf.constant(FLAGS.anchor_th, tf.float32)
    nms_max = tf.constant(FLAGS.nms_max, tf.int32)
    nms_th = tf.constant(FLAGS.nms_th, tf.float32)
    model = Model(X, anchor_th, nms_max, nms_th, is_training, FLAGS.model,
                  'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model.loader(sess)
        if FLAGS.input:
            assert os.path.exists(FLAGS.input)
            image = cv2.imread(FLAGS.input, cv2.IMREAD_COLOR)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            boxes, probs = sess.run([model.boxes, model.probs],
                                    feed_dict={
                                        X: batch,
                                        is_training: False
                                    })
            save_prediction_image(FLAGS.input + '.prob.png', image, boxes,
                                  probs)
        if FLAGS.input_db:
            assert os.path.exists(FLAGS.input_db)
            import picpac
            from gallery import Gallery
            picpac_config = {
                "db": FLAGS.input_db,
                "loop": False,
                "shuffle": False,
                "reshuffle": False,
                "annotate": False,
                "channels": 3,
                "stratify": False,
                "dtype": "float32",
                "batch": 1,
                "transforms": []
            }
            stream = picpac.ImageStream(picpac_config)
            gal = Gallery(FLAGS.input_db + '.out')
            C = 0
            for _, images in stream:
                boxes, probs = sess.run([model.boxes, model.probs],
                                        feed_dict={
                                            X: images,
                                            is_training: False
                                        })
                save_prediction_image(gal.next(), images[0], boxes, probs)
                C += 1
                if FLAGS.max and C >= FLAGS.max:
                    break
                pass
            pass
            gal.flush()
    pass
Example No. 6
def write_dicom_volume_html_resize_twice(volume, path, title):
    gal = Gallery('pethtmlview/' + path, score=False, title=title)
    for i in range(volume.shape[0]):
        resizedImage = cv2.resize(volume[i], (0, 0),
                                  fx=2.0,
                                  fy=2.0,
                                  interpolation=cv2.INTER_NEAREST)
        cv2.imwrite(gal.next(), resizedImage)
        pass
    gal.flush()
Example No. 7
def write_dicom_volume_html_flip_resize(volume, path, title):
    gal = Gallery('pethtmlview/' + path, score=False, title=title)
    for i in range(volume.shape[0]):
        transposedImage = cv2.transpose(volume[i])
        flippedImage = cv2.flip(transposedImage, 1)
        resizedImage = cv2.resize(flippedImage, (0, 0),
                                  fx=0.5,
                                  fy=0.5,
                                  interpolation=cv2.INTER_NEAREST)
        cv2.imwrite(gal.next(), resizedImage)
        pass
    gal.flush()
Example No. 8
def main(_):
    assert FLAGS.out
    assert FLAGS.db and os.path.exists(FLAGS.db)

    picpac_config = dict(
        seed=2016,
        #loop=True,
        shuffle=True,
        reshuffle=True,
        max_size=400,
        #resize_width=256,
        #resize_height=256,
        round_div=FLAGS.stride,
        batch=1,
        split=1,
        split_fold=0,
        annotate='json',
        channels=FLAGS.channels,
        stratify=True,
        #pad=False,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )

    stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config)


    gal = Gallery(FLAGS.out, score=True)
    cc = 0
    with Model(FLAGS.model, prob=True) as model:
        for images, _, _ in stream:
            #images *= 600.0/1500
            #images -= 800
            #images *= 3000 /(2000-800)
            _, H, W, _ = images.shape
            if FLAGS.max_size:
                if max(H, W) > FLAGS.max_size:
                    continue
            print(images.shape)
            # fcn-cls do not have patch
            # if FLAGS.patch:
                
            #     stch = Stitcher(images, FLAGS.patch)
            #     probs = stch.stitch(model.apply(stch.split()))
            # else:
            #     probs = model.apply(images)
            probs, scores = model.apply(images)
            cc += 1
            save(gal.next(score=scores[0]), images, probs)
            if FLAGS.max and cc >= FLAGS.max:
                break
    gal.flush(rank=True)
    pass
Example No. 9
def main(_):
    assert FLAGS.db and os.path.exists(FLAGS.db)
    assert FLAGS.model and os.path.exists(FLAGS.model + '.meta')

    L = tf.placeholder(tf.float32, shape=(None, None, None, 1))

    mg = meta_graph.read_meta_graph_file(FLAGS.model + '.meta')
    logits, = tf.import_graph_def(mg.graph_def, name='colorize',
                        #input_map={'L:0':L},
                        input_map={'fifo_queue_Dequeue:0':L},
                        return_elements=['logits:0'])
    prob = tf.nn.softmax(logits)
    saver = tf.train.Saver(saver_def=mg.saver_def, name='colorize')

    picpac_config = dict(
        seed=2016,
        cache=False,
        max_size=200,
        min_size=192,
        crop_width=192,
        crop_height=192,
        shuffle=True,
        reshuffle=True,
        batch=1,
        round_div=FLAGS.stride,
        channels=3,
        stratify=False,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )

    stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.model)
        gallery = Gallery(FLAGS.output, cols=2, header=['groundtruth', 'prediction'])
        c = 0
        for images, _, _ in stream:
            if FLAGS.max and (c >= FLAGS.max):
                break
            l, ab, w = _pic2pic.encode_lab(images.copy(), FLAGS.downsize)
            ab_p, = sess.run([prob], feed_dict={L: l})
            y_p = decode_lab(l, ab_p, T=FLAGS.T)
            cv2.imwrite(gallery.next(), images[0])
            cv2.imwrite(gallery.next(), y_p[0])
            c += 1
            print('%d/%d' % (c, FLAGS.max))
            pass
        gallery.flush()
        pass
    pass
Example No. 10
    def build(self):
        manager = ScreenManager()

        manager.add_widget(Login(name='login'))
        manager.add_widget(Incorrect(name='incorrect'))
        manager.add_widget(Gallery(name='gallery'))

        manager.add_widget(Info(name='porsche'))
        manager.add_widget(Info(name='lotus'))
        manager.add_widget(Info(name='chevrolet'))
        manager.add_widget(Info(name='alfaromeo'))

        manager.add_widget(Stopwatch(name='stopwatch'))
        manager.add_widget(Calculator(name='calculator'))

        return manager
Example No. 11
def main(_):
    X = tf.placeholder(tf.float32, shape=(None, None, None, 1), name="images")
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.loader(sess)
        # The model is fully loaded at this point.
        # Note: to run prediction on an image read from disk, apply the
        # following preprocessing first:
        # 1. Convert it to grayscale, e.g. cv2.imread(..., cv2.IMREAD_GRAYSCALE)
        # 2. Resize it to a fixed size matching the parameters in train.sh
        # 3. If the grayscale image is `image`, then
        #    batch = image[np.newaxis, :, :, np.newaxis] can be fed to the tf.Session
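        # A minimal sketch of that preprocessing (sample.png is a hypothetical
        # input file; FLAGS.width/FLAGS.height are assumed to match train.sh):
        #
        #     image = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)
        #     image = cv2.resize(image, (FLAGS.width, FLAGS.height))
        #     batch = image[np.newaxis, :, :, np.newaxis].astype(np.float32)
        #     logits = sess.run(model.logits, feed_dict={X: batch})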

        gal = Gallery('output', cols=2, ext='.jpg')
        CC = 0
        stream = picpac.ImageStream({
            'db': FLAGS.db,
            'loop': False,
            'channels': 1,
            'threads': 1,
            'shuffle': False,
            'transforms': [{
                "type": "resize",
                "width": FLAGS.width,
                "height": FLAGS.height
            }]
        })
        for meta, batch in stream:
            if CC > FLAGS.max:
                break
            print(meta.ids)
            image = batch[0]
            logits = sess.run(model.logits, feed_dict={X: batch})
            # 把logits转换成字符串
            label = logits2text(logits)
            '''END INFERENCE'''
            save_prediction_image(gal, image, label)
            CC += 1
        gal.flush()
    pass
Example No. 12
    def on_btn_runGallery(self):
        title = self.pte_galleryTitle.toPlainText()
        rows = []
        for i in range(self.tbl_gallery.rowCount()):
            new = []
            for j in range(4):
                if self.tbl_gallery.item(i, j) is not None:
                    new.append(int(self.tbl_gallery.item(i, j).text()))
                else:
                    new.append('')
            rows.append(new)
        print(rows)
        gl = Gallery(title, rows, self.time_ms, self.intensity,
                     self.currentFilePath, mm)
        gl.run_field()
        #gl.show_slides()
        gl.run()
Example No. 13
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'

        # GET USER KEY
        user = users.get_current_user()
        myuser_key = ndb.Key('MyUser', user.user_id())
        user_info = MyUser.get_by_id(myuser_key.id())

        action = self.request.get('button')

        # CREATE New Gallery
        if action == 'Create New Gallery':
            # GET NEW GALLERY NAME
            gallery_name = self.request.get('gallery_name')

            # CREATE NEW GALLERY
            new_gallery = Gallery(creator=myuser_key, gallery_name=gallery_name)
            new_gallery.put()

            # We also have to pass the details of this gallery to the MyUser datastore
            associate_gallery_to_user = MyUser.get_by_id(myuser_key.id())
            associate_gallery_to_user.gallery_key.append(new_gallery.key)
            associate_gallery_to_user.put()
            self.redirect('/dashboard')

        elif action == 'Edit Gallery':
            # GET CURRENT GALLERY DETAILS
            gallery_key = int(self.request.get('gallery_key'))
            current_gallery = Gallery.get_by_id(gallery_key)

            # GET NEW GALLERY NAME
            new_gallery_name = self.request.get('edit_gallery_name')

            current_gallery.gallery_name = new_gallery_name
            current_gallery.put()
            self.redirect('/')

        elif action == 'Yes':
            # DELETE GALLERY
            gall_id = int(self.request.get('delete_gallery_id'))
            current_gallery = Gallery.get_by_id(gall_id)

            user_info.gallery_key.remove(current_gallery.key)
            user_info.put()
            current_gallery.key.delete()
            self.redirect('/')
Example No. 14
def main(_):
    X = tf.placeholder(tf.float32,
                       shape=(None, None, None, FLAGS.channels),
                       name="images")
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    mold = Scaling(stride=FLAGS.stride)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.loader(sess)

        gal = Gallery('output', ext='.png')
        CC = 0
        if FLAGS.list:
            with open(FLAGS.list, 'r') as f:
                for line in f:
                    if CC > FLAGS.max:
                        break
                    path, label = line.strip().split(',')
                    label = int(label)

                    print(path)
                    if FLAGS.channels == 3:
                        image = cv2.imread(path, cv2.IMREAD_COLOR)
                    elif FLAGS.channels == 1:
                        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                    else:
                        assert False

                    image = cv2.resize(image, (FLAGS.resize, FLAGS.resize))

                    probs, heatmap = sess.run(
                        [model.probs, model.heatmap],
                        feed_dict={X: mold.batch_image(image)})
                    probs = probs[0]
                    heatmap = mold.unbatch_prob(image, heatmap)
                    '''END INFERENCE'''

                    save_prediction_image(gal, image, label, probs, heatmap)
                    CC += 1
        gal.flush()
    pass
Example No. 15
def main(_):
    cc = 0
    with Model(FLAGS.model, name=FLAGS.name) as model:
        for out, inp in DS:
            gal = Gallery(out, ext='.gif')
            for root, dirs, files in os.walk(inp, topdown=False):
                for path in files:
                    image = cv2.imread(os.path.join(root, path),
                                       cv2.IMREAD_GRAYSCALE)
                    image = image.astype(np.float32)
                    image = cv2.resize(image, None, fx=FLAGS.rx, fy=FLAGS.rx)

                    prob = model.apply(
                        np.expand_dims(np.expand_dims(image, axis=0),
                                       axis=3))[0]
                    visualize(gal.next(), image, prob)
            gal.flush()
    pass
Example No. 16
    def __init__(self, status_update_func, screen):
        config = Config('space_window.conf', __file__)
        self._start_with = config.get('global', 'start_with', 'streams')
        self._current_stream = None
        self._resume_stream = None
        self._check_timer_delay = 20
        self._check_timer = None
        self._wait = False
        self._streams = Streams.load()
        self._nasa = NasaPod(screen)
        self._gallery = Gallery(status_update_func, screen)
        self._music = Music(status_update_func)
        self._clock = Clock(screen)
        self._mopidy = None
        self._status_update = status_update_func
        if not _standalone:
            threading.Thread(target=self.launch_mopidy).start()
        self._streams.set_status_func(status_update_func)
        self._resume_func = self.run_something
Example No. 17
def main(_):
    picpac_config = dict(
        seed=2016,
        shuffle=True,
        batch=1,
        annotate='json',
        channels=1,
        perturb=False,
        loop=False,
        stratify=True,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )
    tr_stream = picpac.ImageStream(FLAGS.db, **picpac_config)
    gal = Gallery('sample', ext='.gif')
    cc = 0
    for image, label, _ in tr_stream:
        cc += 1
        visualize(gal.next(), image[0], label[0, :, :, 0])
        if cc >= FLAGS.classes - 1:
            break
        pass
    gal.flush()
Example No. 18
def show_gallery(path):
    global g
    user = g.user
    if user is None and not cfg()["public_access"]:
        return render("gallery.html",
                      authn_error="only logged in users may view this page")
    path = path.encode("utf-8")
    check_jailed_path(path, "data")
    groups = get_access_groups(path)
    if not cfg()["public_access"]:
        if not is_admin_mode(user) and not access_permitted(
                groups, user["groups"]):
            return render("gallery.html", authn_error=True)

    gallery = Gallery(
        path, follow_freedesktop_standard=cfg()["follow_freedesktop_standard"])
    gallery.populate()
    if cfg()["public_access"] or is_admin_mode(user):
        galleries = gallery.get_galleries()
    else:
        galleries = [
            gal for gal in gallery.get_galleries()
            if access_permitted(get_access_groups(gal["key"]), user["groups"])
        ]
    groups_error = None
    if request.method == 'POST':
        action = request.form["action"]
        if action == "save":
            if not is_admin_mode(user):
                return render("gallery.html", authn_error=True)
            groups = request.form["groups_string"].split()
            try:
                set_access_groups(path, groups)
            except IOError as ioe:
                groups_error = "%s" % ioe
        else:
            raise Exception("Unknown gallery editing action: \"%s\"" % action)
Example No. 19
import thumbnail
from gallery import Gallery

gallery = Gallery()

hooks = {
    'site.start': [thumbnail.create_thumbnails],
    'page.meta.post': [gallery.get_images],
    'page.template.pre': [gallery.get_albums, gallery.set_images],
}
Example No. 20
def main(_):
    X = tf.placeholder(tf.float32,
                       shape=(None, None, None, FLAGS.channels),
                       name="images")
    is_training = tf.placeholder(tf.bool, name="is_training")
    model = Model(X, is_training, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model.loader(sess)
        if FLAGS.input:
            assert False
            '''
            assert os.path.exists(FLAGS.input)
            image = cv2.imread(FLAGS.input, cv2.IMREAD_COLOR)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            boxes, probs = sess.run([model.boxes, model.probs], feed_dict={X: batch, is_training: False})
            save_prediction_image(FLAGS.input + '.prob.png', image, boxes, probs)
            '''
        if FLAGS.input_db:
            assert os.path.exists(FLAGS.input_db)
            from gallery import Gallery
            picpac_config = {
                "db": FLAGS.input_db,
                "loop": False,
                "shuffle": False,
                "reshuffle": False,
                "channels": FLAGS.channels,
                "colorspace": "RGB",
                "stratify": False,
                "dtype": "float32",
                "batch": 1,
                "annotate": [1],
                "transforms": [
                    {
                        "type": "resize",
                        "max_size": FLAGS.max_size
                    },
                    {
                        "type": "clip",
                        "round": FLAGS.backbone_stride
                    },
                    {
                        "type": "keypoints.basic",
                        'downsize': 1,
                        'classes': 1,
                        'radius': 25
                    },
                    {
                        "type": "drop"
                    },  # remove original annotation 
                ]
            }
            stream = picpac.ImageStream(picpac_config)
            gal = Gallery('out')
            C = 0
            for meta, images, _, label, _ in stream:
                shape = list(images.shape)
                shape[1] //= FLAGS.stride
                shape[2] //= FLAGS.stride
                shape[3] = 1
                prob, offsets = sess.run([model.prob, model.offsets],
                                         feed_dict={
                                             X: images,
                                             is_training: False
                                         })
                kp = cpp.predict_basic_keypoints(prob[0], offsets[0],
                                                 FLAGS.stride, 0.1)
                print(images.shape, prob.shape, offsets.shape, kp)
                save_prediction_image(gal.next(), images[0], kp,
                                      label[0, :, :, 0], prob[0, :, :, 0])
                C += 1
                if FLAGS.max and C >= FLAGS.max:
                    break
                pass
            pass
            gal.flush()
    pass
Example No. 21
def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])

    this_now = datetime.datetime.now()

    #today = date.today() #today.year, today.month, today.day

    pages = [
        "crafts-gallery", "private-events", "about-us", "media", "reviews",
        "custom-built", "after-school", "summer-camp", "art-camp",
        "3-wednesdays-workshop", "listening-room", "wheel-wars"
    ]

    #galleries_dict = {"acrylic-painting": 1, "watercolor-painting": 2, "paint-your-pet": 3, "fused-glass": 4, "resin-crafts": 5, "fluid-art": 6, "commissioned-art": 8, "alcohol-ink": 9, "artist-guided-family-painting": 10, "handbuilt-pottery": 11, "leathercraft": 12, "water-marbling": 13, "pottery-painting": 18, "string-art": 19, "pottery-lessons": 20}

    #mysql -u catalystcreative_cca catalystcreative_66
    #SELECT lower(name), id FROM piwigz_categories order by id asc;
    galleries_dict = {
        "acrylic-painting": 1,
        "watercolor-painting": 2,
        "paint-your-pet": 3,
        "fused-glass": 4,
        "resin-crafts": 5,  # was "resin-art"
        "fluid-art": 6,
        "summer-art-camp": 7,
        "commissioned-art": 8,
        #"alcohol-ink": 9,
        "artist-guided-family-painting": 10,
        "handbuilt-pottery": 11,
        "leathercraft": 12,
        "water-marbling": 13,
        #"private-events": 14,
        #"custom-counter-tops-tables": 15,
        #"about-us": 16,
        "pottery-painting": 18,
        "string-art": 19,
        "pottery-lessons": 20
    }

    galleries_list = list(galleries_dict.keys())
    galleries_dict_vals = list(galleries_dict.values())

    db = MySQLdb.connect(host="localhost",
                         user=dbuser,
                         passwd=passwd,
                         db="catalystcreative_arts")

    ####
    ####
    if environ['REQUEST_METHOD'] == "GET":

        if environ['PATH_INFO'] == '/admin/events/list':
            c = db.cursor()
            c.execute(
                f"SELECT * FROM events WHERE edatetime >= CURDATE() ORDER BY edatetime"
            )
            allrows = c.fetchall()
            c.close()
            template = env.get_template("admin-events-list.html")
            response = template.render(allrows=allrows)

        elif environ['PATH_INFO'] == '/admin/orders/list':
            db.query(
                f"select *, ceiling(ship_date / 10000000000000) as ship_group from cart_order where status = 'complete' order by ship_group, ship_date desc, checkout_date desc"
            )
            #db.query(f"select * from cart_order where status = 'complete' order by ship_date desc, checkout_date desc") # and ship_date is NULL
            r = db.store_result()
            allrows = r.fetch_row(maxrows=100, how=1)

            new_allrows = []

            overall_total = 0
            per_month_totals = {}

            for row in allrows:
                cart_order_id = row["cart_order_id"]
                #db.query(f"select product_id, quantity from cart_order_product where cart_order_id = {cart_order_id}")

                try:
                    content = read_file(
                        f"../cart-api/details/{cart_order_id}.json")
                    details = json.loads(content)
                    email = details["payer"]["email_address"]
                    address = details["purchase_units"][0]["shipping"][
                        "address"]
                    given_name = details["payer"]["name"]["given_name"]
                    surname = details["payer"]["name"]["surname"]
                    name = f"{given_name} {surname}"
                    try:
                        phone = details["payer"]["phone"]["phone_number"][
                            "national_number"]
                    except:
                        phone = ""
                except:
                    pass

                db.query(
                    f"select a.product_id, a.quantity, b.name, b.inventory \
                    from cart_order_product a, products b \
                    where a.product_id = b.pid and cart_order_id = {cart_order_id}"
                )
                r = db.store_result()
                sub_allrows = r.fetch_row(maxrows=100, how=1)
                row["products"] = sub_allrows

                try:
                    row["email"] = email
                    row["address"] = address
                    row["name"] = name
                    row["phone"] = phone
                except:
                    pass

                new_allrows.append(row)

                overall_total += int(row["total"])

                checkout_month = str(row["checkout_date"]).split("-")[1]

                try:
                    per_month_totals[checkout_month] += int(row["total"])
                except:
                    per_month_totals[checkout_month] = int(row["total"])

            template = env.get_template("admin-orders-list.html")
            response = template.render(allrows=new_allrows,
                                       overall_total=overall_total,
                                       per_month_totals=per_month_totals)

        elif environ['PATH_INFO'] == '/admin/events/add-edit':
            if len(environ['QUERY_STRING']) > 1:
                eid = environ['QUERY_STRING'].split("=")[1]
                db.query(f"SELECT * FROM events WHERE eid = {eid}")
                r = db.store_result()
                row = r.fetch_row(maxrows=1, how=1)[0]
                form = EventsForm(**row)

                c = db.cursor()
                c.execute(
                    f"SELECT * FROM events WHERE tags like '%series={eid}%'")
                children = c.fetchall()
                c.close()
            else:
                form = EventsForm()
                children = None
            template = env.get_template("admin-events-add-edit.html")
            response = template.render(form=form, children=children)

        elif environ['PATH_INFO'] == "/admin/events/delete":
            eid = int(environ['QUERY_STRING'].split("=")[1])
            if type(eid) == int:

                # FIRST UPDATE THE HTML PAGE !!!!
                db.query(f"SELECT * FROM events WHERE eid = {eid}")
                e = db.store_result()
                event = e.fetch_row(maxrows=1, how=1)[0]
                event["quantity_sum"] = 0
                event["remaining_spots"] = 0
                template = env.get_template("event.html")
                content = template.render(event=event, deleted=True)
                write_file(f"../www/event/{eid}.html", content)

                sql = f"DELETE FROM events WHERE eid = {eid}"
                c = db.cursor()
                c.execute(sql)
                c.close()

                response = '<meta http-equiv="refresh" content="0; url=/app/admin/events/list" />'
            else:
                response = ""

        #### ####
        #### ####
        elif environ['PATH_INFO'] == '/build-individual-event':
            if environ['QUERY_STRING']:
                eid = int(environ['QUERY_STRING'].split("=")[1])
                db.query(f"SELECT * FROM events WHERE eid = {eid}")
            else:
                db.query("SELECT * FROM events WHERE edatetime >= CURTIME() \
                    and (tags <> 'invisible' or tags is null) ORDER BY edatetime"
                         )
            e = db.store_result()
            allrows = e.fetch_row(maxrows=100, how=1)

            template = env.get_template("event.html")
            upcoming_event_ids = []
            for event in allrows:
                eid = event["eid"]
                elimit = event["elimit"]

                db.query(
                    f"select sum(quantity) as quantity_sum from orders where eid = {eid}"
                )
                q = db.store_result()
                quantity_sum = q.fetch_row(maxrows=1, how=1)[0]["quantity_sum"]

                try:
                    event["quantity_sum"] = int(quantity_sum)
                    event["remaining_spots"] = int(elimit) - int(quantity_sum)
                except:
                    event["quantity_sum"] = 0
                    event["remaining_spots"] = int(elimit)

                upcoming_event_ids.append(eid)
                content = template.render(event=event)
                write_file(f"../www/event/{eid}.html", content)
            write_file(f"data/upcoming_event_ids.json",
                       json.dumps(upcoming_event_ids, indent=4))
            response = "build-individual-event"
        #### ####
        #### ####

        elif environ['PATH_INFO'] == '/list/events' or environ[
                'PATH_INFO'] == '/calendar':

            db.query(
                "SELECT * FROM events WHERE edatetime >= CURTIME() and (tags <> 'invisible' or tags is null) ORDER BY edatetime"
            )
            r = db.store_result()
            allrows = r.fetch_row(maxrows=100, how=1)

            db.query(
                "SELECT eid, SUM(quantity) as sum_quantity FROM orders GROUP BY eid"
            )
            # TODO: May need to add join to events table above
            # so as to only pull future event dates
            r = db.store_result()
            orders_count = r.fetch_row(maxrows=100, how=1)

            orders_count_object = {}
            for item in orders_count:
                key = int(item['eid'])
                val = int(item['sum_quantity'])
                orders_count_object[key] = val

            events_object = {}
            parent = {}
            for row in allrows:
                eid = row["eid"]
                events_object[eid] = {}
                events_object[eid]["date"] = int(row["edatetime"].timestamp())
                events_object[eid]["title"] = row["title"]
                events_object[eid]["price"] = row["price"]
                events_object[eid]["price_text"] = row["price_text"]

                if "series" in row["tags"]:
                    parent[eid] = re.sub('^.*series=(\d+).*$', r'\1',
                                         row["tags"])
                else:
                    parent[eid] = ""

            events_object = json.dumps(events_object)

            try:
                test = environ['QUERY_STRING'].split("=")[1]
            except:
                test = ""

            template = env.get_template("list-events.html")
            response = template.render(events=allrows,
                                       orders_count=orders_count_object,
                                       events_object=events_object,
                                       parent=parent,
                                       test=test)

        elif environ['PATH_INFO'] == '/cart':
            template = env.get_template("cart-list.html")
            response = template.render()

        elif environ['PATH_INFO'] == '/products':
            db.query("SELECT * FROM products WHERE active = 1")
            r = db.store_result()
            allrows = r.fetch_row(maxrows=100, how=1)
            template = env.get_template("list-products.html")
            response = template.render(products=allrows)

        elif re.match('/products/[a-z-]+/[0-9]+', environ['PATH_INFO']):
            path_parts = environ['PATH_INFO'].split('/')
            product_name = path_parts[2]
            product_id = path_parts[3]
            pid = int(product_id)
            db.query(f"SELECT * FROM products WHERE pid = {pid}")
            r = db.store_result()
            row = r.fetch_row(maxrows=1, how=1)[0]
            template = env.get_template("product-detail.html")
            response = template.render(row=row)

        elif environ['PATH_INFO'] == '/book/event':
            eid = environ['QUERY_STRING'].split("=")[1]
            db.query(f"SELECT * FROM events WHERE eid = {eid}")
            r = db.store_result()
            row = r.fetch_row(maxrows=1, how=1)[0]

            db.query(f"SELECT count(id) as cnt FROM orders WHERE eid = {eid}")
            o = db.store_result()
            order_count = o.fetch_row(maxrows=1, how=1)[0]["cnt"]

            template = env.get_template("book-event.html")
            response = template.render(event_data=row, order_count=order_count)

        elif environ['PATH_INFO'] == '/gallery/slideshow' or environ[
                'PATH_INFO'].lstrip('/') in galleries_list:

            path_info = environ['PATH_INFO'].lstrip('/')
            if environ['PATH_INFO'] == '/gallery/slideshow':
                gid = int(environ['QUERY_STRING'].split("=")[1])
            else:
                gid = int(galleries_dict[path_info])

            try:
                g = Gallery(gid)
                gallery = g.get_gallery()
                images = g.get_images()

                template = env.get_template("gallery-slideshow.html")
                response = template.render(gallery=gallery, images=images)
            except:
                template = env.get_template("main.html")
                response = template.render(
                    path_info=f"{path_info} gallery does not yet exist")

        ####
        elif environ['PATH_INFO'] == '/admin/booking/list':

            view = ""
            gtlt = ">="  # default
            ascdesc = "asc"

            if environ['QUERY_STRING'] and "view" in environ['QUERY_STRING']:
                view = environ['QUERY_STRING'].split("=")[1]

                if view == "past-events":
                    gtlt = "<"
                    ascdesc = "desc"

            # TODO: This first part should be call-able separately
            # And should be called about once per minute

            # OR EVEN BETTER:
            # Maybe this chunk should be moved to the
            # /paypal-transaction-complete section

            # List, sort, then read all files in the orders/ folder
            files = [
                f for f in listdir("orders/") if isfile(join("orders/", f))
            ]

            if len(files) > 0 and view != "past-events":
                # PART-1: Load new orders into database:
                # Reminder: Orders data files are saved as event_eid_value.json
                for f in files:
                    eid = f.replace(".json", "").strip()
                    event_orders_data = json.loads(read_file(f"orders/{f}"))

                    for cca_order_id, cca_order in event_orders_data.items():

                        try:
                            order = cca_order["paypal"]

                            #print("____cca_order_id", cca_order_id)
                            #print("____order", order)
                            data_array = []
                            # We don't necessarily want all data from orders/event_eid_value.json
                            # So let's pick and choose what data we want to keep:

                            data_array.append(order['orderID'])

                            data_array.append(eid)
                            data_array.append(order['details']['create_time'])
                            data_array.append(
                                order['details']['payer']['email_address'])
                            data_array.append(order['details']['payer']['name']
                                              ['given_name'])
                            data_array.append(
                                order['details']['payer']['name']['surname'])
                            data_array.append(order['quantity'])
                            # Notice zero after purchase_units:
                            data_array.append(
                                order['details']['purchase_units'][0]['amount']
                                ['value'])
                            data_array.append(
                                order['details']['purchase_units'][0]
                                ['payments']['captures'][0]['amount']['value'])

                            # FOR VARIABLE_TIME FIELD
                            try:
                                data_array.append(order['variable_time_slot'])
                            except:
                                data_array.append('no variable time slot')

                            # FOR EXTRA_DATA FIELD
                            try:
                                total_number_scarf = json.dumps({
                                    "total_number_scarf":
                                    int(order['total_number_scarf'])
                                })
                                data_array.append(total_number_scarf)
                            except:
                                data_array.append('not an event with scarf')

                            try:
                                data_array.append(
                                    order["details"]["purchase_units"][0]
                                    ["payments"]["captures"][0]["id"])
                            except:
                                data_array.append("transaction id")

                            data_array.append(cca_order["cca_buyer_name"])
                            data_array.append(cca_order["cca_buyer_phone"])

                            # Load database:
                            fields = "order_id, eid, create_time, email, first_name, last_name, quantity, cost, paid, variable_time, extra_data, transaction_id, buyer_name, buyer_phone"
                            #vals = str(data_array).lstrip('[').rstrip(']')
                            vals = data_array
                            #sql = f"INSERT INTO orders ({fields}) VALUES ({vals})"
                            sql = f"INSERT INTO orders ({fields}) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"

                            c = db.cursor()
                            #c.execute(sql)
                            c.execute(sql, vals)
                            c.close()

                        except:
                            pass

                    # So that this event json doesn't get processed again:
                    try:
                        # If existing json for this event exists, add new json to it:
                        event_dict = json.loads(
                            read_file(f"orders/loaded/{f}"))
                        event_dict.update(event_orders_data)
                        write_file(f"orders/loaded/{f}",
                                   json.dumps(event_dict, indent=4))
                        # REMOVE the file
                        os.remove(f"orders/{f}")
                    except:
                        # MOVE the file to /orders/loaded/event_eid_value.json
                        os.rename(f"orders/{f}", f"orders/loaded/{f}")

            # PART-2: Select future-event-date orders from database for admin view
            #c = db.cursor()
            query = f"SELECT e.title, e.edatetime, e.elimit, o.* \
                FROM events e, orders o WHERE e.eid = o.eid AND e.edatetime {gtlt} CURDATE() ORDER BY e.edatetime {ascdesc}"

            #c.execute(query)
            #allrows = c.fetchall()
            #c.close()

            # NOTE: changing from db.cursor / c.execute to db.query / db.store_result for use of dict in template

            db.query(query)
            r = db.store_result()
            allrows = r.fetch_row(maxrows=1000, how=1)

            template = env.get_template("admin-booking-list.html")
            response = template.render(orders=allrows)

        ####
        elif environ['PATH_INFO'] == '/admin/booking/add-edit':

            c = db.cursor()
            c.execute(
                f"SELECT * FROM events WHERE edatetime > CURTIME() order by edatetime asc"
            )
            allevents = c.fetchall()
            c.close()

            if len(environ['QUERY_STRING']) > 1:
                order_id = environ['QUERY_STRING'].split("=")[1]
                db.query(f"SELECT * FROM orders WHERE id = {order_id}")
                r = db.store_result()
                row = r.fetch_row(maxrows=1, how=1)[0]
                form = BookingForm(**row)
            else:
                form = BookingForm()

            template = env.get_template("admin-booking-add-edit.html")
            response = template.render(form=form,
                                       allevents=allevents,
                                       this_now=this_now)

        elif environ['PATH_INFO'] == '/home':
            # UP-NEXT EVENT
            month = int(this_now.strftime("%m"))
            year = int(this_now.strftime("%Y"))
            html_cal = make_cal(db, month, year)

            #print(html_cal)

            db.query(f"SELECT * FROM events WHERE edatetime > CURTIME() \
                and (tags <> 'invisible' or tags is null) \
                and title != 'Private Event' \
                and description not like '%Private Event%' \
                and title not like '%Studio Closed%' \
                and title != 'Studio Closed' \
                ORDER BY edatetime limit 1")
            r = db.store_result()
            next_event = r.fetch_row(maxrows=1, how=1)[0]
            #html_cal = ""
            #next_event = ""

            # FEATURED / PINNED EVENTS
            db.query(
                f"SELECT * FROM events WHERE edatetime > CURTIME() and tags = 'home' ORDER BY edatetime limit 10"
            )
            r = db.store_result()
            events_tagged_home = r.fetch_row(maxrows=10, how=1)

            # RANDOM PRODUCT
            db.query(
                f"select pid, name, image_path_array, price from products where inventory > 0 and active = 1 ORDER BY RAND() LIMIT 1"
            )
            r = db.store_result()
            random_product = r.fetch_row(maxrows=10, how=1)[0]

            # GALLERY / SLIDESHOW
            #random_number = random.randint(1,len(galleries_dict))
            random_number = random.choice(galleries_dict_vals)
            g = Gallery(random_number)
            gallery = g.get_gallery()
            images = g.get_images()

            # Event List:
            event_list_html = make_list(db)

            template = env.get_template("home.html")
            response = template.render(next_event=next_event,
                                       calendar={"html": html_cal},
                                       events_tagged_home=events_tagged_home,
                                       gallery=gallery,
                                       images=images,
                                       event_list_html=event_list_html,
                                       random_product=random_product)

        elif environ['PATH_INFO'] == '/admin/pages':
            template = env.get_template("admin-pages.html")
            if environ['QUERY_STRING']:
                page_name = environ['QUERY_STRING'].split("=")[1]
                try:
                    page_content = read_file(f"data/{page_name}.html")
                except:
                    page_content = None
                response = template.render(page_name=page_name,
                                           page_content=page_content)
            else:
                response = template.render(pages=pages)

        elif environ['PATH_INFO'] == '/admin/registration':
            #c = db.cursor()
            #c.execute("SELECT * FROM registration ORDER BY session_detail")
            #allrows = c.fetchall()
            #c.close()

            view = ""
            if environ['QUERY_STRING'] and "view" in environ['QUERY_STRING']:
                view = environ['QUERY_STRING'].split("=")[1]

            if view == "all":
                special = ""
                orderby = "order_id, session_detail"
            else:
                special = "where order_id is not NULL"
                orderby = "session_detail"

            db.query(
                f"SELECT * FROM registration {special} ORDER BY {orderby}")
            r = db.store_result()
            allrows = r.fetch_row(maxrows=100, how=1)

            data = json.loads(
                read_file("../registration/data/wheel-wars.json"))

            template = env.get_template("admin-registration.html")
            response = template.render(allrows=allrows, data=data)

        #elif environ['PATH_INFO'] == '/admin/registration2':
        #    data = json.loads(read_file("../registration/data/wheel-wars.json"))
        #    template = env.get_template("admin-registration2.html")
        #    response = template.render(data=data)

        elif environ['PATH_INFO'] == '/summer-camp-registration':
            template = env.get_template("summer-camp-registration.html")
            form = RegistrationForm()
            response = template.render(form=form)

        elif environ['PATH_INFO'] == '/art-camp-registration':
            template = env.get_template("art-camp-registration.html")
            form = RegistrationForm()
            response = template.render(form=form)

        elif environ['PATH_INFO'].lstrip('/') in pages:
            page_name = environ['PATH_INFO'].lstrip('/')
            page_content = str(read_file(f"data/{page_name}.html"))
            template = env.get_template("pages.html")
            response = template.render(page_name=page_name,
                                       page_content=page_content)

        elif environ['PATH_INFO'] == '/admin/products/list':
            c = db.cursor()
            c.execute("SELECT * FROM products order by pid desc")
            allrows = c.fetchall()
            c.close()
            template = env.get_template("admin-products-list.html")
            response = template.render(allrows=allrows)

        elif environ['PATH_INFO'] == '/admin/products/add-edit':
            if len(environ['QUERY_STRING']) > 1:
                pid = environ['QUERY_STRING'].split("=")[1]
                db.query(f"SELECT * FROM products WHERE pid = {pid}")
                r = db.store_result()
                row = r.fetch_row(maxrows=1, how=1)[0]
                form = ProductsForm(**row)
            else:
                form = ProductsForm()
            template = env.get_template("admin-products-add-edit.html")
            response = template.render(form=form)

        elif environ['PATH_INFO'] == '/admin/products/delete':
            if len(environ['QUERY_STRING']) > 1:
                pid = int(environ['QUERY_STRING'].split("=")[1])
                sql = f"DELETE FROM products WHERE pid = {pid}"
                c = db.cursor()
                c.execute(sql)
                c.close()
                sql = f"DELETE FROM cart_order_product WHERE product_id = {pid}"
                c = db.cursor()
                c.execute(sql)
                c.close()
                response = '<meta http-equiv="refresh" content="0; url=/app/admin/products/list" />'
            else:
                response = ""

        else:
            path_info = environ['PATH_INFO'].lstrip('/')
            template = env.get_template("main.html")
            response = template.render(path_info=path_info)

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/paypal-transaction-complete":

        length = int(environ.get('CONTENT_LENGTH', '0'))
        post_input = environ['wsgi.input'].read(length).decode('UTF-8')
        form_orders = json.loads(post_input)
        event_id = str(form_orders['event_id'])

        try:
            orders = json.loads(read_file(f"orders/{event_id}.json"))
        except:
            orders = []

        orders.append(form_orders)
        write_file(f"orders/{event_id}.json", json.dumps(orders, indent=4))
        response = "200"

        #scrape_and_write("calendar")

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/product-image/upload":
        length = int(environ.get('CONTENT_LENGTH', '0'))
        # NOTICE: NOT DECODING post_input below FOR IMAGES
        post_input = environ['wsgi.input'].read(length)
        # NOTICE: BYTES STRING below FOR IMAGES
        image_pid = post_input.split(b'Content-Disposition: form-data')[1]
        pid = re.sub(b'^.*name="pid"(.*?)------.*$',
                     r"\1",
                     image_pid,
                     flags=re.DOTALL).strip()
        pid = int(pid.decode('UTF-8'))
        image_data = post_input.split(b'Content-Disposition: form-data')[2]
        image_filename = re.sub(b'^.*filename="(.*?)".*$',
                                r"\1",
                                image_data,
                                flags=re.DOTALL).strip()
        image_contents = re.sub(b'^.*Content-Type: image/jpeg(.*)$',
                                r"\1",
                                image_data,
                                flags=re.DOTALL).strip()
        img_name = image_filename.decode('UTF-8')
        open(f"../www/img/orig/{img_name}", 'wb').write(image_contents)
        size = 350, 350
        image = Image.open(f"../www/img/orig/{img_name}")
        image.thumbnail(size)
        image.save(f"../www/img/small/{img_name}", 'JPEG')
        sql = f"UPDATE products SET image_path_array = concat(ifnull(image_path_array,''), ',{img_name}') WHERE pid = {pid}"
        c = db.cursor()
        c.execute(sql)
        c.close()
        response = f'<meta http-equiv="refresh" content="0; url=/app/admin/products/list" />'

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/image/upload":
        length = int(environ.get('CONTENT_LENGTH', '0'))
        # NOTICE: NOT DECODING post_input below FOR IMAGES
        post_input = environ['wsgi.input'].read(length)

        #print(post_input)

        # NOTICE BYTES STRING below FOR IMAGES
        image_eid = post_input.split(b'Content-Disposition: form-data')[1]
        eid = re.sub(b'^.*name="eid"(.*?)------.*$',
                     r"\1",
                     image_eid,
                     flags=re.DOTALL).strip()
        eid = int(eid.decode('UTF-8'))
        image_data = post_input.split(b'Content-Disposition: form-data')[2]
        image_filename = re.sub(b'^.*filename="(.*?)".*$',
                                r"\1",
                                image_data,
                                flags=re.DOTALL).strip()
        image_contents = re.sub(b'^.*Content-Type: image/jpeg(.*)$',
                                r"\1",
                                image_data,
                                flags=re.DOTALL).strip()
        img_name = image_filename.decode('UTF-8')
        if img_name and image_contents:
            open(f"../www/img/orig/{img_name}", 'wb').write(image_contents)
            size = 350, 350
            image = Image.open(f"../www/img/orig/{img_name}")
            image.thumbnail(size)
            image.save(f"../www/img/small/{img_name}", 'JPEG')
            sql = f"UPDATE events SET image = '{img_name}' WHERE eid = {eid}"
            c = db.cursor()
            c.execute(sql)
            c.close()
        response = f'<meta http-equiv="refresh" content="0; url=/app/admin/events/list" />'

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/contact":
        length = int(environ.get('CONTENT_LENGTH', '0'))
        post_input = environ['wsgi.input'].read(length).decode('UTF-8')

        data_object = json.loads(read_file("data/contactus.json"))

        post_input_array = post_input.split('------')

        message_object = {}
        for d in post_input_array:
            post_data_key = re.sub(r'^.*name="(.*?)".*$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            post_data_val = re.sub(r'^.*name=".*?"(.*)$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            if len(post_data_key) > 1 and not post_data_key.startswith(
                    'WebKitForm') and post_data_key != "submit":
                message_object[post_data_key] = post_data_val

        # Record the collected message once, keyed by this_now.
        data_object[str(this_now)] = message_object

        email = data_object[str(this_now)]["email"]

        write_file(f"data/contactus.json", json.dumps(data_object, indent=4))

        #template = env.get_template("about-us.html")
        #response = template.render(thanks=data_object[str(this_now)])

        page_content = str(read_file("data/about-us.html"))
        template = env.get_template("pages.html")
        page_name = "about-us"
        response = template.render(page_name=page_name,
                                   page_content=page_content,
                                   email=email)

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/admin/pages":
        length = int(environ.get('CONTENT_LENGTH', '0'))
        post_input = environ['wsgi.input'].read(length).decode('UTF-8')

        post_input_array = post_input.split('------')

        data_object = {}
        for d in post_input_array:
            post_data_key = re.sub(r'^.*name="(.*?)".*$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            post_data_val = re.sub(r'^.*name=".*?"(.*)$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            if len(post_data_key) > 1 and not post_data_key.startswith(
                    'WebKitForm') and post_data_key != "submit":
                data_object[post_data_key] = post_data_val

        page_name = data_object['page_name']
        page_content = data_object['page_content']

        # Back up the current version before overwriting it.
        os.rename(f"data/{page_name}.html", f"data/{page_name}.html.bak")

        write_file(f"data/{page_name}.html", page_content)
        response = '<meta http-equiv="refresh" content="0; url=/app/admin/pages"/>'

        scrape_and_write(page_name)

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST" and environ[
            'PATH_INFO'] == "/admin/products/add-edit":

        length = int(environ.get('CONTENT_LENGTH', '0'))
        post_input = environ['wsgi.input'].read(length).decode('UTF-8')

        data_object = {}
        data_array = []

        post_input_array = post_input.split('------')

        for d in post_input_array:
            post_data_key = re.sub(r'^.*name="(.*?)".*$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            post_data_val = re.sub(r'^.*name=".*?"(.*)$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            if len(post_data_key) > 1 and not post_data_key.startswith(
                    'WebKitForm'
            ) and post_data_key != "submit" and not post_data_val.startswith(
                    '-----'):
                data_object[post_data_key] = post_data_val
                data_array.append(post_data_val)

        # A positive pid means this edits an existing product; a missing or
        # non-numeric pid falls through to an insert.
        try:
            if int(data_object['pid']) > 0:
                action = "Update"
                pid = data_object['pid']
            else:
                action = "Insert"
        except (KeyError, ValueError):
            action = "Insert"

        # Cleanup: Remove "pid"
        del data_object['pid']
        del data_array[0]

        # Todo: More validation
        products_form = ProductsForm(**data_object)

        # Set query based on update vs insert
        if action == "Update":
            keys_vals = ""
            for k, v in data_object.items():
                v = v.replace("'", "''")
                keys_vals += str(f"{k}='{v}', ")
            keys_vals = keys_vals.rstrip(', ')
            sql = f"UPDATE products SET {keys_vals} WHERE pid = {pid}"
            #print(sql)
            c = db.cursor()
            c.execute(sql)
        else:
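            # Column order here must match the order of the values collected
            # into data_array from the form.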
            fields = "name, description, image_path_array, inventory, price, keywords_array, active"
            vals = data_array
            sql = f"INSERT INTO products ({fields}) VALUES (%s, %s, %s, %s, %s, %s, %s)"
            c = db.cursor()
            c.execute(sql, vals)
        c.close()

        # Next template needs to know the pid
        if action == "Insert":
            # Now retrieve the pid from the item we just added
            n = data_object['name'].replace("'", "''")
            p = data_object['price']
            sql2 = f"SELECT pid FROM products WHERE name = '{n}' AND price = '{p}'"
            #print(sql2)
            d = db.cursor()
            d.execute(sql2)
            results = d.fetchone()
            pid = int(results[0])
            d.close()
        else:
            sql2 = "just an update"

        image_form = ImageForm()

        template = env.get_template("admin-products-image.html")
        response = template.render(product_data=data_object,
                                   image_form=image_form,
                                   sql={"sql": sql},
                                   pid={"pid": pid},
                                   sql2={"sql2": sql2})

    ####
    ####
    elif environ['REQUEST_METHOD'] == "POST":

        #print("HERE AAA")

        length = int(environ.get('CONTENT_LENGTH', '0'))
        post_input = environ['wsgi.input'].read(length).decode('UTF-8')

        #print("post_input", post_input)

        data_object = {}
        data_array = []

        post_input_array = post_input.split('------')

        #with open("stderr.log", "a") as logfile:
        #    logfile.write(str(f"++++{post_input}++++"))

        for d in post_input_array:
            post_data_key = re.sub(r'^.*name="(.*?)".*$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            post_data_val = re.sub(r'^.*name=".*?"(.*)$',
                                   r"\1",
                                   d,
                                   flags=re.DOTALL).strip()
            if len(post_data_key) > 1 and not post_data_key.startswith(
                    'WebKitForm'
            ) and "submit" not in post_data_key and not post_data_val.startswith(
                    '-----'):
                data_object[post_data_key] = post_data_val
                data_array.append(post_data_val)

        #print("data_object", data_object)
        #print("data_array", data_array)

        # TEMPORARILY removing series input data
        #data_object_temp = data_object
        #for k, v in data_object_temp.items():
        #    if "series" in k:
        #        del data_object[k]

        # If form passes an eid value then query
        # is an update as opposed to an insert

        try:
            if int(data_object['eid']) > 0:
                action = "Update"
                eid = data_object['eid']
            else:
                action = "Insert"
        except (KeyError, ValueError):
            action = "Insert"

        # Cleanup: Remove "eid"
        del data_object['eid']
        del data_array[0]

        # Cleanup: Remove "append_time"
        del data_object['append_time']
        del data_array[1]

        # Keep the variable-time fields (price_text, elimit); they feed the
        # UpdateExtra step below.
        price_text = data_object["price_text"]
        elimit = data_object["elimit"]

        # Todo: More validation
        events_form = EventsForm(**data_object)

        # Set query based on update vs insert
        if action == "Update":
            keys_vals = ""
            for k, v in data_object.items():
                v = v.replace("'", "''")
                keys_vals += str(f"{k}='{v}', ")
            keys_vals = keys_vals.rstrip(', ')
            sql = f"UPDATE events SET {keys_vals} WHERE eid = {eid}"

            c = db.cursor()
            c.execute(sql)
            #c.execute(sql, vals)

        else:
            # fields MUST match keys coming in via "data_array":
            fields = "edatetime, title, duration, price, elimit, location, image, description, price_text, tags, extra_data"
            #vals = str(data_array).lstrip('[').rstrip(']')
            vals = data_array
            #sql = f"INSERT INTO events ({fields}) VALUES ({vals})"
            # Number of items (%s) MUST match num of vals coming in via "data_array":
            sql = f"INSERT INTO events ({fields}) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"

            c = db.cursor()
            #c.execute(sql)
            c.execute(sql, vals)

        c.close()

        # Next template needs to know the eid
        if action == "Insert":
            # Now retrieve the eid from the item we just added
            e = data_object['edatetime']
            t = data_object['title'].replace("'", "''")
            sql2 = f"SELECT eid, price_text, elimit FROM events WHERE edatetime = '{e}' AND title = '{t}'"
            #print("____", sql2)
            d = db.cursor()
            d.execute(sql2)
            results = d.fetchone()
            #print("____results", results)
            #print("____type of results", type(results))
            eid = int(results[0])
            try:
                price_text = results[1]
            except (IndexError, TypeError):
                price_text = ""
            try:
                elimit = int(results[2])
            except (IndexError, TypeError, ValueError):
                elimit = ""
            d.close()

            # If the price text contains times ("am"/"pm"), hand it to
            # UpdateExtra to fill in the new event's extra data.
            if "am" in price_text or "pm" in price_text:
                u = UpdateExtra(eid, price_text, elimit)
                u.set_via_admin()
                u.update_extra()

        else:
            sql2 = "just an update"

        image_form = ImageForm()

        template = env.get_template("admin-events-image.html")
        response = template.render(event_data=data_object,
                                   image_form=image_form,
                                   sql={"sql": sql},
                                   eid={"eid": eid},
                                   sql2={"sql2": sql2})

        #scrape_and_write("calendar")
        #time.sleep(2)
        #scrape_and_write("home")

    ####
    ####
    else:
        response = "error"

    #response += f"<hr>{str(environ)}"

    db.close()

    return [response.encode()]
Exemplo n.º 22
0
def main(_):
    logging.basicConfig(level=FLAGS.verbose)
    try:
        os.makedirs(FLAGS.model)
    except OSError:
        pass
    assert FLAGS.db and os.path.exists(FLAGS.db)

    X = tf.placeholder(tf.float32,
                       shape=(None, None, None, FLAGS.channels),
                       name="images")
    Y = tf.placeholder(tf.float32, shape=(None, None, None, 1), name="labels")

    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.max_pool2d],
                        padding=FLAGS.padding):
        logits, stride = getattr(nets, FLAGS.net)(X)
    loss, metrics = fcn_loss(logits, Y)
    prob = logits2prob(logits)
    #tf.summary.scalar("loss", loss)
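    # One scalar summary per metric; ':' in the tensor name is replaced
    # because it is not allowed in summary names.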
    metric_names = [x.name[:-2] for x in metrics]
    for x in metrics:
        tf.summary.scalar(x.name.replace(':', '_'), x)

    rate = FLAGS.learning_rate
    if FLAGS.opt == 'adam':
        rate /= 100
    global_step = tf.Variable(0, name='global_step', trainable=False)
    if FLAGS.decay:
        rate = tf.train.exponential_decay(rate,
                                          global_step,
                                          FLAGS.decay_steps,
                                          FLAGS.decay_rate,
                                          staircase=True)
        tf.summary.scalar('learning_rate', rate)
    if FLAGS.opt == 'adam':
        optimizer = tf.train.AdamOptimizer(rate)
    elif FLAGS.opt == 'mom':
        optimizer = tf.train.MomentumOptimizer(rate, FLAGS.momentum)
    else:
        optimizer = tf.train.GradientDescentOptimizer(rate)
        pass

    train_op = optimizer.minimize(loss, global_step=global_step)
    summary_writer = None
    train_summaries = tf.constant(1)
    #val_summaries = tf.constant(1)
    if FLAGS.log:
        train_summaries = tf.summary.merge_all()
        assert train_summaries is not None
        summary_writer = tf.summary.FileWriter(
            FLAGS.log, tf.get_default_graph(), flush_secs=20)
        #assert train_summaries
        #val_summaries = tf.summary.merge_all(key='val_summaries')

    picpac_config = dict(
        seed=2016,
        shuffle=True,
        reshuffle=True,
        batch=1,
        split=1,
        split_fold=0,
        round_div=stride,
        annotate='json',
        channels=FLAGS.channels,
        stratify=True,
        pert_color1=20,
        pert_angle=20,
        pert_min_scale=0.9,
        pert_max_scale=1.5,
        pert_hflip=True,
        pert_vflip=True,
        channel_first=False  # this is tensorflow specific
        # Caffe's dimension order is different.
    )
    if FLAGS.mixin is not None:
        picpac_config['mixin'] = FLAGS.mixin
        picpac_config['mixin_group_delta'] = 1

    tr_stream = picpac.ImageStream(FLAGS.db,
                                   perturb=True,
                                   loop=True,
                                   **picpac_config)
    val_stream = None
    if FLAGS.val:
        assert os.path.exists(FLAGS.val)
        val_stream = picpac.ImageStream(FLAGS.val,
                                        perturb=False,
                                        loop=False,
                                        **picpac_config)

    init = tf.global_variables_initializer()

    saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # check that padding has been properly set up
    # this should be ensured if all layers are added via slim
    graph = tf.get_default_graph()
    graph.finalize()
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
        if 'padding' in node.attr and 'Backprop' not in node.name:
            # the attribute value is bytes; decode before comparing to the flag
            padding = node.attr['padding'].s.decode('ascii')
            if padding != FLAGS.padding:
                logging.error(
                    "node %s type %s incorrect padding %s, should be %s" %
                    (node.name, node.op, padding, FLAGS.padding))
            pass
        pass

    with tf.Session(config=config) as sess:
        sess.run(init)
        if FLAGS.resume:
            saver.restore(sess, FLAGS.resume)
        step = 0
        epoch = 0
        global_start_time = time.time()
        while step < FLAGS.max_steps:
            start_time = time.time()
            avg = np.array([0] * len(metrics), dtype=np.float32)
            for _ in tqdm(range(FLAGS.epoch_steps), leave=False):
                while True:
                    images, labels, _ = tr_stream.next()
                    _, H, W, _ = images.shape
                    if FLAGS.max_size:
                        if max(H, W) > FLAGS.max_size:
                            continue
                    break
                if FLAGS.padding == 'SAME' and FLAGS.clip:
                    images = clip(images, stride)
                    labels = clip(labels, stride)
                feed_dict = {X: images, Y: labels}
                mm, _, summaries = sess.run(
                    [metrics, train_op, train_summaries], feed_dict=feed_dict)
                avg += np.array(mm)
                step += 1
                pass
            avg /= FLAGS.epoch_steps
            stop_time = time.time()
            txt = ', '.join(
                ['%s=%.4f' % (a, b) for a, b in zip(metric_names, list(avg))])
            print('step %d: elapsed=%.4f time=%.4f, %s' %
                  (step, (stop_time - global_start_time),
                   (stop_time - start_time), txt))
            if summary_writer:
                summary_writer.add_summary(summaries, step)
            epoch += 1
            if epoch and (epoch % FLAGS.ckpt_epochs == 0):
                ckpt_path = '%s/%d' % (FLAGS.model, step)
                start_time = time.time()
                saver.save(sess, ckpt_path)
                stop_time = time.time()
                print('epoch %d step %d, saving to %s in %.4fs.' %
                      (epoch, step, ckpt_path, stop_time - start_time))
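            # Periodic validation pass; per-image predictions can optionally
            # be dumped into an HTML gallery under FLAGS.val_plot.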
            if epoch and (epoch % FLAGS.val_epochs == 0) and val_stream:
                val_stream.reset()
                avg = np.array([0] * len(metrics), dtype=np.float32)
                cc = 0
                gal = None
                if FLAGS.val_plot:
                    gal = Gallery(os.path.join(FLAGS.val_plot, str(step)))
                for images, labels, _ in val_stream:
                    _, H, W, _ = images.shape
                    if FLAGS.max_size:
                        if max(H, W) > FLAGS.max_size:
                            continue
                    if FLAGS.padding == 'SAME' and FLAGS.clip:
                        images = clip(images, stride)
                        labels = clip(labels, stride)
                    feed_dict = {X: images, Y: labels}
                    #print("XXX", images.shape)
                    pp, mm = sess.run([prob, metrics], feed_dict=feed_dict)
                    if gal:
                        save_vis(gal.next(), pp, images)
                    avg += np.array(mm)
                    cc += 1
                if gal:
                    gal.flush()
                avg /= cc
                txt = ', '.join([
                    '%s=%.4f' % (a, b) for a, b in zip(metric_names, list(avg))
                ])
                print('epoch %d step %d, validation %s' % (epoch, step, txt))

            pass
        pass
    if summary_writer:
        summary_writer.close()
    pass
Exemplo n.º 23
0
def main (_):
    global PIXEL_MEANS

    logging.basicConfig(filename='train-%s-%s.log' % (FLAGS.net, datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),level=logging.DEBUG, format='%(asctime)s %(message)s')

    if FLAGS.model:
        try:
            os.makedirs(FLAGS.model)
        except OSError:
            pass

    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")

    # ground truth labels
    Y = tf.placeholder(tf.int32, shape=(None, None, None, 1), name="labels")
    is_training = tf.placeholder(tf.bool, name="is_training")

    #with \
    #     slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=5e-4): 

    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.max_pool2d],
                            padding='SAME'), \
                                    slim.arg_scope([slim.conv2d, slim.conv2d_transpose], weights_regularizer=slim.l2_regularizer(2.5e-4), normalizer_fn=slim.batch_norm, normalizer_params={'decay': 0.9, 'epsilon': 5e-4, 'scale': False, 'is_training':is_training}), \
         slim.arg_scope([slim.batch_norm], is_training=is_training):
        logits, FLAGS.stride = getattr(nets, FLAGS.net)(X-PIXEL_MEANS)

    # probability of class 1 -- not very useful if FLAGS.classes > 2
    probs = tf.squeeze(tf.slice(tf.nn.softmax(logits), [0,0,0,1], [-1,-1,-1,1]), 3)
    loss, metrics = fcn_loss(logits, Y)
    metric_names = [x.name[:-2] for x in metrics]

    def format_metrics (avg):
        return ' '.join(['%s=%.3f' % (a, b) for a, b in zip(metric_names, list(avg))])

    global_step = tf.train.create_global_step()
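    # Step-wise exponential learning-rate decay; Adam (below) uses a 100x
    # smaller base rate.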
    LR = tf.train.exponential_decay(FLAGS.lr, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
    if FLAGS.adam:
        print("Using Adam optimizer, reducing LR by 100x")
        optimizer = tf.train.AdamOptimizer(LR/100)
    else:
        optimizer = tf.train.MomentumOptimizer(learning_rate=LR, momentum=0.9)

    train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
    saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)

    stream = create_picpac_stream(FLAGS.db, True)
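    # Preview mode: dump the first FLAGS.gallery_max augmented training images
    # into an HTML gallery and exit.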
    if FLAGS.gallery:
        gal = Gallery(FLAGS.gallery, ext='.png')
        C = 0 
        for _, images, label  in stream:
            image = images[0]
            print(type(image), image.shape) #, type(anno))
            #print(image.dtype, anno.dtype)
            #print(np.mean(image[:, :, 0]), np.mean(image[:, :, 1]), np.mean(image[:, :, 2]))
            C += 1
            #print('%d / %d' % (C, stream.size()))
            cv2.imwrite(gal.next(), image)
            #cv2.imwrite(gal.next(), anno)
            #cv2.imwrite(gal.next(), label * 255)
            #cv2.imwrite(gal.next(), draw)
            if C == FLAGS.gallery_max:
                break
            pass
        gal.flush()
        sys.exit(0)
    
    # load validation db
    val_stream = None
    if FLAGS.val_db:
        val_stream = create_picpac_stream(FLAGS.val_db, False)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True

    epoch_steps = FLAGS.epoch_steps
    if epoch_steps is None:
        epoch_steps = (stream.size() + FLAGS.batch-1) // FLAGS.batch
    best = 0
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if FLAGS.resume:
            saver.restore(sess, FLAGS.resume)

        global_start_time = time.time()
        epoch = 0
        step = 0
        while epoch < FLAGS.max_epochs:
            start_time = time.time()
            cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
            progress = tqdm(range(epoch_steps), leave=False)
            for _ in progress:
                _, images, labels = stream.next()
                #print ("labels:",labels)
                feed_dict = {X: images, Y: labels, is_training: True}
                mm, _ = sess.run([metrics, train_op], feed_dict=feed_dict)
                metrics_sum += np.array(mm) * images.shape[0]
                cnt += images.shape[0]
                metrics_txt = format_metrics(metrics_sum/cnt)
                progress.set_description(metrics_txt)
                step += 1
                pass
            stop = time.time()
            msg = 'train epoch=%d step=%d ' % (epoch, step)
            msg += metrics_txt
            msg += ' elapsed=%.3f time=%.3f ' % (stop - global_start_time, stop - start_time)
            print_green(msg)
            logging.info(msg)

            epoch += 1

            if (epoch % FLAGS.val_epochs == 0) and val_stream:
                lr = sess.run(LR)
                # evaluation
                Ys, Ps = [], []
                cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
                val_stream.reset()
                progress = tqdm(val_stream, leave=False)
                for meta, images, labels in progress:
                    feed_dict = {X: images, Y: labels, is_training: False}
                    p, mm = sess.run([probs, metrics], feed_dict=feed_dict)
                    metrics_sum += np.array(mm) * images.shape[0]
                    cnt += images.shape[0]
                    Ys.extend(list(meta.labels))
                    Ps.extend(list(p))
                    metrics_txt = format_metrics(metrics_sum/cnt)
                    progress.set_description(metrics_txt)
                    pass
                assert cnt == val_stream.size()
                avg = metrics_sum / cnt
                if avg[0] > best:
                    best = avg[0]
                msg = 'valid epoch=%d step=%d ' % (epoch-1, step)
                msg += metrics_txt
                if FLAGS.classes == 2:
                    # display scikit-learn metrics
                    Ys = np.array(Ys, dtype=np.int32)
                    Ps = np.array(Ps, dtype=np.float32)
                    msg += ' sk_acc=%.3f auc=%.3f' % (accuracy_score(Ys, Ps > 0.5), roc_auc_score(Ys, Ps))
                    pass
                msg += ' lr=%.4f best=%.3f' % (lr, best)
                print_red(msg)
                logging.info(msg)
                #log.write('%d\t%s\t%.4f\n' % (epoch, '\t'.join(['%.4f' % x for x in avg]), best))
            # model saving
            if (epoch % FLAGS.ckpt_epochs == 0) and FLAGS.model:
                ckpt_path = '%s/%d' % (FLAGS.model, epoch)
                saver.save(sess, ckpt_path)
                print('saved to %s.' % ckpt_path)
            pass
        pass
    pass
Exemplo n.º 24
0
def main(_):
    setup_params()
    model = VoxelNet()
    model.build_graph()
    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    is_val = True
    if is_val:
        db = FLAGS.val_db
        columns = 2  # two columns to visualize groundtruth
    else:
        db = FLAGS.test_db
        columns = 1
        pass

    if FLAGS.results:
        sp.check_call('mkdir -p %s/data' % FLAGS.results, shell=True)

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if not FLAGS.test_labels:
            saver.restore(sess, FLAGS.model)

        gal = Gallery('output', cols=columns)
        C = 0
        with open(db, 'r') as f:
            for l in f:
                pk = int(l.strip())
                sample = Sample(pk,
                                LOAD_IMAGE2 | LOAD_VELO | LOAD_LABEL2,
                                is_training=is_val)
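                # Voxelize the raw LIDAR points into the (points, mask, index)
                # tensors the network expects.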
                points = sample.get_voxelnet_points()
                points, mask, index = model.vxl.voxelize_points([points], T)
                feed_dict = {
                    model.is_training: False,
                    model.points: points,
                    model.mask: mask,
                    model.index: index
                }

                if is_val:
                    # for validation set, produce the ground-truth boxes
                    boxes_gt = sample.get_voxelnet_boxes(["Car"])
                    #for row in boxes_gt:
                    #    print(row)
                    if FLAGS.test_labels:  # 2 lines below are for testing the C++ code
                        probs, _, params, _ = model.vxl.voxelize_labels(
                            [boxes_gt], np.array(model.priors,
                                                 dtype=np.float32),
                            FLAGS.rpn_stride, FLAGS.lower_th, FLAGS.upper_th)
                    sample.load_voxelnet_boxes(boxes_gt, 'Car')
                    # visualize groundtruth labels
                    image3d = np.copy(sample.image2)
                    for box in sample.label2:
                        draw_box3d(image3d, box, sample.calib)
                        pass
                    cv2.imwrite(gal.next(), image3d)

                if not FLAGS.test_labels:
                    probs, params = sess.run([model.probs, model.params],
                                             feed_dict=feed_dict)

                boxes = model.vxl.generate_boxes(
                    probs, params, np.array(model.priors, dtype=np.float32),
                    FLAGS.anchor_th)
                boxes = cpp.nms(boxes, FLAGS.nms_th)
                boxes = boxes[0]
                #print("++++")
                #for row in boxes:
                #    print(row)
                #print('====')
                print(np.max(probs), len(boxes))
                sample.load_voxelnet_boxes(boxes, 'Car')

                if FLAGS.results:
                    save_label2('%s/data/%06d.txt' % (FLAGS.results, pk),
                                sample.label2)

                image3d = np.copy(sample.image2)
                for box in sample.label2:
                    draw_box3d(image3d, box, sample.calib)
                    pass

                cv2.imwrite(gal.next(), image3d)

                C += 1
                if C >= FLAGS.max:
                    break
                pass
            pass
        gal.flush()
        pass
    pass
Exemplo n.º 25
0
    def __init__(self, people=0, db=0, mode=0):
        super().__init__()
        self.font = mainfont
        self.resize(700, self.height())
        layout = QGridLayout()
        self.buttext = []

        self.db = db
        self.people = people
        #print(people)
        self.pid = self.people['pid']
        self.edit_people = db.edit_people
        self.mode = mode
        self.gallery = Gallery(self.pid, self.db, mode)
        self.docviewer = DocViewer(self.pid, self.db, self.mode)
        #self.photos = self.gallery.curpixmap
        #print(self.photos)

        self.photo = QPushButton()
        if self.gallery.curpixmap:
            self.pixmap = QPixmap(self.gallery.curpixmap)
            #self.photofile = self.photo
        else:
            self.pixmap = QPixmap(os.path.join('images', 'f.png'))
        self.change_icon()
        self.photo.clicked.connect(self.show_photo)

        self.docs = QPushButton()
        if isinstance(self.docviewer.docs_ids, list) and self.docviewer.docs_ids:
            self.docs_pixmap = QPixmap(os.path.join('images', 'docs.jpg'))
        else:
            self.docs_pixmap = QPixmap(
                os.path.join('images', 'empty_folder.png'))
        self.docs.setIcon(self.docs_pixmap)
        #self.docs.setAlignment(Qt.AlignCenter)
        self.docs.setIconSize(rect.size())
        self.docs.clicked.connect(self.show_docs)

        self.inb = QInvisibleButton(translate('Описание'))  # "Description" label
        self.inb.setFont(self.font)
        self.desc = QTextEdit()
        if mode == 0:
            self.desc.setReadOnly(True)
        if 'desc' in people.keys():
            self.desc.setPlainText(people['desc'])

        layout.addWidget(self.photo, 0, 0, len(people.keys()), 1)
        layout.addWidget(self.docs, len(people.keys()) + 2, 0, 1, 1)
        layout.addWidget(self.desc, len(people.keys()) + 2, 1, 1, 1)
        layout.addWidget(self.inb, len(people.keys()) + 2, 1, 1, 1)
        #layout.addWidget(self.eventList, 0, 2, len(people.keys())+3, 1)

        i = 0
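        # One invisible label button plus a line edit per person field;
        # the fields become editable only in edit mode.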
        for k, v in self.people.items():
            #	if mode == 0:
            #		if json_people.columns[i] == 'date_end' and str(e) == '':
            #			continue
            #		if json_people.columns[i] == 'maiden' and len(get_pol(data)) > 0 and get_pol(data)[0] == 'м':
            #			continue
            if k == 'desc':
                continue

            lb = QInvisibleButton(translate(k))
            lb.setFont(self.font)
            te = QLineEdit(str(v))
            te.setFont(self.font)
            te.setReadOnly(True)
            self.buttext.append([lb, te])
            #if json_people.columns[i] == 'maiden' and len(get_pol(data)) > 0 and  get_pol(data)[0] == 'м':
            #	continue
            layout.addWidget(te, i, 1, 1, 1)
            layout.addWidget(lb, i, 1, 1, 1)
            if mode:
                te.setReadOnly(False)
                lb.clicked.connect(self.button_pressed)
                te.editingFinished.connect(self.line_edit_finished)
            i = i + 1

        if (mode == 1) or (mode == 2):
            bn = QPushButton('Сохранить')  # "Save" button
            bn.setStyleSheet("font-weight: bold; font-size:11pt;")
            #bn.setFont(self.font)
            layout.addWidget(bn, len(people.keys()) + 3, 0, 1, 2)
            #layout.addWidget(bn, 1 , len(json_people.columns) + 1, 1, 1)
            bn.clicked.connect(self.save_press)

        self.setLayout(layout)
Exemplo n.º 26
0
def main(_):
    X = tf.placeholder(tf.float32,
                       shape=(None, None, None, FLAGS.channels),
                       name="images")
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.loader(sess)

        gal = Gallery('output', cols=2, ext='.jpg')
        CC = 0
        if FLAGS.list:
            with open(FLAGS.list, 'r') as f:
                for path in f:
                    if CC > FLAGS.max:
                        break
                    path = path.strip()
                    print(path)
                    if FLAGS.channels == 3:
                        image = cv2.imread(path, cv2.IMREAD_COLOR)
                    elif FLAGS.channels == 1:
                        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                        # add the channel axis: (H, W) -> (H, W, 1)
                        image = np.expand_dims(image, axis=2)
                    else:
                        assert False
                    H, W = image.shape[:2]

                    if max(H, W) > FLAGS.max_size:
                        f = FLAGS.max_size / max(H, W)
                        image = cv2.resize(image, None, fx=f, fy=f)
                        H, W = image.shape[:2]
                    '''BEGIN INFERENCE'''
                    # clip edge
                    H = H // FLAGS.clip_stride * FLAGS.clip_stride
                    W = W // FLAGS.clip_stride * FLAGS.clip_stride
                    image = image[:H, :W].astype(np.float32)
                    # change from BGR to RGB
                    if FLAGS.channels == 3 and FLAGS.colorspace == 'RGB':
                        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                        batch = np.expand_dims(image_rgb, axis=0)
                    else:
                        batch = np.expand_dims(image, axis=0)
                    prob = sess.run(model.prob, feed_dict={X: batch})
                    '''END INFERENCE'''
                    save_prediction_image(gal, image, prob[0])
                    CC += 1
        if FLAGS.db:
            stream = picpac.ImageStream({
                'db': FLAGS.db,
                'loop': False,
                'channels': FLAGS.channels,
                'colorspace': FLAGS.colorspace,
                'threads': 1,
                'shuffle': False,
                'transforms': [{
                    "type": "resize",
                    "max_size": FLAGS.max_size
                }, {
                    "type": "clip",
                    "round": FLAGS.clip_stride
                }]
            })
            for meta, batch in stream:
                if CC > FLAGS.max:
                    break
                print(meta.ids)
                image = batch[0]
                if FLAGS.channels == 3 and FLAGS.colorspace == 'RGB':
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                prob = sess.run(model.prob, feed_dict={X: batch})
                '''END INFERENCE'''
                save_prediction_image(gal, image, prob[0])
                CC += 1
        gal.flush()
    pass
Exemplo n.º 27
0
def main (argv):
    nodule_model = Model(FLAGS.prob, FLAGS.mode, FLAGS.fts, FLAGS.channels, FLAGS.prob_dropout, FLAGS.fts_dropout)
    with open(os.path.join('models', FLAGS.score), 'rb') as f:
        score_model = pickle.load(f)

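    # Load the scan, normalize HU values, resample to the target spacing and
    # segment the lung to build the region-of-interest mask.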
    case = FsCase(FLAGS.input)

    case.normalizeHU()
    case = case.rescale3D(SPACING)
    lung, _ = mesh.segment_lung(case.images)
    save_mesh(lung, os.path.join(FLAGS.output, 'lung'))
    mask = mesh.convex_hull(lung)
    #body, _ = mesh.segment_body(case.images)
    #save_mesh(body, os.path.join(FLAGS.output, 'body'))
    case.standardize_color()

    case.round_stride(FLAGS.stride)

    mask = case.copy_replace_images(mask)
    mask.round_stride(FLAGS.stride)
    mask = mask.images

    views = [case.transpose(AXIAL),
             case.transpose(SAGITTAL),
             case.transpose(CORONAL)]

    if FLAGS.dilate > 0:
        ksize = FLAGS.dilate * 2 + 1
        mask = grey_dilation(mask, size=(ksize, ksize, ksize), mode='constant')
        pass
    if True:
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            nodule_model.load(sess)
            dim, nodules = nodule_model.apply(sess, views, mask)
            pass
        pass
    else:
        dim = 11
        nodules = []

    fts = []
    pos = []
    #print(nodules)
    fts.append(pyramid(dim, nodules))   # global
    pos.append(None)                    # global
    for nodule in nodules:
        fts.append(pyramid(dim, [nodule]))
        pos.append(nodule[3])
        pass
    Nt = np.array(fts, dtype=np.float32)
    Ny = score_model.predict_proba(Nt)[:,1]
    global_score = float(Ny[0])
    #print('GLOBAL SCORE:', global_score)
    pw = sorted(zip(pos, list(Ny)), key=lambda x:x[1], reverse=True)

    gal = Gallery(FLAGS.output, cols=5, header=['nodule','score','axial','sagittal','coronal'])
    anno = Annotations()
    C = 1
    for box, score in pw:
        if box is None:
            continue
        if score < 0.1:
            break
        anno.add(box, str(score))
        gal.text('%d' % C)
        gal.text('%.4f' % score)
        for v in VIEWS:
            cc, (y0, x0, y1, x1) = box_center(box, v)
            view = views[v]
            image = get3c(view.images, cc)
            cv2.rectangle(image, (x0,y0), (x1,y1), (0,255,255))
            if v == AXIAL:
                image = cv2.flip(image, 1)
            elif v == SAGITTAL:
                image = cv2.transpose(image)
                image = cv2.flip(image, 0)
            elif v == CORONAL:
                image = cv2.flip(image, -1)
            cv2.imwrite(gal.next(), image)
            pass
        C += 1
        pass
    gal.flush('plumo.html', extra={'score':global_score})
    Papaya(os.path.join(FLAGS.output, 'papaya'), case, annotations=anno)
    pass
Exemplo n.º 28
0
def papaya(samples=['126823']):
    gal = Gallery('/output/preview', cols=4)
    for sample in samples:
        try:
            pbb = np.load('grt123-DSB2017/bbox_result/%s_pbb.npy' % sample)
            pbb = np.asarray(pbb, np.float32)
            pbb_original = pbb[:]
            vol = np.load('grt123-DSB2017/prep_result/%s_clean.npy' % sample)
            meta = pickle.load(
                open('grt123-DSB2017/prep_result/' + sample + '.pickle', 'rb'))
        except Exception:
            continue

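        # Non-maximum suppression, then keep the five highest-scoring boxes.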
        pbb = nms(pbb, 0.4)
        ii_list = pbb[:, 0].argsort()[-5:][::-1]

        extendbox = meta['extendbox']
        resolution = meta['resolution']
        spacing = meta['spacing']
        mask_shape = meta['mask_shape']

        vol = vol[0]
        boxes = []
        output_dic = {}
        anno = Annotations()
        for index, ii in enumerate(ii_list):
            p, z, y, x, r = pbb[ii, :]
            if index == 0:
                gal.text('case %s' % sample)
                cv2.imwrite(gal.next(),
                            draw_bb(vol[int(round(z)), :, :], y, x, r))
                cv2.imwrite(gal.next(),
                            draw_bb(vol[:, int(round(y)), :], z, x, r))
                cv2.imwrite(gal.next(),
                            draw_bb(vol[:, :, int(round(x))], z, y, r))

            dicom_z = mask_shape[0] - int(
                round((z + extendbox[0][0] - r / 2) * resolution[0] /
                      spacing[0]))
            dicom_y = int(
                round((y + extendbox[1][0] - r / 2) * resolution[1] /
                      spacing[1]))
            dicom_x = mask_shape[2] - int(
                round((x + extendbox[2][0] + r / 2) * resolution[2] /
                      spacing[2]))
            dicom_r_xy = int(round(r * resolution[1] / spacing[1]))
            dicom_r_z = int(round(r * resolution[0] / spacing[0]))
            dicom_z = max(dicom_z, 0)
            box = [
                max(dicom_z - dicom_r_z, 0), dicom_y, dicom_x, dicom_z,
                dicom_y + dicom_r_xy, dicom_x + dicom_r_xy
            ]
            anno.add(box=box, hint='Blob %d: %.3f' % (index + 1, p))

            dicom_z_to_save = int(
                round((z + extendbox[0][0] - r / 2) * resolution[0] /
                      spacing[0]))
            dicom_y_to_save = int(
                round((y + extendbox[1][0] - r / 2) * resolution[1] /
                      spacing[1]))
            dicom_x_to_save = int(
                round((x + extendbox[2][0] - r / 2) * resolution[2] /
                      spacing[2]))
            box_to_save = [
                dicom_z_to_save, dicom_y_to_save, dicom_x_to_save,
                dicom_z_to_save + dicom_r_z, dicom_y_to_save + dicom_r_xy,
                dicom_x_to_save + dicom_r_xy
            ]
            output_dic[str(p)] = box_to_save

        papaya = Papaya('/output/papaya_' + sample,
                        case_path=dicom_path + sample,
                        annotations=anno)
        pickle.dump(output_dic, open('/output/boxes/%s.pickle' % sample, 'wb'))
    gal.flush()
    return
Exemplo n.º 29
0
#!/usr/bin/env python3
from lung import *
from glob import glob
from gallery import Gallery

gal = Gallery('nodule_samples', cols=3)


def visualize(image, y, x, r):
    image = np.copy(image, order='C')
    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    cv2.circle(image, (int(x), int(y)), int(r + 5), (0, 255, 0))
    return image


for path in glob('scratch/luna16/*.h5')[:20]:
    volume = H5Volume(path)
    for i in range(volume.annotation.shape[0]):
        print(path, i)
        sub, anno = extract_nodule(volume,
                                   volume.annotation[i],
                                   0.8, [128, 128, 128],
                                   random_crop=True)
        anno = np.round(anno).astype(np.int32)
        z, y, x, r = anno
        cv2.imwrite(gal.next(), visualize(sub.images[z, :, :], y, x, r))
        cv2.imwrite(gal.next(), visualize(sub.images[:, y, :], z, x, r))
        cv2.imwrite(gal.next(), visualize(sub.images[:, :, x], z, y, r))
        pass
gal.flush()
Exemplo n.º 30
0
def write_dicom_volume_html (volume, path, title):
    gal = Gallery('pethtmlview/'+path, score=False, title=title)
    for i in range(volume.shape[0]):
        cv2.imwrite(gal.next(), volume[i])
        pass
    gal.flush()