示例#1
0
    def renderold(self, t):
        """Render two squares spinning about the origin, 180 degrees apart.

        The first square's color channels pulse with time-derived envelopes;
        the second square is drawn with whatever color is already set on
        ``self.square``.

        Args:
            t: Animation time in seconds.
        """
        M = np.eye(4, dtype=np.float32)
        transforms.translate(M, -.5, 0, 0)
        transforms.rotate(M, t * 360, 0, 0, 1)  # one full turn per second
        transforms.scale(M, 5, 5, 5)

        # Offset into the current 8-second period.
        p = math.floor(t / 8)
        off = t - p * 8

        # Pulse envelopes for the color channels (each clamped to [0, 1]).
        b = max(0, 1 - ((t % 1) * 4))      # fast ramp-down, retriggers every second
        m = 1 if off % 0.75 < 0.2 else 0   # on/off blink with a 0.75 s period
        d = max(0, 1 - ((t % 16) * 1))     # slow ramp-down over 16 s

        self.square.setProjection(self.projection)
        self.square.setModelView(M)
        self.square.setColor((b, m, d, 1))
        self.square.render()

        # Second square: same transform chain, rotated a further 180 degrees.
        M = np.eye(4, dtype=np.float32)
        transforms.translate(M, -.5, 0, 0)
        transforms.rotate(M, t * 360 + 180, 0, 0, 1)
        transforms.scale(M, 5, 5, 5)

        self.square.setProjection(self.projection)
        self.square.setModelView(M)
        self.square.render()
示例#2
0
 def render(self, t):
     """Draw the bar at its slot; it slides downward as time passes."""
     elapsed = t - self.start
     mv = np.eye(4, dtype=np.float32)
     transforms.scale(mv, 1.5 / 50, .4, 1)
     x = (self.slot + .5) * 2.0 / 50 - 1
     y = (elapsed / -5) + .6
     transforms.translate(mv, x, y, 0)
     self.bar.setModelView(mv)
     self.bar.render()
示例#3
0
 def render(self, t):
     """Render the bar; it descends as time since ``self.start`` grows."""
     dt = t - self.start
     model = np.eye(4, dtype=np.float32)
     transforms.scale(model, 1.5 / 50, .4, 1)
     transforms.translate(
         model,
         (self.slot + .5) * 2.0 / 50 - 1,
         (dt / -5) + .6,
         0,
     )
     self.bar.setModelView(model)
     self.bar.render()
示例#4
0
 def render(self, t):
     """Draw the geometry at its position with a small horizontal wobble."""
     mv = np.eye(4, dtype=np.float32)
     transforms.scale(mv, self.size, self.size, 1)
     transforms.translate(mv, self.pos[0], self.pos[1])
     wobble = math.sin(t * math.pi * 2 / 2 + self.phase) * 0.008
     transforms.translate(mv, wobble, 0)
     self.geometry.setModelView(mv)
     self.geometry.setProjection(self.projection)
     self.geometry.setColor(self.color)
     self.geometry.render()
示例#5
0
        def render(self, t):
            """Advance the particle's state, then draw it where it now sits."""
            self.update(t - self.reltime)

            mv = np.eye(4, dtype=np.float32)
            transforms.scale(mv, self.size, self.size, 1)
            transforms.translate(mv, self.pos[0], self.pos[1])

            self.geometry.setColor(self.color)
            self.geometry.setModelView(mv)
            self.geometry.setProjection(self.projection)
            self.geometry.render()
示例#6
0
        def render(self, t):
            """Step the animation by the elapsed time and render the geometry."""
            elapsed = t - self.reltime
            self.update(elapsed)
            matrix = np.eye(4, dtype=np.float32)
            transforms.scale(matrix, self.size, self.size, 1)
            transforms.translate(matrix, self.pos[0], self.pos[1])
            self.geometry.setColor(self.color)
            self.geometry.setModelView(matrix)
            self.geometry.setProjection(self.projection)
            self.geometry.render()
示例#7
0
 def render(self, t):
     """Render the geometry, oscillating slightly around its x position."""
     model = np.eye(4, dtype=np.float32)
     transforms.scale(model, self.size, self.size, 1)
     transforms.translate(model, self.pos[0], self.pos[1])
     angle = t * math.pi * 2 / 2 + self.phase
     transforms.translate(model, math.sin(angle) * 0.008, 0)
     self.geometry.setModelView(model)
     self.geometry.setProjection(self.projection)
     self.geometry.setColor(self.color)
     self.geometry.render()
示例#8
0
    def one2one_identity(self, im1, im2):
        """Return the cosine similarity between the features of two images.

        Both images go through the same normalize / scale / center-crop
        pipeline (extracted to ``_preprocess_identity`` to avoid the
        copy-pasted duplication), are batched for a single inference pass,
        and the resulting score vectors are compared.

        Args:
            im1: First input image.
            im2: Second input image.

        Returns:
            Cosine similarity between the two images' score vectors.
        """
        input_im1 = self._preprocess_identity(im1)
        input_im2 = self._preprocess_identity(im2)

        batch = np.asarray([input_im1, input_im2], dtype=np.float32)
        scores = self.inference(batch, output_layer=self.prob_layer)

        return M.cosine_similarity(scores[0], scores[1])

    def _preprocess_identity(self, im):
        """Normalize, scale to ``base_size``, and center-crop one image."""
        normalized = T.normalize(im, mean=self.mean, std=self.std)
        # T.scale also returns the applied ratio, unused here.
        scaled, _ = T.scale(normalized, short_size=self.base_size)
        return T.center_crop(scaled, crop_size=self.crop_size)
示例#9
0
        def render(self, t):
            """Draw the particle; after ``self.flicker`` it flashes and fades."""
            mv = np.eye(4, dtype=np.float32)
            transforms.scale(mv, .03 * self.m, .03 * self.m, 1)
            transforms.translate(mv, self.pos[0], self.pos[1], 0)

            if t > self.flicker:
                since = t - self.flicker

                # Linear fade-out from 0.5 alpha over four seconds.
                alpha = max(0, .5 - (since / 4))

                # Oscillation in [0, 1] driving the warm color flicker.
                pulse = math.sin(since * 20) / 2 + 0.5
                self.part.setColor((1, .5 + pulse * .5, pulse, alpha))

            self.part.setModelView(mv)
            self.part.render()
示例#10
0
        def render(self, t):
            """Render the particle with a quadratic fade-out after ``self.flicker``."""
            model = np.eye(4, dtype=np.float32)
            transforms.scale(model, .03 * self.m, .03 * self.m, 1)
            transforms.translate(model, self.pos[0], self.pos[1], 0)

            if t > self.flicker:
                dt = t - self.flicker

                # Quadratic fade: alpha falls off as (dt/4)^2 from 0.5.
                fade = max(0, .5 - math.pow(dt / 4, 2))

                # Flicker oscillation in [0, 1].
                glow = math.sin(dt * 30) / 2 + 0.5
                self.part.setColor((1, .5 + glow * .5, glow, fade))

            self.part.setModelView(model)
            self.part.render()
示例#11
0
 def render(self, t):
     """Render every lamp as a star, with exactly one lamp lit.

     If ``self.enabledLamp`` is set, that index is lit; otherwise the lit
     lamp cycles through the list at 10 lamps per second.

     Args:
         t: Animation time in seconds.
     """
     cycle = (t * 10) % len(self.lamps)
     for n, p in enumerate(self.lamps):
         mv = np.eye(4, dtype=np.float32)
         transforms.scale(mv, 1.0 / 60, 1.0 / 60, 1)
         transforms.translate(mv, p[0], p[1], 0)
         self.star.setModelView(mv)
         # Identity projection (the original scaled an identity by 1,1,1 — a no-op).
         self.star.setProjection(np.eye(4, dtype=np.float32))
         if self.enabledLamp is not None:
             lit = n == self.enabledLamp
         else:
             lit = n == int(cycle)
         # Lit lamp is white; the rest are dim red.
         self.star.color = (1, 1, 1, 1) if lit else (.1, 0, 0, 1)
         self.star.render()
示例#12
0
    def cls_batch(self, batch_ims):
        """Classify a batch of images and return their score vectors."""
        inputs = []
        for image in batch_ims:
            image = image.astype(np.float32, copy=True)
            normalized = T.normalize(image, mean=self.mean, std=self.std)
            # The returned scale ratio is not needed for classification.
            scaled, _ = T.scale(normalized, short_size=self.base_size)
            inputs.append(T.center_crop(scaled, crop_size=self.crop_size))

        batch = np.asarray(inputs, dtype=np.float32)
        return self.inference(batch, output_layer=self.prob_layer)
示例#13
0
    def det_im(self, im):
        """Detect objects in a single image.

        The image is normalized and rescaled, forwarded through the network
        together with its ``im_info`` metadata, and the resulting boxes are
        regression-adjusted, clipped, score-thresholded, NMS-ed (optionally
        box-voted), and converted to objects via ``boxes_filter``.

        Args:
            im: Input image array (converted to float32 internally).

        Returns:
            List of detection objects produced by ``boxes_filter``.
        """
        im = im.astype(np.float32, copy=True)
        normalized_im = T.normalize(im, mean=self.mean, std=self.std)
        # Resize so the short side matches scales[0], capped at max_sizes[0].
        scale_im, scale_ratio = T.scale(normalized_im, short_size=self.scales[0], max_size=self.max_sizes[0])

        # HWC -> CHW, then prepend a batch dimension of 1.
        input_data = scale_im.transpose(2, 0, 1)
        input_data = input_data.reshape((1,) + input_data.shape)
        self.net.blobs['data'].reshape(*input_data.shape)
        input_blob = {'data': input_data, 'rois': None}

        # im_info = [[height, width, 1.0]]; scaling was already applied above.
        input_blob['im_info'] = np.array([[scale_im.shape[0], scale_im.shape[1], 1.0]], dtype=np.float32)
        self.net.blobs['im_info'].reshape(*input_blob['im_info'].shape)

        # do forward
        forward_kwargs = {'data': input_blob['data'].astype(np.float32, copy=False)}
        forward_kwargs['im_info'] = input_blob['im_info'].astype(np.float32, copy=False)
        output_blob = self.net.forward(**forward_kwargs)

        # Proposal rows: column 0 is the batch index, columns 1-4 the box coords.
        rois = self.net.blobs['rois'].data.copy()
        boxes = rois[:, 1:5]

        scores = output_blob['cls_prob']
        scores = scores.reshape(*scores.shape[:2])

        # Apply bounding-box regression deltas
        box_deltas = output_blob['bbox_pred']
        box_deltas = box_deltas.reshape(*box_deltas.shape[:2])
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = clip_boxes(pred_boxes, scale_im.shape)

        objs = []
        # Iterate over foreground classes only (class_map[0] is background).
        for cls_ind, cls in enumerate(self.class_map[1:]):
            cls_ind += 1  # because we skipped background
            if cfg.TEST.AGNOSTIC:
                # Class-agnostic regression: a single shared box per proposal.
                cls_boxes = pred_boxes[:, 4:8]
            else:
                cls_boxes = pred_boxes[:, cls_ind * 4:(cls_ind + 1) * 4]
            cls_scores = scores[:, cls_ind]
            # dets rows are [x1, y1, x2, y2, score].
            dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            inds = np.where(dets[:, 4] > self.conf_thresh)
            cls_dets = dets[inds]

            keep = nms(cls_dets, self.nms_thresh)
            dets_NMSed = cls_dets[keep, :]
            if self.box_vote:
                # Refine surviving boxes by voting among suppressed overlaps.
                VOTEed = bbox_vote(dets_NMSed, cls_dets)
            else:
                VOTEed = dets_NMSed

            _obj = boxes_filter(VOTEed, bbox_id=cls_ind, class_name=cls, color=self.color_map[cls_ind],
                                scale=scale_ratio, thresh=self.conf_thresh)
            objs.extend(_obj)

        return objs
示例#14
0
    def render(self, t):
        """Render the tree with depth testing and a narrow perspective projection.

        Args:
            t: Animation time in seconds (currently unused; the rotation
               using it is commented out below).
        """
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)

        M = np.eye(4, dtype=np.float32)
        #transforms.rotate(M, t*20, 0, 1, 0)
        #transforms.rotate(M, t*20, 1, 0, 0)
        transforms.scale(M, .5, .5, .5)
        transforms.translate(M, 0, 0, -2)

        # NOTE(review): the angle here is literally 0 ("00"), so this rotate
        # is a no-op — possibly a leftover tuning knob; confirm intent.
        transforms.rotate(M, 00, 1, 0, 0)
        transforms.scale(M, .4, .4, .4)
        transforms.translate(M, 0, 0, -10)

        # Per pyrr's signature: fovy=3 degrees, aspect=1, near=0.001, far=10000.
        projection = pyrr.matrix44.create_perspective_projection(
            3, 1, 0.001, 10000)
        self.tree.setProjection(projection)
        self.tree.setModelView(M)
        self.tree.render()
        #        self.lt.render()
        gl.glDisable(gl.GL_DEPTH_TEST)
示例#15
0
    def render(self, t):
        """Spawn stars at ``self.freq`` per second, advance them all, and draw."""
        dt = t - self.last

        # Spawn a new star each time a 1/freq interval boundary is crossed.
        if int(t * self.freq) > int(self.last * self.freq):
            spawn_x = int(random.uniform(-25, 25))
            spawn_y = int(random.uniform(-5, 5))
            self.addstar(t, spawn_x, spawn_y)

        self.last = t

        # Step every star, collecting its updated position and color.
        positions = []
        colors = []
        for star in self.stars:
            star.step(t, dt)
            positions.append((star.x, star.y))
            colors.append(star.color)

        mv = np.eye(4, dtype=np.float32)
        transforms.scale(mv, 1.0 / 25, 1.0 / 25, 1)
        self.geometry.setStars(positions, colors)
        self.geometry.setModelView(mv)
        self.geometry.render()
示例#16
0
    def cls_im(self, im):
        """Classify one image; returns the scores summed over all crops."""
        image = im.astype(np.float32, copy=True)
        normalized = T.normalize(image, mean=self.mean, std=self.std)
        scaled, _ = T.scale(normalized, short_size=self.base_size)

        crops = []
        if self.crop_type in ('center', 'single'):  # for single crop
            crops.append(T.center_crop(scaled, crop_size=self.crop_size))
        elif self.crop_type in ('mirror', 'multi'):  # for 10 crops
            crops.extend(T.mirror_crop(scaled, crop_size=self.crop_size))
        else:
            crops.append(scaled)

        scores = self.inference(np.asarray(crops, dtype=np.float32), output_layer=self.prob_layer)

        return np.sum(scores, axis=0)
示例#17
0
    def render(self, t):
        """Draw the star trail behind the moving center point, then a HH MM SS clock.

        NOTE(review): ``dt`` is zero if this is called twice with the same
        ``t``, which would make ``dx``/``dy`` divide by zero — confirm the
        caller always advances ``t``.
        """
        dt = t - self.last
        # Velocity of the center point, used to aim newly spawned stars.
        x,y = self.getCenter(t)
        dx = (x - self.lastx)/dt
        dy = (y - self.lasty)/dt
        self.lastx = x
        self.lasty = y

        # Spawn a star each time a 1/freq interval boundary is crossed.
        if int(t*self.freq) > int(self.last*self.freq):
            self.addstar(t, dx, dy)

        self.last = t

        # Step every star, collecting updated positions and colors.
        positions = []
        colors = []
        for star in self.stars:
            star.step(t, dt)
            positions.append((star.x, star.y))
            colors.append(star.color)

        M = np.eye(4, dtype=np.float32)
        transforms.scale(M, 1.0/25, 1.0/25, 1)
        self.geometry.setStars(positions, colors)
        self.geometry.setModelView(M)
        self.geometry.render()

        now = datetime.now()

        # Six digits: hours, minutes, seconds — each split into tens and units.
        digits = [ now.hour / 10, now.hour % 10, now.minute / 10, now.minute % 10, now.second / 10, now.second % 10 ]
        digits = [ int(x) for x in digits ]

        n = 0
        for digit in digits:
            M = np.eye(4, dtype=np.float32)
            # Scale pulses from 1.2 down to 1.0 over each second ("tick" effect).
            d = now.microsecond / 1000000
            s = 1.2 - d * 0.2
            transforms.scale(M, s, s, 1)
            transforms.scale(M, 1.0/12, -1.0/10, 1)
            transforms.translate(M, -.8 + (n * 0.3 ) , 0, 0)
            # Extra gap before each tens digit, grouping the HH MM SS pairs.
            if n % 2 == 0:
                transforms.translate(M, 0.1 , 0, 0)
            self.digits[digit].setModelView(M)
            #self.digits[digit].color = (1,1,1, 0.5 + (1-d) * 0.5)
            self.digits[digit].render()
            n += 1
示例#18
0
    signalgenerator.setTexture(mainfbo.getTexture())
elif args.display == 'hub75e':
    # hub75e display: drive its signal generator from the main FBO's texture.
    signalgenerator = geometry.hub75e.signalgenerator()
    signalgenerator.setTexture(mainfbo.getTexture())

# Emulation shader
texquad = geometry.simple.texquad()
texquad.setTexture(mainfbo.getTexture())

# Tree emulator
tree = assembly.tree.tree(layoutfile)
tree.setTexture(mainfbo.getTexture())

# Projection matrix
M = np.eye(4, dtype=np.float32)
transforms.scale(M, 1, 1, 1)  # identity scale — effectively an identity projection

# Effect
try:
    # Dynamically import assembly.<effect>; the module is expected to contain
    # a class with the same name as the effect.
    i = __import__('assembly.%s' % args.effect)

    effect = getattr(getattr(i, args.effect), args.effect)()
    effect.setProjection(M)
except ImportError:
    print('Unable to initialize effect %s' % args.effect)
    raise

if args.music:
    start_music()

if not args.raw and not args.preview and not args.emulate:
示例#19
0
 def __init__(self, factors, name=''):
     """Build a scene node that applies ``transforms.scale(factors)``."""
     label = name + " <scale by %s>" % str(tuple(factors.flatten()))
     super(ScaleNode, self).__init__(label, transforms.scale(factors))
示例#20
0
def eval_batch():
    """Evaluate all images in SET_DICT with the classifier and log top-k accuracy.

    Python 2 code (print statements, xrange). For each image: resize,
    normalize, crop (center / multi / none), run batched inference,
    accumulate top-k hit counts, then write a summary to LOG_PTH.
    """
    # shuffle_conv1_channel()
    eval_len = len(SET_DICT)
    # eval_len = 1000
    accuracy = np.zeros(len(args.top_k))
    start_time = datetime.datetime.now()

    for i in xrange(eval_len - args.skip_num):
        im = cv2.imread(SET_DICT[i + args.skip_num]['path'])
        # For this exact mean/std combination use the PIL-based scaler;
        # otherwise the generic one (which also returns a scale ratio).
        if (PIXEL_MEANS == np.array([103.52, 116.28, 123.675])).all() and \
                (PIXEL_STDS == np.array([57.375, 57.12, 58.395])).all():
            scale_im = T.pil_scale(Image.fromarray(im), args.base_size)
            scale_im = np.asarray(scale_im)
        else:
            scale_im, _ = T.scale(im, short_size=args.base_size)
        input_im = T.normalize(scale_im, mean=PIXEL_MEANS, std=PIXEL_STDS)
        crop_ims = []
        if args.crop_type == 'center':  # for single crop
            crop_ims.append(T.center_crop(input_im, crop_size=args.crop_size))
        elif args.crop_type == 'multi':  # for 10 crops
            crop_ims.extend(T.mirror_crop(input_im, crop_size=args.crop_size))
        else:
            crop_ims.append(input_im)

        score_vec = np.zeros(args.class_num, dtype=np.float32)
        iter_num = int(len(crop_ims) / args.batch_size)
        timer_pt1 = datetime.datetime.now()
        # NOTE(review): any crops beyond iter_num * batch_size are silently
        # dropped when batch_size does not divide len(crop_ims) — confirm.
        for j in xrange(iter_num):
            scores = CLS.inference(
                np.asarray(crop_ims, dtype=np.float32)[j * args.batch_size:(j + 1) * args.batch_size],
                output_layer=args.prob_layer
            )
            score_vec += np.sum(scores, axis=0)
        # Class indices sorted by descending mean score over all crops.
        score_index = (-score_vec / len(crop_ims)).argsort()
        timer_pt2 = datetime.datetime.now()

        SET_DICT[i + args.skip_num]['evaluated'] = True
        SET_DICT[i + args.skip_num]['score_vec'] = score_vec / len(crop_ims)

        print 'Testing image: {}/{} {} {}/{} {}s' \
            .format(str(i + 1), str(eval_len - args.skip_num), str(SET_DICT[i + args.skip_num]['path'].split('/')[-1]),
                    str(score_index[0]), str(SET_DICT[i + args.skip_num]['gt']),
                    str((timer_pt2 - timer_pt1).microseconds / 1e6 + (timer_pt2 - timer_pt1).seconds)),

        # A hit is counted when the ground truth appears in the top-k guesses.
        for j in xrange(len(args.top_k)):
            if SET_DICT[i + args.skip_num]['gt'] in score_index[:args.top_k[j]]:
                accuracy[j] += 1
            tmp_acc = float(accuracy[j]) / float(i + 1)
            if args.top_k[j] == 1:
                print '\ttop_' + str(args.top_k[j]) + ':' + str(tmp_acc),
            else:
                print 'top_' + str(args.top_k[j]) + ':' + str(tmp_acc)
    end_time = datetime.datetime.now()

    # Write the evaluation summary to the log file.
    w = open(LOG_PTH, 'w')
    s1 = 'Evaluation process ends at: {}. \nTime cost is: {}. '.format(str(end_time), str(end_time - start_time))
    s2 = '\nThe model is: {}. \nThe val file is: {}. \n{} images has been tested, crop_type is: {}, base_size is: {}, ' \
         'crop_size is: {}.'.format(args.model_weights, args.val_file, str(eval_len - args.skip_num),
                                    args.crop_type, str(args.base_size), str(args.crop_size))
    s3 = '\nThe PIXEL_MEANS is: ({}, {}, {}), PIXEL_STDS is : ({}, {}, {}).' \
        .format(str(PIXEL_MEANS[0]), str(PIXEL_MEANS[1]), str(PIXEL_MEANS[2]), str(PIXEL_STDS[0]), str(PIXEL_STDS[1]),
                str(PIXEL_STDS[2]))
    s4 = ''
    for i in xrange(len(args.top_k)):
        _acc = float(accuracy[i]) / float(eval_len - args.skip_num)
        s4 += '\nAccuracy of top_{} is: {}; correct num is {}.'.format(str(args.top_k[i]), str(_acc),
                                                                       str(int(accuracy[i])))
    print s1, s2, s3, s4
    w.write(s1 + s2 + s3 + s4)
    w.close()

    if args.save_score_vec:
        # NOTE(review): 'score_vec' is a numpy array and file.write() expects
        # a string, so this likely raises TypeError at runtime — confirm.
        w = open(LOG_PTH.replace('.txt', 'scorevec.txt'), 'w')
        for i in xrange(eval_len - args.skip_num):
            w.write(SET_DICT[i + args.skip_num]['score_vec'])
    # NOTE(review): this close sits outside the if above — when save_score_vec
    # is False it re-closes the already-closed summary handle (harmless but
    # confusing); consider a with-statement inside the if.
    w.close()
    print('DONE!')