def flipPasteImage(self, paste_img, labels):
        flip_val = np.random.randint(0, 100)
        if flip_val < 50:
            logging.info("Rotating by -90.")
            rotate_deg = -90
        else:
            logging.info("Rotating by 90.")
            rotate_deg = 90

        rotated_paste_img, = utils.rotateImg(paste_img, rotate_deg)

        new_labels = []
        for l in labels:
            new_right = utils.rotate([0.0, 0.0], l['x'], rotate_deg)
            new_top = utils.rotate([0.0, 0.0], l['y'], rotate_deg)
            new_left = utils.rotate([0.0, 0.0], l['x'] + l['width'],
                                    rotate_deg)
            new_bottom = utils.rotate([0.0, 0.0], l['y'] + l['height'],
                                      rotate_deg)

            new_l = l.copy()
            new_l['x'] = new_right
            new_l['y'] = new_top
            new_l['width'] = new_left - new_right
            new_l['height'] = new_bottom - new_top

            new_labels.append(new_l)
            self.printLabelDims(new_labels)

        return rotated_paste_img, new_labels
Example No. 2
    def __init__(self, start, end, size, gap, angle=None, align='center', layer=None, datatype=None):

        self.start=np.array(start)
        self.end=np.array(end)
        self.size=np.array(size)
        self.gap=gap
        self.align=align

        pts=np.array([[0,0], [0, size[1]], size, [size[0], 0]])
        if angle is not None:
            pts=rotate(pts, angle, 'com')
            
        if align.lower()=='bottom':
            pass
        elif align.lower()=='top':
            pts=translate(pts, (0, -self.size[1]))
        elif align.lower()=='center':
            pts=translate(pts, (0, -self.size[1]/2))        
        else:
            raise ValueError('Align parameter must be one of bottom/top/center')

        strip_width=size[0]+gap
        
        v=self.end-self.start
        l=np.sqrt(np.dot(v,v))        
        N=int(np.floor(l/strip_width))
        spacing=v/N
        rotation=math.atan2(v[1], v[0])*180/np.pi
        pts=rotate(pts, rotation)

        origin = start + 0.5* v* (l-(N*strip_width - gap))/l

        polys=[translate(pts, origin + i*spacing) for i in range(N)]

        Elements.__init__(self, polys, layer, datatype)
Example No. 3
    def get_next_batch_train(self, iteration, batch_size=32):

        end = self.index_train + batch_size
        if iteration == 0:  # because we use only full batch
            self.index_train = 0
            end = batch_size
            self.train_images_in, self.train_images_gt = shuffle(
                self.train_images_in, self.train_images_gt)

        input_images = np.zeros((batch_size, self.dim_patch_in_rows,
                                 self.dim_patch_in_cols, params.num_channels))
        output_images = np.zeros((batch_size, self.dim_patch_gt_rows,
                                  self.dim_patch_gt_cols, params.num_channels))

        start = self.index_train
        for idx in range(start, end):
            image_in = self.train_images_in[idx].copy()
            image_gt = self.train_images_gt[idx].copy()
            # augmentation
            idx_degree = random.randint(0, len(self.rotation_degrees) - 1)
            image_in = utils.rotate(image_in,
                                    self.rotation_degrees[idx_degree])
            image_gt = utils.rotate(image_gt,
                                    self.rotation_degrees[idx_degree])
            input_images[idx - start] = image_in.copy()
            output_images[idx - start] = image_gt.copy()

            if self.SHOW_IMAGES:
                cv.imshow('input', input_images[idx - start] / 255)
                cv.imshow('output', output_images[idx - start] / 255)
                cv.waitKey(1000)

        self.index_train = end
        return input_images, output_images
Example No. 4
    def new_test(self):
        # img_path = os.path.join(DATA_DIR, 'datasets_val', 'val_1000', 'O1CN01395Jpz1zErXatzlXa_!!6000000006683-0-quark.jpg')
        # image = cv2.imread(img_path)
        #
        # image, angle, rotated_ratio, is_ok = generate_rotated_image(
        #     image,
        #     270,
        #     size=None,
        #     crop_center=False,
        #     crop_largest_rect=True
        # )

        img_path = os.path.join(DATA_DIR, 'datasets_val', 'x2.jpg')
        image = cv2.imread(img_path)
        image = rotate(image, 90)
        image = rotate(image, -90)

        # url = "https://img.alicdn.com/imgextra/i2/6000000006683/O1CN01395Jpz1zErXatzlXa_!!6000000006683-0-quark.jpg"
        # is_ok, image = download_url_img(url)
        # show_img_bgr(image)

        show_img_bgr(image)

        x = self.predict_img_bgr(image)

        print(x)
Example No. 5
def vicreb_rot(deltas, seed, dim, nA, nB, t_max, v, R_v, L, D_rot, out=None):
    n = nA + nB

    np.random.seed(seed)

    r = np.random.uniform(-L / 2.0, L / 2.0, [n, dim])
    u = utils.sphere_pick(n=n, d=dim)

    As = np.zeros([n], dtype=np.bool)
    As[:nA] = True
    Bs = np.logical_not(As)
    r0 = r.copy()
    wraps = np.zeros_like(r, dtype=np.int)

    if out is not None:
        np.savez(os.path.join(out, 'static.npz'), L=L, As=As, r0=r0, v=v)
    ums, ums_A, ums_B = [], [], []
    for t in range(t_max):
        abssep = np.abs(r[:, np.newaxis] - r[np.newaxis, :])
        seps = np.minimum(abssep, L - abssep)
        withins = utils.vector_mag_sq(seps) < R_v ** 2.0

        u_o = u.copy()
        u[...] = 0.0
        for i_n in range(n):
            delta_A, delta_B = deltas[not As[i_n]]

            w_As = np.logical_and(withins[i_n], As)
            w_Bs = np.logical_and(withins[i_n], Bs)

            u_net_A = np.sum(u_o[w_As], axis=0)
            u_net_B = np.sum(u_o[w_Bs], axis=0)
            u[i_n] += utils.rotate(u_net_A, delta_A)[0]
            u[i_n] += utils.rotate(u_net_B, delta_B)[0]

            if np.all(u[i_n] == 0.0):
                u[i_n] = u_o[i_n]

        u = utils.vector_unit_nonull(u)
        u = utils.rot_diff(u, D_rot, 1.0)

        r += v * u

        wraps_cur = ((r > L / 2.0).astype(np.int) -
                     (r < -L / 2.0).astype(np.int))
        wraps += wraps_cur
        r -= wraps_cur * L

        if out is not None:
            np.savez(os.path.join(out, 'dyn_{:010d}'.format(t)),
                     t=t, r=r, u=u, w=wraps)
        ums_A.append(utils.vector_mag(np.mean(u[As], axis=0)))
        ums_B.append(utils.vector_mag(np.mean(u[Bs], axis=0)))
        ums.append(utils.vector_mag(np.mean(u, axis=0)))
    return (np.mean(ums_A), scipy.stats.sem(ums_A),
            np.mean(ums_B), scipy.stats.sem(ums_B),
            np.mean(ums), scipy.stats.sem(ums),
            )
Example No. 6
 def getMVP( self ):
     width, height = self.GetGLExtents()
     MVP = perspective(45.0, width / height, self.near_plane, self.far_plane);
     
     MVP = translate( MVP, self.world_pos[0], self.world_pos[1], self.world_pos[2] )
     MVP = rotate( MVP, self.world_rot[1], 0, 1, 0 )
     MVP = rotate( MVP, self.world_rot[0], 1, 0, 0 )
     
     return (1, True, MVP)
Example No. 7
def generate_hdf5(cephaTxt,output,fname,argument=False):
    
    data = getDataFromTxt(cephaTxt)#return [(img_path,landmark,bbox)]   
    cepha_imgs = []
    cepha_landmarks = []
    for (imgPath,landmarkGt,bbox) in data:
        img = cv2.imread(imgPath,cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)
        
        #downsampled by 3: 3x3 patch
        height,width = img.shape[:2]
        size = (int(width/3),int(height/3))
        cephaImg = cv2.resize(img,size,interpolation=cv2.INTER_NEAREST)
        
        cepha_bbox = bbox#.subBBox(-0.05,1.05,-0.05,1.05)
        cepha_img = cephaImg[cepha_bbox.top:cepha_bbox.bottom+1,cepha_bbox.left:cepha_bbox.right+1]
    
        if argument and np.random.rand()>-1:
            ###rotation
            if np.random.rand() > 0.5:
                cepha_rotated_alpha,landmark_rotated = rotate(cepha_img,cepha_bbox,bbox.reprojectLandmark(landmarkGt),5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)#relative
                cepha_rotated_alpha = cv2.resize(cepha_rotated_alpha,(39,39))
                cepha_imgs.append(cepha_rotated_alpha.reshape((1,39,39)))
                cepha_landmarks.append(landmark_rotated.reshape(38))
            if np.random.rand() > 0.5:
                cepha_rotated_alpha,landmark_rotated = rotate(cepha_img,cepha_bbox,bbox.reprojectLandmark(landmarkGt),-5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                cepha_rotated_alpha = cv2.resize(cepha_rotated_alpha,(39,39))
                cepha_imgs.append(cepha_rotated_alpha.reshape((1,39,39)))
                cepha_landmarks.append(landmark_rotated.reshape(38))
        
        cepha_img = cv2.resize(cepha_img,(39,39))
        cepha_img = cepha_img.reshape((1,39,39))
        cepha_landmark = landmarkGt.reshape((38))
        
        cepha_imgs.append(cepha_img)
        cepha_landmarks.append(cepha_landmark)

    cepha_imgs,cepha_landmarks = np.asarray(cepha_imgs),np.asarray(cepha_landmarks)
    
    cepha_imgs = processImage(cepha_imgs)
    shuffle_in_unison_scary(cepha_imgs,cepha_landmarks)
    
    #save file
    base = join(OUTPUT,'1_cepha')#train/1_cepha   (or test)
    createDir(base)
    output = join(base,fname)#train/1_cepha/train.h5  (or test)
    output = output.replace('\\','/')
    logger("generate %s" % output)
    with h5py.File(output,'w') as h5:
        h5['data'] = cepha_imgs.astype(np.float32)
        h5['landmark'] = cepha_landmarks.astype(np.float32)
Example No. 8
    def adjust(self):
        print("mirror #", self.id_, ":", "start adjusting", get_time_h_m_s())
        light_src_coord = self.receive_light_src_coord()
        if light_src_coord is None:
            return
        intensity_before_adjusting = self.receive_intensity()
        if intensity_before_adjusting is None:
            return
        cur_intensity = intensity_before_adjusting

        dir_from_light = [
            self.coord_[0] - light_src_coord[0],
            self.coord_[1] - light_src_coord[1]
        ]

        rotation_angle = 0
        iter_count = 0
        dir_on_focus = []

        while intensity_before_adjusting == cur_intensity:

            if iter_count != 0:
                rotate(self.normal_, transm_global_params.ROTATION_DELTA_RAD)
                rotation_angle += transm_global_params.ROTATION_DELTA_RAD

            if dir_from_light[0] * self.normal_[0] + dir_from_light[
                    1] * self.normal_[1] <= 0:
                dir_on_focus = get_reflected_vector(dir_from_light,
                                                    self.normal_)
                normalize(dir_on_focus)

                ray = Ray(point=self.coord_, vector=dir_on_focus)
                is_sent = self.send_ray(ray)
                if not is_sent:
                    return

                time.sleep(0.5)

                cur_intensity = self.receive_intensity()
                if cur_intensity is None:
                    return

            iter_count += 1

        print("mirror #", self.id_, ":", "final angle:", rotation_angle,
              get_time_h_m_s())
        print("mirror #", self.id_, ":", "rotations count:", iter_count - 1,
              get_time_h_m_s())
        print("mirror #", self.id_, ":", "final normal:", self.normal_[0],
              self.normal_[1], get_time_h_m_s())
        print("mirror #", self.id_, ":", "final dir on focus:",
              dir_on_focus[0], dir_on_focus[1], get_time_h_m_s())
        print("mirror #", self.id_, ":", "end adjusting", get_time_h_m_s())
Example No. 9
 def random_full_rotation(img):
     #rotate either 0, 90, 180 or 270 degrees
     rot = np.random.randint(0, 4)
     if rot == 0:
         return img
     elif rot == 1:
         img, _ = utils.rotate(img, 90)
     elif rot == 2:
         img, _ = utils.rotate(img, 180)
     elif rot == 3:
         img, _ = utils.rotate(img, 270)
     return img
Example No. 10
 def random_full_rotation(img):
     # rotate either 0, 90, 180 or 270 degrees
     rot = np.random.randint(0, 4)
     if rot == 0:
         return img
     elif rot == 1:
         img, _ = utils.rotate(img, 90)
     elif rot == 2:
         img, _ = utils.rotate(img, 180)
     elif rot == 3:
         img, _ = utils.rotate(img, 270)
     return img
Example No. 11
    def process_frame(self, thresholded_frame):
        if self.area and self.area.is_complete:
            thresholded_frame = thresholded_frame[self.area.y1:self.area.y2, self.area.x1:self.area.x2]

        processed_frame = Frame()
        points = numpy.transpose(thresholded_frame.nonzero())

        if not len(points):
            return

        # Precalculations
        tan_half_fov_x = math.tan(self.fov_x/2)
        tan_half_fov_y = math.tan(self.fov_y/2)

        # m is the vector from the camera position to the origin
        m = self.camera_position * -1
        w = self.width/2
        h = self.height/2

        for point in points:
            img_y, img_x = point

            if self.area and self.area.is_complete:
                img_y += self.area.y1
                img_x += self.area.x1

            # Horizontal angle between platform middle (in image) and point
            delta_x = float(img_x - self.platform_middle[0])/2
            tau = math.atan(delta_x/w*tan_half_fov_x)

            # Vertical angle
            delta_y = float(img_y - self.platform_middle[1])/2
            rho = math.atan(delta_y/h*tan_half_fov_y)

            # Rotate vector m around tau and rho to point towards 'point'
            v = m
            v = rotate('z', v, tau) # Rotate around z axis for horizontal angle
            v = rotate('x', v, rho) # Rotate around x axis for vertical angle

            v = self.get_laser_plane_intersection(v)

            # Ignore any vertices that have negative z coordinates (pre scaling)
            if v[2] < 0:
                continue

            x,y,z = v*self.scale
            x,y,z = rotate('z', v, self.rotation_angle)

            vertex = Vertex(x, y, z)
            processed_frame.append(vertex)

        self.processed_frames.append(processed_frame)
Example No. 12
def create_side_images(class_image, inout, corners):

    how_to_rotate = [(90, -90), (180, 0), (-90, 90), (0, 180)]
    side_images = []

    for cl in (1, 2, 3, 4):

        side_image = np.zeros(class_image.shape, dtype='uint8')
        side_image[class_image == cl] = cl

        io = inout[cl - 1]
        htw = how_to_rotate[cl - 1]
        side_corners_idx = _corner_indexes[cl - 1]

        htw = htw[0] if io == 'in' else htw[1]
        side_image_rot, M = rotate(side_image, htw)

        side_corners = np.array(
            np.round([
                M.dot((corners[corner_idx][0], corners[corner_idx][1], 1))
                for corner_idx in side_corners_idx
            ])).astype(np.int)

        # Order the corners from higher (smaller y coordinate)
        if side_corners[0, 1] > side_corners[1, 1]:
            side_corners = side_corners[::-1]

        # Correct the angle on each side separately
        if side_corners[0, 0] != side_corners[1, 0]:
            m = float(side_corners[1, 1] - side_corners[0, 1]) / (
                side_corners[1, 0] - side_corners[0, 0])
            corners_angle = np.arctan(m) * 180 / np.pi
            correction_angle = -(corners_angle / abs(corners_angle) * 90 -
                                 corners_angle)

            side_image_rot, M = rotate(side_image_rot, correction_angle)

        side_image_rot[side_image_rot <= 0.5] = 0
        side_image_rot[side_image_rot > 0.5] = 1

        nz = np.nonzero(side_image_rot)
        min_y, max_y, min_x, max_x = np.min(nz[0]), np.max(nz[0]), np.min(
            nz[1]), np.max(nz[1])
        side_image_rot = side_image_rot[min_y:max_y + 1, min_x:max_x + 1]

        side_images.append(side_image_rot)

    return side_images
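The snippet above expects rotate(image, angle) to return both the rotated image and the 2x3 affine matrix M, which is then used to map the corner coordinates through the same transform. That helper is not shown; a minimal OpenCV-based sketch with the assumed signature might look like this:

import cv2

def rotate(image, angle):
    # Hypothetical helper: rotate `image` by `angle` degrees about its centre
    # and also return the affine matrix so point coordinates can be mapped
    # with M.dot((x, y, 1)), as the example does with the corners.
    h, w = image.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), angle, 1.0)
    return cv2.warpAffine(image, M, (w, h)), M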
Example No. 13
	def update(self):
		if self.isAlive is False:
			return
		
		outputs = self.neuralNet.update(self.inputs)
		
		self.direction += (outputs[0] - 0.5) * 20
		self.speed = outputs[1] * 10
		
		rad = math.radians(self.direction - 90)
		x, y = self.position
		x += self.speed*math.sin(rad)
		y += self.speed*math.cos(rad)
		self.position = (x, y)

		self.edgesPoints = 	[[self.position[0] - self.sideWidth//2, self.position[1] - self.frontWidth//2],
							[self.position[0] - self.sideWidth//2, self.position[1] + self.frontWidth//2],
							[self.position[0] + self.sideWidth//2, self.position[1] + self.frontWidth//2],
							[self.position[0] + self.sideWidth//2, self.position[1] - self.frontWidth//2],
							[self.position[0] - self.sideWidth//2, self.position[1] - self.frontWidth//2]]

		aux = 0
		for p in self.edgesPoints:
			self.edgesPoints[aux] = utils.rotate(self.position, p, -self.direction)
			self.edgesPointsAprox[aux] = int(round(self.edgesPoints[aux][0])), int(round(self.edgesPoints[aux][1]))
			aux += 1
Example No. 14
def findAngle2(data, lat_spacing_guess_px, rot_angle_scan_range_deg=(-10, -5)):
    """
    Find the angle phi (degrees) in scan_range for each lattice axis, under
    which the image "data" is rotated with respect to the imaging coordinates.
    
    Returns [[xphi, xamp, xrchisq, yphi, yamp, yrchisq], [x, xamps, yamps]]
    """
    xamps = []
    yamps = []
    dx = 0.1
    x = numpy.arange(rot_angle_scan_range_deg[0], rot_angle_scan_range_deg[1], dx)

    for phi in x:
        temp = data.copy()
        temp = utils.rotate(temp, phi)

        xlinescan = temp.sum(axis=0)
        ylinescan = temp.sum(axis=1)
        xamps.append(analyzeAutocorrelation(xlinescan, lat_spacing_guess_px)[1])
        yamps.append(analyzeAutocorrelation(ylinescan, lat_spacing_guess_px)[1])

    xfit, xpars = gauss1d(xamps)
    yfit, ypars = gauss1d(yamps)

    xchisq = ((xamps - xfit) ** 2).sum() / 0.003 ** 2
    ychisq = ((yamps - yfit) ** 2).sum() / 0.003 ** 2
    xrchisq = xchisq / (len(xamps) - 4)
    yrchisq = ychisq / (len(yamps) - 4)

    xphi = x[0] + xpars[1] * dx
    yphi = x[0] + ypars[1] * dx
    return [[xphi, xpars[0], xrchisq, yphi, ypars[0], yrchisq], [x, xamps, yamps]]
Example No. 15
def get_fake_mid(cones, color):
    mid = []
    cones = sort_cones(cones)
    angle = 0
    size = len(cones)
    for i in range(size):
        #a = angle_to_point([cones[i+1][0]-cones[i][0], cones[i+1][0]-cones[i][1]])
        a = angle_to_point(cones[i])
        print("angle:", a)
        angle = angle + a

    angle = angle / size
    print("final angle:", angle)

    for i in range(size - 1):
        m = (cones[i] + cones[i + 1]) / 2
        if color == "blue":
            of = [0, -HALF_TRACK_WIDTH]
        else:
            of = [0, HALF_TRACK_WIDTH]
        of = rotate(of, math.radians(angle))
        print("offset", of)
        mid.append(m + of)

    return mid
Example No. 16
def fix_face():
    r = request
    image = np.fromstring(r.data, np.uint8)
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    
    gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    detector = dlib.get_frontal_face_detector()

    '''
    dlib's frontal face detector only finds faces that are in the correct orientation.
    The approach here is simple: rotate the original picture by each candidate angle,
    and if dlib's detector finds faces at that angle, that is the angle needed to
    fix the original picture.
    '''
    angels = [0, 90, 180, 270]
    for angel in angels:
        rotated_gray_image = rotate(gray_img, angel)
        rects = detector(rotated_gray_image, 1)
        if len(rects) > 0:
            break
        
    response = {
        'fixing_angel' : str(angel)
    }
    response_pickled = jsonpickle.encode(response)
    return Response(response=response_pickled,
     status=200, 
     mimetype="application/json")
Example No. 17
 def checkOptions(self, player, otherPos, rotSeq):
     currentPos = (self.dict['x'], self.dict['y'])
     if otherPos == currentPos:
         options = self.generateCollisionOptions(player)
         bestDirection = utils.direction((self.dict['x'], self.dict['y']), self.dict['prevpos'])
         bdpos = utils.round_vector(bestDirection)
         if utils.magnitude(bdpos) == 0:
             bdpos = rand.choice(optKeys)
         if options[bdpos] and player.getRoom().validPosition(utils.add_vector(currentPos, bdpos)):
             self.dict['x'] += bdpos[0]
             self.dict['y'] += bdpos[1]
             return True
         else:
             for rot in rotSeq:
                 testvec = utils.rotate(bdpos, rot)
                 testvec = utils.round_vector(testvec)
                 if options[testvec]:
                     self.dict['x'] += testvec[0]
                     self.dict['y'] += testvec[1]
                     return True
             # if nothing worked -- this could be expanded to do proper step-by-step
             # checking until invalid
             self.dict['x'] = self.dict['prevpos'][0]
             self.dict['y'] = self.dict['prevpos'][1]
             return True
     return False
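Here utils.rotate(bdpos, rot) is assumed to rotate a 2-D direction vector by an angle drawn from rotSeq; the helper itself is not shown. A minimal sketch, assuming the angle is in degrees and rotation is counter-clockwise about the origin:

import math

def rotate(vector, angle_degrees):
    # Hypothetical 2-D rotation helper; the units (degrees) and direction
    # (counter-clockwise) are assumptions, not taken from the original project.
    rad = math.radians(angle_degrees)
    x, y = vector
    return (x * math.cos(rad) - y * math.sin(rad),
            x * math.sin(rad) + y * math.cos(rad))

Example No. 18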
def split_image_to_4(image):
    image_1 = image
    image_2 = rotate(image, 20, BATCH_SIZE_DEFAULT)
    image_3 = scale(image, 20, 4, BATCH_SIZE_DEFAULT)
    image_4 = random_erease(image, BATCH_SIZE_DEFAULT)

    image_1 = image_1.to('cuda')
    image_2 = image_2.to('cuda')
    image_3 = image_3.to('cuda')
    image_4 = image_4.to('cuda')

    # image = image.to('cuda')
    # show_mnist(image_1[0], 20, 28)
    # show_mnist(image_1[1], 20, 28)
    # show_mnist(image_1[2], 20, 28)
    # show_mnist(image_1[3], 20, 28)
    #
    # show_mnist(image_2[0], 20, 28)
    # show_mnist(image_2[1], 20, 28)
    # show_mnist(image_2[2], 20, 28)
    # show_mnist(image_2[3], 20, 28)
    #
    # input()
    # print(image_1.shape)
    # print(image_2.shape)
    # print(image_3.shape)
    # print(image_4.shape)
    # input()

    return image_1, image_2, image_3, image_4
Example No. 19
 def physicsStep_(self):
     gravity = utils.rotate(self.original_gravity, self.world_angle.get())
     self.world.SetGravity(gravity)
     self.world.Step(
             self.settings.time_step,
             self.settings.vel_iters,
             self.settings.pos_iters)
Example No. 20
    def render(self, return_numpy=True):
        self.view.fill(colors.white)

        # plot all the obstacles
        self.view.fill(colors.white)
        for obstacle_position in self.obstacle_positions:
            self.view.blit(self.black, obstacle_position)

        # plot the goal
        self.view.blit(self.green, self.goal_position)

        car_rect = self.car.get_rect()
        car_rect.center = self.car_position

        rotated_surface, rotated_rect = utils.rotate(self.car, car_rect,
                                                     self.car_rotation)
        self.view.blit(rotated_surface, rotated_rect)

        if return_numpy:
            # get numpy surface
            np_arr = pygame.surfarray.array3d(self.view)

            return np_arr

        else:
            return self.view
Example No. 21
    def apply(self, wrench, dt):
        if self.drag_model.enabled:
            self.drag_model.u[0] = (self.state.twist.linear[0] - self.wind[0])
            self.drag_model.u[1] = -(self.state.twist.linear[1] - self.wind[1])
            self.drag_model.u[2] = -(self.state.twist.linear[2] - self.wind[2])
            self.drag_model.u[3] = self.state.twist.angular[0]
            self.drag_model.u[4] = -self.state.twist.angular[1]
            self.drag_model.u[5] = -self.state.twist.angular[2]

            self.drag_model.u[0:3] = utils.rotate(self.drag_model.u[0:3],
                                                  self.state.quaternion)
            self.drag_model.u[3:6] = utils.rotate(self.drag_model.u[3:6],
                                                  self.state.quaternion)

            self.drag_model.limit(-100.0, 100.0)

            if self.verbose:
                print
                utils.pv('self.__class__.__name__')

            self.f(self.drag_model.u, dt, self.drag_model.y)

            if self.verbose:
                utils.pv('self.drag_model')

            if self.verbose:
                utils.pv('wrench')

            if len(wrench):
                wrench.force.x += -self.drag_model.y[0]
                wrench.force.y += self.drag_model.y[1]
                wrench.force.z += self.drag_model.y[2]
                wrench.torque.x += -self.drag_model.y[3]
                wrench.torque.y += self.drag_model.y[4]
                wrench.torque.z += self.drag_model.y[5]
            else:
                wrench.force.x = -self.drag_model.y[0]
                wrench.force.y = self.drag_model.y[1]
                wrench.force.z = self.drag_model.y[2]
                wrench.torque.x = -self.drag_model.y[3]
                wrench.torque.y = self.drag_model.y[4]
                wrench.torque.z = self.drag_model.y[5]

        if self.verbose:
            utils.pv('wrench')

        return wrench
Example No. 22
def parallel_pe_blocks(blocks,num_blocks,num_new_sym,modify_pa=True,use_ILP=False):

    # New symbols:
    new_symbols=range(len(blocks[0][0]),len(blocks[0][0])+num_new_sym)

    # Number of parts:
    num_parts=num_blocks

    # Compute P and Q on the blocks:
    if use_ILP:
        P,Q=pe_ilp_fun(blocks[:-num_new_sym],num_blocks=num_parts)
    else:
        P,Q=pe_sud_fun(blocks,num_blocks=num_parts)
    
    # If not previously extended, do it here:
    if modify_pa:
        extend_cosets(blocks,num_new_sym=num_new_sym)

    cycles=rotate(new_symbols)
    print 

    # Output container:
    out=[]

    # Extend:
    curr_part=0
    for idx in xrange(num_parts):

        # Get the corresponding parts for this block:
        PS=P[0:num_new_sym]
        QS=Q[0:num_new_sym]

        # # print idx,PS
        P=signle_rot(P)
        Q=signle_rot(Q)

        if modify_pa:
            extended_block=block_coverage(blocks[idx],PS,QS,new_symbols)
        else:
            extended_block=block_coverage(blocks[idx],PS,QS)

        cov_per=len(extended_block)/float(len(blocks[idx]))
        print idx,"%.3f" %(cov_per*100)
        out+=extended_block
        # print hd_pairwise(extended_block)

    # Freebies:
    for idx in xrange(num_parts,num_parts+2):
    # for idx in xrange(3):
        if modify_pa:
            if len(cycles)==1: # When extending by only one symbol is used
                cycles=[cycles]
            extend(blocks[idx],new_symbols,cycles[idx%num_new_sym])
        out+=blocks[idx]

    # print pa2str(out)
    # print "pa_size:",len(out)
    # print hd_pairwise(out)
    return len(out),out,P,Q
Example No. 23
 def circular(prime_number: int) -> bool:
     digits = tuple(number_to_digits(prime_number))
     rotated_digits = (rotate(digits, position)
                       for position in range(len(digits)))
     for rotated_prime in map(digits_to_number, rotated_digits):
         if rotated_prime not in primes_set:
             return False
     return True
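The circular-prime check above relies on helpers that are not shown: rotate over a digit tuple, number_to_digits, digits_to_number, and a global primes_set. A self-contained sketch, assuming rotate performs a cyclic left shift:

def rotate(seq, positions):
    # Assumed semantics: cyclic left shift of a tuple by `positions` places.
    positions %= len(seq)
    return seq[positions:] + seq[:positions]

def number_to_digits(number):
    return tuple(int(ch) for ch in str(number))

def digits_to_number(digits):
    return int(''.join(str(d) for d in digits))

def primes_up_to(limit):
    # Simple sieve of Eratosthenes; primes_set would normally cover the search range.
    sieve = [True] * (limit + 1)
    sieve[0], sieve[1] = False, False
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            sieve[p * p::p] = [False] * len(sieve[p * p::p])
    return {i for i, is_prime in enumerate(sieve) if is_prime}

primes_set = primes_up_to(1000)

With these definitions, circular(197) returns True, since 197, 971 and 719 are all prime.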
Example No. 24
    def apply(self, wrench, dt):
        if self.drag_model.enabled:
            self.drag_model.u[0] = (self.state.twist.linear[0] - self.wind[0])
            self.drag_model.u[1] = -(self.state.twist.linear[1] - self.wind[1])
            self.drag_model.u[2] = -(self.state.twist.linear[2] - self.wind[2])
            self.drag_model.u[3] = self.state.twist.angular[0]
            self.drag_model.u[4] = -self.state.twist.angular[1]
            self.drag_model.u[5] = -self.state.twist.angular[2]

            self.drag_model.u[0:3] = utils.rotate(self.drag_model.u[0:3], self.state.quaternion)
            self.drag_model.u[3:6] = utils.rotate(self.drag_model.u[3:6], self.state.quaternion)

            self.drag_model.limit(-100.0, 100.0)

            if self.verbose:
                print
                utils.pv('self.__class__.__name__')

            self.f(self.drag_model.u, dt, self.drag_model.y)

            if self.verbose:
                utils.pv('self.drag_model')

            if self.verbose:
                utils.pv('wrench')

            if len(wrench):
                wrench.force.x += -self.drag_model.y[0]
                wrench.force.y += self.drag_model.y[1]
                wrench.force.z += self.drag_model.y[2]
                wrench.torque.x += -self.drag_model.y[3]
                wrench.torque.y += self.drag_model.y[4]
                wrench.torque.z += self.drag_model.y[5]
            else:
                wrench.force.x = -self.drag_model.y[0]
                wrench.force.y = self.drag_model.y[1]
                wrench.force.z = self.drag_model.y[2]
                wrench.torque.x = -self.drag_model.y[3]
                wrench.torque.y = self.drag_model.y[4]
                wrench.torque.z = self.drag_model.y[5]

        if self.verbose:
            utils.pv('wrench')

        return wrench
Example No. 25
 def _rotate_cone(self, theta):
     """
     Rotate the cone by theta degrees around the pivot, symbolised by the end of the cone (near the Enemy character)
     """
     new_cone, coordinates = rotate(
         self.cone, (self.WIDTH - self.CHAR_WIDTH, self.HEIGHT // 2),
         (self.CONE_WIDTH, self.CONE_HEIGHT // 2), theta)
     self._update_image(new_cone, coordinates)
     self.detection_cone = new_cone
Example No. 26
    def execute(self, userdata):
        self.ar_pose_marker_sub = rospy.Subscriber("/ar_pose_marker", AlvarMarkers, self.ar_tag_callback)
        rospy.wait_for_message("/ar_pose_marker", AlvarMarkers)

        search_angle = {'middle': 0, 'left': 45, 'right': -45}

        while not rospy.is_shutdown():        
            for angle in search_angle:
                if rospy.is_shutdown():
                    return 'end'
                
                utils.rotate(search_angle[angle])
                rospy.sleep(0.1)
                utils.rotate(-search_angle[angle])
                if self.ar_tag_id != None:
                    return 'found'

        self.ar_pose_marker_sub.unregister()
        return 'end'
Example No. 27
    def resize(self, vec1, vec2):
        level = self.level
        self.destroy()

        w,h = self.size

        vec1 = utils.rotate(vec1 - self.position, -self.angle)
        vec2 = utils.rotate(vec2 - self.position, -self.angle)
        try:
            w *= 1. * vec2[0] / vec1[0]
            h *= 1. * vec2[1] / vec1[1]
        except ZeroDivisionError:
            pass #whatever

        w = max(w, 0.1)
        h = max(h, 0.1)
        self.size = (w,h)

        self.create(level)
Example No. 28
def save_images(images,
                species,
                directory='train',
                csv_name='temp.csv',
                augment=False):
    cropped_images = []
    image_species = []
    image_paths = []
    count = 1
    write_dir = 'dataset/{}'.format(directory)
    if not os.path.exists(write_dir):
        os.mkdir(write_dir)

    for index in range(len(images['original'])):
        image = utils.load_image_and_preprocess(images['original'][index],
                                                images['segmented'][index])
        if type(image) != type([]):
            image_dir = '{}/{}'.format(
                write_dir, species[index].lower().replace(' ', '_'))
            if not os.path.exists(image_dir):
                os.mkdir(image_dir)

            file_name = '{}.jpg'.format(count)

            image_to_write = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imwrite(os.path.join(image_dir, file_name), image_to_write)
            image_paths.append(os.path.join(image_dir, file_name))
            cropped_images.append(image)
            image_species.append(species[index])
            count += 1

            if augment:
                angle = 90
                while angle < 360:
                    rotated_image = utils.rotate(image, angle)

                    file_name = '{}.jpg'.format(count)
                    image_to_write = cv2.cvtColor(rotated_image,
                                                  cv2.COLOR_RGB2BGR)
                    #cv2.imwrite(os.path.join(image_dir, file_name), image_to_write)
                    result = Image.fromarray((image_to_write).astype(np.uint8))
                    result.save(os.path.join(image_dir, file_name))
                    image_paths.append(os.path.join(image_dir, file_name))
                    cropped_images.append(rotated_image)
                    image_species.append(species[index])

                    angle += 90
                    count += 1

        if index > 0 and index % 1000 == 0:
            print('[INFO] Processed {:5d} images'.format(index))

    print('[INFO] Final Number of {} Samples: {}'.format(
        directory, len(image_paths)))
    raw_data = {'image_paths': image_paths, 'species': image_species}
Example No. 29
 def convert_nonrotating_to_rotating_reference_frame(self):
     path2 = []
     t0 = None
     for t, r, v, a in self.path:
         if t0 == None:
             t0 = t  # first time
         x, y, z = r[0], r[1], r[2]
         x, z = utils.rotate(x, z, (t - t0) * self.body_rotspeed)
         r2 = np.array([x, y, z])
         path2.append((t, r2, v, a))
     return path2
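Example No. 30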
 def update_particles(self, pose):
     if pose is not None:
         u_noise = np.random.multivariate_normal(np.zeros(3), self.x_cov,
                                                 (self.Np, ))
         # new_particles = self.robot.state_transition(self.particles, u, dt)
         origin = self.transform(pose)
         self.particles[..., :2] -= origin[..., :2]
         self.particles = utils.rotate(self.particles, u_noise[..., 2:])
         self.particles[..., :2] += u_noise[..., :2]
         self.particles[..., :2] += origin[..., :2]
     return self.particles
Example No. 31
    def actonK(self,k,ig):
        '''
        Get the k' vector after group action on k specified by ig

        k:
            the input k-vector.
        ig:
            specify the group element to act on k.
        '''
        #get the transformation to k: G^-1(ig)*k
        k1=rotate(k,ig*pi*2/self.n)
        return k1
Example No. 32
 def ai(self, themap, engine):
     if self.hp > 0:
         lx,ly = self.lp[0]
         rx,ry = self.rp[0]
         pcoords = tuple(themap.player.coords)
         if pcoords in self.los:
             self.playermemory = pcoords
             if pcoords in self.loe:
                 engine.attack(self,themap.player)
                 
             else:
                 self.move(themap)
                 
         elif pcoords in self.lp and themap.passable(lx,ly):
             rotate(self,"snwe"[self.facing])
             
         elif pcoords in self.rp and themap.passable(rx,ry):
             rotate(self,"nswe"[self.facing])
             
         else:
             self.move(themap)
Example No. 33
    def stopGrab(self):
        if self.grabJoint:
            self.level.world.DestroyJoint(self.grabJoint)
            self.grabJoint = None

            offs = b2d.b2Vec2(self.radius*1.5, self.radius*1.5)
            offs = utils.rotate(offs, self.level.world_angle.get())
            saypos = self.body.position + offs
            self.level.putStaticActor(SaySomething(saypos), removeAfter = 1)

            self.candy.release()
            self.candy = None
Example No. 34
    def straighten(self, stepsize=3, low_angle=-5, high_angle=5):
        # width is shape[1], height is shape[0]
        #cv2.imwrite("tmp//before.jpg", self.img)
        origWidth = self.img.shape[1]
        if low_angle < -20 or high_angle > 20:
            raise Exception("Photo is too skewed. Please straighten photo before trying to process it")


        img = resize(self.img, width=600)         # for some reason, straightening works better at this width :-??

        # straighten out images
        # using histograms: rotate +-5 in .3 steps, get max of each histogram
        #                   and ideal rotation is argmax of those maxes

        simg = img
        hists = []
        rng = list(range(low_angle*stepsize, high_angle*stepsize))
        bincount = 600 if img.shape[0] > 600 else img.shape[0]
        for ang in rng:
            pimg = rotate(simg, ang/float(stepsize), fixed=True) # was true, but doesn't make sense and doesn't work
            # pimg = rotate(simg, ang/float(stepsize))
            # cv2.imwrite("tmp//rotate %d.jpg" % ang, pimg)
            hist, _ = np.histogram(pimg.sum(axis=1), bincount)
            #plt.plot(hist)
            #plt.savefig('tmp//hist %d.png' % ang, bbox_inches='tight')
            #plt.close()
            hists.append(max(hist))
        rot = np.argmax(hists)

        # if the best rotation angle is the one on the edge of our threshold, try to rotate again with an extended
        # threshold in that direction
        if rot == 0:
            self.straighten(low_angle=low_angle-5, high_angle=high_angle-5)
        elif rot == len(rng) - 1:
            self.straighten(low_angle=low_angle+5, high_angle=high_angle+5)
        img = rotate(self.img, rng[rot]/float(stepsize), fixed=False)   # otsu's method removes
                                                                          # background noise better

        # self.img = img.resize(w=origWidth//2)        # so that all letters are small enough
        self.img = resize(img, width=600)              # maybe I should look at average size of a blob ?
Example No. 35
def stupid_user(usernum, rotate_tested = 10, rotate_module=10, accepted_results=15, test=False):
    results = dict.fromkeys(range(0,150), 0)
    reached_leafs = [1000] * 150
    train_set = complete_train_set(usernum)
    all_sets = parted_all_for_user(usernum)
    for jj in range(0, rotate_module):
        pztree = PZFTree(rotate(train_set, randint(0,5000)))
        print "Avg tree depth = {0}".format(pztree.avg_tree_depth)
        # for i, run_set in enumerate(all_sets[50:], start=50):
        for i, run_set in enumerate(all_sets, start=0):
            for j in range(0,3):
                run_set= rotate(run_set,randint(0,len(run_set)))
                res_p, leafs_run = pztree.run_seq_weigted(run_set, factor=25)
                if res_p > results[i]:
                    results[i] = res_p
                if leafs_run < reached_leafs[i]:
                    reached_leafs[i] = leafs_run
    lst_leafs = pick_best_leafs(reached_leafs, accepted_results)
    lst_prob = pick_best(results, test_data[usernum], reached_leafs, rotate_module, accepted_results)
    # lst_prob = []
    to_csv = [0]*150

    hits_leafs = 0
    hits_prob = 0
    # compare_results(lst_leafs,lst_prob, test_data[usernum])
    for loc in lst_leafs:
        to_csv[loc] = 1
        if test:
            if test_data[usernum][loc]==1:
                print "Hit Leafs: " + str(loc)
                hits_leafs += 1

    for loc in lst_prob:
        # to_csv[loc] = 1
        if test:
            if test_data[usernum][loc]==1:
                print "Hit prob: " + str(loc)
                hits_prob += 1
    return hits_leafs, hits_prob, len(lst_leafs), len(lst_prob), to_csv
Example No. 36
def save_faces(frame, output, verbose=False):
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		
	detector = dlib.get_frontal_face_detector()
	rects = detector(gray, 0)

	i=1
	while len(rects) == 0 and i < 4:
		gray = rotate(gray,i)
		frame = rotate(frame,i)
		rects = detector(gray, 0)
		i+=1

	for (_, rect) in enumerate(rects):
		l_x = int(rect.tl_corner().x - rect.tl_corner().x*0.1)
		t_y = int(rect.tl_corner().y - rect.tl_corner().y*0.2)
		r_x = int(rect.br_corner().x + rect.br_corner().x*0.1)
		b_y = int(rect.br_corner().y + rect.br_corner().y*0.1)
		face_image = frame[t_y:b_y , l_x:r_x, :]

		cv2.imwrite(output, face_image)
		if verbose:
			print("[INFO] saved {}".format(output))
Example No. 37
def split_image_to_4(image, vae_enc, vae_dec):
    # split_at_pixel = 19
    # width = image.shape[2]
    # height = image.shape[3]
    #
    # image_1 = image[:, :, 0: split_at_pixel, :]
    # image_2 = image[:, :, width - split_at_pixel:, :]
    # image_3 = image[:, :, :, 0: split_at_pixel]
    # image_4 = image[:, :, :, height - split_at_pixel:]

    # # image_1, _ = torch.split(image, split_at_pixel, dim=3)
    # # image_3, _ = torch.split(image, split_at_pixel, dim=2)
    #
    image_1 = image
    image_2 = rotate(image, 20, BATCH_SIZE_DEFAULT)
    image_3 = scale(image, BATCH_SIZE_DEFAULT)
    #image_4 = random_erease(image, BATCH_SIZE_DEFAULT)

    vae_in = torch.reshape(image, (BATCH_SIZE_DEFAULT, 784))

    sec_mean, sec_std = vae_enc(vae_in)
    e = torch.zeros(sec_mean.shape).normal_()
    sec_z = sec_std * e + sec_mean
    image_4 = vae_dec(sec_z)
    image_4 = torch.reshape(image_4, (BATCH_SIZE_DEFAULT, 1, 28, 28))

    image_1 = image_1.to('cuda')
    image_2 = image_2.to('cuda')
    image_3 = image_3.to('cuda')
    image_4 = image_4.to('cuda')

    #image = image.to('cuda')
    # show_mnist(image_1[0], 20, 28)
    # show_mnist(image_1[1], 20, 28)
    # show_mnist(image_1[2], 20, 28)
    # show_mnist(image_1[3], 20, 28)
    #
    # show_mnist(image_2[0], 20, 28)
    # show_mnist(image_2[1], 20, 28)
    # show_mnist(image_2[2], 20, 28)
    # show_mnist(image_2[3], 20, 28)
    #
    # input()
    # print(image_1.shape)
    # print(image_2.shape)
    # print(image_3.shape)
    # print(image_4.shape)
    # input()

    return image_1, image_2, image_3, image_4
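Example No. 38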
def process_images(model, input_path, output_path,
                   batch_size=64, crop=True):
    extensions = ['.jpg', '.jpeg', '.bmp', '.png']

    output_is_image = False
    if os.path.isfile(input_path):
        image_paths = [input_path]
        if os.path.splitext(output_path)[1].lower() in extensions:
            output_is_image = True
            output_filename = output_path
            output_path = os.path.dirname(output_filename)
    else:
        image_paths = [os.path.join(input_path, f)
                       for f in os.listdir(input_path)
                       if os.path.splitext(f)[1].lower() in extensions]
        if os.path.splitext(output_path)[1].lower() in extensions:
            print('Output must be a directory!')

    predictions = model.predict_generator(
        RotNetDataGenerator(
            image_paths,
            input_shape=(224, 224, 3),
            batch_size=64,
            one_hot=True,
            preprocess_func=preprocess_input,
            rotate=False,
            crop_largest_rect=True,
            crop_center=True
        ),
        val_samples=len(image_paths)
    )

    predicted_angles = np.argmax(predictions, axis=1)

    if output_path == '':
        output_path = '.'

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for path, predicted_angle in zip(image_paths, predicted_angles):
        image = cv2.imread(path)
        rotated_image = rotate(image, -predicted_angle)
        if crop:
            size = (image.shape[0], image.shape[1])
            rotated_image = crop_largest_rectangle(rotated_image, -predicted_angle, *size)
        if not output_is_image:
            output_filename = os.path.join(output_path, os.path.basename(path))
        cv2.imwrite(output_filename, rotated_image)
Example No. 39
    def actonK(self,k,ig):
        '''
        Get the k' vector after group action on k specified by ig

        k:
            the input k-vector.
        ig:
            specify the group element to act on k.
        '''
        #get the transformation to k: G^-1(ig)*k
        k1=rotate(k,ig*pi*2/self.n)
        if ig>=self.ng/2:
            #image about the y axis
            k1=sv(k1,n=array([1.0,0.0]))
        return k1
Example No. 40
def run(car_pos=[0, 0, 0, 0]):
    global track_blue, track_yellow
    viewed_blue = []
    viewed_yellow = []

    for cone in track_blue:
        local = global_to_local(car_pos, cone)
        if dist([0, 0], local) < VIEW_MAX_DISTANCE:
            if is_in_fov(local):
                viewed_blue.append(local)
    for cone in track_yellow:
        local = global_to_local(car_pos, cone)
        if dist([0, 0], local) < VIEW_MAX_DISTANCE:
            if is_in_fov(local):
                viewed_yellow.append(local)

    n_blue = len(viewed_blue)
    n_yellow = len(viewed_yellow)
    for i in range(int(DROPOUT_PERCENTAGE * n_blue)):
        viewed_blue.pop(random.randrange(len(viewed_blue)))
    for i in range(int(DROPOUT_PERCENTAGE * n_yellow)):
        viewed_yellow.pop(random.randrange(len(viewed_yellow)))

    fov_p0 = [0, VIEW_MAX_DISTANCE]
    fov_p0 = rotate(fov_p0, car_pos[2] - math.pi / 2)
    fov_p1 = fov_p0
    fov_p0 = rotate(fov_p0, VIEW_HFOV / 2)
    fov_p1 = rotate(fov_p1, -VIEW_HFOV / 2)
    fov_p0[0] += car_pos[0]
    fov_p0[1] += car_pos[1]
    fov_p1[0] += car_pos[0]
    fov_p1[1] += car_pos[1]
    time.sleep(SOFTWARE_LATENCY)

    return [fov_p0, fov_p1], np.array(track_blue), np.array(
        track_yellow), np.array(viewed_blue), np.array(viewed_yellow), "", img
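Example No. 41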
 def init_particles(self, local_pose, local_pose_cov, d, d_var, rel_pose,
                    rel_pose_cov):
     noisy_local = np.random.multivariate_normal(local_pose,
                                                 local_pose_cov,
                                                 size=self.Np)
     distances = np.random.normal(d, np.sqrt(d_var), size=(self.Np, 1))
     angles = np.random.uniform(0, 2 * np.pi, size=(self.Np, 1))
     rel = noisy_local[..., :2] + distances * np.concatenate(
         (np.cos(angles), np.sin(angles)), axis=-1)
     thetas = np.random.uniform(0, 2 * np.pi, size=(self.Np, 1))
     noisy_rel = np.random.multivariate_normal(rel_pose,
                                               rel_pose_cov,
                                               size=self.Np)
     rel_rotated = utils.rotate(noisy_rel, thetas)
     offsets = rel - rel_rotated[..., :2]
     return np.concatenate((offsets, thetas), axis=-1)
Example No. 42
    def _get_elem_extents(self, elem):
        elem_extents = Rect.far_extents()
        tag_suffix = elem.tag.split('}')[-1]
        if tag_suffix not in SVG_TAGS:
            return elem_extents

        shape_class = getattr(shapes, tag_suffix)
        shape_obj = shape_class(elem)
        path = shape_obj.d_path()
        mtx = shape_obj.transformation_matrix()
        if path:
            points = shapes.point_generator(path, mtx, self.settings.smoothness)
            for point in points:
                elem_extents = elem_extents.expand_to(rotate(Vector2(point), self.rotate_rads))

        return elem_extents
Example No. 43
    def drawHand(self, graphics):
        if not self.grabJoint: return

        a = self.grabJoint.GetAnchor1()
        b = self.grabJoint.GetAnchor2()
        pos = (a+b)/2.
        h = (a-b).Length()/2.
        w = h/4.
        right = utils.rotate(b2d.b2Vec2(-10,0), self.level.world_angle.get())
        angle = utils.angle_between(right, b-a)*180/math.pi-90

        graphics.putSprite(
                pos,
                self.grab_spr_name,
                (w,h),
                angle=angle)
Example No. 44
def findAngle(data, length_guess, scan_range=(-12, -4)):
    """
   Finds the angle phi in scan_range, under which the image in "data" is rotated with
   respect to the imaging coordinates.
   The image must contain at least one lattice structure in one direction for this to work.
   """
    breiten_neu = []
    breiten_alt = []
    x = numpy.arange(scan_range[0], scan_range[1], 0.1)

    for phi in x:
        temp = data.copy()
        temp = utils.rotate(temp, phi)

        linescan_neu = temp.sum(axis=0)
        linescan_alt = temp.sum(axis=1)
        breiten_neu.append(analyzeAutocorrelation(linescan_neu, length_guess)[1])
        breiten_alt.append(analyzeAutocorrelation(linescan_alt, length_guess)[1])

    def fitfunction(p, xachse):
        return p[0] * numpy.exp(-((xachse - p[1]) / p[2]) ** 2) + p[3]

    def errorfunction(p, xachse, data):
        return data - fitfunction(p, xachse)

    (pars_neu, success_neu) = optimize.leastsq(
        errorfunction, [0.15, scan_range[len(scan_range) // 2], 5, 0], args=(x, breiten_neu)
    )
    (pars_alt, success_alt) = optimize.leastsq(
        errorfunction, [0.15, scan_range[len(scan_range) // 2], 5, 0], args=(x, breiten_alt)
    )

    (neu, alt) = (True, True)
    if pars_neu[0] < 0.12:
        neu = False
    if pars_alt[0] < 0.12:
        alt = False
    if not alt and not neu:
        raise Exception("No lattice structure found")

    phi = (pars_neu[1] * neu + pars_alt[1] * alt) / (alt + neu)

    return phi
Example No. 45
    def draw(self, graphics):
        #super(Gorilla, self).draw(graphics)
        right = utils.rotate(b2d.b2Vec2(-10,0), self.level.world_angle.get())
        if self.body.linearVelocity.Length()<1:
            angle = 0.
        else:
            angle = utils.angle_between(right, self.body.linearVelocity-right)
            angle = int(angle * 180 / math.pi)

        if self.hasCandy(): sprite = self.happy_spr_name
        else: sprite = self.spr_name
        graphics.putSprite(
                self.body.position,
                sprite,
                (self.radius, self.radius),
                angle=angle,
                flipX = True,
                flipY = abs(angle)>90)

        self.drawHand(graphics)
Example No. 46
def vicrot(delta, n, seed, dim, t_max, v, R_v, L, D_rot, out=None):
    np.random.seed(seed)

    r = np.random.uniform(-L / 2.0, L / 2.0, [n, dim])
    u = utils.sphere_pick(n=n, d=dim)

    r0 = r.copy()
    wraps = np.zeros_like(r, dtype=np.int)

    if out is not None:
        np.savez(os.path.join(out, 'static.npz'), L=L, r0=r0, v=v)
    ums = []
    for t in range(t_max):
        abssep = np.abs(r[:, np.newaxis] - r[np.newaxis, :])
        seps = np.minimum(abssep, L - abssep)
        withins = utils.vector_mag_sq(seps) < R_v ** 2.0

        u_o = u.copy()
        u[...] = 0.0
        for i_n in range(n):
            u_net = np.sum(u_o[withins[i_n]], axis=0)
            u[i_n] = utils.rotate(u_net, delta)

        u = utils.vector_unit_nonull(u)
        u = utils.rot_diff(u, D_rot, 1.0)

        r += v * u

        wraps_cur = ((r > L / 2.0).astype(np.int) -
                     (r < -L / 2.0).astype(np.int))
        wraps += wraps_cur
        r -= wraps_cur * L

        if out is not None:
            np.savez(os.path.join(out, 'dyn_{:010d}'.format(t)),
                     t=t, r=r, u=u, w=wraps)
        ums.append(utils.vector_mag(np.mean(u, axis=0)))
    return np.mean(ums), scipy.stats.sem(ums)
Example No. 47
def key_expand(key, nr, nk):
    expanded = [k for k in key]
    tmp = [0]*4
    rcon_iter = 1

    # size is either 16, 24 or 32 bytes
    size = nk*4

    # length of the expanded key is either
    # 176, 208 or 240 bytes, depending on the key size
    expanded_keysize = (nr+1)*16
    currentsize = size

    while currentsize < expanded_keysize:

        for i in range(4):
            tmp[i] = expanded[(currentsize-4)+i]

        if currentsize%size == 0:
            tmp = rotate(tmp)
            for i in range(4):
                tmp[i] = S[tmp[i]]

            tmp[0] = tmp[0]^RCON[rcon_iter]
            rcon_iter += 1

        # Add an extra s-box for 256 bit keys
        if currentsize%size == 16 and size==32:
            for i in range(4):
                tmp[i] = S[tmp[i]]

        for i in range(4):
            expanded.append(expanded[currentsize-size]^tmp[i])
            currentsize += 1

    return expanded
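In this AES key schedule, rotate(tmp) plays the role of the RotWord step. The helper is not shown; a plausible sketch, assuming it takes a 4-byte list:

def rotate(word):
    # Cyclically shift a 4-byte word one position to the left, e.g.
    # [0x1d, 0x2c, 0x3a, 0x4f] -> [0x2c, 0x3a, 0x4f, 0x1d].
    return word[1:] + word[:1]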
Example No. 48
 def set_state(self, state):
     self.tf = utils.rotate(state[0])
     for line in self.lines:
         line.set_tf(self.tf)
Example No. 49
 def to_body(self, v):
     """
     Convert vel/accel from world frame to body frame
     """
     return utils.rotate(v, self.inverse_quaternion)
Example No. 50
def admin_inner(client, nick, crawler):
    if nick not in client.mod_conf['admin']['admins']:
        return 'You do not have administrator privileges!'
    
    try:
        cmd = crawler.normal().lower()
        text = crawler.chain
    except IndexError:
        cmd = 'help'
    
    ### Position #############
    
    if cmd == 'sit':
        client.sit()
    
    elif cmd == 'stand':
        client.stand()
    
    elif cmd == 'face':
        client.face(crawler.chain[0])
    
    ### Joke moves ###########
    
    elif cmd == 'spin':
        args = ['l', 's', '4'] if not text else text.split()
        
        rot = ['n', 'e', 's', 'w']
        rot = rotate(rot, rot.index(args[1][0].lower()))
        if args[0][0].lower() == 'l':
            rot.reverse()
        
        rot *= int(args[2])
        client.emote(10)
        client.sleep(600)
        for d in rot:
            client.face(d)
            client.sleep(300)
        client.sleep(300)
        client.emote(115)
    
    
    elif cmd == 'juke':
        # 2 args or no args
        if len(crawler) not in (0, 2):
            return 'Bad args! See the help sub-command.'
        args = ['u', 's'] if not text else text.lower().split()
        e_up = args[0][0] == 'u'
        s_dir = args[1][0]
        f2 = 'n' if s_dir in 'ew' else 'w'
        f3 = 's' if s_dir in 'ew' else 'e'
        
        main = ['d', s_dir, '.500', ':122', '.500']
        
        routine = ['u', 'd', 'u', 'd', 'u', f2, s_dir, f2, s_dir, 
                   f3, s_dir, f3, s_dir, 'd', 'u', 'd', ':127']
        routine += ['u'] if e_up else []
        for p in routine:
            main.append(p)
            main.append('.500')
        
        for p in main:
            if p[0] == '.':
                client.sleep(int(p[1:]))
            elif p[0] == ':':
                client.emote(int(p[1:]))
            elif p == 'u':
                client.stand()
            elif p == 'd':
                client.sit()
            else:
                client.face(p)
    
    ### Communications #######
    
    elif cmd == 'emote':
        client.emote(crawler.chain)
    
    elif cmd == 'say':
        client.msg(crawler.chain)
    
    elif cmd == 'whisper':
        # Need the last arg first :-/
        crawler.flip()
        to = crawler.quoted()
        crawler.flip()
        client.whisper(to, crawler.chain)
    
    ### Interaction ##########
    
    elif cmd == 'attack':
        whom = crawler.quoted()
        being_id = client.mod_whois_nmap.get(whom)
        if not being_id:
            if whom.isdigit():
                being_id = int(whom)
            else:
                return 'Sorry, I don\'t know that name.'
        
        name = client.mod_whois_imap.get(being_id)
        if name:
            more = ' (%s)' % name
        else:
            more = ''
            client.whois(being_id)
        
        text = crawler.chain
        keep = not text or text[0].lower() in 'yt'
        client.attack(being_id, keep)
        return 'Attacking %s%s!' % (being_id, more)
    
    
    elif cmd == 'goto':
        x, y = text.split()
        client.goto(int(x), int(y))
    
    ### Information ##########
    
    elif cmd == 'names':
        return ',\n'.join(
          ', '.join('%s=%s' % pair for pair in ten) 
          for ten in isection(client.mod_whois_imap.items(), 10)
        )
    
    elif cmd == 'pos':
        return 'x=%s, y=%s' % tuple(client.pos)
    
    ### Resetters ############
    
    elif cmd == 'quit':
        client.done = True
    
    elif cmd == 'respawn':
        client.respawn()
    
    elif cmd == 'refresh':
        if rebuild_prices.rebuild():
            return 'Successfully rebuilt TMW price DB!'
        else:
            return 'Rebuilding the TMW price DB failed!'
    
    elif cmd == 'reload':
        for mod in client.installed_mods.values():
            reload(mod)
        commands.setup_commands(client)
        
        return 'Successfully reloaded all mods installed on this bot.'
    
    ### Help #################
    
    elif cmd == 'help':
        return '\n'.join([
          '`sit` to sit; `stand` to stand; '
          '`face n|s|e|w` to face a particular direction;',
          
          '`spin [l|r] [n|s|e|w] [num]` to make the bot spin rapidly; '
          '`juke [u|d] [n|s|e|w]` to execute a lame move sequence;',
          
          '`emote <number>` to display an emote; '
          '`say <some message>` to say something; '
          '`whisper <some thing> to <player>` to whisper to a player;',
          
          '`goto <x> <y>` to move the bot;',
          
          '`names` to see all the names that the bot recognizes;',
          
          '`respawn` to respawn; '
          '`refresh` to update the db of TMW prices (long!); '
          '`reload` to reload all mods installed on this bot.'
        ])
    
    else:
        return 'Unknown command `%s`; see the `help` sub-command.' % cmd
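The `juke` handler above drives the client with a small action-token format: tokens starting with `.` sleep for that many milliseconds, tokens starting with `:` play an emote, `u` and `d` stand and sit, and anything else is treated as a compass direction to face. A minimal sketch of that interpreter against a hypothetical stub client (nothing below is part of the real bot) might look like this:

import time

class StubClient(object):
    # Hypothetical stand-in for the real client; it only logs each call.
    def sleep(self, ms):
        time.sleep(ms / 1000.0)

    def emote(self, num):
        print('emote %d' % num)

    def stand(self):
        print('stand')

    def sit(self):
        print('sit')

    def face(self, direction):
        print('face %s' % direction)

def run_routine(client, tokens):
    # Same dispatch rules as the handler above.
    for p in tokens:
        if p[0] == '.':
            client.sleep(int(p[1:]))
        elif p[0] == ':':
            client.emote(int(p[1:]))
        elif p == 'u':
            client.stand()
        elif p == 'd':
            client.sit()
        else:
            client.face(p)

run_routine(StubClient(), ['d', '.500', 'n', '.500', ':122', 'u'])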
Ejemplo n.º 51
    def from_body(self, v):
        """
        Convert velocity/acceleration from the body frame to the world frame.
        """
        return utils.rotate(v, self.quaternion)
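`utils.rotate` is not shown in this example; a common way to implement a body-to-world conversion like this is to rotate the vector by the attitude quaternion, v' = q v q*. A minimal numpy sketch under that assumption (quaternion taken as `[w, x, y, z]`, which may not match the convention the real `utils.rotate` expects):

import numpy as np

def rotate_by_quaternion(v, q):
    # Rotate the 3-vector v by the unit quaternion q = [w, x, y, z].
    w, x, y, z = q
    r = np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ])
    return r.dot(np.asarray(v))

# A 90-degree rotation about z maps the body x-axis onto the world y-axis.
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(rotate_by_quaternion([1.0, 0.0, 0.0], q))  # ~[0, 1, 0]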
Ejemplo n.º 52
    def getOriginalVec(self, *args):
        vec = b2d.b2Vec2(*args)
        return utils.rotate(vec, self.world_angle.get())
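Here `utils.rotate` takes a plain angle rather than a quaternion, so it presumably rotates a 2D vector about the origin. An illustrative sketch of that operation (angle in radians; the real helper may well use a different convention):

import math

def rotate_vec2(v, angle):
    # Rotate the 2D vector (x, y) by `angle` radians about the origin.
    x, y = v
    c, s = math.cos(angle), math.sin(angle)
    return (c * x - s * y, s * x + c * y)

print(rotate_vec2((1.0, 0.0), math.pi / 2))  # ~(0.0, 1.0)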
Ejemplo n.º 53
escala = [TONO, TONO, SEMITONO, TONO, TONO, TONO, SEMITONO]


# Seven chords can be obtained from each scale
inicio = 0
for x in range(8):
    actual = inicio
    notas = [frecuencias[actual]]
    for grupo in grouper(escala, 2, 0):
        actual += sum(grupo)
        notas.append(frecuencias[actual])

    acordes.append(notas)
    inicio += escala[0]

    escala = rotate(escala)


muestras_por_segundo = 44100
duracion = 2

muestras_totales = duracion * muestras_por_segundo

muestras = []

for indice, acorde in enumerate(acordes):
    inicio_acorde = indice * duracion * muestras_por_segundo

    for frecuencia in acorde:
        ciclos_por_muestra = frecuencia / muestras_por_segundo
        incremento = 2 * math.pi * ciclos_por_muestra
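This example relies on two helpers that are not shown: `rotate`, which cycles the interval list so each pass starts the pattern one step later, and `grouper`, which walks the intervals in pairs so each chord is built by stacking every other note. Minimal sketches of both, assuming that is all they do:

def rotate(seq, n=1):
    # Cycle a list left by n positions: rotate([1, 2, 3]) -> [2, 3, 1].
    return seq[n:] + seq[:n]

def grouper(iterable, n, fillvalue=None):
    # Collect items into fixed-length chunks, padding the last one if needed.
    items = list(iterable)
    while len(items) % n:
        items.append(fillvalue)
    return zip(*[iter(items)] * n)

print(rotate([2, 2, 1, 2, 2, 2, 1]))               # [2, 1, 2, 2, 2, 1, 2]
print(list(grouper([2, 2, 1, 2, 2, 2, 1], 2, 0)))  # [(2, 2), (1, 2), (2, 2), (1, 0)]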
Ejemplo n.º 54
def generate_hdf5(ftxt, output, fname, argument=False):

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []

    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        assert(img is not None)
        logger("process %s" % imgPath)
        # F
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom+1,f_bbox.left:f_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > -1:
            ### flip
            face_flipped, landmark_flipped = flip(f_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (39, 39))
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), 5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]
        nm_face = f_face[8:, :]

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        # EN
        # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
        # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)

        # NM
        # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
        # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)

    #imgs, landmarks = process_images(ftxt, output)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs),np.asarray(NM_landmarks)

    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
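Once the files are written, the `data` and `landmark` datasets can be read back directly with h5py for a quick sanity check; the path below is a placeholder for whatever `join(OUTPUT, '1_F', fname)` produced:

import h5py

# Placeholder path; substitute the file this script actually generated.
with h5py.File('1_F/train.h5', 'r') as h5:
    data = h5['data'][:]
    landmark = h5['landmark'][:]
    print(data.shape)      # e.g. (N, 1, 39, 39)
    print(landmark.shape)  # e.g. (N, 10)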
Ejemplo n.º 55
modo = [TONO, TONO, SEMITONO, TONO, TONO, TONO, SEMITONO]
inicio = 0

# There are 8 modes; they are obtained by rotating the intervals of the scale
for i in range(8):
    actual = inicio
    frecuencias_modo = [frecuencias[actual]]

    for intervalo in modo:
        actual += intervalo
        frecuencias_modo.append(frecuencias[actual])

    notas += frecuencias_modo
    inicio += modo[0]

    modo = rotate(modo)


muestras_por_segundo = 44100
duracion = 0.5

muestras_totales = duracion * muestras_por_segundo

muestras = []

for frecuencia in notas:
    ciclos_por_muestra = frecuencia / muestras_por_segundo
    incremento = 2 * math.pi * ciclos_por_muestra
    fase = 0

    for i in range(int(muestras_totales)):
        muestras.append(math.sin(fase))
        fase += incremento
Ejemplo n.º 56

    # do random crop
    aug_idxs = np.random.choice(train_image_num, int(augment_fraction*train_image_num), replace=False)+1
    for augidx in aug_idxs:
        filename = str(augidx)+'.png'
        pair = train_image_labels[augidx-1].split(' ')
        print 'regenerating '+pair[0]+' with label '+pair[1]
        image = skimage.io.imread(Image_Path+filename)
        transformed_img = random_crop(image)
        skimage.io.imsave(Image_Path+str(next_image_num)+'.png',transformed_img)
        rep_image_label_file.write(str(next_image_num)+'.png '+pair[1].split('\n')[0]+'\n')
        next_image_num += 1

    # do random rotate
    aug_idxs = np.random.choice(train_image_num, int(augment_fraction*train_image_num), replace=False)+1
    for augidx in aug_idxs:
        filename = str(augidx)+'.png'
        pair = train_image_labels[augidx-1].split(' ')
        print 'regenerating '+pair[0]+' with label '+pair[1]
        image = skimage.io.imread(Image_Path+filename)
        transformed_img = rotate(image)
        skimage.io.imsave(Image_Path+str(next_image_num)+'.png',transformed_img)
        rep_image_label_file.write(str(next_image_num)+'.png '+pair[1].split('\n')[0]+'\n')
        next_image_num += 1


# for augidx in aug_idxs:
#     filename = str(augidx)+'.png'
#     image = skimage.io.imread(Image_Path+filename)
#     # do some transform here
#     print image.shape
#
#     skimage.io.imshow(image)
#     skimage.io.show()
#
#     test_img = random_crop(image)