Example #1
    def get_precision_recall_by_Hamming_Radius(database, query, radius=2):
        query_output = sign(query.output)
        database_output = sign(database.output)

        bit_n = query_output.shape[1]

        ips = np.dot(query_output, database_output.T)
        ips = (bit_n - ips) / 2
        ids = np.argsort(ips, 1)

        precX = []
        recX = []
        mAPX = []
        query_labels = query.label
        database_labels = database.label

        for i in range(ips.shape[0]):
            label = query_labels[i, :]
            label[label == 0] = -1
            idx = np.reshape(np.argwhere(ips[i, :] <= radius), (-1))
            all_num = len(idx)

            if all_num != 0:
                imatch = np.sum(database_labels[idx[:], :] == label, 1) > 0
                match_num = np.sum(imatch)
                precX.append(float(match_num) / all_num)

                all_sim_num = np.sum(
                    np.sum(database_labels[:, :] == label, 1) > 0)
                recX.append(float(match_num) / all_sim_num)

                if radius < 10:
                    ips_trad = np.dot(
                        query.output[i, :], database.output[ids[i, 0:all_num], :].T)
                    ids_trad = np.argsort(-ips_trad, axis=0)
                    db_labels = database_labels[ids[i, 0:all_num], :]

                    rel = match_num
                    imatch = np.sum(db_labels[ids_trad, :] == label, 1) > 0
                    Lx = np.cumsum(imatch)
                    Px = Lx.astype(float) / np.arange(1, all_num + 1, 1)
                    if rel != 0:
                        mAPX.append(np.sum(Px * imatch) / rel)
                else:
                    mAPX.append(float(match_num) / all_num)

            else:
                precX.append(0.0)
                recX.append(0.0)
                mAPX.append(0.0)

        return np.mean(np.array(precX)), np.mean(np.array(recX)), np.mean(np.array(mAPX))
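Here `sign` binarizes the real-valued network outputs into ±1 hash codes; that is what makes `(bit_n - ips) / 2` the Hamming distance, since for ±1 codes of length bit_n the inner product ip satisfies ip = bit_n - 2d. A minimal sketch of such a helper (an assumption; the util module these examples import is not shown), mapping zeros to +1 so every bit is defined:

    import numpy as np

    def sign(x):
        # Hypothetical stand-in for the util helper used above:
        # binarize continuous codes to +/-1, mapping zeros to +1.
        s = np.sign(x)
        s[s == 0] = 1
        return s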
Example #2
    def _set_velocity(self, val):
        if self.maxVelocity:
            _vx, _vy = val

            if abs(self.maxVelocity.x) < abs(_vx):
                _vx = self.maxVelocity.x * util.sign(_vx)
            if abs(self.maxVelocity.y) < abs(_vy):
                _vy = self.maxVelocity.y * util.sign(_vy)

            self._velocity = point.Vector(_vx, _vy)
        else:
            self._velocity = point.Vector(val)
        self.redraw()
Example #3
    def update(self):
        """Updates this sprite for the next frame."""
        if self.alive and not self.fixed:
            # Game.elapsed is in ms, but all our calculations are in seconds
            dt = world.Game.elapsed/1000.0

            if dt > 0.001:
                # linear motion
                if self.drag.x and not self.acceleration.x:
                    # drag is just deceleration when there's no acceleration
                    if abs(self.velocity.x) > abs(self.drag.x):
                        self.velocity.x -= (self.drag.x * util.sign(self.velocity.x))
                    else:
                        self.velocity.x = 0.0
                if self.drag.y and not self.acceleration.y:
                    if abs(self.velocity.y) > abs(self.drag.y):
                        self.velocity.y -= (self.drag.y * util.sign(self.velocity.y))
                    else:
                        self.velocity.y = 0.0

                self.velocity += self.acceleration * dt
                if self.maxVelocity is not None:
                    if self.velocity.x > self.maxVelocity.x:
                        self.velocity.x = self.maxVelocity.x
                    if self.velocity.y > self.maxVelocity.y:
                        self.velocity.y = self.maxVelocity.y

                # move the entity, with hooks after moving by x and y
                self.x += self.velocity.x * dt
                self.onMoveX()
                self.y += self.velocity.y * dt
                self.onMoveY()
                # hook for post-movement code (e.g., collision detection)
                self.onMove()

                # rotation
                # Setting angular velocity or acceleration overrides the
                # 'rotating' property since it is assumed that, by applying
                # angular velocity, you really want an object to rotate.
                if (self.angle and self.rotating) or \
                   self.angularVelocity or self.angularAcceleration:
                    self.angularVelocity += self.angularAcceleration * dt
                    if self.maxAngularVelocity and self.angularVelocity > \
                       self.maxAngularVelocity:
                        self.angularVelocity = self.maxAngularVelocity
                    self.angle += self.angularVelocity * dt
                    self.rect = self.image.get_rect()

        self._recenter()
        super(Entity, self).update()
Example #4
    def build(self, grid, s, t):
        v0, v1 = t[0] - s[0], t[1] - s[1]
        i, j = s
        grid[i][j] = 1
        while v0 or v1:  # remaining components of the vector
            print(i, j)
            prev = i, j
            if v0:  # increment position, decrement vector component
                i += sign(v0)
                v0 -= sign(v0)
            if v1:
                j += sign(v1)
                v1 -= sign(v1)
            if grid[i][j] in [2, -1]:  # unbuildable cell: repel, favoring the dominant direction
                i, j = prev  # revert
                if abs(v0) > abs(v1):
                    i += sign(v0)
                    v0 -= sign(v0)
                else:
                    j += sign(v1)
                    v1 -= sign(v1)

            grid[i][j] = 1

        return grid
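A quick illustration of the walk (hypothetical: `Builder` stands in for whatever class owns `build`, and `sign` is a scalar helper as in Example #6):

    grid = [[0] * 6 for _ in range(6)]   # 0 = empty; 2 or -1 = unbuildable
    result = Builder().build(grid, (0, 0), (3, 4))
    # build() prints each visited cell and marks it with 1, tracing a roughly
    # straight line from (0, 0) toward (3, 4); unbuildable cells push the walk
    # sideways along the dominant remaining direction.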
Example #5
def test_sign():
    key = base64.b64encode(b'abc')
    string_to_sign = '/account/GET200601021504'

    signature = util.sign(string_to_sign, key)
    assert util.verify(
        string_to_sign, key, signature) is True
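`util.sign` and `util.verify` are not shown; below is a minimal pair consistent with this test, assuming an HMAC scheme (the hash choice is a guess). The same shape would also fit the X-Cron-Signature header in Example #11.

    import base64
    import hashlib
    import hmac

    def sign(string_to_sign, key):
        # Sketch: base64-encoded HMAC of the string under key (SHA-256 assumed).
        mac = hmac.new(key, string_to_sign.encode(), hashlib.sha256)
        return base64.b64encode(mac.digest())

    def verify(string_to_sign, key, signature):
        # Recompute the signature and compare in constant time.
        return hmac.compare_digest(sign(string_to_sign, key), signature)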
Example #6
 def test_sign(self):
     self.assertEqual(util.sign(-3.14), -1)
     self.assertEqual(util.sign(-1), -1)
     self.assertEqual(util.sign(-0.001), -1)
     self.assertEqual(util.sign(0.0), 0)
     self.assertEqual(util.sign(0), 0)
     self.assertEqual(util.sign(3.14), 1)
     self.assertEqual(util.sign(1), 1)
     self.assertEqual(util.sign(0.001), 1)
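These assertions pin the contract down completely: -1 for any negative, 0 for zero, +1 for any positive, for ints and floats alike. A one-liner that satisfies all eight (a sketch; the real util module is not shown):

    def sign(x):
        # Return -1, 0, or 1 according to the sign of x.
        return (x > 0) - (x < 0)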
Example #7
    def get_precision_recall_by_Hamming_Radius_All(database, query):
        query_output = sign(query.output)
        database_output = sign(database.output)

        bit_n = query_output.shape[1]

        ips = np.dot(query_output, database_output.T)
        ips = (bit_n - ips) / 2
        precX = np.zeros((ips.shape[0], bit_n + 1))
        recX = np.zeros((ips.shape[0], bit_n + 1))
        mAPX = np.zeros((ips.shape[0], bit_n + 1))

        query_labels = query.label
        database_labels = database.label

        ids = np.argsort(ips, 1)

        for i in range(ips.shape[0]):
            label = query_labels[i, :]
            label[label == 0] = -1

            idx = ids[i, :]
            imatch = np.sum(database_labels[idx[:], :] == label, 1) > 0
            all_sim_num = np.sum(imatch)

            counts = np.bincount(ips[i, :].astype(np.int64))

            for r in range(bit_n + 1):
                if r >= len(counts):
                    precX[i, r] = precX[i, r - 1]
                    recX[i, r] = recX[i, r - 1]
                    mAPX[i, r] = mAPX[i, r - 1]
                    continue

                all_num = np.sum(counts[0:r + 1])

                if all_num != 0:
                    match_num = np.sum(imatch[0:all_num])
                    precX[i, r] = float(match_num) / all_num
                    recX[i, r] = float(match_num) / all_sim_num

                    rel = match_num
                    Lx = np.cumsum(imatch[0:all_num])
                    Px = Lx.astype(float) / np.arange(1, all_num + 1, 1)
                    if rel != 0:
                        mAPX[i, r] = np.sum(Px * imatch[0:all_num]) / rel
        return np.mean(np.array(precX), 0), np.mean(np.array(recX), 0), np.mean(np.array(mAPX), 0)
Example #8
    def get_U_init(self, x0, x_d):
        '''
        Get initial guess of controls u using a straight, constant velocity to the goal
        '''
        U_init = []
        x_error = x_d[-1][0] - x0[0]
        y_error = x_d[-1][1] - x0[1]

        if fabs(x_error) > fabs(y_error):
            u_x = sign(x_error)*self.u_max[0]
            u_y = y_error/abs(x_error)*self.u_max[0]
        else:
            u_y = sign(y_error)*self.u_max[1]
            u_x = x_error/abs(y_error)*self.u_max[1]
        u_theta = 0.0
        for k in range(self.H):
            U_init.append(np.array([u_x, u_y, u_theta]))
        return U_init
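The guess saturates the control along the dominant error axis and scales the other axis proportionally, so the commanded velocity points straight at the goal. A worked instance (hypothetical numbers):

    from math import copysign, fabs

    x_error, y_error, u_max = 4.0, -2.0, (1.0, 1.0)
    assert fabs(x_error) > fabs(y_error)        # x dominates
    u_x = copysign(u_max[0], x_error)           # saturate the dominant axis: 1.0
    u_y = y_error / abs(x_error) * u_max[0]     # scale the other axis: -0.5
    print(u_x, u_y)                             # (1.0, -0.5) points along (4, -2)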
Example #9
def distance(x1, x2=None, pair=True, dist_type="euclidean2", ifsign=False):
    '''
    Params:
        x2: if None, the distance between x1 and itself is returned.
        pair: if True, distances are computed for every pair (x1_i, x2_j);
              if False, only for matching indices (x1_i, x2_i), which requires
              x1 and x2 to have the same shape.
        dist_type: one of euclidean2, normed_euclidean2, inner_product, cosine.
        ifsign: if True, both inputs are binarized with util.sign first.
    '''
    if x2 is None:
        x2 = x1
    if ifsign:
        x1 = util.sign(x1)
        x2 = util.sign(x2)
    if dist_type == 'inner_product':
        return inner_product(x1, x2, pair)
    if pair:
        x1 = np.expand_dims(x1, 1)
        x2 = np.expand_dims(x2, 0)
    return getattr(sys.modules[__name__], dist_type)(x1, x2)
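A sketch of how this might be called (hypothetical usage; it assumes a module-level euclidean2(x1, x2) that reduces over the last axis, as the getattr dispatch implies):

    import numpy as np

    x = np.random.randn(5, 8)
    D = distance(x, pair=True, dist_type="euclidean2", ifsign=True)
    print(D.shape)  # (5, 5): all-pairs distances between the sign-binarized rows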
Example #10
def submit(id, url):
	"""
	Отправляет на сервер ссылку на готовый album.xml. Ссылка должна быть
	доступна извне, сервер будет её запрашивать.
	"""
	print "Submitting item %u (%s)" % (id, url)
	fetch('http://' + settings['host'] + '/upload/queue', {
		'id': id,
		'url': url,
		'signature': sign(url),
	})
Example #11
    def sign_request(self):
        utcnow = datetime.datetime.utcnow()
        string_to_sign = '{}{}{}'.format(
            self.endpoint.format(**self.payload),
            self.verb,
            utcnow.strftime(HEADER_DATETIME_FORMAT))

        self.extra_headers = {
            'X-Cron-Key': self.api_key,
            'X-Cron-Date': utcnow.strftime(HEADER_DATETIME_FORMAT),
            'X-Cron-Signature': util.sign(string_to_sign, self.secret_key)
        }
Example #12
def get_mAPs_rerank(q_output, q_labels, db_output, db_labels, Rs, dist_type):
    query_output = sign(q_output)
    database_output = sign(db_output)

    bit_n = query_output.shape[1]

    ips = np.dot(query_output, database_output.T)
    ips = (bit_n - ips) / 2

    mAPX = []
    query_labels = q_labels
    database_labels = db_labels
    for i in range(ips.shape[0]):
        label = query_labels[i, :]
        label[label == 0] = -1

        imatch = np.array([])
        for j in range(bit_n):
            idx = np.reshape(np.argwhere(np.equal(ips[i, :], j)), (-1))
            all_num = len(idx)

            if all_num != 0:
                ips_trad = np.dot(q_output[i, :], db_output[idx[:], :].T)
                ids_trad = np.argsort(-ips_trad, axis=0)
                db_labels_1 = database_labels[idx[:], :]

                imatch = np.append(imatch, np.sum(
                    np.equal(db_labels_1[ids_trad, :], label), 1) > 0)
                if imatch.shape[0] > Rs:
                    break

        imatch = imatch[0:Rs]
        rel = np.sum(imatch)
        Lx = np.cumsum(imatch)
        Px = Lx.astype(float) / np.arange(1, Rs + 1, 1)
        if rel != 0:
            mAPX.append(np.sum(Px * imatch) / rel)

    return np.mean(np.array(mAPX))
Example #13
 def update_mouse_track(self, time_elapsed):
     eps = 0.1
     ax, ay = self.angles
     tx, ty = self.mouse_target
     
     if (fabs(ax - tx) <= eps):
         self.angles[0] = 0.0
         self.mouse_target[0] = 0.0
     else:
         before = sign(tx - ax)
         d = self.spin_velocity * time_elapsed * sign(tx - ax)
         self.angles[0] += d
         after = sign(tx - self.angles[0])
         
         if (before != after):
             self.angles[0] = 0.0
             self.mouse_target[0] = 0.0
         
         r = Quaternion.from_axis_angle(Vector3d(0.,1.,0.), d)
         self.rotation = self.rotation * r
         
     if (fabs(ay - ty) <= eps):
         self.angles[1] = 0.0
         self.mouse_target[1] = 0.0
     else:
         before = sign(ty - ay)
         d = self.spin_velocity * time_elapsed * sign(ty - ay)
         self.angles[1] += d
         after = sign(ty - self.angles[1])
         
         if (before != after):
             self.angles[1] = 0.0
             self.mouse_target[1] = 0.0
         
         r = Quaternion.from_axis_angle(Vector3d(1.,0.,0.), d)
         self.rotation = self.rotation * r
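The before/after comparison is an overshoot guard: step toward the target at a fixed rate, and if the sign of the remaining error flips, the step crossed the target, so snap onto it. The same idea in isolation (a sketch; it snaps to the target rather than to the zero reset used above):

    def sign(x):
        return (x > 0) - (x < 0)

    def step_toward(value, target, rate, dt):
        # Advance value toward target by rate*dt, snapping on overshoot.
        before = sign(target - value)
        value += rate * dt * before
        if sign(target - value) != before:  # error changed sign: we crossed it
            value = target
        return value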
Example #14
    def ali_oauth(self, **post):
        """
        支付宝用户授权回调地址,在此处根据auth_code来获取支付宝用户信息,进行登录/注册处理
        """
        cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
        ali_code = post['auth_code']  # 授权code,属于GET参数
        _logger.info('----->auth_code:%s'%ali_code)

        """
        根据code获取支付宝用户ID和access_token, Python方法
        """
        sign_params_dic = {
            'app_id': '2016030201177117',  # 支付宝APPID
            'method': 'alipay.system.oauth.token',
            'charset': 'UTF-8',
            'sign_type': 'RSA',
            'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'version': '1.0',
            'grant_type': 'authorization_code',
            'code': ali_code,
        }

        # Sort the parameters to sign by key and join them into the string to sign
        _, sign_query_str = util.params_filter(sign_params_dic)
        #  sign_query_str = util.sort(sign_params_dic)
        rsa_private_key = RSA.importKey(open('addons-extra/wxsite/static/rsa_private_key.pem','r').read())
        rsa_private_sign=util.sign(sign_query_str, rsa_private_key)
        sign_params_dic['sign'] = rsa_private_sign

        # POST to Alipay to obtain the userId (analogous to WeChat's OpenId)
        post_param_query = urllib.urlencode(sign_params_dic)

        httpsClient = httplib.HTTPSConnection('openapi.alipay.com', 443, timeout=6000)
        httpsClient.request('POST', '/gateway.do', post_param_query, {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/html","charset":"utf8"})
        ret_json = httpsClient.getresponse().read()
        ret_dict = json.loads(ret_json.decode('GB2312').encode('UTF-8'))  # parse the JSON
        ali_userid = ret_dict['alipay_system_oauth_token_response']['user_id']  # the user id
        ali_access_token = ret_dict['alipay_system_oauth_token_response']['access_token']  # the access_token

        """
        在此处根据支付宝userId判断用户是否已存在数据库中
        """
        users_obj = pool.get('res.users')
        company_id = request.session['company_id']
        login = str(company_id)+ali_userid
        # company_id = 1  # Alipay auth links cannot carry custom parameters (unlike WeChat auth), so company_id would default to 1 here

        wx_user_exists = users_obj.search(cr, SUPERUSER_ID, [('login','=',login),('company_id','=',int(company_id))])
        if not wx_user_exists:
            # Fetch the user's details (user name, etc.)
            sign_params_dic = {
                'app_id': '2016030201177117',  # Alipay APPID
                'method': 'alipay.user.userinfo.share',
                'charset': 'UTF-8',
                'sign_type': 'RSA',
                'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),  # for some reason, now() is always 8 hours off
                'version': '1.0',
                'auth_token': ali_access_token,
            }

            _, sign_query_str = util.params_filter(sign_params_dic)
            rsa_private_key = RSA.importKey(open('addons-extra/wxsite/static/rsa_private_key.pem','r').read())
            rsa_private_sign=util.sign(sign_query_str, rsa_private_key)
            sign_params_dic['sign'] = rsa_private_sign

            # POST to Alipay to fetch the user name etc., used for registration
            post_param_query = urllib.urlencode(sign_params_dic)
            httpsClient = httplib.HTTPSConnection('openapi.alipay.com', 443, timeout=6000)
            httpsClient.request('POST', '/gateway.do', post_param_query, {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/html","charset":"utf8"})
            ret_json = httpsClient.getresponse().read()

            ret_dict = json.loads(ret_json.decode('GB2312').encode('UTF-8'))  # parse the JSON

            if 'nick_name' in ret_dict['alipay_user_userinfo_share_response']:
                ali_username = ret_dict['alipay_user_userinfo_share_response']['nick_name']  # the Alipay nickname
            else:
                ali_username = ret_dict['alipay_user_userinfo_share_response']['alipay_user_id']

            # register the user
            users_obj.create(cr, SUPERUSER_ID, {'login':login,'name':ali_username,'password':'******','oauth_provider_id':'','company_id':int(company_id),'company_ids':[[6, False, [int(company_id)]]]})

        request.session['alipay_userid'] = ali_userid

        return request.redirect("/login?db=%s&login=%s&key=%s&redirect=%s"%(request.session.db,login, 'qdodoo', 'shop/wx/main'))
Example #15
def get_reward(modeltype,
               base_model,
               env,
               ep_max_step,
               sigma,
               CONFIG,
               reference,
               seed_and_id=None,
               test=False):
    # reference : reference batch torch
    # start = time.time()
    if seed_and_id is not None:
        index_seed, k_id = seed_and_id
        if modeltype == '2015':
            from model_15 import build_model
        elif modeltype == '2013':
            from model_13 import build_model
        model = build_model(CONFIG)
        model.load_state_dict(base_model.state_dict())
        model.switch_to_vbn()
        model_size = model.get_size()
        slice_dict = model.get_name_slice_dict()
        noise_array = shared_noise_table.get(index_seed, model_size)
        with torch.no_grad():
            for name, params in model.named_parameters():
                tmp = model
                for attr_value in name.split('.'):
                    tmp = getattr(tmp, attr_value)
                noise = torch.tensor(
                    noise_array[slice_dict[name][0]:slice_dict[name][1]],
                    dtype=torch.float).reshape(tmp.shape)
                tmp.add_(noise * sigma * sign(k_id))
    else:
        model = base_model
    # first send in reference
    # second switch to vbn
    model.switch_to_bn()
    output = model(reference)
    model.switch_to_vbn()

    env.frameskip = 1
    observation = env.reset()
    break_is_true = False
    ep_r = 0.
    frame_count = 0
    # print('k_id mid:', k_id,time.time()-start)
    if ep_max_step is None:
        raise TypeError("test")
    else:
        ProcessU = ProcessUnit(FRAME_SKIP)
        ProcessU.step(observation)

        if test:
            ep_max_step = 108000
        #no_op_frames = np.random.randint(FRAME_SKIP+1, 30)
        no_op_frames = np.random.randint(1, 31)
        for i in range(no_op_frames):
            # TODO: I think 0 is Null Action
            # but have not found any article about the meaning of every actions
            observation, reward, done, _ = env.step(0)
            ProcessU.step(observation)
            frame_count += 1

        for step in range(ep_max_step):
            action = model(ProcessU.to_torch_tensor())[0].argmax().item()
            for i in range(FRAME_SKIP):
                observation, reward, done, _ = env.step(action)
                ProcessU.step(observation)
                frame_count += 1
                ep_r += reward
                if done:
                    break_is_true = True
            if break_is_true:
                break
    return ep_r, frame_count
Example #16
 def get_mAPs_after_sign(self, database, query, Rs=None, dist_type='inner_product'):
     if Rs is None:
         Rs = self.R
     q_output = sign(query.output)
     db_output = sign(database.output)
     return get_mAPs(q_output, query.label, db_output, database.label, Rs, dist_type)
Example #17
 def _set_angularvelocity(self, val):
     if self.maxAngularVelocity and abs(self.maxAngularVelocity) < abs(val):
         self._angular = self.maxAngularVelocity * util.sign(val)
     else:
         self._angular = val
     self.redraw()
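Examples #2 and #17 share one idiom: compare magnitudes, then reapply the value's own sign to the limit. In isolation it looks like this (a sketch; note that Example #3 clamps only the positive direction, so this symmetric form differs from it):

    def clamp_magnitude(value, limit):
        # Clamp value to [-abs(limit), abs(limit)], preserving its sign.
        if abs(limit) < abs(value):
            return abs(limit) * ((value > 0) - (value < 0))
        return value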
Example #18
    
    base = len(rewards)
    rank = np.arange(1, base + 1)
    util_ = np.maximum(0, np.log(base / 2 + 1) - np.log(rank))
    utility = util_ / util_.sum() - 1 / base
    kids_rank = np.argsort(rewards)[::-1]               # rank kid id by reward

    cumulative_update = {}       # initialize update values
    for ui, k_id in enumerate(kids_rank):
        # reconstruct noise using seed
        noise_array = shared_noise_table.get(index_seed[k_id], model_size)
        for name, params in model.named_parameters():
            if name not in cumulative_update:
                cumulative_update[name] = torch.zeros_like(params, dtype=torch.float)
            noise = torch.tensor(noise_array[slice_dict[name][0]:slice_dict[name][1]], dtype=torch.float).reshape(params.shape)
            cumulative_update[name].add_(utility[ui]*sign(k_id)*noise)
    for name, params in cumulative_update.items():
        cumulative_update[name].mul_(1/(ARGS.population_size*ARGS.sigma))
    # weight decay
    for name, params in model.named_parameters():
        tmp = model
        for attr_value in name.split('.'):
            tmp = getattr(tmp, attr_value)
        cumulative_update[name].add_(-ARGS.l2coeff*tmp)
    optimizer.update_model_parameters(model, cumulative_update)
    return model, rewards, timesteps_count


# TODO
def test(model, pool, env, test_times, CONFIG, reference):
    # distribute training in parallel
Example #19
def train(model, optimizer, pool, sigma, env, N_KID, CONFIG, modeltype,
          reference_batch_torch):
    # pass seed instead whole noise matrix to parallel will save your time
    # reference_batch_torch: torch.tensor (128, 4, 84, 84)
    # mirrored sampling
    model_size = model.get_size()
    slice_dict = model.get_name_slice_dict()
    stream = np.random.RandomState()
    index_seed = shared_noise_table.sample_index(stream, model_size,
                                                 N_KID).repeat(2)

    # distribute training in parallel
    jobs = [
        pool.apply_async(get_reward, (
            modeltype,
            model,
            env,
            CONFIG['ep_max_step'],
            sigma,
            CONFIG,
            reference_batch_torch,
            [index_seed[k_id], k_id],
        )) for k_id in range(N_KID * 2)
    ]
    from config import timesteps_per_batch
    # N_KID means episodes_per_batch
    rewards = []
    timesteps = []
    timesteps_count = 0
    for idx, j in enumerate(jobs):
        rewards.append(j.get()[0])
        timesteps.append(j.get()[1])
        timesteps_count += j.get()[1]

    base = len(rewards)
    rank = np.arange(1, base + 1)
    util_ = np.maximum(0, np.log(base / 2 + 1) - np.log(rank))
    utility = util_ / util_.sum() - 1 / base
    kids_rank = np.argsort(rewards)[::-1]  # rank kid id by reward

    cumulative_update = {}  # initialize update values
    for ui, k_id in enumerate(kids_rank):
        # reconstruct noise using seed
        noise_array = shared_noise_table.get(index_seed[k_id], model_size)
        for name, params in model.named_parameters():
            if name not in cumulative_update:
                cumulative_update[name] = torch.zeros_like(params,
                                                           dtype=torch.float)
            noise = torch.tensor(
                noise_array[slice_dict[name][0]:slice_dict[name][1]],
                dtype=torch.float).reshape(params.shape)
            cumulative_update[name].add_(utility[ui] * sign(k_id) * noise)
    for name, params in cumulative_update.items():
        cumulative_update[name].mul_(1 / (2 * N_KID * sigma))
    # weight decay
    for name, params in model.named_parameters():
        tmp = model
        for attr_value in name.split('.'):
            tmp = getattr(tmp, attr_value)
        cumulative_update[name].add_(-CONFIG['l2coeff'] * tmp)
    optimizer.update_model_parameters(model, cumulative_update)
    return model, rewards, timesteps_count
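One subtlety in Examples #15, #18, and #19: `sign(k_id)` is not a numeric sign. Because `index_seed` is built with `.repeat(2)`, consecutive worker ids share a seed, and the sign must alternate with the id's parity so each seed yields a mirrored pair of perturbations, theta + sigma*eps and theta - sigma*eps (a numeric sign would give sign(0) == 0 and silently zero out worker 0's noise). A parity-based definition of the kind this code presumably relies on:

    def sign(k_id):
        # Mirrored-sampling sign: alternate +1/-1 with worker-id parity.
        # (Assumed definition; the polarity is arbitrary as long as the two
        # workers sharing a seed get opposite signs.)
        return 1.0 if k_id % 2 == 0 else -1.0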
Example #20
def splitmerge(network, pairs=None, beg_year=1, end_year=2, **kwargs):
    
    ## EXPERIMENTAL PLACEHOLDERS - will eventually be replaced with a master
    ## loop to do all the id pairs.
    id_list = network.stations.keys()
    pair_results = dict()
    
    def dict_to_tuples(d):
        keys = d.keys()
        return [(key, d[key]) for key in keys]
    ## Generate station pairs for use in splitmerge by iteratively going through the
    ## station_list and adding stations in order of decreasing correlation. Skip a 
    ## neighbor if the pair is already present; want 20 stations or until all the
    ## correlated neighbors are used up.
#    pairs = []
#    for id1 in id_list:
#        neighbors = dict_to_tuples(network.correlations[id1])
#        sorted_neighbors = sorted(neighbors, key=operator.itemgetter(1))
#        added_pairs = 0
#        while sorted_neighbors and (added_pairs < 5):
#            id2, _ = sorted_neighbors.pop()
#            ordered_pair = tuple(sorted((id1, id2)))
#            if not ordered_pair in pairs:
#                pairs.append(ordered_pair)
#                added_pairs += 1
    
    for (id1, id2) in pairs:
        print "Pair %s with %s" % (id1, id2)
        pair_str = "%6s-%6s" % (id1, id2)
        #if pair_str != "051528-298107":
        #    continue
        
        raw_series = network.raw_series
        stations = network.stations
        series_copy = deepcopy(raw_series)
        
        min_ann = 5
        num_years = end_year - beg_year
        num_months = num_years*12
            
        for s in series_copy.itervalues():
            data = s.series
            scaled = scale_series(data, 0.1, s.MISSING_VAL)
            anomalies = compute_monthly_anomalies(scaled, s.MISSING_VAL)
            s.set_series(anomalies, s.years)
        
        ## Retrieve the data for each of the stations.
        station1 = stations[id1]
        series1 = series_copy[id1]
        data1 = series1.monthly_series
                
        station2 = stations[id2]
        series2 = series_copy[id2]
        data2 = series2.monthly_series
        
        
        #print data1[:50]
        #print data2[:50]
        #print "################################################################"
        ## Compute the difference series        
        diff_data = diff(data1, data2)
        MISS = series1.MISSING_VAL # Missing value placeholder
        
        ## Quickly pass through the data to find where it starts. We need to do this
        ## because it's possible that beg_year is earlier than the first year of 
        ## valid data in either data1 or data2. Furthermore, the original PHA code
        ## deliberately clipped off the first year of good data, so emulate that 
        ## effect here as well.
        ##
        ## Ultimately, we save the extreme early and extreme late month with valid
        ## data to use as our first guess at the undocumented changepoints.
        first = 0
        first_set = False
        last = 0
        for (i, d1, d2) in zip(xrange(num_months), data1, data2):
            if d1!=MISS and d2!=MISS:
                if first < 12:
                    first = i
                    #first_set = True
                #if not first_set:
                #    first = i
                #    first_set = True
                last = i
                
        ## Set the initial breakpoints and the list of already-found, homogenous
        ## segments.    
        breakpoints = [first, last, ]
        homog_segs = []
        
        #####################################################################
        ## BEGIN SPLITMERGE PROCESS TO GENERATE FIRST GUESS AT UNDOCUMENTED
        ## CHANGEPOINTS
        iter = 0 # counts how many times we've repeated the splitmerge process
        enter_BIC = False # break out of iterations into the BIC process?
        last_breakpoints = []
        while (iter < 10) and not enter_BIC:
            
            seg_bounds = zip(breakpoints[:-1], breakpoints[1:])
            last_breakpoints = deepcopy(breakpoints)
            new_breakpoints = deepcopy(breakpoints)
                
            new_homog_segs = []
        
            print "Parse segments (isplit = 1), ipass: "******"Too short: ", imo2iym(l), imo2iym(r)
                    continue
                
            ## If we've previously found that this segment is homogenous (has no
            ## potential changepoint), then we can skip it as well and proceed to
            ## the next one.
                # Set the within() method to check if this segment is within any
                # previously found homogenous ones. Use lambda, since we can't pass
                # keyword or positional arguments to map().
                within_this_seg = lambda seg: within((l, r), seg)
                within_stable_segs = map(within_this_seg, homog_segs)
                if any(within_stable_segs):
                    print "Stable segment: ", imo2iym(l), imo2iym(r)
                    if l == first: 
                        new_breakpoints.append(first)
                    continue
                
            ## The standard normal homogeneity test - which is the statistical test
            ## we'll use to see if there is a potential changepoint in this segment
            ## - requires us to normalize our paired difference series. We can do
            ## that in snht(), but we'll do it right now so we can inspect those
            ## standardized values later.
                z = standardize(segment, MISS)

            ## Apply standard normal homogeneity test. 
            ## For mechanics, see Alexandersson and Moberg 1997, Int'l Jrnl of
            ## Climatology (pp 25-34)
                likelihood_ratios = snht(z, MISS, standardized=True)
                z_count = len(get_valid_data(z))
                        
            ## We're left with the likelihood ratio for each value being a potential
            ## changepoint. Find the max ratio, and if that value is significant, let
            ## it be the newest potential changepoint.
                ind_max_ratio = 0
                max_ratio = 0.0
                clip_ratios = likelihood_ratios[2:-2] # clip the beginning and end,
                                                      # they can't be changepoints.
                for (ind, ratio) in zip(xrange(len(clip_ratios)), clip_ratios):
                    if ratio > max_ratio:
                        ind_max_ratio = ind
                        max_ratio = ratio
            ## Now we find the critical value for this data set, and check our max
            ## likelihood ratio against it
                crit_val = lrt_lookup(z_count)
                
                # The possible changepoint is the index of the max ratio we found. 
                # We have to shift it the following ways to align it to the original
                # data -
                #    1) shift by 2 re-aligns it from clip_ratios to likelihood_ratios
                #    2) shift by adjust re-aligns it to this segment in diff_data
                #    3) shift by l re-aligns it to the first index in diff_data
                possible_changepoint = l + ind_max_ratio + 2 + adjust
                
                y_new, m_new = imo2iym(possible_changepoint) # year, month
                
            ## If this is the first iteration, we indicate as such, and add the new
            ## changepoint
                if iter == 0: 
                    print "%6s-%6s MD        FIRST series %4d %2d to %4d %2d | at %4d %2d ts: %4.2f limit >: %3.2f" % (id1,id2,y1,m1,y2,m2,y_new,m_new,max_ratio,crit_val)
                    breakpoints.append(possible_changepoint)
                    breakpoints = sorted(breakpoints)
            
                else:
            ## Else, if we found a new possible changepoint, add it to our list.
                    if max_ratio > crit_val:
                        print "%6s-%6s MD Inhomogenity for series %4d %2d to %4d %2d | at %4d %2d ts: %4.2f limit >: %3.2f %4d" % (id1,id2,y1,m1,y2,m2,y_new,m_new,max_ratio,crit_val,z_count)
                        new_breakpoints.append(possible_changepoint)
                        
            ## If not, record that we found a homogeneous segment.   
                    else:
                        print "%6s-%6s MD      Homogeneous series %4d %2d to %4d %2d | at %4d %2d ts: %4.2f limit >: %3.2f %4d" % (id1,id2,y1,m1,y2,m2,y_new,m_new,max_ratio,crit_val,z_count)
                        new_homog_segs.append((l, r))
            
            ## Now we need to update our account of which segments were homogeneous,
            ## because we need to know during the next iteration. We will do this,
            ## as well as condense stable segments that lie adjacent to each other
            ## i.e, if we have the segments [(1,5), (5, 10,),, (12, 15)], then we 
            ## really have [(1,10), (12, 15)].
            homog_segs.extend(new_homog_segs)
            if homog_segs:
                homog_segs = sorted(homog_segs, key=operator.itemgetter(0))
                final_homog_segs = [homog_segs[0], ] # this will be like a stack
                for seg in homog_segs[1:]:
                    last_seg = final_homog_segs[-1]
                    if last_seg[1] == seg[0]:
                        new_seg = (last_seg[0], seg[1])
                        final_homog_segs.pop()
                        final_homog_segs.append(new_seg)
                    else:
                        final_homog_segs.append(seg)
                homog_segs = final_homog_segs
        
            ## So we have new segments that can be generated from these new
            ## breakpoints. Now, the PHA routine enters a "merge" process
            ## to see whether or not to keep these newly found changepoints or throw
            ## them out as false alarms. 
            ##
            ## We do this by "leapfrogging" every other breakpoint. This gives us
            ## a set of segments that all have another breakpoint in them. We want
            ## to see if these segments are homogeneous, because if they are, it
            ## means that the breakpoint we previously found in the segment has 
            ## been superseded.
            new_breakpoints = sorted(new_breakpoints)
            seg_bounds = zip(new_breakpoints[:-2], new_breakpoints[2:])
            
            remove_breakpoints = set()
            merged_breakpoints = set()
            if iter > 0:
                
                print "Merge segments (isplit = 0), ipass: "******"Stable segment: ", imo2iym(l), imo2iym(r)
    #                    if l == first: 
    #                        new_breakpoints.append(first)
    #                    seg_lookup.append(((l, r), 'stable'))
    #                    continue
                    # Set the within() method to check if this segment is within any
                    # previously found homogenous ones. Use lambda, since we can't pass
                    # keyword or positional arguments to map().
                    within_this_seg = lambda seg: within((l, r), seg)
                    within_stable_segs = map(within_this_seg, homog_segs)
                    if any(within_stable_segs):
                        print "Stable segment: ", imo2iym(l), imo2iym(r)
                        #if l == first: 
                        #    new_breakpoints.append(first)
                        merged_breakpoints.update([l, r])
                        continue
            
            ## Apply the same adjustments and the same standard normal homogeneity
            ## test that we did in the previous splitting process. There is no 
            ## difference here until we consider what to do if we find a new 
            ## homogeneous segment.
                    adjust = int(seg_bounds.index((l, r)) > 0)
                    segment = diff_data[l+adjust:r+1]
                    
                    z = standardize(segment, MISS)
                    likelihood_ratios = snht(z, MISS, standardized=True)
                    z_count = len(get_valid_data(z))
                        
                    ind_max_ratio = 0
                    max_ratio = 0.0
                    clip_ratios = likelihood_ratios[2:-2] # We clip the beginning and end
                    for (ind, ratio) in zip(xrange(len(clip_ratios)), clip_ratios):
                        if ratio > max_ratio:
                            ind_max_ratio = ind
                            max_ratio = ratio
                            
                    crit_val = lrt_lookup(z_count)
                    possible_changepoint = l + ind_max_ratio + 2 + adjust
                    
                    y_new, m_new = imo2iym(possible_changepoint)
                    
    
                    if z_count < 2:
                        y1, m1 = imo2iym(l)
                        y2, m2 = imo2iym(r)
                        print "%6s-%6s MD  No found peaks %4d %2d to %4d %2d" % (id1,id2,y1,m1,y2,m2)
                        print "%6s-%6s MD  Compress 1 out peak at %4d %2d" % (id1,id2,y_new,m_new)
                        #remove_breakpoints.add_
            ## If we found a new breakpoint that is statistically significant, then
            ## great! Let's keep it.
                    if max_ratio > crit_val:
                        print "%6s-%6s MD  Peak kept in merge at %4d %2d | ts: %4.2f limit >: %3.2f" % (id1,id2,y_new,m_new,max_ratio,crit_val)
                        merged_breakpoints.add(l)
                        merged_breakpoints.add(new_bp)
                        merged_breakpoints.add(r)
            ## If not, then this segment was homogeneous, so the breakpoint which
            ## already exists in it is no good.
                    else:
                        print "%6s-%6s MD Compress 2 out peak at %4d %2d | ts: %4.2f limit >: %3.2f" % (id1,id2,y_new,m_new,max_ratio,crit_val)
                        # Crap, if there are any potential breakpoints in this segment,
                        # we need to remove them because this segment is homogeneous. Let's
                        # remember this homogeneous segment for now and come back once
                        # we've found all of them.    
                        merged_breakpoints.update([l, r])
                        remove_breakpoints.add(new_bp)
            
            ## At this point, we have a set of all the breakpoints we've accumulated
            ## during this iteration of split/merge, as well as a set of breakpoints
            ## which we've found to be of no further use. We can difference update
            ## our set of breakpoints to remove these guys, and let those merged
            ## breakpoints be the set of newest breakpoints for the next splitmerge
            ## iteration.
                merged_breakpoints.difference_update(remove_breakpoints)
                breakpoints = list(merged_breakpoints)
            
            breakpoints = sorted(breakpoints)
            
            ## Did we actually find new breakpoints? If not, then we're done
            ## with splitmerge and can move on to the BIC process.
            enter_BIC = (breakpoints == last_breakpoints)
            iter = iter + 1
            
        ## Okay wow, we've potentially made it to the BIC stage now... !
        if first not in breakpoints:
            breakpoints.insert(0, first)
        ym_breakpoints = map(imo2iym, breakpoints)
        #print ym_breakpoints
        
        ## ENTERING MINBIC    
        bp_dictionary = dict()
####################################
##### MULTIPROCESS
        from multiprocessing import Pool

        global counter
        multi_bp_dict = {}
        counter = 0
        def cb(r):
            global counter
            #print counter, r
            counter += 1
        
        start = time.clock()         
        po = Pool(processes=4)
        for left,bp,right in zip(breakpoints[0:], breakpoints[1:], breakpoints[2:]):
                    
            if left != first:
                left = left + 1
            # recall that we only consider data after the first full year. we will be 
            # computing regressions with the independent variable indexed from this 
            # starting point, so we need to shift these indices. we also need to shift them
            # by +1 if this is any segment beyond the first one, so that we don't include
            # changepoints in more than one analysis.
            # TOTAL_SHIFT = -12 + 1 = -11
            # 
            # However, this shift is only necessary while looking at the array indices that
            # we generate using range(). the data should already be aligned correctly.
            total_shift = -12 + 1
            left_shift, bp_shift, right_shift = left+total_shift, bp+total_shift, right+total_shift
            y1, m1 = imo2iym(left)
            yb, mb = imo2iym(bp)
            y2, m2 = imo2iym(right)
            #print "Entering MINBIC - %4d %2d    %4d %2d    %4d %2d" % (y1, m1, yb,
            #                                                           mb, y2, m2)
            (seg_x, seg_data) = range(left_shift, right_shift+1), diff_data[left:right+1]
            bp_index = bp-left
            #print len(seg_x), len(seg_data), bp_index
            #bp_analysis = minbic(seg_x, seg_data, bp_index, MISS)
            multi_bp_dict[bp] = po.apply_async(minbic,(seg_x,seg_data,bp_index,MISS,),callback=cb)
        po.close()
        po.join()
        for bp in multi_bp_dict:
            r = multi_bp_dict[bp]
            multi_bp_dict[bp] = r.get()
        #print "counter - %d" % counter
        elapsed = (time.clock() - start)
        print "ELAPSED TIME - %2.3e" % elapsed
        #print new_bp_dict
####################################
##### NORMAL        
#        start = time.clock()
#        for left,bp,right in zip(breakpoints[0:], breakpoints[1:], breakpoints[2:]):
#                    
#            if left != first:
#                left = left + 1
#            # recall that we only consider data after the first full year. we will be 
#            # computing regressions with the independent variable indexed from this 
#            # starting point, so we need to shift these indices. we also need to shift them
#            # by +1 if this is any segment beyond the first one, so that we don't include
#            # changepoints in more than one analysis.
#            # TOTAL_SHIFT = -12 + 1 = -11
#            # 
#            # However, this shift is only necessary while looking at the array indices that
#            # we generate using range(). the data should already be aligned correctly.
#            total_shift = -12 + 1
#            left_shift, bp_shift, right_shift = left+total_shift, bp+total_shift, right+total_shift
#            y1, m1 = imo2iym(left)
#            yb, mb = imo2iym(bp)
#            y2, m2 = imo2iym(right)
#            print "Entering MINBIC - %4d %2d    %4d %2d    %4d %2d" % (y1, m1, yb,
#                                                                       mb, y2, m2)
#            (seg_x, seg_data) = range(left_shift, right_shift+1), diff_data[left:right+1]
#            bp_index = bp-left
#            #print len(seg_x), len(seg_data), bp_index
#            bp_analysis = minbic(seg_x, seg_data, bp_index, MISS)
#            
#            bp_dictionary[bp] = bp_analysis    
#        elapsed2 = (time.clock() - start)
#        print "ELAPSED TIME = %3.2e" % elapsed2
        
        ##################################3
        ## Print the adjustment summaries
        bp_dictionary = multi_bp_dict
        sorted_bps = sorted(bp_dictionary.keys())
        ndelete = []
        valid_bps = {}
        for bp in sorted_bps:
            stats = bp_dictionary[bp]
            
            cmodel=stats['cmodel']
            iqtype=stats['iqtype']
            asigx=stats['offset']
            azscr=stats['offset_z']
            rslp=stats['slopes']
            
            end1 = bp
            y_end1, m_end1 = imo2iym(end1)
            beg2 = bp+1
            y_beg2, m_beg2 = imo2iym(beg2)
            
            # If cmodel is *SLR*, then there is no breakpoint
            if 'SLR' in cmodel:
                print ("%s-%s  --  -- MD TESTSEG SKIP: %7.2f %5d %5d %3d %5d %5d %3d" %
                       (id1, id2, asigx, end1, y_end1, m_end1, beg2, y_beg2, m_beg2))
                # Don't store it!
            else:
                print ("%6s-%6s  --  -- MD TESTSEG ADJ: %7.2f %7.2f %8.4f %8.4f %5d %5d %3d %5d %5d %3d %2d" % 
                       (id1,id2, asigx, azscr, rslp[0], rslp[1], end1, y_end1, m_end1, beg2, y_beg2, m_beg2, iqtype))
                # Store it!
                valid_bps[bp] = stats
        
        ###############################
        ## Go back and see if we can get rid of some of the change points.
        ## If 2 or more of the chgpts are within MINLEN,
        ##    a) if the chgpt estimates are the same sign, then test each
        ##        singly with same endpoints and keep lowest BIC 
        ##    b) if not the same sign,
        ##        retain earliest changepoint
        # add the first, last to valid_bps
        interior_bps = valid_bps.keys()
        # Add first, last if not already in interior_bps
        for bp in [first, last]:
            if bp not in interior_bps:
                interior_bps.append(bp)
        sorted_bps = sorted(interior_bps)
        for left in sorted_bps:
            print sorted_bps, left
            ## We're looking for the next interim breakpoint that satisfies two
            ## conditions:
            ##    1) at least MINLEN valid data (non-missing to the right)
            ##    2) has at least one breakpoint between 'left' and it
            right = 0
            close_bps = []
            for right in sorted_bps: 
                if right <= left: continue
                
                if not close_bps:
                    close_bps.append(right)
                else:
                    valid_between_bps = diff_data[close_bps[-1]:right]
                    valid_length = len(get_valid_data(valid_between_bps, MISS))
                    print imo2iym(close_bps[-1]),valid_length,imo2iym(right)
                    if valid_length > MINLEN:
                        break
                    close_bps.append(right)
            # We could actually run out of things in sorted_bps, and wind up with
            # right == close_bps[-1]. Detect that and break out of this analysis
            # if that happens.
            if close_bps[-1]==right: break
            
            if left != first:
                left = left + 1
            close_bp_results = {}
            for bp in close_bps:
                        
#                # recall that we only consider data after the first full year. we will be 
#                # computing regressions with the independent variable indexed from this 
#                # starting point, so we need to shift these indices. we also need to shift them
#                # by +1 if this is any segment beyond the first one, so that we don't include
#                # changepoints in more than one analysis.
#                # TOTAL_SHIFT = -12 + 1 = -11
#                # 
#                # However, this shift is only necessary while looking at the array indices that
#                # we generate using range(). the data should already be aligned correctly.
                total_shift = -12 + 1
                left_shift, bp_shift, right_shift = left+total_shift, bp+total_shift, right+total_shift
                y1, m1 = imo2iym(left)
                yb, mb = imo2iym(bp)
                y2, m2 = imo2iym(right)
                
                print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
                print y1,m1,"-",yb,mb,"-",y2,m2
                print "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
                
                (seg_x, seg_data) = range(left_shift, right_shift+1), diff_data[left:right+1]
                bp_index = bp-left
                bp_analysis = minbic(seg_x, seg_data, bp_index, MISS, kthslr0_on=True)
                
                cmodel=bp_analysis['cmodel']
                iqtype= bp_analysis['iqtype']
                offset= bp_analysis['offset']
                rslp= bp_analysis['slopes']
                crit_val = bp_analysis['crit_val']
                test_stat = bp_analysis['test_stat']
                bic = bp_analysis['bic']
                
                print ("Interim chgpt: %s %4d %2d %4d %2d %4d %2d %8.2f %8.2f %8.2f %8.2f %7.3f %7.3f %2d" %
                       (pair_str, y1, m1, yb, mb, y2, m2, bic, test_stat, crit_val, offset, rslp[0], rslp[1], iqtype))                    
                
                close_bp_results[bp] = bp_analysis

            # Now we have a small problem... we might have more than one breakpoint,
            # so we need to choose which one is best. We will check the sign of
            # the breakpoint amplitude changes:
            sign_of_amps = map(sign, [close_bp_results[bp]['offset'] for bp in close_bps])
            positive = lambda x: sign(x) >= 0
            negative = lambda x: sign(x) <= 0
            zero = lambda x: sign(x) == 0
            print "------------>",[close_bp_results[bp]['offset'] for bp in close_bps]
            if (all(map(positive, sign_of_amps)) or 
                all(map(negative, sign_of_amps))):    
                # Pick the best (minimum BIC)          
                bics = [(bp, close_bp_results[bp]['bic']) for bp in close_bps]
                sorted_bics = sorted(bics, key=operator.itemgetter(1))
                smallest_bp = sorted_bics[0][0]
                
                # Remove this smallest-bic bp from the in-interval bps 
                close_bps.remove(smallest_bp)
                valid_bps[smallest_bp] = close_bp_results[smallest_bp] 
                
                #print "leftovers",close_bps
                for bp in close_bps: # The remaining bps which we will reject
                    sorted_bps.remove(bp) # Remove them from this loop
                    del valid_bps[bp] # Remove them as valid 
                    
                yb, mb = imo2iym(smallest_bp)
                print ("Same domain - Lowest Interim: %s %4d %2d" % 
                       (pair_str, yb, mb))
            elif (all(map(zero, sign_of_amps))):
                # Choose the earliest changepoint; the rest of these have
                # amplitude changes which are 0.
                first_bp, last_bp = close_bps[0], close_bps[-1]
                
                # Remove the first interim bp and update valid_bps with this new
                # computation. 
                close_bps.remove(first_bp)
                valid_bps[first_bp] = close_bp_results[first_bp]
                
                # Reject remaining interim bps
                for bp in close_bps:
                    sorted_bps.remove(bp)
                    del valid_bps[bp]
                    
                yb, mb = imo2iym(first_bp)
                print ("Null domain - Earliest Interim : %s %4d %2d" %
                       (pair_str, yb, mb))
            else:
                # We'll use the earliest interim changepoint, but we need
                # to get rid of bad data. Replace all the data between the 
                # interim changepoints as missing and re-compute BIC.
                first_bp, last_bp = close_bps[0], close_bps[-1]
                first_bp_index = first_bp-left
                last_bp_index = last_bp-left
                
                print len(seg_x), len(seg_data)
                print first_bp_index+1, last_bp_index+1
                print left, bp, right
                for i in range(first_bp_index+1, last_bp_index+1):
                    print i, imo2iym(i), i+left, imo2iym(i+left)
                    seg_x[i] = MISS
                    seg_data[i] = MISS
                    # Recall that seg_data[0] == diff_data[left]. ndelete records
                    # the *true month where there is unviable data*, so it needs to
                    # point back to the original element in diff_data we are 
                    # worried about.
                    ndelete.append(i+left) 
                bp_analysis = minbic(seg_x, seg_data, first_bp_index, MISS, kthslr0_on=True)
                
                # Remove the first interim bp and update valid_bps with this new
                # computation. 
                close_bps.remove(first_bp)
                valid_bps[first_bp] = bp_analysis
                
                # Reject remaining interim bps
                for bp in close_bps:
                    sorted_bps.remove(bp)
                    del valid_bps[bp]
                
                yb, mb = imo2iym(first_bp)
                print ("Diff domain - Earliest Interim : %s %4d %2d" %
                       (pair_str, yb, mb))                
    
        ## Remove changepoints which are an SLR model.
        nspan = [0]*num_months
        bp_count = 1
        for bp in sorted(valid_bps.keys()):
            bp_analysis = valid_bps[bp]
            
            if "SLR" in bp_analysis['cmodel']:
                del valid_bps[bp]
                continue
            
            print "   IN: ",bp
            nspan[bp] = bp_count
            ## If adjacent months are missing next to this breakpoint, then
            ## assume that those could be a breakpoint as well and copy this
            ## breakpoint's analysis results for them.
            for month in range(bp+1, last):
                if (month in ndelete) or (diff_data[month] == MISS):
                    nspan[month] = bp_count
                    print "   IN: ",month
                    valid_bps[month] = bp_analysis
                else:
                    break
            bp_count += 1
            
        valid_bps['del'] = ndelete
        valid_bps['nspan'] = nspan
        pair_results[pair_str] = valid_bps
        
        #print "ELAPSED TIMES = %3.2e %3.2e" % (elapsed1, elapsed2)
    print "done"
    ##
    import pickle
    f = open("pair_results", 'w')
    pickle.dump(pair_results, f)
    return pair_results
Example #21
def invert_angle(angle: float):
    return angle - sign(angle) * pi
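This maps an angle in (-pi, pi] to the one pointing the opposite way while staying in that range; note that under a sign(0) == 0 convention, invert_angle(0.0) returns 0.0 rather than pi. For example:

    from math import pi

    print(invert_angle(3 * pi / 4))  # -pi/4: same line, opposite direction
    print(invert_angle(-pi / 2))     # pi/2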
Example #22
 def get_controls(self):
     """Decides what strategy to uses and gives corresponding output"""
     self.drive.power_turn = False
     if self.step is Step.Steer or self.step is Step.Drive:
         self.step = Step.Catching
     if self.step is Step.Catching:
         # Enable power turning for catching, since we don't halfflip
         self.drive.power_turn = True
         target = get_bounce(self)
         if target is None:
             self.step = Step.Shooting
         else:
             self.catching.target = target[0]
             self.catching.speed = (distance_2d(self.info.my_car.location,
                                                target[0]) + 50) / target[1]
             self.catching.step(self.info.time_delta)
             self.controls = self.catching.controls
             ball = self.info.ball
             car = self.info.my_car
             if distance_2d(ball.location, car.location) < 150 and 65 < abs(
                     ball.location[2] - car.location[2]) < 127:
                 self.step = Step.Dribbling
                 self.dribble = Dribbling(self.info.my_car, self.info.ball,
                                          self.their_goal)
             if self.defending:
                 self.step = Step.Defending
             ball = self.info.ball
             if abs(ball.velocity[2]) < 100 and sign(self.team) * ball.velocity[1] < 0 and sign(self.team) * \
                     ball.location[1] < 0:
                 self.step = Step.Shooting
     elif self.step is Step.Dribbling:
         self.dribble.step()
         self.controls = self.dribble.controls
         ball = self.info.ball
         car = self.info.my_car
         bot_to_opponent = self.info.cars[
             1 - self.index].location - self.info.my_car.location
         local_bot_to_target = dot(bot_to_opponent,
                                   self.info.my_car.rotation)
         angle_front_to_target = math.atan2(local_bot_to_target[1],
                                            local_bot_to_target[0])
         opponent_is_near = norm(vec2(bot_to_opponent)) < 2000
         opponent_is_in_the_way = math.radians(
             -10) < angle_front_to_target < math.radians(10)
         if not (distance_2d(ball.location, car.location) < 150
                 and 65 < abs(ball.location[2] - car.location[2]) < 127):
             self.step = Step.Catching
         if self.defending:
             self.step = Step.Defending
         if opponent_is_near and opponent_is_in_the_way:
             self.step = Step.Dodge
             self.dodge = Dodge(self.info.my_car)
             self.dodge.duration = 0.25
             self.dodge.target = self.their_goal.center
     elif self.step is Step.Defending:
         defending(self)
     elif self.step in [
             Step.Dodge, Step.Dodge_1, Step.Dodge_2, Step.HalfFlip
     ]:
         halfflipping = self.step is Step.HalfFlip
         if halfflipping:
             self.halfflip.step(self.info.time_delta)
         else:
             self.dodge.step(self.info.time_delta)
         if self.halfflip.finished if halfflipping else self.dodge.finished:
             self.step = Step.Catching
         else:
             self.controls = (self.halfflip.controls
                              if halfflipping else self.dodge.controls)
     elif self.step is Step.Shooting:
         shooting(self)
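In these two bots, `sign(self.team)` turns the team index into a field direction: the conditions compare the ball's y position and y velocity against the team's side of the field. For blue (team 0) they could never trigger if sign(0) were 0, so the helper is presumably not a strict numeric sign. A definition consistent with this usage (an assumption; the bots' util module is not shown):

    def sign(team):
        # Field-side helper: +1 for blue (team 0), -1 for orange (team 1),
        # so that sign(team) * y < 0 means 'in this team's half of the field'.
        # (Assumed definition, inferred from the checks above.)
        return 1 if team == 0 else -1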
Example #23
 def get_controls(self):
     if self.step == "Steer" or self.step == "Dodge2":
         self.step = "Catching"
     if self.step == "Catching":
         target = get_bounce(self)
         if target is None:
             self.step = "Defending"
         else:
             self.catching.target_pos = target[0]
             self.catching.target_speed = (distance_2d(
                 self.info.my_car.pos, target[0]) + 50) / target[1]
             self.catching.step(self.FPS)
             self.controls = self.catching.controls
             ball = self.info.ball
             car = self.info.my_car
             if distance_2d(ball.pos,
                            car.pos) < 150 and 65 < abs(ball.pos[2] -
                                                        car.pos[2]) < 127:
                 self.step = "Dribbling"
                 self.dribble = Dribbling(self.info.my_car, self.info.ball,
                                          self.info.their_goal)
             if self.defending:
                 self.step = "Defending"
             if not self.info.my_car.on_ground:
                 self.step = "Recovery"
             ball = self.info.ball
             if abs(ball.vel[2]) < 100 and sign(
                     self.team) * ball.vel[1] < 0 and sign(
                         self.team) * ball.pos[1] < 0:
                 self.step = "Shooting"
     elif self.step == "Dribbling":
         self.dribble.step(self.FPS)
         self.controls = self.dribble.controls
         ball = self.info.ball
         car = self.info.my_car
         bot_to_opponent = self.info.opponents[0].pos - self.info.my_car.pos
         local_bot_to_target = dot(bot_to_opponent, self.info.my_car.theta)
         angle_front_to_target = math.atan2(local_bot_to_target[1],
                                            local_bot_to_target[0])
         opponent_is_near = norm(vec2(bot_to_opponent)) < 2000
         opponent_is_in_the_way = math.radians(
             -10) < angle_front_to_target < math.radians(10)
         if not (distance_2d(ball.pos, car.pos) < 150
                 and 65 < abs(ball.pos[2] - car.pos[2]) < 127):
             self.step = "Catching"
         if self.defending:
             self.step = "Defending"
         if opponent_is_near and opponent_is_in_the_way:
             self.step = "Dodge"
             self.dodge = AirDodge(self.info.my_car, 0.25,
                                   self.info.their_goal.center)
         if not self.info.my_car.on_ground:
             self.step = "Recovery"
     elif self.step == "Defending":
         defending(self)
     elif self.step == "Dodge":
         self.dodge.step(self.FPS)
         self.controls = self.dodge.controls
         self.controls.boost = 0
         if self.dodge.finished and self.info.my_car.on_ground:
             self.step = "Catching"
     elif self.step == "Recovery":
         self.recovery.step(self.FPS)
         self.controls = self.recovery.controls
         if self.info.my_car.on_ground:
             self.step = "Catching"
     elif self.step == "Shooting":
         shooting(self)