Example #1
def train_one_vs_one(make_classifier_function,
                     X,
                     Y,
                     epsilon,
                     n_jobs=1,
                     **kwargs):
    global _classifier_combos
    global _classifier_generator
    _classifier_generator = make_classifier_function
    global _X
    _X = X
    global _Y
    _Y = Y
    global _epsilon
    _epsilon = epsilon
    global _train_args
    _train_args = kwargs
    global _coefs
    classes = np_sort(unique(Y))
    _classifier_combos = list()
    n_classes = len(classes)
    for i in range(n_classes):
        for j in range(i + 1, n_classes):
            _classifier_combos.append((classes[i], classes[j]))
    with Pool(n_jobs) as pool:
        print("Starting %d jobs in a multiprocessing pool" % n_jobs)
        all_betas = pool.map(train_model, _classifier_combos)
    _coefs = dict(zip(_classifier_combos, all_betas))
    return _coefs
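A quick standalone illustration of the pair enumeration performed by the nested loop above (pure Python, no training involved):

classes = [0, 1, 2, 3]
combos = [(classes[i], classes[j])
          for i in range(len(classes))
          for j in range(i + 1, len(classes))]
print(combos)  # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)] -> one binary model per pair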
Example #2
    def _calculateValue(data):
        '''
        Take the mean of the pixels whose ranks fall in the 75%-87.5% interval.
        :param data: pixel array (flattened internally)
        :return: mean of the selected ranked interval
        '''
        data = data.flatten()

        # s = data.size
        # print( "dsize=",s, "-",end='')
        i = 3
        while data.size > 80 and i > 0:
            data = StripRegion._filteringAnomaly(data, StripRegion._two_sigma)
            i -= 1
        count = data.size
        # print(count, '=', s-count,round((s-count)*100/s),"%")
        data = np_sort(data)
        value = (count >> 3) + 1
        i1 = (count >> 1)
        i0 = i1 - value if i1 > value else 0

        value = np_average(data[i0:i1])
        # value += 0xff - bgColor

        # print("val:",value)
        return value
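A worked example of the index arithmetic above, assuming the filtered array ends up with count = 80 pixels:

count = 80
value = (count >> 3) + 1   # 11 -> width of the averaged slice (~1/8 of the data)
i1 = count >> 1            # 40 -> upper bound of the slice
i0 = i1 - value            # 29 (here i1 > value, so the guard branch is not needed)
# np_average(data[29:40]) then averages 11 consecutive ranked pixel values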
Example #3
def predict_ovr(newX=None, n_jobs=1):
    global _X
    global _Y
    global _newX
    global _coefs_ovr
    _newX = _X if newX is None else newX
    classes = np_sort(unique(_Y))
    with Pool(n_jobs) as pool:
        preds = pool.map(_predict_class_ovr, classes)
    preds = DataFrame(dict(zip(classes, preds)))
    return array(preds.idxmax(axis="columns"))
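A small illustration of the final arg-max step (pandas only; the per-class scores below are invented):

from numpy import array
from pandas import DataFrame

preds = DataFrame({0: [0.1, 0.9], 1: [0.7, 0.2], 2: [0.3, 0.4]})
print(array(preds.idxmax(axis="columns")))  # [1 0] -> winning class per row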
Example #4
    def _neighbor_distance(self):
        print("Finding Neighbor Distance")
        neighbors = NearestNeighbors(n_neighbors=2, n_jobs=8).fit(self.pct_change_frame)
        distances, indices = neighbors.kneighbors(self.pct_change_frame)
        distances = np_sort(distances, axis=0)
        distances = distances[:, 1]
        print(f"Median: {np_median(distances)}")
        print(f"Mean: {np_mean(distances)}")
        plot = plt.figure()
        plt.plot(distances)
        plot.show()
        chosen_dist = float(input("Choose Neighborhood Distance: "))
        print(chosen_dist)
        return chosen_dist
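A minimal standalone version of the same k-distance curve, using random data purely for illustration (the `pct_change_frame` attribute is specific to the class above):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors

X = np.random.rand(200, 3)
distances, _ = NearestNeighbors(n_neighbors=2).fit(X).kneighbors(X)
nearest = np.sort(distances[:, 1])      # column 0 is each point's distance to itself (0.0)
plt.plot(nearest)
plt.ylabel("distance to nearest neighbour")
plt.show()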
Example #5
    def score_icfof(self, query: np_array, ntss: np_ndarray, rho=[0.001, 0.005, 0.01, 0.05, 0.1],
                    each_tree_score: bool = False, fast_method: bool = True):
        """
        Compute the *i*\ CFOF approximations.
        Calls one of two functions depending on the ``fast_method`` parameter:
         - if ``True`` (default): :func:`~pyCFOFiSAX._forest_iSAX.ForestISAX.vranglist_by_idtree_faster`
         - if ``False``: :func:`~pyCFOFiSAX._forest_iSAX.ForestISAX.vranglist_by_idtree`
        Then sorts the vrang list to obtain the CFOF score approximations for the given ``rho`` values.

        :param numpy.array query: The sequence to be evaluated
        :param numpy.ndarray ntss: Reference sequences
        :param list rho: Rho values for the computation of approximations
        :param bool each_tree_score: if `True`, returns the scores obtained in each of the trees
        :param bool fast_method: if `True`, uses vectorized numpy functions for the computation; otherwise walks the tree via a FIFO list of nodes

        :returns: *i*\ CFOF score approximations
        :rtype: numpy.ndarray
        """

        k_rho = _convert_rho_to_krho(rho, len(ntss))

        k_list_result_mean = np_zeros(len(ntss))

        if each_tree_score:
            k_list_result_ndarray = np_ndarray(shape=(self.forest_isax.number_tree, len(ntss)))

        for id_tree, tree in self.forest_isax.forest.items():

            ntss_tmp = np_array(ntss)[:, self.forest_isax.indices_partition[id_tree]]
            sub_query = query[self.forest_isax.indices_partition[id_tree]]

            if fast_method:
                k_list_result_tmp = tree.vrang_list_faster(sub_query, ntss_tmp)
            else:
                k_list_result_tmp = tree.vrang_list(sub_query, ntss_tmp)

            ratio_klist_tmp = (len(self.forest_isax.indices_partition[id_tree]) / self.forest_isax.size_word)
            k_list_result_mean = np_add(k_list_result_mean, np_array(k_list_result_tmp) * ratio_klist_tmp)
            if each_tree_score:
                k_list_result_ndarray[id_tree] = k_list_result_tmp

        k_list_result_mean = np_sort(k_list_result_mean, axis=None)

        if each_tree_score:
            k_list_result_ndarray.sort()
            return score_by_listvrang(k_list_result_mean.tolist(), k_rho), \
                   [score_by_listvrang(list(k_l_r), k_rho) for k_l_r in k_list_result_ndarray]

        return score_by_listvrang(k_list_result_mean.tolist(), k_rho)
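A sketch of the per-tree weighting performed in the loop above (plain numpy; the vrang values and partition sizes are invented for illustration):

from numpy import add as np_add, array as np_array, sort as np_sort, zeros as np_zeros

size_word = 8
partitions = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}               # indices handled by each tree
vrangs = {0: np_array([3.0, 5.0]), 1: np_array([7.0, 1.0])}   # one vrang per reference sequence

k_list_result_mean = np_zeros(2)
for id_tree, idx in partitions.items():
    ratio = len(idx) / size_word                              # each tree covers half the word here
    k_list_result_mean = np_add(k_list_result_mean, vrangs[id_tree] * ratio)
print(np_sort(k_list_result_mean, axis=None))                 # [3. 5.]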
Example #6
    def _rqa_lv_max(self, th, freq_dist, measure_dict, offset):
        try:
            self.rplot
        except AttributeError:
            return -1

        try:
            return measure_dict[str(th)]
        except KeyError:
            d = freq_dist(th)
            n = np_hstack((1, np_array(list(d.values()))))
            l = np_hstack((0, np_array([int(k) for k in d.keys()])))
            measure_dict[str(th)] = np_sort(l * (n > 0).astype(int))[
                -offset]  # [-2] to avoid considering the diagonal line

            return measure_dict[str(th)]
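A tiny numeric illustration of the masking-and-sort step above, using a made-up line-length histogram:

from numpy import array as np_array, hstack as np_hstack, sort as np_sort

d = {'2': 5, '3': 0, '7': 1}                              # line length -> frequency
n = np_hstack((1, np_array(list(d.values()))))            # [1 5 0 1]
l = np_hstack((0, np_array([int(k) for k in d.keys()])))  # [0 2 3 7]
print(np_sort(l * (n > 0).astype(int)))                   # [0 0 2 7]; index [-2] skips the diagonal line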
Example #7
def train_one_vs_rest(make_classifier_function,
                      X,
                      Y,
                      epsilon,
                      n_jobs=1,
                      **kwargs):
    global _classifier_combos
    global _classifier_generator
    _classifier_generator = make_classifier_function
    global _X
    _X = X
    global _Y
    _Y = Y
    global _epsilon
    _epsilon = epsilon
    global _train_args
    _train_args = kwargs
    global _coefs_ovr
    classes = np_sort(unique(Y))
    with Pool(n_jobs) as pool:
        betas = pool.map(train_ovr_model, classes)
    _coefs_ovr = dict(zip(classes, betas))
    return _coefs_ovr
Example #8
img = imread('img.png')

# 1. Convert the image to floating-point values in [0, 1].
img_f = img_as_float(img)

# 2. Convert the image to the YUV color space using the formulas:
img_y = 0.2126 * img_f[:, :, 0] + 0.7152 * img_f[:, :, 1] + 0.0722 * img_f[:, :, 2]
img_u = -0.0999 * img_f[:, :, 0] - 0.3360 * img_f[:, :, 1] + 0.4360 * img_f[:, :, 2]
img_v = 0.6150 * img_f[:, :, 0] - 0.5586 * img_f[:, :, 1] - 0.0563 * img_f[:, :, 2]

# 3. Find the maximum and minimum for robust autocontrast,
# discarding the 5% brightest and 5% darkest pixels.
img_sort_list = np_sort(img_y, axis=None)
k = round(img_sort_list.size * 0.05)
x_min, x_max = img_sort_list[k], img_sort_list[-k - 1]

# 4. Apply a linear stretch to the Y channel.
# 5. Clip the Y channel values to the range [0, 1].
img_y = np_clip((img_y - x_min) / (x_max - x_min), 0, 1)

# 6. Convert the image back to RGB using the formulas:
# 7. Clip the image values to the range [0, 1].
img_r = np_clip(img_y + 1.2803 * img_v, 0, 1)
img_g = np_clip(img_y - 0.2148 * img_u - 0.3805 * img_v, 0, 1)
img_b = np_clip(img_y + 2.1279 * img_u, 0, 1)

# 8. Convert the image to integers in the range [0, 255].
img_combined = img_as_ubyte(dstack((img_r, img_g, img_b)))
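This snippet appears to rely on aliased numpy/skimage imports along these lines (a guess at the original header; `imread` might equally come from matplotlib or imageio):

from numpy import sort as np_sort, clip as np_clip, dstack
from skimage import img_as_float, img_as_ubyte
from skimage.io import imread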
Example #9
def find_max_neig(neig_list,g1,perc,model,scaler,inputs):

    n_maxs = len(neig_list)
    if n_maxs == 0:
        return None    
    if n_maxs > 10:
        # Ascending order         
        n_maxs = int(np_ceil(perc*len(neig_list)))    
    
    neig_key_list = [k for k in neig_list]
    neig_wt_list = [float(neig_list[k]['weight']) for k in neig_list]
    sorted_ind = np_argsort(neig_wt_list)
    sorted_wts = [{'weight':val} for val in np_sort(neig_wt_list)][-n_maxs:]
    sorted_neig_keys = [neig_key_list[i] for i in sorted_ind][-n_maxs:]
    imp_neigs = dict(zip(sorted_neig_keys,sorted_wts))
    
    folNm = inputs['folNm']
    
    if len(imp_neigs) == 1:
        imp_neig = list(imp_neigs.keys())[0]
        wt = imp_neigs[imp_neig]
        wt_edge = wt['weight']          
        node_to_add = imp_neig
        #ADD ALL EDGES OF NEW NODE TO ORIG GRAPH
        with open(folNm+"/"+node_to_add,'rb') as f:
            its_neig_list = pickle_load(f)      
                    
        orig_nodes = g1.nodes()
        all_nodesWedges = set(orig_nodes).intersection(its_neig_list)
        
        for node in all_nodesWedges:
            wt = its_neig_list[node]
            wt_edge = wt['weight']
            g1.add_edge(node_to_add,node,weight=wt_edge)        
        (score_imp_neig,comp_bool) = get_score(g1,model,scaler,inputs['model_type'])
        g1.remove_node(node_to_add)
    else:
        scores = {}
        for neig in imp_neigs:
            # Add to graph 
            wt = imp_neigs[neig]
            wt_edge = wt['weight']          
            node_to_add = neig
            #ADD ALL EDGES OF NEW NODE TO ORIG GRAPH
            with open(folNm+"/"+node_to_add,'rb') as f:
                its_neig_list = pickle_load(f)                              
            orig_nodes = g1.nodes()
            all_nodesWedges = set(orig_nodes).intersection(its_neig_list)
            
            for node in all_nodesWedges:
                wt = its_neig_list[node]
                wt_edge = wt['weight']
                g1.add_edge(node_to_add,node,weight=wt_edge)
            # Check score
            (score_curr,comp_bool) = get_score(g1,model,scaler,inputs['model_type'])         
            scores[neig] = score_curr
            g1.remove_node(node_to_add)
            
            
        imp_neig = max(iter(scores.items()), key=operator_itemgetter(1))[0]  
        score_imp_neig = scores[imp_neig]

    return (imp_neig, score_imp_neig)
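A small standalone illustration of the "keep the top fraction of neighbours by weight" selection at the start of the function (made-up neighbour dictionary):

from numpy import argsort as np_argsort, ceil as np_ceil, sort as np_sort

neig_list = {'a': {'weight': 0.2}, 'b': {'weight': 0.9}, 'c': {'weight': 0.5}}
perc = 0.6
n_maxs = int(np_ceil(perc * len(neig_list)))                  # 2
neig_key_list = list(neig_list)
neig_wt_list = [float(neig_list[k]['weight']) for k in neig_list]
sorted_neig_keys = [neig_key_list[i] for i in np_argsort(neig_wt_list)][-n_maxs:]
sorted_wts = [{'weight': w} for w in np_sort(neig_wt_list)][-n_maxs:]
print(dict(zip(sorted_neig_keys, sorted_wts)))
# {'c': {'weight': 0.5}, 'b': {'weight': 0.9}} -> the two heaviest neighbours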
Example #10
def fixPosWLAN(len_wlan=None, wlan=None, wppdb=None, verb=False):
    """
    Returns the online fixed user location in lat/lon format.
    
    Parameters
    ----------
    len_wlan: int, mandatory
        Number of online visible WLAN APs.
    wlan: np.array, string list, mandatory
        Array of MAC/RSS for online visible APs.
        e.g. [['00:15:70:9E:91:60' '00:15:70:9E:91:61' '00:15:70:9E:91:62' '00:15:70:9E:6C:6C']
              ['-55' '-56' '-57' '-68']]. 
    wppdb: object, mandatory
        Radio-map database handle; must provide getBestClusters().
    verb: bool, optional, default: False
        Verbose mode; prints more debugging info if enabled (True).
    
    Returns
    -------
    posfix: list of float
        Final fixed location (lat, lon), e.g. [ 39.922942, 116.472673 ], with the
        estimated position error in meters appended as a third element.
    """
    interpart_offline = False; interpart_online = False

    # db query result: [ maxNI, keys:[ [keyaps:[], keycfps:(())], ... ] ].
    # maxNI=0 if no cluster found.
    maxNI,keys = wppdb.getBestClusters(macs=wlan[0])
    #maxNI,keys = [2, [
    #    [['00:21:91:1D:C0:D4', '00:19:E0:E1:76:A4', '00:25:86:4D:B4:C4'], 
    #        [[5634, 5634, 39.898019, 116.367113, '-83|-85|-89']] ],
    #    [['00:21:91:1D:C0:D4', '00:25:86:4D:B4:C4'],
    #        [[6161, 6161, 39.898307, 116.367233, '-90|-90']] ] ]]
    if maxNI == 0: # no intersection found
        wpplog.error('NO cluster found! Fingerprinting TERMINATED!')
        return []
    elif maxNI < CLUSTERKEYSIZE:
        # size of intersection set < offline key AP set size:4, 
        # offline keymacs/keyrsss (not online maxmacs/maxrsss) need to be cut down.
        interpart_offline = True
        if maxNI < len_wlan: #TODO: TBE.
            # size of intersection set < online AP set size(len_wlan) < CLUSTERKEYSIZE,
            # not only keymacs/keyrsss, but also maxmacs/maxrsss need to be cut down.
            interpart_online = True
        if verb: wpplog.debug('Partly[%d] matched cluster(s) found:' % maxNI)
    else: 
        if verb: wpplog.debug('Full matched cluster(s) found:')
    if verb: wpplog.debug('keys:\n%s' % keys)

    # Evaluation|sort of similarity between online FP & radio map FP.
    # fps_cand: [ min_spid1:[cid,spid,lat,lon,rsss], min_spid2, ... ]
    # keys: ID and key APs of matched cluster(s) with max intersect APs.
    all_pos_lenrss = []
    fps_cand = []; sums_cand = []
    for keyaps,keycfps in keys:
        if verb: wpplog.debug('keyaps:\n%s\nkeycfps:\n%s' % (keyaps, keycfps))
        # Fast fix when the ONLY 1 selected cid has ONLY 1 fp in 'cfps'.
        if len(keys)==1 and len(keycfps)==1:
            fps_cand = [ list(keycfps[0]) ]
            break
        pos_lenrss = (array(keycfps)[:,1:3].astype(float)).tolist()
        keyrsss = np_char_array(keycfps)[:,4].split('|') #4: column order in cfps.tbl
        keyrsss = array([ [float(rss) for rss in spid] for spid in keyrsss ])
        for idx,pos in enumerate(pos_lenrss):
            pos_lenrss[idx].append(len(keyrsss[idx]))
        all_pos_lenrss.extend(pos_lenrss)
        # Rearrange key MACs/RSSs in 'keyrsss' according to intersection set 'keyaps'.
        if interpart_offline:
            if interpart_online:
                wl = deepcopy(wlan) # mmacs->wl[0]; mrsss->wl[1]
                idxs_inters = [ idx for idx,mac in enumerate(wlan[0]) if mac in keyaps ]
                wl = wl[:,idxs_inters]
            else: wl = wlan
        else: wl = wlan
        idxs_taken = [ keyaps.index(x) for x in wl[0] ]
        keyrsss = keyrsss.take(idxs_taken, axis=1)
        mrsss = wl[1].astype(int)
        # Euclidean dist solving and sorting.
        sum_rss = np_sum( (mrsss-keyrsss)**2, axis=1 )
        fps_cand.extend( keycfps )
        sums_cand.extend( sum_rss )
        if verb: wpplog.debug('sum_rss:\n%s' % sum_rss)

    # Location estimation.
    if len(fps_cand) > 1:
        # KNN
        # lst_set_sums_cand: list format for set of sums_cand.
        # bound_dist: distance boundary for K-min distances.
        lst_set_sums_cand =  array(list(set(sums_cand)))
        idx_bound_dist = argsort(lst_set_sums_cand)[:KNN][-1]
        bound_dist = lst_set_sums_cand[idx_bound_dist]
        idx_sums_sort = argsort(sums_cand)

        sums_cand = array(sums_cand)
        fps_cand = array(fps_cand)

        sums_cand_sort = sums_cand[idx_sums_sort]
        idx_bound_fp = searchsorted(sums_cand_sort, bound_dist, 'right')
        idx_sums_sort_bound = idx_sums_sort[:idx_bound_fp]
        #idxs_kmin = argsort(min_sums)[:KNN]
        sorted_sums = sums_cand[idx_sums_sort_bound]
        sorted_fps = fps_cand[idx_sums_sort_bound]
        if verb: wpplog.debug('k-dists:\n%s\nk-locations:\n%s' % (sorted_sums, sorted_fps))
        # DKNN
        if sorted_sums[0]: 
            boundry = sorted_sums[0]*KWIN
        else: 
            if sorted_sums[1]:
                boundry = KWIN
                # What the hell are the following two lines doing here!
                #idx_zero_bound = searchsorted(sorted_sums, 0, side='right')
                #sorted_sums[:idx_zero_bound] = boundry / (idx_zero_bound + .5)
            else: boundry = 0
        idx_dkmin = searchsorted(sorted_sums, boundry, side='right')
        dknn_sums = sorted_sums[:idx_dkmin].tolist()
        dknn_fps = sorted_fps[:idx_dkmin]
        if verb: wpplog.debug('dk-dists: \n%s\ndk-locations: \n%s' % (dknn_sums, dknn_fps))
        # Weighted_AVG_DKNN.
        num_dknn_fps = len(dknn_fps)
        if  num_dknn_fps > 1:
            coors = dknn_fps[:,1:3].astype(float)
            num_keyaps = array([ rsss.count('|')+1 for rsss in dknn_fps[:,-2] ])
            # ww: weights of dknn weights.
            ww = np_abs(num_keyaps - len_wlan).tolist()
            #wpplog.debug(ww)
            if not np_all(ww):
                if np_any(ww):
                    ww_sort = np_sort(ww)
                    #wpplog.debug('ww_sort: %s' % ww_sort)
                    idx_dknn_sums_sort = searchsorted(ww_sort, 0, 'right')
                    #wpplog.debug('idx_dknn_sums_sort: %s' % idx_dknn_sums_sort)
                    ww_2ndbig = ww_sort[idx_dknn_sums_sort] 
                    w_zero = ww_2ndbig / (len(ww)*ww_2ndbig)
                else: w_zero = 1
                #for idx,sum in enumerate(ww):
                #    if not sum: ww[idx] = w_zero
                ww = [ w if w else w_zero for w in ww ]
            ws = array(ww) + dknn_sums
            weights = reciprocal(ws)
            if verb: wpplog.debug('coors:%s, weights:%s' % (coors, weights))
            posfix = average(coors, axis=0, weights=weights)
        else: posfix = array(dknn_fps[0][1:3]).astype(float)
        # ErrRange Estimation (more than 1 relevant clusters).
        idxs_clusters = idx_sums_sort_bound[:idx_dkmin]
        if len(idxs_clusters) == 1: 
            if maxNI == 1: poserr = 200
            else: poserr = 100
        else: 
            if verb: wpplog.debug('idxs_clusters: %s\nall_pos_lenrss: %s' % (idxs_clusters, all_pos_lenrss))
            #allposs_dknn = vstack(array(all_pos_lenrss, object)[idxs_clusters])
            allposs_dknn = array(all_pos_lenrss, object)[idxs_clusters]
            if verb: wpplog.debug('allposs_dknn: %s' % allposs_dknn)
            poserr = max( average([ dist_km(posfix[1], posfix[0], p[1], p[0])*1000 
                for p in allposs_dknn ]), 100 )
    else: 
        fps_cand = fps_cand[0][:-2]
        if verb: wpplog.debug('location:\n%s' % fps_cand)
        posfix = array(fps_cand[1:3]).astype(float)
        # ErrRange Estimation (only 1 relevant clusters).
        N_fp = len(keycfps)
        if N_fp == 1: 
            if maxNI == 1: poserr = 200
            else: poserr = 150
        else:
            if verb: 
                wpplog.debug('all_pos_lenrss: %s' % all_pos_lenrss)
                wpplog.debug('posfix: %s' % posfix)
            poserr = max( np_sum([ dist_km(posfix[1], posfix[0], p[1], p[0])*1000 
                for p in all_pos_lenrss ]) / (N_fp-1), 100 )
    ret = posfix.tolist()
    ret.append(poserr)

    return ret
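A minimal sketch of the KNN/DKNN distance windowing used in the "Location estimation" branch above (standalone numpy; KNN, KWIN and the squared-distance values are invented for illustration):

from numpy import argsort, array, searchsorted

KNN, KWIN = 3, 3.0
sums_cand = array([9.0, 1.0, 4.0, 1.0, 25.0])           # squared RSS distances per candidate fp

# KNN: the K-th smallest *distinct* distance becomes the boundary.
bound_dist = array(sorted(set(sums_cand.tolist())))[:KNN][-1]   # 9.0
idx_sums_sort = argsort(sums_cand)
sums_cand_sort = sums_cand[idx_sums_sort]
idx_bound_fp = searchsorted(sums_cand_sort, bound_dist, 'right')
sorted_sums = sums_cand_sort[:idx_bound_fp]              # [1. 1. 4. 9.]

# DKNN: keep only candidates within KWIN times the best distance.
boundry = sorted_sums[0] * KWIN if sorted_sums[0] else 0
idx_dkmin = searchsorted(sorted_sums, boundry, side='right')
print(sorted_sums[:idx_dkmin])                           # [1. 1.]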
Example #11
_id = 4
batch_size = 1
time_size = 100
freq_size = 108
transform_type = 'cqt'
window_size = 11.61
data_dir = 'D:\\Documents\\Thesis\\Project Skaterbot\\Playlists\\Mixxx\\3\\transforms\\locator_v1_cqt\\'

locator = Locator1(_id, batch_size, time_size, freq_size, transform_type,
                   window_size)
locator.load_trained_model()
locator.rnn.decoder.summary()

data_handler = Locator1DataHandler(data_dir, time_size, freq_size, window_size,
                                   transform_type)
playlist = os_listdir(data_dir)
for song_name in playlist:
    print('Predictions of:', song_name)
    x = data_handler.read_input(song_name, 0,
                                data_handler.max_time_steps * time_size)
    print(x.shape)
    predictions = locator.predict(x)[0]
    print('Time step duration:', (time_size / 1000) * window_size, 'seconds.')
    print('Time steps:', predictions.size)
    print('Indexes:', predictions.argsort()[-8:][::-1])
    print(
        'Start of Intervals:',
        np_sort(predictions.argsort()[-8:][::-1] * window_size *
                (time_size / 1000)))
    print('------------------------------------------------------------------')
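Worked example of the index-to-seconds conversion used in the last print statement (values taken from this script):

time_size, window_size = 100, 11.61
step_seconds = (time_size / 1000) * window_size   # 1.161 s per predicted time step
print(step_seconds * 42)                          # index 42 -> about 48.8 s into the song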
Example #12
def n_smallest_safe(arr, n):
    return np_sort(np_ravel(arr))[:n]
Example #13
def n_largest_safe(arr, n):
    return np_sort(np_ravel(arr))[-n:]
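A quick check of the two helpers above, assuming the usual aliases `from numpy import sort as np_sort, ravel as np_ravel`:

from numpy import array, ravel as np_ravel, sort as np_sort

arr = array([[7, 2, 9],
             [4, 1, 8]])
print(n_smallest_safe(arr, 3))   # [1 2 4]
print(n_largest_safe(arr, 2))    # [8 9]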