Beispiel #1
0
def compute_cfss():
    '''
    计算shop-shop相似关系矩阵。
    Input:
        shop_actu:用户对店铺做的动作
    Process: 
        取用户动作表示的shop向量,计算向量点积。
    Output:
        shop-shop 相似关系,cfss.kv
    '''
    # shop_actu -> shop-shop关系矩阵,并保存cfss.kv,shop\tshop:weight;
    kvg = KVEngine()
    kvg.load([full_path('shop_actu.kv')])

    # get normialized vectors
    shop_users = {}
    skeys = kvg.keymatch('S\d+_ACTU')
    for skey in skeys:
        sid = key_id(skey)
        vector = dict([(int(key), float(value))
                       for (key, value) in kvg.getd(skey).items()
                       if key and value])
        # tailor to top 20
        items = vector.items()
        items.sort(key=lambda x: x[1], reverse=True)
        items = items[:20]
        vector = dict(items)
        normalize(vector)
        shop_users[sid] = vector

    # similarity calculation
    shop_similarity = {}
    sids = shop_users.keys()
    sids.sort()
    l = len(sids)
    print "Calculating shop-shop similarity matrix, total %d..." % l
    for i in range(l):
        if i % 1000 == 0:
            print "%d" % i
            sys.stdout.flush()
        for j in range(i + 1, l):
            sim = norm_dot_product(shop_users[sids[i]], shop_users[sids[j]])
            if abs(sim) < 1e-5:
                continue
            shop_similarity.setdefault(sids[i], {})[sids[j]] = sim
            shop_similarity.setdefault(sids[j], {})[sids[i]] = sim

    # save as kvfile
    write_kv_dict(shop_similarity, 'S%s_CFSIMS', 'cfss.kv')
Beispiel #2
0
def compute_cfgg():
    '''
    计算goods-goods相似关系矩阵。
    Input:
        user_actg.kv -> goods_actu.kv:用户对店铺做的动作
    Process: 
        取用户动作表示的goods向量,计算向量点积。
    Output:
        goods-goods 相似关系,cfss.kv
    '''
    kvg = KVEngine()
    kvg.load([full_path('goods_actu.kv')])

    # get normialized vectors
    goods_users = {}
    gkeys = kvg.keymatch('G\d+_ACTU')
    for gkey in gkeys:
        gid = key_id(gkey)
        vector = dict([(int(key), float(value))
                       for (key, value) in kvg.getd(gkey).items()
                       if key and value])
        # tailor to top 20
        items = vector.items()
        items.sort(key=lambda x: x[1], reverse=True)
        items = items[:20]
        vector = dict(items)
        normalize(vector)
        goods_users[gid] = vector

    # similarity calculation
    goods_similarity = {}
    gids = goods_users.keys()
    gids.sort()
    l = len(gids)
    print "Calculating goods-goods similarity matrix, total %d..." % l
    for i in range(l):
        if i % 100 == 0:
            print "%d" % i
            sys.stdout.flush()
        for j in range(i + 1, l):
            sim = norm_dot_product(goods_users[gids[i]], goods_users[gids[j]])
            if abs(sim) < 1e-5:
                continue
            goods_similarity.setdefault(gids[i], {})[gids[j]] = sim
            goods_similarity.setdefault(gids[j], {})[gids[i]] = sim

    # save as kvfile
    write_kv_dict(goods_similarity, 'G%s_CFSIMG', 'cfgg.kv')
Beispiel #3
0
def compute_cfss():
    '''
    计算shop-shop相似关系矩阵。
    Input:
        shop_actu:用户对店铺做的动作
    Process: 
        取用户动作表示的shop向量,计算向量点积。
    Output:
        shop-shop 相似关系,cfss.kv
    '''
    # shop_actu -> shop-shop关系矩阵,并保存cfss.kv,shop\tshop:weight;
    kvg = KVEngine()
    kvg.load([full_path('shop_actu.kv')])

    # get normialized vectors
    shop_users = {}
    skeys = kvg.keymatch('S\d+_ACTU')
    for skey in skeys:
        sid = key_id(skey)
        vector = dict([(int(key), float(value)) for (key, value) in kvg.getd(skey).items() if key and value])
        # tailor to top 20
        items = vector.items()
        items.sort(key=lambda x:x[1], reverse=True)
        items = items[:20]
        vector = dict(items)
        normalize(vector)
        shop_users[sid] = vector

    # similarity calculation
    shop_similarity = {}
    sids = shop_users.keys()
    sids.sort()
    l = len(sids)
    print "Calculating shop-shop similarity matrix, total %d..." % l
    for i in range(l):
        if i % 1000 == 0:
            print "%d" % i
            sys.stdout.flush()
        for j in range(i+1, l):
            sim = norm_dot_product(shop_users[sids[i]], shop_users[sids[j]])
            if abs(sim) < 1e-5:
                continue
            shop_similarity.setdefault(sids[i], {})[sids[j]] = sim
            shop_similarity.setdefault(sids[j], {})[sids[i]] = sim

    # save as kvfile
    write_kv_dict(shop_similarity, 'S%s_CFSIMS', 'cfss.kv')
Beispiel #4
0
def compute_cfgg():
    '''
    计算goods-goods相似关系矩阵。
    Input:
        user_actg.kv -> goods_actu.kv:用户对店铺做的动作
    Process: 
        取用户动作表示的goods向量,计算向量点积。
    Output:
        goods-goods 相似关系,cfss.kv
    '''
    kvg = KVEngine()
    kvg.load([full_path('goods_actu.kv')])

    # get normialized vectors
    goods_users = {}
    gkeys = kvg.keymatch('G\d+_ACTU')
    for gkey in gkeys:
        gid = key_id(gkey)
        vector = dict([(int(key), float(value)) for (key, value) in kvg.getd(gkey).items() if key and value])
        # tailor to top 20
        items = vector.items()
        items.sort(key=lambda x:x[1], reverse=True)
        items = items[:20]
        vector = dict(items)
        normalize(vector)
        goods_users[gid] = vector

    # similarity calculation
    goods_similarity = {}
    gids = goods_users.keys()
    gids.sort()
    l = len(gids)
    print "Calculating goods-goods similarity matrix, total %d..." % l
    for i in range(l):
        if i % 100 == 0:
            print "%d" % i
            sys.stdout.flush()
        for j in range(i+1, l):
            sim = norm_dot_product(goods_users[gids[i]], goods_users[gids[j]])
            if abs(sim) < 1e-5:
                continue
            goods_similarity.setdefault(gids[i], {})[gids[j]] = sim
            goods_similarity.setdefault(gids[j], {})[gids[i]] = sim

    # save as kvfile
    write_kv_dict(goods_similarity, 'G%s_CFSIMG', 'cfgg.kv')
Beispiel #5
0
def get_test_loader(args):
    """Build a batch-size-1, unshuffled DataLoader over the test split.

    Loads "<dataset>_signals_test.npy" / "<dataset>_masks_test.npy",
    normalizes the signals, and wraps them in a TestDataset.
    """
    sig = np.load(args.dataset + "_signals_test.npy")
    msk = np.load(args.dataset + "_masks_test.npy")
    sig = normalize(sig)
    ds = TestDataset(sig, msk, args.dataset, args.resample, args.add_noise)
    return DataLoader(ds, batch_size=1, shuffle=False)
Beispiel #6
0
def get_loaders(args):
    """Build shuffled train/val DataLoaders for args.dataset.

    Loads "<dataset>_signals_train.npy" / "<dataset>_masks_train.npy",
    splits them with train_test_split, normalizes the signals, and
    returns (train_loader, val_loader).
    """
    all_signals = np.load(args.dataset + "_signals_train.npy")
    all_masks = np.load(args.dataset + "_masks_train.npy")
    tr_sig, va_sig, tr_msk, va_msk = train_test_split(all_signals, all_masks)

    tr_sig = normalize(tr_sig)
    va_sig = normalize(va_sig)

    # Both splits use the same dataset/loader configuration.
    loaders = []
    for sig, msk in ((tr_sig, tr_msk), (va_sig, va_msk)):
        ds = TrainDataset(sig, msk, args.add_noise)
        loaders.append(DataLoader(ds,
                                  batch_size=args.batch_size,
                                  shuffle=True))
    return loaders[0], loaders[1]
Beispiel #7
0
def read_input(input_filename):
    fin = open(input_filename, 'r')
    rows = {}
    cid2rids = {}

    for no, line in enumerate(fin):
        if no % 10000 == 0:
            print ' %d\r' % no,
            sys.stdout.flush()
        parts = line.strip().split()
        try:
            rid = int(parts[0])
            columns = parts[1:]
            row_vector = {}
            for col in columns:
                subparts = col.split(':')
                if len(subparts) != 2:
                    continue
                cid = int(subparts[0])
                value = float(subparts[1])
                if value <= 0.1:
                    continue
                row_vector[cid] = value
                #cid2rids.setdefault(cid, []).append(rid)
            items = row_vector.items()
            items.sort(key=lambda x: x[1], reverse=True)
            items = items[:10]
            row_vector = dict(items)
            for cid in row_vector:
                cid2rids.setdefault(cid, []).append(rid)
            normalize(row_vector)
            rows[rid] = row_vector
        except ValueError:
            continue
    fin.close()
    return rows, cid2rids
Beispiel #8
0
def read_input(input_filename):
    fin = open(input_filename, 'r')
    rows = {}
    cid2rids = {}

    for no,line in enumerate(fin):
        if no % 10000 == 0:
            print ' %d\r' % no,
            sys.stdout.flush()
        parts = line.strip().split()
        try:
            rid = int(parts[0])
            columns = parts[1:]
            row_vector = {}
            for col in columns:
                subparts = col.split(':')
                if len(subparts) != 2:
                    continue
                cid = int(subparts[0])
                value = float(subparts[1])
                if value <= 0.1:
                    continue
                row_vector[cid] = value
                #cid2rids.setdefault(cid, []).append(rid)
            items = row_vector.items()
            items.sort(key=lambda x:x[1], reverse=True)
            items = items[:10]
            row_vector = dict(items)
            for cid in row_vector:
                cid2rids.setdefault(cid, []).append(rid)
            normalize(row_vector)
            rows[rid] = row_vector
        except ValueError:
            continue
    fin.close()
    return rows, cid2rids
Beispiel #9
0
    def fit(self, x, y, w0=None, epochs=1):
        """Fit the weight vector with perceptron-style updates.

        Args:
            x: (n, d) sample matrix.
            y: length-n array of +1/-1 labels.
            w0: optional initial weight vector (copied, never mutated).
            epochs: maximum number of passes over the data.

        Returns:
            The trained, normalized weight vector (also stored in self.w).
        """
        n = x.shape[0]
        d = x.shape[1]

        # Starting point priority: explicit w0 > weights from a previous
        # fit > zero vector.
        if w0 is not None:
            w = np.copy(w0)
        elif self.w is None:
            w = np.zeros(d, dtype=float)
        else:
            w = self.w

        last_epoch = epochs
        for epoch in range(epochs):
            errors1 = 0  # misclassified +1 samples this epoch
            errors2 = 0  # misclassified -1 samples this epoch
            for i in range(n):
                v = x[i].dot(w)
                if y[i] * v < 0:  # sign disagreement -> update step
                    w -= 2 * self.learning_rate * v * x[i]
                    if y[i] == 1:
                        errors1 += 1
                    else:
                        errors2 += 1
                    # NOTE(review): the original recomputed w.dot(w0) here to
                    # feed a commented-out debug log; that crashed whenever
                    # w0 was None (the default), so the dead computation was
                    # removed.
            errors = errors1 + errors2
            if errors == 0:
                # Converged: every sample classified correctly.
                last_epoch = epoch
                break
            logger.debug("epoch: %d, errors: %d (+1=%d / -1=%d)" % (epoch, errors, errors1, errors2))
        logger.debug("last_epoch: %d" % last_epoch)
        self.w = normalize(w)
        return self.w
Beispiel #10
0
def compute_cfus():
    '''Compute the per-user recommended shop list (collaborative filtering).

    Input:
        cfss: shop-shop similarity relations
        user_favu: shops the user follows
        user_actu: shops the user has acted on
    Process:
        Start from the shops directly related to a user, collect the
        shops similar to those, then filter the candidates.
    Output:
        CF-generated per-user shop recommendation list, cfus.kv.
    '''
    kvg = KVEngine()
    kvg.load([full_path('cfss.kv')])
    kvg.load([full_path('user_favs.kv')])
    kvg.load([full_path('user_actu.kv')])
    kvg.load([full_path('shop_binfo.kv')])

    # shop_similarity: shop id -> {similar shop id: similarity weight}
    keys = kvg.keymatch('S\d+_CFSIMS')
    shop_similarity = dict([(int(key),
                             dict([(int(k), float(v))
                                   for (k, v) in kvg.getd(key).items()]))
                            for key in keys])

    # user_fav_shops: user id -> set of followed shop ids
    keys = kvg.keymatch('U\d+_FAVS')
    user_fav_shops = dict([(int(key), set([int(k) for k in kvg.getl(key)]))
                           for key in keys])

    # blocked_shops: shops whose 'block' attribute is non-zero
    keys = kvg.keymatch('S\d+_BINFO')
    blocked_shops = set()
    for key in keys:
        if kvg.getk(key, 'block') != '0':
            blocked_shops.add(key_id(key))

    # get user tags by fav shops
    # NOTE(review): the bare expression below does nothing, and the sections
    # after it are empty; this function looks like an unfinished extraction --
    # the loop below reads self.user_shops / self.user_tags etc. even though
    # this is a plain function with no self. Confirm against the original
    # class-based version before relying on it.
    shop_tags

    # get user_shops

    # shop idf

    # weigting and normalizing user_shops

    # Produce a recommendation list for every user.
    print "Recommend for each user, total %d" % len(self.user_shops)
    sys.stdout.flush()
    for no, uid in enumerate(self.user_shops):
        shop_weight = {}  # candidate shops and their accumulated weights
        shops = self.user_shops[uid]  # shops this user has acted on
        fav_shops = self.user_fav_shops.get(uid, {})  # shops this user follows
        if no % 1000 == 0:
            print "%d" % no
            sys.stdout.flush()

        # Spread each acted-on shop's weight to its similar shops.
        for sid in shops:
            if sid not in self.shop_similarity:
                continue
            simi_shops = self.shop_similarity[sid]
            for ssid in simi_shops:
                if ssid in shop_weight:
                    shop_weight[ssid] += shops[sid] * simi_shops[ssid]
                else:
                    shop_weight[ssid] = shops[sid] * simi_shops[ssid]

        # Filter the candidates in shop_weight.
        shop_weight_new = {}
        for sid in shop_weight:
            # Is shop sid suitable to recommend to user uid?
            if sid in fav_shops:
                continue  # already followed
            if sid in self.shop_info and self.shop_info[sid][2] != 0:
                continue  # 'block' attribute non-zero: shop is blocked
            if sid in self.shop_tags and uid in self.user_tags and \
                    self._tag_conflict(self.user_tags[uid], self.shop_tags[sid]):
                continue  # shop's tags conflict with the user's followed tags
            shop_weight_new[sid] = shop_weight[sid]

        if not shop_weight_new:
            continue  # every candidate was filtered out; record nothing

        # Normalize, sort by weight, keep the top entries.
        normalize(shop_weight_new)
        items = shop_weight_new.items()
        items.sort(reverse=True, key=lambda x: x[1])  # sort by weight desc

        self.user_recommend_list[uid] = items[:TOP_SHOP_NUM]  # limit n
Beispiel #11
0
def compute_cfus():
    '''Compute the per-user recommended shop list (collaborative filtering).

    Input:
        cfss: shop-shop similarity relations
        user_favu: shops the user follows
        user_actu: shops the user has acted on
    Process:
        Start from the shops directly related to a user, collect the
        shops similar to those, then filter the candidates.
    Output:
        CF-generated per-user shop recommendation list, cfus.kv.
    '''
    kvg = KVEngine()
    kvg.load([full_path('cfss.kv')])
    kvg.load([full_path('user_favs.kv')])
    kvg.load([full_path('user_actu.kv')])
    kvg.load([full_path('shop_binfo.kv')])

    # shop_similarity: shop id -> {similar shop id: similarity weight}
    keys = kvg.keymatch('S\d+_CFSIMS')
    shop_similarity = dict([(int(key), dict([(int(k), float(v)) for (k, v) in kvg.getd(key).items()])) for key in keys])

    # user_fav_shops: user id -> set of followed shop ids
    keys = kvg.keymatch('U\d+_FAVS')
    user_fav_shops = dict([(int(key), set([int(k) for k in kvg.getl(key)])) for key in keys])

    # blocked_shops: shops whose 'block' attribute is non-zero
    keys = kvg.keymatch('S\d+_BINFO')
    blocked_shops = set()
    for key in keys:
        if kvg.getk(key, 'block') != '0':
            blocked_shops.add(key_id(key))

    # get user tags by fav shops
    # NOTE(review): the bare expression below does nothing, and the sections
    # after it are empty; this function looks like an unfinished extraction --
    # the loop below reads self.user_shops / self.user_tags etc. even though
    # this is a plain function with no self. Confirm against the original
    # class-based version before relying on it.
    shop_tags

    # get user_shops

    # shop idf

    # weigting and normalizing user_shops

    # Produce a recommendation list for every user.
    print "Recommend for each user, total %d" % len(self.user_shops)
    sys.stdout.flush()
    for no, uid in enumerate(self.user_shops):
        shop_weight = {} # candidate shops and their accumulated weights
        shops = self.user_shops[uid] # shops this user has acted on
        fav_shops = self.user_fav_shops.get(uid, {}) # shops this user follows
        if no % 1000 == 0:
            print "%d" % no
            sys.stdout.flush()

        # Spread each acted-on shop's weight to its similar shops.
        for sid in shops:
            if sid not in self.shop_similarity:
                continue
            simi_shops = self.shop_similarity[sid]
            for ssid in simi_shops:
                if ssid in shop_weight:
                    shop_weight[ssid] += shops[sid] * simi_shops[ssid]
                else:
                    shop_weight[ssid] = shops[sid] * simi_shops[ssid]
        
        # Filter the candidates in shop_weight.
        shop_weight_new = {}
        for sid in shop_weight:
            # Is shop sid suitable to recommend to user uid?
            if sid in fav_shops:
                continue # already followed
            if sid in self.shop_info and self.shop_info[sid][2] != 0:
                continue # 'block' attribute non-zero: shop is blocked
            if sid in self.shop_tags and uid in self.user_tags and \
                    self._tag_conflict(self.user_tags[uid], self.shop_tags[sid]):
                continue # shop's tags conflict with the user's followed tags
            shop_weight_new[sid] = shop_weight[sid]

        if not shop_weight_new:
            continue # every candidate was filtered out; record nothing

        # Normalize, sort by weight, keep the top entries.
        normalize(shop_weight_new)
        items = shop_weight_new.items()
        items.sort(reverse=True, key=lambda x: x[1]) # sort by weight desc

        self.user_recommend_list[uid] = items[:TOP_SHOP_NUM] # limit n
Beispiel #12
0
from common.utils import normalize
from common.point import Point
from KNN.knn_classifier import *
from common.cross_validator import *

def optimal_k(chips):
    """Pick the k in [1, min(20, len(chips))) with the best CV measure.

    Runs cross_validate with a KnnClassifier for each candidate k and
    returns the k whose measure() was highest (1 if none improves on 0).
    """
    best_score = 0
    best_k = 1
    for candidate in range(1, min(20, len(chips))):
        outcome = cross_validate(KnnClassifier(candidate), chips)
        print('with k = {} : result = {}'.format(candidate, outcome.measure()))
        if outcome.measure() > best_score:
            best_score = outcome.measure()
            best_k = candidate
    return best_k

# Load the chip dataset, hold out 20% for testing, pick k by
# cross-validation on the remainder, then score the final classifier.
with open('chips.txt', 'r') as f:
    # One comma-separated point per line; exact field layout is defined
    # by Point's constructor.
    chips = [Point(*line.split(',')) for line in f.readlines()]
    chips = normalize(normalize(chips, 0), 1)  # normalize both coordinates
    # NOTE(review): shuffle is not imported explicitly here -- presumably it
    # comes from one of the star imports above; confirm.
    shuffle(chips)
    count_to_test = len(chips)//5  # 20% holdout
    test = chips[:count_to_test]
    chips = chips[count_to_test:]
    k = optimal_k(chips)
    classifier = KnnClassifier(k)
    classifier.learn(chips)
    result = calc_score(classifier, test)
    print('optimal k = {}, measure = {}'.format(k, result.measure()))
Beispiel #13
0
from common.utils import normalize
from common.point import Point
from KNN.knn_classifier import *
from common.cross_validator import *


def optimal_k(chips):
    """Pick the k in [1, min(20, len(chips))) with the best CV measure.

    Runs cross_validate with a KnnClassifier for each candidate k and
    returns the k whose measure() was highest (1 if none improves on 0).
    """
    best_score = 0
    best_k = 1
    for candidate in range(1, min(20, len(chips))):
        outcome = cross_validate(KnnClassifier(candidate), chips)
        print('with k = {} : result = {}'.format(candidate, outcome.measure()))
        if outcome.measure() > best_score:
            best_score = outcome.measure()
            best_k = candidate
    return best_k


# Load the chip dataset, hold out 20% for testing, pick k by
# cross-validation on the remainder, then score the final classifier.
with open('chips.txt', 'r') as f:
    # One comma-separated point per line; exact field layout is defined
    # by Point's constructor.
    chips = [Point(*line.split(',')) for line in f.readlines()]
    chips = normalize(normalize(chips, 0), 1)  # normalize both coordinates
    # NOTE(review): shuffle is not imported explicitly here -- presumably it
    # comes from one of the star imports above; confirm.
    shuffle(chips)
    count_to_test = len(chips) // 5  # 20% holdout
    test = chips[:count_to_test]
    chips = chips[count_to_test:]
    k = optimal_k(chips)
    classifier = KnnClassifier(k)
    classifier.learn(chips)
    result = calc_score(classifier, test)
    print('optimal k = {}, measure = {}'.format(k, result.measure()))