def global_feature():
    """Build the per-category, per-item daily sales table ``ci_sale``.

    ``ci_sale[category][item]`` is a list of ``TOTAL_DAY + 1`` counters, one
    per day, holding the number of purchases (behavior == 4) of that item.
    The per-day split lets ranking features be computed without using future
    information.  The table is cached in data/ci_sale.pkl so later runs can
    skip rescanning the raw log.
    """
    cutoffLine('-')
    print('Generate global feature')
    global ci_sale
    if os.path.exists('data/ci_sale.pkl'):
        # Cached copy exists: load it instead of re-scanning the raw log.
        with open('data/ci_sale.pkl', 'rb') as ci_sale_file:
            ci_sale = pickle.load(ci_sale_file)
    else:
        ci_sale = {}
        with open('data/nuser.csv', 'r') as u_file:
            u_reader = csv.reader(u_file)
            for line in u_reader:
                doneCount(u_reader.line_num)
                item = int(line[1])
                behavior = int(line[2])
                category = int(line[4])
                date = int(line[5])
                # `in` replaces the removed-in-Python-3 dict.has_key idiom.
                if category not in ci_sale:
                    ci_sale[category] = {}
                if behavior == 4:  # 4 == purchase
                    if item not in ci_sale[category]:
                        ci_sale[category][item] = [0] * (TOTAL_DAY + 1)
                    ci_sale[category][item][date] += 1
        with open('data/ci_sale.pkl', 'wb') as ci_sale_file:
            pickle.dump(ci_sale, ci_sale_file)
def drop_no_buy_user():
    """Filter data/nuser.csv into data/nuser_cleaned, keeping only rows whose
    user has at least one purchase (behavior == 4) anywhere in the log.

    Two passes over the log: first collect the set of buying users, then copy
    only their rows.  Prints the number of rows kept.
    """
    cutoffLine('-')
    buyed_user = set()
    print('user behavior stat')
    # Pass 1: which users ever bought anything?
    with open('data/nuser.csv', 'r') as rfile:
        reader = csv.reader(rfile)
        for line in reader:
            doneCount(reader.line_num)
            if int(line[2]) == 4:  # 4 == purchase
                buyed_user.add(int(line[0]))
    print('\ndrop...')
    # Pass 2: keep only rows belonging to buying users.
    count = 0
    with open('data/nuser.csv', 'r') as rfile, \
         open('data/nuser_cleaned', 'w') as wfile:
        reader = csv.reader(rfile)
        writer = csv.writer(wfile)
        for line in reader:
            doneCount(reader.line_num)
            if int(line[0]) in buyed_user:
                writer.writerow(line)
                count += 1
    cutoffLine('-')
    print(count)
def predict(window, model, item_subset, proportion, algo, confidence):
    """Score splited_data_<window>/set_for_prediction.csv in chunks and write
    the (user_id, item_id) pairs predicted positive to the submission CSV.

    Rows are buffered ``each_time`` at a time to bound memory.  For 'lr' and
    'svm' the features are standardized before scoring; 'lr'/'rf' threshold
    the positive-class probability at ``confidence`` while 'svm' uses the
    hard predicted label.  Pairs outside ``item_subset`` are dropped before
    writing.  Returns the final prediction-set size.
    """
    cutoffLine('-')
    print('Generate result set with confidence %f' % confidence)
    predict_set = set()

    def score_chunk(pairs, features):
        # Score one buffered chunk and collect positive (user, item) pairs.
        # (Previously duplicated inline for the full and tail chunks, with a
        # leftover debug `print y_pred` in the first copy — removed.)
        if algo == 'lr' or algo == 'svm':
            features = preprocessing.scale(features)
        if algo == 'lr' or algo == 'rf':
            for index, y in enumerate(model.predict_proba(features)):
                if y[1] > confidence:
                    predict_set.add(pairs[index])
        if algo == 'svm':
            for index, y in enumerate(model.predict(features)):
                if y == 1:
                    predict_set.add(pairs[index])

    feature_file = open('splited_data_%d/set_for_prediction.csv' % window, 'r')
    result_file = open('data/tianchi_mobile_recommendation_predict_%d_%s_%d_%s.csv' %
                       (window, algo, proportion, str(confidence)), 'w')
    f_reader = csv.reader(feature_file)
    r_writer = csv.writer(result_file)
    r_writer.writerow(['user_id', 'item_id'])
    UI = []
    X = []
    each_time = 500000
    for line in f_reader:
        doneCount(f_reader.line_num)
        row = [int(v) for v in line]
        UI.append(tuple(row[0:2]))  # (user_id, item_id) key
        X.append(row[3:])           # feature columns
        if f_reader.line_num % each_time == 0:
            score_chunk(UI, X)
            UI = []
            X = []
    if len(UI) > 0:
        # Flush the final partial chunk.
        score_chunk(UI, X)
    cutoffLine('-')
    print("Prediction set size before drop: %d" % len(predict_set))
    predict_set = dropItemsNotInSet(predict_set, item_subset)
    r_writer.writerows(predict_set)
    print("Prediction set size after drop: %d" % len(predict_set))
    feature_file.close()
    result_file.close()
    return len(predict_set)
def merge_training_set():
    """Concatenate PRE_DIR/set_1.csv .. set_<FILES-2>.csv into train_set.csv
    and split the rows by label into positive_set.csv / negative_set.csv.

    Prints per-class counts and a consistency check
    (positive + negative == total).
    """
    cutoffLine('*')
    print('Merge training set')
    start_time = time.time()
    positive_count = 0
    negative_count = 0
    total_count = 0
    total_file = open(PRE_DIR + '/' + 'train_set.csv', 'w')
    pos_file = open(PRE_DIR + '/' + 'positive_set.csv', 'w')
    neg_file = open(PRE_DIR + '/' + 'negative_set.csv', 'w')
    total_writer = csv.writer(total_file)
    pos_writer = csv.writer(pos_file)
    neg_writer = csv.writer(neg_file)
    # NOTE(review): range(1, FILES-1) deliberately skips the last window
    # files (presumably reserved for test/prediction) — confirm with the
    # file-splitting step.
    for i in range(1, FILES - 1):
        cutoffLine('-')
        print('load train set %d' % i)
        with open(PRE_DIR + '/' + 'set_%d.csv' % i) as r_file:
            reader = csv.reader(r_file)
            for line in reader:
                doneCount(reader.line_num)
                row = [int(v) for v in line]
                if row[-1] == 1:  # last column is the label
                    positive_count += 1
                    pos_writer.writerow(row)
                if row[-1] == 0:
                    negative_count += 1
                    neg_writer.writerow(row)
                total_count += 1
                total_writer.writerow(row)
    total_file.close()
    pos_file.close()
    neg_file.close()
    cutoffLine('-')
    print('Positive Example: %d' % positive_count)
    print('Negative Example: %d' % (total_count - positive_count))
    print('Total Example: %d' % total_count)
    # Consistency check: every row must be labelled exactly 0 or 1.
    print('Is right? %s' % ('Yes' if positive_count + negative_count == total_count else 'No'))
    end_time = time.time()
    duration = timekeeper(start_time, end_time)
    cutoffLine('*')
    print('It takes %s to merge training set and backup negative and positive set' % duration)
def predict(model, index):
    """Predict over splited_data/set_for_prediction.csv with ``model`` and
    write the (user_id, item_id) pairs predicted positive to
    TRAIN_SET_DIR/lr_result_<index>.csv.
    """
    cutoffLine('-')
    print('Generate result set %d' % index)
    with open('splited_data/set_for_prediction.csv', 'r') as feature_file, \
         open(TRAIN_SET_DIR + '/' + 'lr_result_%d.csv' % index, 'w') as result_file:
        f_reader = csv.reader(feature_file)
        r_writer = csv.writer(result_file)
        r_writer.writerow(['user_id', 'item_id'])
        for line in f_reader:
            doneCount(f_reader.line_num)
            row = [int(v) for v in line]
            # Columns 0-1 are the (user, item) key; the rest are features.
            if model.predict([row[2:]])[0] == 1:
                r_writer.writerow(row[0:2])
def stat():
    """Scan the user log and the item file and write dataset summary counts
    (rows, users, items, categories, geo coverage) to data/stat.txt.
    """
    cutoffLine('-')
    print('stat some information...')
    row_count = 0
    user_set = set()
    sub_item_set = set()
    all_item_set = set()
    category_set = set()
    user_geo_count = 0
    item_geo_count = 0
    with open('data/item.csv', 'r') as item_file:
        reader = csv.reader(item_file)
        for line in reader:
            doneCount(reader.line_num)
            if reader.line_num == 1:  # skip header row
                continue
            if line[1]:  # non-empty geohash column
                item_geo_count += 1
            category_set.add(line[2])
            sub_item_set.add(line[0])
    with open('data/nuser.csv', 'r') as user_file:
        reader = csv.reader(user_file)
        for line in reader:
            doneCount(reader.line_num)
            row_count += 1
            user_set.add(line[0])
            all_item_set.add(line[1])
            if line[3]:  # non-empty geohash column
                user_geo_count += 1
    # Items that both appear in the log and belong to the item subset.
    interact_item_set = all_item_set & sub_item_set
    with open('data/stat.txt', 'w') as stat_file:
        stat_file.write('%s : %s\n' % (u'Total Count', row_count))
        stat_file.write('%s : %s\n' % (u'User Count', len(user_set)))
        stat_file.write('%s : %s\n' % (u'All Item Count', len(all_item_set)))
        stat_file.write('%s : %s\n' % (u'Sub Item Count', len(sub_item_set)))
        stat_file.write('%s : %s %f\n' % (u'Interact Item Count', len(interact_item_set),
                                          float(len(interact_item_set)) / len(sub_item_set)))
        stat_file.write('%s : %s\n' % (u'Category Count', len(category_set)))
        stat_file.write('%s : %s\n' % (u'User Geo Count', user_geo_count))
        stat_file.write('%s : %s\n' % (u'Item Geo Count', item_geo_count))
def evaluate_model(model, index):
    """Offline-evaluate ``model`` on splited_data/set_test.csv.

    Rows labelled 1 form the real set; rows the model predicts as 1 form the
    predicted set.  Returns (P, R, F) from evaluate.evaluate.
    """
    cutoffLine('-')
    print('offline evaluate RF model %d' % index)
    predict_set = set()
    real_set = set()
    with open('splited_data/set_test.csv', 'r') as test_file:
        test_reader = csv.reader(test_file)
        for line in test_reader:
            # BUG FIX: the progress counter must come from the csv reader —
            # the file object has no line_num attribute, so the original
            # `doneCount(test_file.line_num)` raised AttributeError.
            doneCount(test_reader.line_num)
            row = [int(v) for v in line]
            if row[-1] == 1:  # last column is the label
                real_set.add((row[0], row[1]))
            # Columns 2..-2 are the features.
            if model.predict([row[2:-1]])[0] == 1:
                predict_set.add((row[0], row[1]))
    import evaluate
    P, R, F = evaluate.evaluate(predict_set, real_set)
    return P, R, F
def stat():
    """Scan the user log and the item file and write dataset summary counts
    (rows, users, items, categories, geo coverage) to data/stat.txt.
    """
    cutoffLine('-')
    print('stat some information...')
    row_count = 0
    user_set = set()
    sub_item_set = set()
    all_item_set = set()
    category_set = set()
    user_geo_count = 0
    item_geo_count = 0
    with open('data/item.csv', 'r') as item_file:
        reader = csv.reader(item_file)
        for line in reader:
            doneCount(reader.line_num)
            if reader.line_num == 1:  # skip header row
                continue
            if line[1]:  # non-empty geohash column
                item_geo_count += 1
            category_set.add(line[2])
            sub_item_set.add(line[0])
    with open('data/nuser.csv', 'r') as user_file:
        reader = csv.reader(user_file)
        for line in reader:
            doneCount(reader.line_num)
            row_count += 1
            user_set.add(line[0])
            all_item_set.add(line[1])
            if line[3]:  # non-empty geohash column
                user_geo_count += 1
    # Items that both appear in the log and belong to the item subset.
    interact_item_set = all_item_set & sub_item_set
    with open('data/stat.txt', 'w') as stat_file:
        stat_file.write('%s : %s\n' % (u'Total Count', row_count))
        stat_file.write('%s : %s\n' % (u'User Count', len(user_set)))
        stat_file.write('%s : %s\n' % (u'All Item Count', len(all_item_set)))
        stat_file.write('%s : %s\n' % (u'Sub Item Count', len(sub_item_set)))
        stat_file.write('%s : %s %f\n' % (u'Interact Item Count', len(interact_item_set),
                                          float(len(interact_item_set)) / len(sub_item_set)))
        stat_file.write('%s : %s\n' % (u'Category Count', len(category_set)))
        stat_file.write('%s : %s\n' % (u'User Geo Count', user_geo_count))
        stat_file.write('%s : %s\n' % (u'Item Geo Count', item_geo_count))
def evaluate_model(algo, window, model, item_subset, confidence):
    """Offline-evaluate ``model`` on splited_data_<window>/set_test.csv.

    Rows are buffered ``each_time`` at a time to bound memory.  For 'lr' and
    'svm' the features are standardized before scoring; 'lr'/'rf' threshold
    the positive-class probability at ``confidence`` while 'svm' uses the
    hard predicted label.  Both the predicted and the real set are restricted
    to ``item_subset``.  Returns (P, R, F) from evaluate.evaluate.
    """
    cutoffLine('-')
    print('offline evaluate model with confidence %f' % confidence)
    predict_set = set()
    real_set = set()

    def score_chunk(pairs, features):
        # Score one buffered chunk and collect positive (user, item) pairs.
        # (Previously duplicated inline for the full and tail chunks.)
        if algo == 'lr' or algo == 'svm':
            features = preprocessing.scale(features)
        if algo == 'lr' or algo == 'rf':
            for index, y in enumerate(model.predict_proba(features)):
                if y[1] > confidence:
                    predict_set.add(pairs[index])
        if algo == 'svm':
            for index, y in enumerate(model.predict(features)):
                if y == 1:
                    predict_set.add(pairs[index])

    test_file = open('splited_data_%d/set_test.csv' % window, 'r')
    test_reader = csv.reader(test_file)
    UI = []
    X = []
    each_time = 500000
    for line in test_reader:
        doneCount(test_reader.line_num)
        row = [int(v) for v in line]
        UI.append(tuple(row[0:2]))  # (user_id, item_id) key
        X.append(row[3:-1])         # feature columns (label excluded)
        if row[-1] == 1:            # last column is the label
            real_set.add((row[0], row[1]))
        if test_reader.line_num % each_time == 0:
            score_chunk(UI, X)
            UI = []
            X = []
    if len(UI) > 0:
        # Flush the final partial chunk.
        score_chunk(UI, X)
    predict_set = dropItemsNotInSet(predict_set, item_subset)
    real_set = dropItemsNotInSet(real_set, item_subset)
    import evaluate
    P, R, F = evaluate.evaluate(predict_set, real_set)
    test_file.close()
    return P, R, F
def train(window, proportion, algo, confidence):
    """Train (or load a cached) model for one window and sample proportion,
    evaluate it offline, and generate the submission file.

    Models are cached under data/model/ via joblib so repeated runs reuse
    them; metrics and the prediction-set size are appended to
    data/model_evaluate_record.txt.
    """
    start_time = time.time()
    cutoffLine('*')
    print('%s model training with sample proportion 1:%d...' % (algo, proportion))
    X = []
    y = []
    with open('data/training_set_%d_%d.csv' % (window, proportion), 'r') as t_file:
        t_reader = csv.reader(t_file)
        for line in t_reader:
            doneCount(t_reader.line_num)
            row = [int(v) for v in line]
            X.append(row[3:-1])  # columns 3..-2 are the features
            y.append(row[-1])    # last column is the label
    model_name = 'data/model/%s_%d_%d.model' % (algo, window, proportion)
    if os.path.exists(model_name):
        model = joblib.load(model_name)
    else:
        if algo == 'lr':
            model = LR(X, y)
        if algo == 'rf':
            model = RF(X, y)
        if algo == 'svm':
            model = SVM(X, y)
        joblib.dump(model, model_name)
    cutoffLine('-')
    print(model.classes_)
    item_subset = loadItemSubset()
    P, R, F = evaluate_model(algo, window, model, item_subset, confidence)
    predict_set_size = predict(window, model, item_subset, proportion, algo, confidence)
    with open('data/model_evaluate_record.txt', 'a') as record_file:
        record_file.write('window %d ' % window + algo + ' %d' % proportion + ' %.2f\n' % confidence)
        record_file.write('\tP: %f\n' % P)
        record_file.write('\tR: %f\n' % R)
        record_file.write('\tF1: %f\n' % F)
        record_file.write('Predict Set Size: %d\n' % predict_set_size)
        record_file.write('-' * 30 + '\n')
    cutoffLine('*')
    end_time = time.time()
    duration = timekeeper(start_time, end_time)
    # Typo fix: "I takes" -> "It takes" (matches the message in
    # merge_training_set).
    print('It takes %s to train , evaluate model and generate result' % duration)
def predict(window, model, item_subset, proportion, algo, confidence):
    """Score splited_data_<window>/set_for_prediction.csv in chunks and write
    the (user_id, item_id) pairs predicted positive to the submission CSV.

    Rows are buffered ``each_time`` at a time to bound memory.  Features are
    standardized first when ``algo == 'lr'``; the positive-class probability
    is thresholded at ``confidence``.  Pairs outside ``item_subset`` are
    dropped before writing.  Returns the final prediction-set size.
    """
    cutoffLine('-')
    print('Generate result set with confidence %f' % confidence)
    predict_set = set()

    def score_chunk(pairs, features):
        # Score one buffered chunk and collect positive (user, item) pairs.
        # (Previously duplicated inline for the full and tail chunks.)
        if algo == 'lr':
            features = preprocessing.scale(features)
        for index, y in enumerate(model.predict_proba(features)):
            if y[1] > confidence:
                predict_set.add(pairs[index])

    feature_file = open('splited_data_%d/set_for_prediction.csv' % window, 'r')
    result_file = open('data/tianchi_mobile_recommendation_predict_%d_%s_%d_%s.csv' %
                       (window, algo, proportion, str(confidence)), 'w')
    f_reader = csv.reader(feature_file)
    r_writer = csv.writer(result_file)
    r_writer.writerow(['user_id', 'item_id'])
    UI = []
    X = []
    each_time = 500000
    for line in f_reader:
        doneCount(f_reader.line_num)
        row = [int(v) for v in line]
        UI.append(tuple(row[0:2]))  # (user_id, item_id) key
        X.append(row[3:])           # feature columns
        if f_reader.line_num % each_time == 0:
            score_chunk(UI, X)
            UI = []
            X = []
    if len(UI) > 0:
        # Flush the final partial chunk.
        score_chunk(UI, X)
    cutoffLine('-')
    print("Prediction set size before drop: %d" % len(predict_set))
    predict_set = dropItemsNotInSet(predict_set, item_subset)
    r_writer.writerows(predict_set)
    print("Prediction set size after drop: %d" % len(predict_set))
    feature_file.close()
    result_file.close()
    return len(predict_set)
def cartBuy():
    """Collect cart/buy (user, item) pairs around the prediction boundary.

    Returns (cart_30, buy_31, cart_31):
      cart_30 - pairs carted (behavior 3) on day 30 after hour 15 and not
                bought later that evening
      buy_31  - pairs bought (behavior 4) on day 31
      cart_31 - pairs carted on day 31 after hour 15 and not bought later
                that evening
    """
    cart_30 = set()
    buy_31 = set()
    cart_31 = set()
    with open('data/nuser.csv', 'r') as user_file:
        reader = csv.reader(user_file)
        for line in reader:
            doneCount(reader.line_num)
            pair = (int(line[0]), int(line[1]))  # (user_id, item_id)
            behavior = int(line[2])
            day = int(line[5])
            hour = int(line[6])
            if day == 30 and hour > 15:
                if behavior == 3:
                    cart_30.add(pair)
                # BUG FIX: the membership test used the raw string tuple
                # (line[0], line[1]) against a set of int tuples, so bought
                # pairs were never removed from the cart sets.
                if behavior == 4 and pair in cart_30:
                    cart_30.remove(pair)
            if day == 31 and hour > 15:
                if behavior == 3:
                    cart_31.add(pair)
                if behavior == 4 and pair in cart_31:
                    cart_31.remove(pair)
            if day == 31 and behavior == 4:
                buy_31.add(pair)
    return cart_30, buy_31, cart_31
def predict(model, item_subset):
    """Predict over splited_data/set_for_prediction.csv with ``model`` and
    write the (user_id, item_id) pairs predicted positive — restricted to
    ``item_subset`` — to data/prediction_lr.csv.
    """
    cutoffLine('-')
    print('Generate result set')
    predict_set = set()
    with open('splited_data/set_for_prediction.csv', 'r') as feature_file, \
         open('data/prediction_lr.csv', 'w') as result_file:
        f_reader = csv.reader(feature_file)
        r_writer = csv.writer(result_file)
        r_writer.writerow(['user_id', 'item_id'])
        for line in f_reader:
            doneCount(f_reader.line_num)
            row = [int(v) for v in line]
            # Columns 0-1 are the (user, item) key; the rest are features.
            if model.predict([row[2:]])[0] == 1:
                predict_set.add((row[0], row[1]))
        cutoffLine('-')
        print("Prediction set size before drop: %d" % len(predict_set))
        predict_set = dropItemsNotInSet(predict_set, item_subset)
        r_writer.writerows(predict_set)
        print("Prediction set size after drop: %d" % len(predict_set))
def lineCount():
    """Write per-file row counts for the window set files (1..FILES-1, plus
    for_prediction.csv as the last entry) to PRE_DIR/stat.csv.
    """
    with open(PRE_DIR + '/stat.csv', 'w') as stat_file:
        writer = csv.writer(stat_file)
        for i in range(1, FILES + 1):
            print('\n' + '-' * 50)
            if i == FILES:
                file_name = 'for_prediction.csv'
            else:
                file_name = '%d.csv' % i
            file_path = PRE_DIR + '/' + file_name
            print('processing %s' % file_name)
            # BUG FIX: the counter started at 1 and was incremented once per
            # row, overstating every file's row count by one; start at 0.
            count = 0
            with open(file_path, 'r') as rfile:
                reader = csv.reader(rfile)
                for line in reader:
                    doneCount(reader.line_num)
                    count += 1
            writer.writerow([file_name, count])