def is_sanshoku_doujun(groups):
    sequences = get_sequences(groups)
    if len(sequences) < 3:
        return False
    suit_counts = get_suit_counts_groups(sequences)
    # If three or more sequences fall in a single suit, the remaining
    # sequences cannot cover three different suits.
    if 3 in suit_counts.values() or 4 in suit_counts.values():
        return False
    heads = [sequence.tiles[0] for sequence in sequences]
    # Sanshoku doujun: the same numbered sequence must appear in three
    # different suits. The `value`/`suit` tile attributes used here are an
    # assumption about the tile API.
    return any(
        len({head.suit for head in heads if head.value == value}) >= 3
        for value in {head.value for head in heads}
    )
def is_ryanpeikou(groups):
    """
    Note: Chitoitsu is implicitly excluded since it is checked as individual
    tiles instead of groups; see is_chitoi().
    """
    sequences = get_sequences(groups)
    if len(sequences) < 4:
        return False
    return sum(sequences.count(sequence) == 2 for sequence in set(sequences)) == 2
def is_ittsu(groups):
    sequences = get_sequences(groups)
    if len(sequences) < 3:
        return False
    suit_counts = get_suit_counts_groups(sequences)
    if not (3 in suit_counts.values() or 4 in suit_counts.values()):
        return False
    suit = [suit for suit, count in suit_counts.items() if count >= 3][0]
    return (Sequence(suit(1)) in groups
            and Sequence(suit(4)) in groups
            and Sequence(suit(7)) in groups)
# -*- coding: UTF-8 -*-
# Author: zhang
import utils
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten, Dense, GlobalAveragePooling1D

sequences, data = utils.read_corpus("data/data.tsv")
w2v_model = utils.load_w2v("data/word2vec.model")
word2id = utils.word2id(w2v_model)
X_data = utils.get_sequences(word2id, sequences)

# Truncate long sequences and pad short ones to a fixed length
maxlen = 20
X_pad = pad_sequences(X_data, maxlen=maxlen)

# Get the labels
Y = data.sentiment.values

# Split the dataset
X_train, X_test, Y_train, Y_test = train_test_split(
    X_pad, Y, test_size=0.2, random_state=42)

# Build the classification model
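# The model-building step itself is not shown above; the following is a
# minimal sketch of how the imported layers could be wired together for a
# binary sentiment classifier. The embedding dimension, hidden width, label
# encoding, and training settings are assumptions, not values taken from the
# original script, and word2id is assumed to map words to indices >= 1 with
# 0 reserved for padding.
embedding_dim = 100  # assumed embedding dimension
model = Sequential([
    Embedding(input_dim=len(word2id) + 1, output_dim=embedding_dim),
    GlobalAveragePooling1D(),
    Dense(64, activation="relu"),
    Dense(1, activation="sigmoid"),  # assumes a binary sentiment label
])
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.fit(X_train, Y_train, epochs=10, batch_size=32,
          validation_data=(X_test, Y_test))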
def is_iipeikou(groups):
    sequences = get_sequences(groups)
    # Iipeikou: some sequence appears twice, but the hand is not ryanpeikou.
    return (not is_ryanpeikou(groups)
            and any(sequences.count(sequence) == 2 for sequence in sequences))