Code example #1
0
import argparse
import tensorflow as tf
from flyai.dataset import Dataset

from model import Model

# Data-access helper (FlyAI platform scaffolding).
from path import MODEL_PATH

dataset = Dataset()
dataset.get_all_processor_data()
model = Model(dataset)

# CLI hyperparameters: training epochs and batch size.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=10, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=32, type=int, help="batch size")
args = parser.parse_args()
'''
使用tensorflow实现自己的算法

'''
# TF1 graph-mode placeholders for one input batch.
# NOTE(review): assumes 200x200 RGB images and integer class labels —
# confirm against the FlyAI processor for this competition.
x = tf.placeholder(tf.float32, shape=[None, 200, 200, 3], name='input_x')
y = tf.placeholder(tf.int64, shape=[None], name='input_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')


# Initialize a weight variable.
def weight_variable(shape, name):
    """Return a trainable tf.Variable drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
Code example #2
0
model_cnn.add(MaxPooling1D(1))

model_cnn.add(Flatten())
model_cnn.add(Dense(hidden_dim, activation='relu'))
model_cnn.add(Dropout(0.5))
model_cnn.add(Dense(numclass, activation='softmax'))

model_cnn.summary()
model_cnn.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=learning_rate),
                  metrics=['acc'])

# Learning-rate schedule level and best-score trackers
# (accuracy is maximized, loss is minimized).
lr_level = 2
best_score_by_acc = 0.
best_score_by_loss = 999.
x_train, y_train, x_val, y_val = dataset.get_all_processor_data()
# x_val, y_val =dataset.get_all_validation_data()
for step in range(args.EPOCHS):
    cur_step = str(step + 1) + "/" + str(args.EPOCHS)
    print('\n步骤' + cur_step)

    # Draw one batch from each per-class dataset slice, collecting the
    # train/val pieces so they can be concatenated into a balanced batch.
    # NOTE(review): `dataset_slice` and `numclass` are defined outside
    # this excerpt — presumably one Dataset per class.
    x_3, y_3, x_4, y_4 = [], [], [], []
    for iters in range(numclass):
        xx_tmp_train, yy_tmp_train, xx_tmp_val, yy_tmp_val = dataset_slice[
            iters].next_batch()

        # Merge the per-class train batches (3 classes).
        x_3.append(xx_tmp_train)
        y_3.append(yy_tmp_train)
        x_4.append(xx_tmp_val)
        y_4.append(yy_tmp_val)
Code example #3
0
# Parse CLI hyperparameters (the parser is built earlier, outside this excerpt).
args = parser.parse_args()


def eval(preds_prob, y_test):
    """Binarize probabilities at 0.5 and return the fraction of correct labels.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged
    for caller compatibility.
    """
    n_samples = preds_prob.shape[0]
    predicted = np.where(preds_prob >= 0.5, 1.0, 0.0)
    return (predicted == y_test).sum() / n_samples


# 训练并评估模型
data = Dataset(epochs=args.EPOCHS, batch=args.BATCH,)
model = Model(data)

x, y, x_test, y_test = data.get_all_processor_data()
#
# validateNum = 30
# x_train = x[0:x.shape[0]-validateNum,:]
# y_train = y[0:y.shape[0]-validateNum]
# x_test = x[-validateNum:,:]
# y_test = y[-validateNum:]

x_train = x
y_train = y

print("the length of train data: %d" % data.get_train_length())
print("the length of x_train: %d" % x_train.shape[0])
print("the length of x_test: %d" % x_test.shape[0])
# the length of train data: 162
# the length of x_train: 162
Code example #4
0
项目的超参
'''
# CLI hyperparameters: training epochs and batch size.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=10,
                    type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=32, type=int, help="batch size")
args = parser.parse_args()

'''
flyai库中的提供的数据处理方法
传入整个数据训练多少轮,每批次批大小
'''
dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
model = Model(dataset)

# Wrap the processed splits in PyTorch-style dataset objects.
train_x, train_y, val_x, val_y = dataset.get_all_processor_data()
train_dataset = FlyAIDataSet(train_x, train_y)
val_dataset = FlyAIDataSet(val_x, val_y)


def collate_fn(data):
    """Collate (sequence, label) pairs into a padded batch.

    Sorts the batch by sequence length (longest first), drops any
    zero-length sequences from the tail, pads the survivors to equal
    length, and returns (padded_inputs, lengths, labels).

    NOTE(review): sorts ``data`` in place, mutating the caller's list —
    kept to preserve the original behavior.
    """
    # Longest sequences first; empty ones sink to the tail.
    data.sort(key=lambda pair: len(pair[0]), reverse=True)
    seqs, targets = zip(*data)

    # Lengths of the non-empty sequences; its size is the number to keep.
    lengths = [len(seq) for seq in seqs if len(seq) != 0]
    kept = len(lengths)

    tensors = [torch.Tensor(seq) for seq in seqs[:kept]]
    padded = rnn_utils.pad_sequence(tensors, batch_first=True)
    return padded, lengths, torch.LongTensor(targets[:kept])


train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True,
Code example #5
0
File: main.py  Project: yunwuyue/flyai_contest
# CLI hyperparameters: training epochs and batch size.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=1, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=64, type=int, help="batch size")
args = parser.parse_args()
'''
flyai库中的提供的数据处理方法
传入整个数据训练多少轮,每批次批大小
'''
dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)

# Vocabulary size comes from the text processor.
vocab_size = Processor().getWordsCount()

# region prepare data
allDataLength = dataset.get_train_length()
print('length of all dev data: %d' % allDataLength)
x, y, x_, y_ = dataset.get_all_processor_data()

# trainLen = (int)(95*allDataLength/100)
# x_train = x[0:trainLen]
# y_train = y[0:trainLen]
# x_val = x[trainLen:]
# y_val = y[trainLen:]

# Use the processor's own train/validation split as-is
# (the manual 95/5 split above is disabled).
x_train = x
y_train = y
x_val = x_
y_val = y_
# endregion

myModel = Model(dataset)