Example #1
parser.add_argument("-e",
                    "--EPOCHS",
                    default=200,
                    type=int,
                    help="train epochs")
parser.add_argument("-b", "--BATCH", default=64, type=int, help="batch size")
args = parser.parse_args()
'''
Data-processing helper provided by the flyai library.
Pass in how many epochs to train over the whole dataset and the size of each batch.
'''
dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
model = flyai_model(dataset)
print('dataset.get_train_length()', dataset.get_train_length())
print('dataset.get_validation_length()', dataset.get_validation_length())
dataset_slice = wangyi.getDatasetListByClassfy(classify_count=3)
x_train_slice, y_train_slice, x_val_slice, y_val_slice = [], [], [], []
for i in range(3):  # one sliced dataset per class, not per epoch
    x_1, y_1, x_2, y_2 = dataset_slice[i].get_all_processor_data()
    x_train_slice.append(x_1)
    y_train_slice.append(y_1)
    x_val_slice.append(x_2)
    y_val_slice.append(y_2)

# Hyperparameters
vocab_size = 20655  # total vocabulary size
embedding_dim = 64  # embedding layer size
hidden_dim = 1024  # size of the dense layer
max_seq_len = 34  # maximum sentence length
num_filters = 256  # number of convolution filters
kernel_size = 5  # convolution kernel size
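
# A minimal sketch (added, not from the original) of the text-CNN these
# hyperparameters suggest, assuming Keras; everything beyond the names defined
# above (layer order, activations, the 3-class softmax matching classify_count)
# is an assumption.
from tensorflow.keras import layers, models

def build_text_cnn(num_classes=3):  # hypothetical helper; 3 matches classify_count above
    inp = layers.Input(shape=(max_seq_len,))               # token-id sequences
    x = layers.Embedding(vocab_size, embedding_dim)(inp)   # 20655 ids -> 64 dims
    x = layers.Conv1D(num_filters, kernel_size, activation='relu')(x)
    x = layers.GlobalMaxPooling1D()(x)                     # one value per filter
    x = layers.Dense(hidden_dim, activation='relu')(x)     # 1024-unit dense layer
    out = layers.Dense(num_classes, activation='softmax')(x)
    return models.Model(inp, out)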
Example #2
val_batch_size = {0: 33, 1: 17, 2: 16, 3: 34}
train_epoch = args.EPOCHS
history_train = 0
history_test = 0
best_score_by_acc = 0.
best_score_by_loss = 999.
lr_level = 0
# list of the per-class batch sizes for the training set
train_batch_List = [100] * num_classes
'''
Data-processing helper provided by the flyai library.
Pass in how many epochs to train over the whole dataset and the size of each batch.
'''
dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
model = Model(dataset)
wangyi.ReadFileNames()
dataset_wangyi = wangyi.DatasetByWangyi(num_classes)
dataset_wangyi.set_Batch_Size(train_batch_List, val_batch_size)
myhistory = wangyi.historyByWangyi()
'''
Implement your own network architecture.
'''
time_0 = clock()  # time.clock() was removed in Python 3.8; time.perf_counter() is the modern replacement
# build the final model

model_cnn = Net(num_classes=num_classes).get_Model()
# model_cnn = keras_model(inputs=Inp, outputs=predictions)

# print an overview of the model's layers and parameters
model_cnn.summary()
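
# A training-loop sketch (added; not in the original snippet), assuming flyai's
# Dataset exposes get_step()/next_train_batch()/next_validation_batch() as in
# flyai's sample code; the compile settings are assumptions.
model_cnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
for step in range(dataset.get_step()):
    x_batch, y_batch = dataset.next_train_batch()        # one pre-processed batch
    loss, acc = model_cnn.train_on_batch(x_batch, y_batch)
    if step % 50 == 0:
        x_val, y_val = dataset.next_validation_batch()
        val_loss, val_acc = model_cnn.test_on_batch(x_val, y_val)
        print(step, loss, acc, val_loss, val_acc)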
Example #3
val_batch_size = {  # entries for classes 0-2 are cut off in this snippet
    3: 40,
    4: 26,
    5: 39,
    6: 27,
    7: 16,
    8: 13,
    9: 6,
}

# list of the per-class batch sizes for the training set
train_batch_List = [50] * num_classes

# wx+b; the lowest loss level each class is allowed to reach, e.g. class 9 allows a loss of 1.2
train_allow_loss = [-0.0, -0.1, -0.1, -0.1, -0.8, -0.1, -0.8, -0.8, -0.8, -1.2]
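
# How a per-class allowance like train_allow_loss is applied is not shown in
# the snippet; this is a hypothetical illustration only: keep training a class
# while its mean loss is still above the negated allowance.
def classes_still_training(per_class_loss, allow=None):
    allow = allow if allow is not None else train_allow_loss
    # e.g. allow[9] == -1.2 means class 9 is done once its loss drops below 1.2
    return [c for c, loss_c in enumerate(per_class_loss) if loss_c > -allow[c]]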

myhistory = wangyi.historyByWangyi()
'''
Data-processing helper provided by the flyai library.
Pass in how many epochs to train over the whole dataset and the size of each batch.
'''

dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
model = Model(dataset)
dataset_wangyi = wangyi.DatasetByWangyi(num_classes)
dataset_wangyi.set_Batch_Size(train_batch_List, val_batch_size)
'''
Implement your own network architecture.
'''
time_0 = clock()  # time.clock() was removed in Python 3.8
# build the final model
Inp = Input((img_size[0], img_size[1], 3))
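
# The snippet cuts off after the Input layer; a typical continuation (a guess,
# not the original network) would stack a small backbone and a softmax head,
# then wrap them with keras.Model as the commented-out line in Example #2 suggests.
from tensorflow.keras.layers import Conv2D, Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model as keras_model

x = Conv2D(32, 3, activation='relu')(Inp)
x = GlobalAveragePooling2D()(x)
predictions = Dense(num_classes, activation='softmax')(x)
model_cnn = keras_model(inputs=Inp, outputs=predictions)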
Example #4
    weights_path = 'imagenet'
'''
2019-07-26
Fetch the data values; is something wrong with the train set?? Read the labels.
'''
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=10, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=8, type=int, help="batch size")
args = parser.parse_args()

'''
Data-processing helper provided by the flyai library.
Pass in how many epochs to train over the whole dataset and the size of each batch.
'''
dataset2 = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
dataset = wangyi.DatasetExtendToSize(False, train_size=1773, val_size=572, classify_count=10)
# dataset = wangyi.DatasetExtendToSize(True, train_size=40, val_size=40, classify_count=10)
model = Model(dataset)
'''
dataset.get_train_length() : 5866
dataset.get_all_validation_data(): 1956

'''
print('dataset.get_train_length()', dataset.get_train_length())
print('dataset.get_validation_length()', dataset.get_validation_length())
x_train, y_train, x_val, y_val = dataset.get_all_processor_data()

'''
Implement your own network architecture.
'''
num_classes = 10
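
# Illustrative sketch (added; not the wangyi implementation) of what extending
# a dataset to a fixed size per class usually means: resample every class,
# with replacement when it is under-represented. Reading train_size/val_size
# as per-class targets is an assumption.
import numpy as np

def extend_to_size(x, y_labels, size, classify_count=10):
    xs, ys = [], []
    for c in range(classify_count):
        idx = np.flatnonzero(y_labels == c)                  # rows of class c
        picked = np.random.choice(idx, size=size, replace=len(idx) < size)
        xs.append(x[picked])
        ys.append(y_labels[picked])
    return np.concatenate(xs), np.concatenate(ys)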
Example #5
val_batch_size = {
    0: 120,
    1: 48,
    2: 58,
    3: 40,
    4: 26,
    5: 38,
    6: 26,
    7: 16,
    8: 12,
    9: 6,
}

# list of the per-class batch sizes for the training set
train_batch_List = [16] * num_classes

myhistory = wangyi.historyByWangyi()
'''
Data-processing helper provided by the flyai library.
Pass in how many epochs to train over the whole dataset and the size of each batch.
'''

dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
# dataset = wangyi.DatasetExtendToSize(False, train_size=1773, val_size=572, classify_count=num_classes)
# dataset = wangyi.DatasetExtendToSize(True, train_size=40, val_size=40, classify_count=num_classes)
model = Model(dataset)
dataset_wangyi = wangyi.DatasetByWangyi(num_classes)
dataset_wangyi.set_Batch_Size(train_batch_List, val_batch_size)
'''
dataset.get_train_length() : 5866
dataset.get_all_validation_data(): 1956
predict datas :  1956