Example #1
def main(k,batch_size,epochs):
    kf = KFold(n_splits=k)
    kf.get_n_splits(fault_0_files)
    print('kf', kf)
    train_0_file_names,test_0_file_names = [],[]
    train_1_file_names,test_1_file_names = [],[]

    for train_index, test_index in kf.split(fault_0_files):
        train_0, test_0 = list(np.asarray(fault_0_files)[train_index]), list(np.asarray(fault_0_files)[test_index])
        train_0_file_names.append(train_0)
        test_0_file_names.append(test_0)
    kf.get_n_splits(fault_1_files)
    for train_index, test_index in kf.split(fault_1_files):
        train_1, test_1 = list(np.asarray(fault_1_files)[train_index]), list(np.asarray(fault_1_files)[test_index])
        train_1_file_names.append(train_1)
        test_1_file_names.append(test_1)

    # train_0/test_0 and train_1/test_1 still hold the split from the last fold,
    # so the counts below describe a single fold's train/test sizes.
    train_numbers = train_0 + train_1
    test_numbers = test_0 + test_1
    train_0 = []   # reused below to collect the per-fold training histories
    all_acc = []
    print("Number of train_files:", len(train_numbers))
    print("Number of test_files:", len(test_numbers))


    for i in range(k):

        filepath = 'test' + str(i) + '.h5'
        train = train_1_file_names[i]+train_0_file_names[i]
        test = test_1_file_names[i]+test_0_file_names[i]

        train_dataset = tf.data.Dataset.from_generator(csv_data, args=[train, batch_size],
                                                       output_shapes=((None, 8192, 1), (None,)),
                                                       output_types=(tf.float64, tf.float64))

        test_dataset = tf.data.Dataset.from_generator(csv_data, args=[test, batch_size],
                                                      output_shapes=((None, 8192, 1), (None,)),
                                                      output_types=(tf.float64, tf.float64))

        model = create_model()
        model_loss = tf.keras.losses.SparseCategoricalCrossentropy()
        model_optimizer = tf.keras.optimizers.SGD()

        model.compile(loss=model_loss, optimizer=model_optimizer, metrics=["accuracy"])

        steps_per_epoch = int(np.ceil(len(train) / batch_size))
        steps = int(np.ceil(len(test) / batch_size))
        print("steps_per_epoch = ", steps_per_epoch)
        print("steps = ", steps)
        # model.load_weights(filepath)
        # checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, save_best_only=True)
        train_log = model.fit(train_dataset, validation_data=test_dataset,
                              steps_per_epoch=steps_per_epoch, validation_steps=steps, epochs=epochs)

        model.save(filepath)
        test_loss, test_accuracy = model.evaluate(test_dataset, steps=steps)
        print("Test loss: ", test_loss)
        print("Test accuracy:", test_accuracy)

        train_0.append(train_log)
        all_acc.append(test_accuracy)

    print("all the accuracy:{}, test accuracy:{:.4f}.".format(all_acc, sum(all_acc) / len(all_acc)))

    plot_learning_data(train_0)
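Example #1 depends on a module-level generator csv_data(file_names, batch_size) that feeds tf.data.Dataset.from_generator. A minimal sketch of such a generator, assuming each CSV file holds one 8192-sample signal and that the class can be read from the file name (both assumptions, not part of the source):

import numpy as np

def csv_data(file_names, batch_size):
    # Hypothetical generator: yields (signals, labels) batches matching the
    # output_shapes ((None, 8192, 1), (None,)) declared in from_generator above.
    signals, labels = [], []
    for name in file_names:
        if isinstance(name, bytes):          # from_generator passes args as bytes
            name = name.decode()
        signals.append(np.loadtxt(name, delimiter=',').reshape(8192, 1))
        labels.append(1.0 if 'fault_1' in name else 0.0)   # assumed naming scheme
        if len(signals) == int(batch_size):
            yield np.stack(signals), np.asarray(labels)
            signals, labels = [], []
    if signals:
        yield np.stack(signals), np.asarray(labels)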
Example #2
opt = parser.parse_args()

# FIXME: for data-flow economy
if 'try' in opt.name:
    opt.roc_fig = False

print("name: ", opt.name)
tb = SummaryWriter(os.path.join('runs/', opt.name))

from dataset import create_dataLoader
# TODO: test_loader
train_loader, validate_loader = create_dataLoader(opt)

from net import create_model
net = create_model(opt.length)
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr)

# FIXME: previously used CrossEntropyLoss
# loss_func = nn.CrossEntropyLoss()
loss_func = nn.BCELoss()
print("train start")
for epoch in range(opt.epoch):
    print("epoch =", epoch)

    losses, acc = [], 0
    # totally 42 mini-batches
    for step, (pair, label) in enumerate(train_loader):
        # FIXME: half of train data
        # a_chain = torch.from_numpy(np.random.randint(10, size=x[:,:,:opt.length,:].shape)).float()
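The excerpt ends before the forward/backward pass. Because the loss is nn.BCELoss, the network output must already be a probability in [0, 1] (e.g. the last layer ends in a Sigmoid). A sketch of how the remainder of the step could look, assuming net(pair) returns such probabilities and label is a float tensor of matching shape (this continuation is not part of the original snippet):

        pair, label = pair.cuda(), label.float().cuda()
        out = net(pair)                       # probabilities in [0, 1]
        loss = loss_func(out, label)          # BCELoss needs float targets

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.append(loss.item())
        acc += ((out > 0.5).float() == label).float().mean().item()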
Example #3
import torch
import data_management as d
import net
import numpy as np

state_dict = torch.load("model9000", map_location="cuda:3")
device = torch.device("cuda:3")
model = net.create_model()
model.load_state_dict(state_dict)
model.to(device)
data = d.load_pickle("data999999")

model.eval()
image = np.array(data["image"], dtype="f4")
print(image.shape)
image = image.transpose((2, 0, 1))  # HWC -> CHW
image = torch.from_numpy(image)

image = image.to(device)
image = image.reshape((1, 3, 224, 224))  # add the batch dimension
print(image.shape)
out = model(image)
print(f"output is: {out}")
print(f'labels were: {data["camera_pos"]}, {data["camera_angle"]}')
label = torch.from_numpy(
    np.array(data["camera_pos"] + data["camera_angle"], dtype="f4").reshape(
        (1, 6)))
label = label.to(device)
print(out)
print(label)
err, dist = net.accuracy(out, label)
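net.accuracy is not shown in this example; it is only unpacked into an error value and a distance. One plausible, purely hypothetical implementation for (1, 6) pose tensors (first three values camera_pos, last three camera_angle):

import torch

def accuracy(out, label):
    # Hypothetical helper, not the module's actual implementation.
    err = torch.abs(out - label).mean()                          # mean absolute error over all 6 values
    dist = torch.linalg.norm(out[:, :3] - label[:, :3], dim=1)   # Euclidean distance between positions
    return err, dist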
Example #4
def train_and_predict():

    pretrained_weights_path = args.weights
    structure_mode = args.structure

    print_pretty('Creating and compiling model...')

    network_input_shp = (config.NETWORK_INPUT_H, config.NETWORK_INPUT_W,
                         config.NETWORK_INPUT_C)
    output_shp = (config.NETWORK_INPUT_H, config.NETWORK_INPUT_W, 1)

    train_model, infer_model = create_model(input_shape=network_input_shp,
                                            lr=1e-4)

    if structure_mode:
        print_summary(train_model)
        plot_model(infer_model,
                   rankdir='LB',
                   show_shapes=True,
                   show_layer_names=True,
                   to_file='train_model.png')
        return

    if pretrained_weights_path:
        train_model.load_weights(args.weights)

    print_pretty('Loading and preprocessing train data...')

    train_imgs, train_masks, valid_imgs, valid_masks = robofest_data_get_samples_preprocessed(
        network_input_shp, output_shp)

    if len(np.unique(valid_masks)) > 2:
        print(
            'Valid: Preprocessing created mask with more than two binary values'
        )
        exit(1)

    if len(np.unique(train_masks)) > 2:
        print(
            'Train: Preprocessing created mask with more than two binary values'
        )
        exit(1)

    print_pretty('Setup data generator...')

    imgs_valid = valid_imgs
    imgs_mask_valid = valid_masks

    imgs_train = train_imgs
    imgs_mask_train = train_masks

    print('Train:', imgs_train.shape)
    print('Valid:', imgs_valid.shape)

    data_gen_args = dict(rotation_range=5,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.1,
                         horizontal_flip=True,
                         fill_mode='constant',
                         cval=0)

    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    seed = 1
    batch_size = 8
    image_datagen.fit(imgs_train, augment=True, seed=seed)
    mask_datagen.fit(imgs_mask_train, augment=True, seed=seed)

    print_pretty('Flowing data...')

    image_generator = image_datagen.flow(imgs_train,
                                         batch_size=batch_size,
                                         seed=seed)
    mask_generator = mask_datagen.flow(imgs_mask_train,
                                       batch_size=batch_size,
                                       seed=seed)

    print_pretty('Zipping generators...')

    train_generator = zip(image_generator, mask_generator)

    print_pretty('Fitting model...')

    checkpoint_vloss = CustomModelCheckpoint(
        model_to_save=infer_model,
        filepath=config.NET_BASENAME +
        '_ep{epoch:03d}-iou{iou_metrics:.3f}-val_iou{val_iou_metrics:.3f}' +
        '.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)

    train_model.fit_generator(train_generator,
                              steps_per_epoch=20,
                              epochs=10000,
                              verbose=1,
                              validation_data=(imgs_valid, imgs_mask_valid),
                              callbacks=[
                                  ModelCheckpoint('chk/weights_best.h5',
                                                  monitor='val_iou_metrics',
                                                  mode='max',
                                                  save_best_only=True,
                                                  save_weights_only=False,
                                                  verbose=1)
                              ])
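Both checkpoint callbacks monitor iou_metrics / val_iou_metrics, so create_model presumably compiles the model with a custom IoU metric under that name. A minimal sketch of such a metric for binary masks in [0, 1] (an assumption, not the project's actual metric):

from tensorflow.keras import backend as K

def iou_metrics(y_true, y_pred, smooth=1.0):
    # Soft intersection-over-union for binary segmentation masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    union = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)

Inside create_model it would then be passed to compile(..., metrics=[iou_metrics]) so that Keras reports it as iou_metrics and val_iou_metrics.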
Example #5
import os
import sys

sys.path.append(os.path.abspath('.'))
import torch

from transform import create_transform
from dataset import create_dataset
from net import create_model

if __name__ == '__main__':
    transform = create_transform('transform1')()
    dataset = create_dataset('dataset1')(transform)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=24,
                                              shuffle=False,
                                              num_workers=24)

    model = create_model('u-net')(3, 1)
    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=[i for i in range(8)])

    optimizer = torch.optim.Adam(model.parameters(), 0.001)

    # log = SummaryWriter('./logs/loss_1epoch')
    print("totally %d steps" % len(data_loader))
    for step, (batch_x, batch_y) in enumerate(data_loader):
        batch_x = batch_x.cuda()
        batch_y = batch_y.cuda()
        # print(batch_x.shape, batch_y.shape)

        output = model(batch_x).squeeze(dim=1)
        # print(output.shape)
        loss = torch.nn.MSELoss()(output, batch_y)
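The snippet stops after the loss computation, so the Adam optimizer defined above is never stepped in the lines shown. The usual continuation of such a loop, sketched here as an assumption rather than part of the original code:

        optimizer.zero_grad()   # clear gradients from the previous step
        loss.backward()         # backpropagate the MSE loss
        optimizer.step()        # update the U-Net parameters

        if step % 10 == 0:
            print("step %d, loss %.6f" % (step, loss.item()))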