Example #1
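# Keras image-classification training script: loads a JSON config, builds
# batch generators, and trains with early stopping, LR scheduling, model
# checkpointing, and Neptune experiment tracking.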
def main():
    config_path = args.conf
    initial_weights = args.weights

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

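    # Build the train/validation instance lists and the class list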
    train_set, valid_set, classes = data.create_training_instances(config['train']['train_folder'],
                                                                   None,
                                                                   config['train']['cache_name'],
                                                                   config['model']['labels'])

    num_classes = len(classes)
    print('Read {} classes: {}'.format(num_classes, classes))

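    # Training batches are shuffled; the validation generator runs in
    # inference mode (infer=True) with the same normalization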
    train_generator = gen.BatchGenerator(
        instances=train_set,
        labels=classes,
        batch_size=config['train']['batch_size'],
        input_sz=config['model']['infer_shape'],
        shuffle=True,
        norm=data.normalize
    )

    valid_generator = gen.BatchGenerator(
        instances=valid_set,
        labels=classes,
        batch_size=config['train']['batch_size'],
        input_sz=config['model']['infer_shape'],
        norm=data.normalize,
        infer=True
    )

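    # Stop training once val_loss has not improved for 20 epochs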
    early_stop = EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=20,
        mode='min',
        verbose=1
    )

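    # Halve the learning rate after 5 epochs without val_loss improvement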
    reduce_on_plateau = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=5,
        verbose=1,
        mode='min',
        min_delta=0.01,
        cooldown=0,
        min_lr=0
    )

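    # Keras expects channels-last input: (height, width, channels)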
    net_input_shape = (config['model']['infer_shape'][0],
                       config['model']['infer_shape'][1],
                       3)

    train_model = models.create(
        base_name=config['model']['base'],
        num_classes=num_classes,
        input_shape=net_input_shape)

    if initial_weights:
        train_model.load_weights(initial_weights)

    train_model.summary()  # summary() prints itself and returns None
    # plot_model(train_model, to_file='images/MobileNetv2.png', show_shapes=True)

    optimizer = Adam(lr=config['train']['learning_rate'], clipnorm=0.001)

    train_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    checkpoint_name = utils.get_checkpoint_name(config)
    utils.makedirs_4_file(checkpoint_name)

    static_chk_name = utils.get_static_checkpoint_name(config)
    utils.makedirs_4_file(static_chk_name)

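    # Keep only the best weights (lowest val_loss), checked every epoch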
    checkpoint_vloss = cbs.CustomModelCheckpoint(
        model_to_save=train_model,
        filepath=checkpoint_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1
    )
    
    neptune_mon = cbs.NeptuneMonitor(
        monitoring=['loss', 'val_loss', 'accuracy', 'val_accuracy'],
        neptune=neptune
    )

    chk_static = ModelCheckpoint(
        filepath=static_chk_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1
    )

    callbacks = [early_stop, reduce_on_plateau, checkpoint_vloss, neptune_mon, chk_static]

    ### NEPTUNE ###
    sources_to_upload = [
        'models.py',
        'config.json'
    ]

    params = {
        'infer_size': "H{}xW{}".format(*config['model']['infer_shape']),
        'classes': config['model']['labels'],
    }

    neptune.create_experiment(
        name=utils.get_neptune_name(config),
        upload_stdout=False,
        upload_source_files=sources_to_upload,
        params=params
    )
    ### NEPTUNE ###
    
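    # steps_per_epoch/validation_steps repeat each pass over the data
    # train_times/valid_times times per epoch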
    hist = train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator) * config['train']['train_times'],

        validation_data=valid_generator,
        validation_steps=len(valid_generator) * config['valid']['valid_times'],

        epochs=config['train']['nb_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=multiprocessing.cpu_count(),
        max_queue_size=100
    )
    
    neptune.send_artifact(static_chk_name)
    neptune.send_artifact('config.json')
Example #2
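    # Split file paths and labels into train/validation/test sets, wrap them
    # in batch generators, and train a CNN with fit_generator.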
    files_path, labels = get_path_labels(data_path)

    for i in range(len(labels)):
        print('{} : {}'.format(files_path[i], labels[i]))

    # Split the paths and labels into train, validation, and test sets
    # (80/20, then 80/20 again: 64% train, 16% validation, 20% test)
    X_train, X_test, y_train, y_test = train_test_split(files_path,
                                                        labels,
                                                        train_size=0.8)
    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      train_size=0.8)

    # Create the batch generators (note: test_batch_generator actually wraps
    # the validation split X_val/y_val, despite its name)
    train_batch_generator = generator.BatchGenerator(X_train, y_train,
                                                     batch_size, sample_rate,
                                                     sample_length, threshold)
    test_batch_generator = generator.BatchGenerator(X_val, y_val, batch_size,
                                                    sample_rate, sample_length,
                                                    threshold)

    # Build the model
    dnn = CNN()
    model = dnn.build_model()

    # Train
    fit_history = model.fit_generator(
        train_batch_generator,
        epochs=epoch,
        verbose=1,
        steps_per_epoch=train_batch_generator.batches_per_epoch,
        # The source snippet is truncated here; a plausible completion
        # (an assumption) validates on the held-out split each epoch:
        validation_data=test_batch_generator,
        validation_steps=test_batch_generator.batches_per_epoch)
Example #3
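# Keras segmentation training script: image/mask instance lists, a combined
# loss with IoU/Dice/pixelwise cross-entropy metrics, and checkpoints named
# after the epoch and loss values.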
def main():
    config_path = args.conf
    initial_weights = args.weights

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    train_set, valid_set = data.create_training_instances(config['train']['train_folder'],
                                                          config['train']['train_masks'],
                                                          config['valid']['valid_folder'],
                                                          config['valid']['valid_masks'],
                                                          config['train']['cache_name'])

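    # Training generator: shuffled, jitter-augmented batches; the validation
    # generator below runs in inference mode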
    train_generator = gen.BatchGenerator(
        instances           = train_set,
        batch_size          = config['train']['batch_size'],
        input_sz            = config['model']['input_shape'],
        shuffle             = True,
        jitter              = 0.3,
        norm                = data.normalize,
        downsample          = 2
    )

    valid_generator = gen.BatchGenerator(
        instances           = valid_set,
        batch_size          = config['train']['batch_size'],
        input_sz            = config['model']['input_shape'],
        norm                = data.normalize,
        infer               = True,
        downsample          = 2
    )

    early_stop = EarlyStopping(
        monitor     = 'val_loss',
        min_delta   = 0,
        patience    = 100,
        mode        = 'min',
        verbose     = 1
    )

    reduce_on_plateau = ReduceLROnPlateau(
        monitor  = 'loss',
        factor   = 0.5,
        patience = 5,
        verbose  = 1,
        mode     = 'min',
        min_delta= 0.01,
        cooldown = 0,
        min_lr   = 0
    )

    # Dimensions swapped: the network input is [H x W x C]
    net_input_shape = (config['model']['input_shape'][1],
                       config['model']['input_shape'][0],
                       3)

    train_model = models.create(
        base            = config['model']['base'],
        input_shape     = net_input_shape)

    if initial_weights:
        train_model.load_weights(initial_weights)

    model_render_file = 'images/{}.png'.format(config['model']['base'])
    if not os.path.isdir(os.path.dirname(model_render_file)):
        os.makedirs(os.path.dirname(model_render_file))

    plot_model(train_model, to_file=model_render_file, show_shapes=True)
    # print_summary(train_model)

    optimizer = Adam(lr=config['train']['learning_rate'], clipnorm=0.001)
    # optimizer = SGD(lr=config['train']['learning_rate'], clipnorm=0.001)

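    # Combined segmentation loss; IoU, Dice, and pixelwise cross-entropy
    # are tracked as metrics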
    train_model.compile(loss=models.result_loss, optimizer=optimizer,
                        metrics=[models.iou_loss, models.dice_coef_loss, models.pixelwise_crossentropy])

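    # Embed the epoch number and loss values into each checkpoint filename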
    chk_name = config['train']['saved_weights_name']
    chk_root, chk_ext = os.path.splitext(chk_name)
    checkpoint_vloss = CustomModelCheckpoint(
        model_to_save   = train_model,
        filepath        = chk_root+'_ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}'+chk_ext,
        monitor         = 'val_loss',
        verbose         = 1,
        save_best_only  = True,
        mode            = 'min',
        period          = 1
    )

    # Create the checkpoint directory only when the path includes one
    # (os.makedirs('') raises for a bare filename)
    chk_dir = os.path.dirname(chk_name)
    if chk_dir and not os.path.isdir(chk_dir):
        os.makedirs(chk_dir)

    callbacks = [early_stop, reduce_on_plateau, checkpoint_vloss]

    hist = train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator) * config['train']['train_times'],

        validation_data=valid_generator,
        validation_steps=len(valid_generator) * config['valid']['valid_times'],

        epochs=config['train']['nb_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=os.cpu_count(),
        max_queue_size=100
    )

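    # Persist the training history so the run can be analyzed later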
    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)