Code Example #1
def _try_fit():
    architecture = 'alexnet'
    model_info = create_model_info(architecture)

    data_pools = load_pickle(
        '/home/long/Desktop/Hela_split_30_2018-12-04.pickle')
    pool = data_pools['data']['0']
    print(pool['data_name'])
    print(len(pool['train_files']))
    print_split_report('train', pool['train_report'])
    num_classes = len(pool['class_names'])

    model, num_base_layers = declare_model(num_classes, architecture,
                                           model_info)
    model = set_model_trainable(model, num_base_layers, -1)

    batch_size = 16
    nb_epoch = 10

    is_augmented = False
    (X_train, Y_train), (X_val, Y_val), (X_test, Y_test) = get_np_data(
        pool, "/mnt/6B7855B538947C4E/Dataset/JPEG_data/Hela_JPEG", model_info,
        is_augmented)
    optimizer = optimizers.SGD(lr=0.01, decay=1e-6)

    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=5,
                                   verbose=0,
                                   mode='auto')
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=['accuracy'])  # track accuracy in addition to the loss

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              shuffle=True,
              verbose=1,
              validation_data=(X_val, Y_val),
              callbacks=[early_stopping])

    test_score = model.evaluate(X_test, Y_test, batch_size=32)
    test_score = {'loss': test_score[0], 'acc': test_score[1]}
    print('test score: ', test_score)
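The helpers used above (create_model_info, declare_model, set_model_trainable, get_np_data, load_pickle) belong to the project and are not shown here, and fit() is called with the Keras 1.x argument name nb_epoch. Below is a minimal, self-contained sketch of the same compile / EarlyStopping / fit / evaluate flow, assuming a recent tf.keras install and random stand-in data rather than the Hela images:

import numpy as np
import tensorflow as tf

num_classes = 10
x_train = np.random.rand(256, 32).astype('float32')
y_train = tf.keras.utils.to_categorical(np.random.randint(num_classes, size=256), num_classes)
x_val = np.random.rand(64, 32).astype('float32')
y_val = tf.keras.utils.to_categorical(np.random.randint(num_classes, size=64), num_classes)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(32,)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(num_classes, activation='softmax'),
])

# stop when the validation loss stops improving, as in the example above
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0.001,
                                                  patience=5)
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=16, epochs=10, shuffle=True,
          validation_data=(x_val, y_val),
          callbacks=[early_stopping])
loss, acc = model.evaluate(x_val, y_val, batch_size=32)
print({'loss': loss, 'acc': acc})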
Code Example #2
def main(_):
    '''
    prepare data
    '''
    data_pools = load_pickle('/home/long/Desktop/Hela_split_30_2018-07-19.pickle')
    pool = data_pools['data']['0']
    print(pool['data_name'])
    print(len(pool['train_files']))
    print_split_report('train', pool['train_report'])
    num_classes = len(pool['class_names'])

    # '''
    # Test train
    # '''
    #
    # train_score, val_score, test_score = train(pool, '/mnt/6B7855B538947C4E/Dataset/JPEG_data/Hela_JPEG', 'resnet_v2',
    #       {'lr': 0.1, 'lr_decay': 0, 'momentum': 0,  'nesterov': False}, save_model_path='/home/long/keras_resnet', train_batch=4, test_batch=8)
    #

    '''
    Test restore and eval
    '''

    hyper_params = {'lr': 0.2, 'lr_decay': 0, 'momentum': 0, 'nesterov': False}
    model_info = create_model_info('resnet_v2')

    # model, _ = restore_model('/home/ndlong95/finetune/saved_models/Hela_split_30_2018-07-19_0_resnet_v2', hyper_params)
    model, _ = resnet_model(model_info, num_classes)
    model.load_weights('/home/long/Desktop/Hela_split_30_2018-07-19_0_resnet_v2.h5', by_name=True)

    train_generator, validation_generator, test_generator = get_generators(
        model_info, pool, '/mnt/6B7855B538947C4E/Dataset/JPEG_data/Hela_JPEG',
        8, 16)
    train_len = len(pool['train_files'])
    validation_len = len(pool['val_files'])
    test_len = len(pool['test_files'])
    train_score = model.evaluate_generator(train_generator, train_len // 8 + 1)
    train_score = {'loss': train_score[0], 'acc': train_score[1]}
    print('train_score: ', train_score)

    val_score = model.evaluate_generator(validation_generator, validation_len // 16 + 1)
    val_score = {'loss': val_score[0], 'acc': val_score[1]}
    print('val_score: ', val_score)

    test_score = model.evaluate_generator(test_generator, test_len // 16 + 1)
    test_score = {'loss': test_score[0], 'acc': test_score[1]}
    print('test score: ', test_score)

    export_pb(model, '/home/long/keras_inception_resnet')
    view_graphdef('/home/long/keras_inception_resnet/t.pb', '/tmp/')
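restore_model, resnet_model, get_generators, export_pb and view_graphdef are project helpers that are not shown here. The key Keras mechanism in this example is load_weights(..., by_name=True), which copies saved weights into whichever layers of a freshly built model share names with the checkpoint. A minimal sketch of that idea, assuming tf.keras 2.x (where load_weights still accepts by_name) and hypothetical layer names:

import tensorflow as tf

def build_classifier(num_classes):
    # the same layer names are used each time, so weights can be matched by name
    return tf.keras.Sequential([
        tf.keras.Input(shape=(8,)),
        tf.keras.layers.Dense(16, activation='relu', name='backbone_dense'),
        tf.keras.layers.Dense(num_classes, activation='softmax', name='head'),
    ])

source = build_classifier(num_classes=5)
source.save_weights('/tmp/demo_weights.h5')   # stand-in for the fine-tuned checkpoint

target = build_classifier(num_classes=5)      # freshly declared architecture
target.load_weights('/tmp/demo_weights.h5', by_name=True)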
Code Example #3
def train_pools(_):
    print(FLAGS)
    pools = load_pickle(FLAGS.pool_dir)
    start_pool_idx = int(FLAGS.start_pool)
    end_pool_idx = int(FLAGS.end_pool)

    now = datetime.datetime.now()
    time = current_time(now)

    if not os.path.exists(FLAGS.save_model_dir):
        os.makedirs(FLAGS.save_model_dir)
    if not os.path.exists(FLAGS.result_dir):
        os.makedirs(FLAGS.result_dir)
    trained_models_info = []

    for idx in range(start_pool_idx, end_pool_idx + 1):
        pool = pools['data'][str(idx)]
        print('pool idx: ', idx)
        print('****************')
        print_split_report('train', pool['train_report'])
        print_split_report('val', pool['val_report'])
        print_split_report('test', pool['test_report'])
        print('-----------------')

        name = pools['pool_name'] + '_' + str(idx)
        log_path = os.path.join(FLAGS.log_dir, name, FLAGS.architecture)
        save_model_path = os.path.join(FLAGS.save_model_dir,
                                       name + '_' + str(FLAGS.architecture))

        results = train_single_pool(pool, FLAGS.image_dir, log_path,
                                    FLAGS.architecture, save_model_path,
                                    FLAGS.train_batch, FLAGS.test_batch,
                                    FLAGS.is_augmented)
        model_info = {
            'hyper_param_setting': sgd_hyper_params,
            'pool_idx': str(idx),
            'pool_name': pool['data_name'],
            'time': time,
            'architecture': FLAGS.architecture,
            'train_batch': FLAGS.train_batch,
            'test_batch': FLAGS.test_batch,
            'log_path': log_path,
            'save_model_path': save_model_path,
            'results': results,
            'final_results': results['final_result']
        }
        trained_models_info.append(model_info)

    # save result to .pickle
    trained_models_info_pickle_name = pools['pool_name'] + '_' + str(
        start_pool_idx) + '_' + str(end_pool_idx)
    dump_pickle(
        trained_models_info,
        os.path.join(FLAGS.result_dir, trained_models_info_pickle_name))
    return trained_models_info
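load_pickle and dump_pickle are small project helpers that are not shown in these examples; given how they are called here (an object and a path), a plausible minimal version built on the standard library would be:

import pickle

def load_pickle(path):
    # read a previously pickled object back from disk
    with open(path, 'rb') as f:
        return pickle.load(f)

def dump_pickle(obj, path):
    # persist an arbitrary Python object (e.g. the trained_models_info list) to disk
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)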
Code Example #4
def testing():
    data_pool = load_pickle(
        '/home/long/Desktop/Hela_split_30_2018-07-19.pickle')
    print(data_pool['data']['29']['data_name'])
    print(len(data_pool['data']['29']['train_files']))
    print(data_pool['data']['29']['train_files'])

    split = data_pool['data']['29']
    print_split_report('train', split['train_report'])
    print_split_report('val', split['val_report'])
    print_split_report('test', split['test_report'])
    print(
        get_features(split['train_files'], split['train_label_names'],
                     '/mnt/6B7855B538947C4E/Dataset/features/off_the_shelf'))
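The split pickle is only ever read in these examples, never constructed, so its exact layout is not shown. Based purely on the keys accessed above and in the other examples, its shape is roughly the following (all values are illustrative placeholders):

example_pools = {
    'pool_name': 'example_pool',
    'data': {
        '0': {
            'data_name': 'example_pool_0',
            'class_names': ['class_a', 'class_b'],
            'train_files': ['a_0001.jpg', 'b_0001.jpg'],
            'train_labels': [0, 1],
            'train_label_names': ['class_a', 'class_b'],
            'val_files': [], 'val_labels': [],
            'test_files': [], 'test_labels': [],
            'train_report': {}, 'val_report': {}, 'test_report': {},
        },
        # ... one entry per split index, e.g. '1' through '29'
    },
}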
Code Example #5
def train(pool,
          image_dir,
          architecture,
          hyper_params,
          is_augmented,
          log_path=None,
          save_model_path=None,
          restore_model_path=None,
          train_batch=16,
          test_batch=32,
          num_last_layer_to_finetune=-1):
    model_info = create_model_info(architecture)
    print(pool['data_name'])
    print(len(pool['train_files']))
    print_split_report('train', pool['train_report'])
    num_classes = len(pool['class_names'])
    train_len = len(pool['train_files'])
    validation_len = len(pool['val_files'])
    test_len = len(pool['test_files'])
    print("train, val, test len: ", train_len, validation_len, test_len)

    # train the model from scratch, or resume from a previously saved checkpoint
    if restore_model_path is None:
        print("training from scratch")
        model, num_base_layers = declare_model(num_classes, architecture,
                                               model_info)
        model = set_model_trainable(model, num_base_layers,
                                    num_last_layer_to_finetune)
    else:
        print("restoring model to train")
        model, num_layers = restore_model(restore_model_path, hyper_params)
        model = set_model_trainable(model, num_layers,
                                    num_last_layer_to_finetune)

    print('training the model with hyper params: ', hyper_params)
    optimizer = optimizers.SGD(lr=hyper_params['lr'],
                               decay=hyper_params['lr_decay'],
                               momentum=hyper_params['momentum'],
                               nesterov=hyper_params['nesterov'])  # Inception
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=5,
                                   verbose=0,
                                   mode='auto')
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=['accuracy'])  # track accuracy in addition to the loss

    train_generator = ThreadSafeGenerator(model_info, image_dir,
                                          pool['train_files'],
                                          pool['train_labels'], train_batch,
                                          num_classes, is_augmented)

    validation_generator = ThreadSafeGenerator(model_info, image_dir,
                                               pool['val_files'],
                                               pool['val_labels'], test_batch,
                                               num_classes, False)

    test_generator = ThreadSafeGenerator(model_info, image_dir,
                                         pool['test_files'],
                                         pool['test_labels'], test_batch,
                                         num_classes, False)

    model.fit_generator(train_generator,
                        nb_epoch=50,
                        samples_per_epoch=train_len // train_batch + 1,
                        validation_data=validation_generator,
                        nb_val_samples=validation_len // test_batch + 1,
                        callbacks=[early_stopping])

    train_score = model.evaluate_generator(train_generator,
                                           train_len // train_batch + 1)
    train_score = {'loss': train_score[0], 'acc': train_score[1]}
    print('train_score: ', train_score)

    val_score = model.evaluate_generator(validation_generator,
                                         validation_len // test_batch + 1)
    val_score = {'loss': val_score[0], 'acc': val_score[1]}
    print('val_score: ', val_score)

    test_score = model.evaluate_generator(test_generator,
                                          test_len // test_batch + 1)
    test_score = {'loss': test_score[0], 'acc': test_score[1]}
    print('test score: ', test_score)

    if save_model_path is not None:
        save_model(model, save_model_path)

    return train_score, val_score, test_score
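ThreadSafeGenerator is part of the project and its implementation is not shown here. A common way such a wrapper is written, and one plausible reading of its purpose, is to serialize access to an underlying batch generator with a lock so that fit_generator can consume it from several worker threads without interleaved reads:

import threading

class ThreadSafeIterator:
    """Wrap an iterator/generator so that next() calls are serialized across threads."""

    def __init__(self, iterable):
        self._it = iter(iterable)
        self._lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # only one thread at a time may advance the underlying generator
        with self._lock:
            return next(self._it)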