Example No. 1
import os

from neon.util.argparser import NeonArgparser
from neon.data import ImageLoader


def test_loader():
    # write_batches, trainimgs, testimgs and run are helpers defined elsewhere
    # in this test module
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_dir = os.path.join(args.data_dir, 'macrotrain')
    test_dir = os.path.join(args.data_dir, 'macrotest')
    write_batches(args, train_dir, trainimgs, 0)
    write_batches(args, test_dir, testimgs, 1)
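    # do_transforms=False disables random cropping and flipping, keeping the
    # loader output deterministic for the test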
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        repo_dir=train_dir)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       repo_dir=test_dir)
    err = run(args, train, test)
    return err
Example No. 2
import os

from neon.util.argparser import NeonArgparser
from neon.data import ImageLoader


def test_loader():
    print('Testing image loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_archive = os.path.join(args.data_dir, traindir + '-ingested')
    test_archive = os.path.join(args.data_dir, testdir + '-ingested')
    write_batches(args, train_archive, traindir, 0)
    write_batches(args, test_archive, testdir, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        scale_range=0, repo_dir=train_archive)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       scale_range=0, repo_dir=test_archive)
    err = run(args, train, test)
    return err
Example No. 3
import os

from neon.util.argparser import NeonArgparser
from neon.data import ImageLoader


def test_loader():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_dir = os.path.join(args.data_dir, 'macrotrain')
    test_dir = os.path.join(args.data_dir, 'macrotest')
    write_batches(args, train_dir, trainimgs, 0)
    write_batches(args, test_dir, testimgs, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        repo_dir=train_dir)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       repo_dir=test_dir)
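    # older neon releases required the batch provider to be started and
    # stopped explicitly around use of the loader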
    train.init_batch_provider()
    test.init_batch_provider()
    err = run(args, train, test)
    test.exit_batch_provider()
    train.exit_batch_provider()
    return err
Example No. 4
from neon.util.argparser import NeonArgparser
from neon.initializers import Xavier, Constant
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImageLoader

parser = NeonArgparser(__doc__)
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
parser.add_argument('--test_only', action='store_true',
                    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir, inner_size=224,
                       dtype=args.datatype, subset_pct=args.subset_pct)
test = ImageLoader(set_name='validation', scale_range=(256, 256),
                   do_transforms=False, **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
relu = Rectlin()

common = dict(activation=relu, init=initx, bias=bias)
commonp1 = dict(activation=relu, init=initx, bias=bias, padding=1)
commonp2 = dict(activation=relu, init=initx, bias=bias, padding=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
pool3s2p1 = dict(fshape=3, padding=1, strides=2, op='max')


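# kvals packs the filter counts for the four parallel branches of a
# GoogLeNet-style Inception module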
def inception(kvals):
    (p1, p2, p3, p4) = kvals
Example No. 5
parser.add_argument(
    '--test_only',
    action='store_true',
    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

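# inference-only mode needs trained weights, passed via the standard
# --model_file flag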
if args.test_only:
    if args.model_file is None:
        raise ValueError('To test model, trained weights need to be provided')

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       subset_pct=args.subset_pct)
train = ImageLoader(set_name='train',
                    scale_range=(256, 256),
                    shuffle=True,
                    **img_set_options)
test = ImageLoader(set_name='validation',
                   scale_range=(256, 256),
                   do_transforms=False,
                   **img_set_options)

init_g1 = Gaussian(scale=0.01)
init_g2 = Gaussian(scale=0.005)

relu = Rectlin()

layers = []
layers.append(
    Conv((11, 11, 96),
         padding=0,
Example No. 6
be = gen_backend(backend=args.backend,
                 rng_seed=1,
                 batch_size=batch_size,
                 device_id=0,
                 deterministic_update=True)
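# a fixed rng_seed plus deterministic_update makes repeated runs produce
# identical results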

data_dir = '/usr/local/data/I1K/macrobatches'

assert os.path.isdir(data_dir), 'Data dir is missing'

# subset pct is set to make sure that every epoch has the same mb count
img_set_options = dict(repo_dir=data_dir,
                       inner_size=224,
                       dtype=np.float32,
                       subset_pct=0.09990891117239205)
train = ImageLoader(set_name='train', scale_range=(256, 256), shuffle=False,
                    do_transforms=False, **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
relu = Rectlin()

common = dict(activation=relu, init=initx, bias=bias)
commonp1 = dict(activation=relu, init=initx, bias=bias, padding=1)
commonp2 = dict(activation=relu, init=initx, bias=bias, padding=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
pool3s2p1 = dict(fshape=3, padding=1, strides=2, op='max')


def inception(kvals, name):
    (p1, p2, p3, p4) = kvals
Example No. 7
    help='first run without this and then run with so resume from saved file')
args = parser.parse_args()

if args.backend == 'mgpu':
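    # scale the global minibatch with the device count (8 GPUs here)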
    batch_size = 32 * 8  # for some reason bs 128 does not work with bias layers
else:
    batch_size = 32

# subset pct is set to make sure that every epoch has the same mb count
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       dtype=np.float32,
                       subset_pct=0.09990891117239205)
train = ImageLoader(set_name='train',
                    scale_range=(256, 256),
                    shuffle=False,
                    do_transforms=False,
                    **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
relu = Rectlin()

common = dict(activation=relu, init=initx, bias=bias)
commonp1 = dict(activation=relu, init=initx, bias=bias, padding=1)
commonp2 = dict(activation=relu, init=initx, bias=bias, padding=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
pool3s2p1 = dict(fshape=3, padding=1, strides=2, op='max')

Example No. 8
import numpy as np

from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser
from neon.data import ImageLoader
from neon.initializers import Gaussian, Constant
from neon.layers import Conv
from neon.transforms import Rectlin

parser = NeonArgparser(__doc__)
args = parser.parse_args()
args.backend = 'gpu'
args.datatype = np.float16
be = gen_backend(backend=args.backend,
                 batch_size=128,
                 rng_seed=123,
                 device_id=args.device_id,
                 datatype=args.datatype)

options = dict(repo_dir='../dataset/Cifar10',
               inner_size=32,
               subset_pct=100,
               dtype=args.datatype)
train_set = ImageLoader(set_name='train',
                        scale_range=32,
                        shuffle=True,
                        **options)
valid_set = ImageLoader(set_name='validation',
                        scale_range=32,
                        do_transforms=False,
                        **options)

# define model
nfilters = [96, 192, 256]
init_w = Gaussian(scale=0.01)
relu = Rectlin()
common_params = dict(init=init_w, activation=relu)
convp1 = dict(padding=1, **common_params)
layers = [
    Conv((3, 3, nfilters[0]), bias=Constant(0.1), **convp1),
    Conv((3, 3, nfilters[0]), bias=Constant(0.1), **convp1),
Example No. 9
from neon.util.argparser import NeonArgparser
from neon.util.persist import load_obj
from neon.transforms import Misclassification
from neon.models import Model
from neon.data import ImageLoader

# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# setup data provider
test_set = ImageLoader(set_name='validation',
                       repo_dir=args.data_dir,
                       inner_size=32,
                       scale_range=40,
                       do_transforms=False)
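# reconstruct the trained network from the serialized model file supplied on
# the command line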
model = Model(load_obj(args.model_file))
print('Accuracy: %.1f%% (Top-1)' % (
    (1.0 - model.eval(test_set, metric=Misclassification())) * 100))
Example No. 10
                    default=9,
                    help='depth of each stage (network depth will be 9n+2)')
parser.add_argument('--subset_pct',
                    type=float,
                    default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

# setup data provider
imgset_options = dict(inner_size=32,
                      scale_range=40,
                      aspect_ratio=110,
                      repo_dir=args.data_dir,
                      subset_pct=args.subset_pct)
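# aspect_ratio=110 permits up to 10% aspect-ratio distortion; scale_range=40
# scales the short side to 40 pixels before the 32x32 crop is taken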
train = ImageLoader(set_name='train',
                    shuffle=True,
                    do_transforms=True,
                    **imgset_options)
test = ImageLoader(set_name='validation',
                   shuffle=False,
                   do_transforms=False,
                   **imgset_options)
tune_set = ImageLoader(set_name='train',
                       shuffle=False,
                       do_transforms=False,
                       inner_size=32,
                       scale_range=40,
                       repo_dir=args.data_dir,
                       subset_pct=20)


def conv_params(fsize, nfm, stride=1, relu=True, batch_norm=True):
Example No. 11
# hyperparameters
batch_size = 64

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 datatype=args.datatype)

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       dtype=args.datatype,
                       subset_pct=100)
train = ImageLoader(set_name='train', **img_set_options)
test = ImageLoader(set_name='validation',
                   do_transforms=False,
                   **img_set_options)

relu = Rectlin()

init_uni = GlorotUniform()

# The parameters below are straight out of [Springenberg2014]
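# (Springenberg et al., "Striving for Simplicity: The All Convolutional Net",
# arXiv:1412.6806)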
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                  schedule=Schedule(step_config=[10],
                                                    change=0.1),
                                  momentum_coef=0.9,
                                  wdecay=.0005)
Example No. 12
    stages = (3, 8, 36, 3)
elif args.depth in (4, 98, 138):
    stages = (3, 7, 35, 3)
else:
    neon_logger.display("Bad depth parameter")
    sys.exit()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       subset_pct=args.subset_pct)
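# passing scale_range as a dict switches to area-based cropping: each training
# sample uses between 8% and 100% of the source image area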
train = ImageLoader(set_name='train',
                    scale_range={
                        'min_area_pct': 8,
                        'max_area_pct': 100
                    },
                    aspect_ratio=133,
                    contrast_range=(60, 140),
                    shuffle=True,
                    **img_set_options)
test = ImageLoader(set_name='validation',
                   scale_range=256,
                   do_transforms=False,
                   **img_set_options)
tune = ImageLoader(set_name='train',
                   scale_range=256,
                   do_transforms=False,
                   repo_dir=args.data_dir,
                   inner_size=224,
                   subset_pct=10)
Example No. 13
                    type=float,
                    default=100,
                    help='subset of training dataset to use (percentage)')
parser.add_argument(
    '--test_only',
    action='store_true',
    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       dtype=args.datatype,
                       subset_pct=args.subset_pct)
test = ImageLoader(set_name='validation',
                   scale_range=(256, 256),
                   do_transforms=False,
                   **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
relu = Rectlin()

common = dict(activation=relu, init=initx, bias=bias)
commonp1 = dict(activation=relu, init=initx, bias=bias, padding=1)
commonp2 = dict(activation=relu, init=initx, bias=bias, padding=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
pool3s2p1 = dict(fshape=3, padding=1, strides=2, op='max')


def inception(kvals):
Example No. 14
parser = NeonArgparser(__doc__)
parser.add_argument('--network',
                    default='plain',
                    choices=['plain', 'resnet'],
                    help='type of network to create (plain or resnet)')
parser.add_argument('--depth',
                    type=int,
                    default=9,
                    help='depth of each stage (network depth will be 6n+2)')
args = parser.parse_args()

# setup data provider
imgset_options = dict(inner_size=112, scale_range=140, repo_dir=args.data_dir)
train = ImageLoader(set_name='train',
                    shuffle=True,
                    do_transforms=True,
                    inner_size=112,
                    scale_range=(128, 240),
                    repo_dir=args.data_dir)
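# the short side of each training sample is scaled to a random value between
# 128 and 240 pixels before the 112x112 crop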

test = ImageLoader(set_name='validation',
                   shuffle=False,
                   do_transforms=False,
                   inner_size=112,
                   scale_range=0,
                   repo_dir=args.data_dir)


def conv_params(fsize, nfm, stride=1, relu=True):
    return dict(fshape=(fsize, fsize, nfm),
                strides=stride,
                padding=(1 if fsize > 1 else 0),
Example No. 15
from neon.util.argparser import NeonArgparser
from neon.initializers import Kaiming
from neon.transforms import Rectlin
from neon.models import Model
from neon.data import ImageLoader
from neon.callbacks.callbacks import Callbacks

# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
parser.add_argument('--depth',
                    type=int,
                    default=9,
                    help='depth of each stage (network depth will be 6n+2)')
args = parser.parse_args()

# setup data provider
imgset_options = dict(inner_size=32, scale_range=40, repo_dir=args.data_dir)
train = ImageLoader(set_name='train',
                    shuffle=True,
                    do_transforms=True,
                    **imgset_options)
test = ImageLoader(set_name='validation',
                   shuffle=False,
                   do_transforms=False,
                   **imgset_options)


def conv_params(fsize, nfm, stride=1, relu=True):
    return dict(fshape=(fsize, fsize, nfm),
                strides=stride,
                padding=(1 if fsize > 1 else 0),
                activation=(Rectlin() if relu else None),
                init=Kaiming(local=True),
                batch_norm=True)
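
# e.g. Conv(**conv_params(3, 64)) yields a 3x3 convolution with 64 feature
# maps, Kaiming initialization, batch norm, and ReLU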
Example No. 16
from neon.util.persist import save_obj

scales = [112, 128, 160, 240]

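# evaluate the same trained weights at several input scales; the global
# average pooling layer (fshape='all') makes the network size-agnostic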
for scale in scales:
    print(scale)

    layers = []
    layers += [Conv(**conv_params(7, 32, 2))]
    for nfm, stride in zip(nfms, strides):
        layers.append(module_factory(nfm, stride))
    layers.append(Pooling(7, op='avg'))

    layers.append(
        Conv(fshape=(1, 1, 100), init=Kaiming(local=True), batch_norm=True))
    layers.append(Pooling(fshape='all', op='avg'))
    layers.append(Activation(Softmax()))

    model = Model(layers=layers)
    test = ImageLoader(set_name='validation',
                       shuffle=False,
                       do_transforms=False,
                       inner_size=scale,
                       scale_range=scale,
                       repo_dir=args.data_dir)

    model.load_params("/home/users/hunter/bigfeat_dropout.pkl")

    softmaxes = model.get_outputs(test)
    save_obj(softmaxes, "bigfeat_dropout_SM_{}.pkl".format(scale))